revert: [multiple changes]

2013-01-03  Richard Biener  <rguenther@suse.de>

        Revert
        2013-01-03  Richard Biener  <rguenther@suse.de>

        PR tree-optimization/55857
        * tree-vect-stmts.c (vectorizable_load): Do not setup
        re-alignment for invariant loads.

        2013-01-02  Richard Biener  <rguenther@suse.de>

        * tree-vect-stmts.c (vectorizable_load): When vectorizing an
        invariant load do not generate a vector load from the scalar
        location.

From-SVN: r194856
Richard Biener <rguenther@suse.de>, 2013-01-03 15:57:15 +00:00
commit 59fd17e3a6, parent fc883b8407
2 changed files with 27 additions and 15 deletions
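For background, here is a minimal, hypothetical sketch (not the PR 55857 testcase) of the kind of loop the reverted changes concern: the load of `c' below is loop-invariant, i.e. its data reference does not step with the loop counter, so the vectorizer can broadcast the scalar value instead of emitting a vector load, and such loads interact with the explicit re-alignment scheme touched in the diff below.

/* Hypothetical reduced example; names are illustrative only.  The read
   of `c' does not advance with `i', so the vectorizer classifies it as
   an invariant load (zero-step data reference) rather than a strided
   vector load.  */
extern float c;

void
scale (float *__restrict out, const float *__restrict in, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = in[i] * c;
}

Compiling such a testcase with -O3 -fdump-tree-vect-details and inspecting the vect dump is one way to see whether the load of `c' is treated as invariant.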

gcc/ChangeLog

@@ -1,3 +1,18 @@
+2013-01-03  Richard Biener  <rguenther@suse.de>
+
+        Revert
+        2013-01-03  Richard Biener  <rguenther@suse.de>
+
+        PR tree-optimization/55857
+        * tree-vect-stmts.c (vectorizable_load): Do not setup
+        re-alignment for invariant loads.
+
+        2013-01-02  Richard Biener  <rguenther@suse.de>
+
+        * tree-vect-stmts.c (vectorizable_load): When vectorizing an
+        invariant load do not generate a vector load from the scalar
+        location.
+
 2013-01-03  Richard Biener  <rguenther@suse.de>
 
         * tree-vect-loop.c (vect_analyze_loop_form): Clarify reason

gcc/tree-vect-stmts.c

@@ -4927,8 +4927,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   if ((alignment_support_scheme == dr_explicit_realign_optimized
        || alignment_support_scheme == dr_explicit_realign)
-      && !compute_in_loop
-      && !integer_zerop (DR_STEP (dr)))
+      && !compute_in_loop)
     {
       msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                     alignment_support_scheme, NULL_TREE,
@@ -4989,19 +4988,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
           /* Record the mapping between SSA_NAMEs and statements.  */
           vect_record_grouped_load_vectors (stmt, dr_chain);
         }
-      /* Handle invariant-load.  */
-      else if (inv_p && !bb_vinfo)
-        {
-          gimple_stmt_iterator gsi2 = *gsi;
-          gcc_assert (!grouped_load && !slp_perm);
-          gsi_next (&gsi2);
-          new_temp = vect_init_vector (stmt, scalar_dest,
-                                       vectype, &gsi2);
-          new_stmt = SSA_NAME_DEF_STMT (new_temp);
-          /* Store vector loads in the corresponding SLP_NODE.  */
-          if (slp)
-            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
-        }
       else
         {
           for (i = 0; i < vec_num; i++)
@@ -5149,6 +5135,17 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
             }
         }
 
+      /* 4. Handle invariant-load.  */
+      if (inv_p && !bb_vinfo)
+        {
+          gimple_stmt_iterator gsi2 = *gsi;
+          gcc_assert (!grouped_load);
+          gsi_next (&gsi2);
+          new_temp = vect_init_vector (stmt, scalar_dest,
+                                       vectype, &gsi2);
+          new_stmt = SSA_NAME_DEF_STMT (new_temp);
+        }
+
       if (negative)
         {
           tree perm_mask = perm_mask_for_reverse (vectype);