From 59fd17e3a69e7b826e4926e44476e7ba08962aa9 Mon Sep 17 00:00:00 2001
From: Richard Biener
Date: Thu, 3 Jan 2013 15:57:15 +0000
Subject: [PATCH] revert: [multiple changes]

2013-01-03  Richard Biener

	Revert
	2013-01-03  Richard Biener

	PR tree-optimization/55857
	* tree-vect-stmts.c (vectorizable_load): Do not setup
	re-alignment for invariant loads.

	2013-01-02  Richard Biener

	* tree-vect-stmts.c (vectorizable_load): When vectorizing an
	invariant load do not generate a vector load from the scalar
	location.

From-SVN: r194856
---
 gcc/ChangeLog         | 15 +++++++++++++++
 gcc/tree-vect-stmts.c | 27 ++++++++++++---------------
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index bdbbb71c411..5e5885ee7c4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,18 @@
+2013-01-03  Richard Biener
+
+	Revert
+	2013-01-03  Richard Biener
+
+	PR tree-optimization/55857
+	* tree-vect-stmts.c (vectorizable_load): Do not setup
+	re-alignment for invariant loads.
+
+	2013-01-02  Richard Biener
+
+	* tree-vect-stmts.c (vectorizable_load): When vectorizing an
+	invariant load do not generate a vector load from the scalar
+	location.
+
 2013-01-03  Richard Biener
 
 	* tree-vect-loop.c (vect_analyze_loop_form): Clarify reason
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index dfbce969fdb..1e8d7ee4401 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -4927,8 +4927,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   if ((alignment_support_scheme == dr_explicit_realign_optimized
        || alignment_support_scheme == dr_explicit_realign)
-      && !compute_in_loop
-      && !integer_zerop (DR_STEP (dr)))
+      && !compute_in_loop)
     {
       msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                     alignment_support_scheme, NULL_TREE,
@@ -4989,19 +4988,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
           /* Record the mapping between SSA_NAMEs and statements.  */
           vect_record_grouped_load_vectors (stmt, dr_chain);
         }
-      /* Handle invariant-load.  */
-      else if (inv_p && !bb_vinfo)
-        {
-          gimple_stmt_iterator gsi2 = *gsi;
-          gcc_assert (!grouped_load && !slp_perm);
-          gsi_next (&gsi2);
-          new_temp = vect_init_vector (stmt, scalar_dest,
-                                       vectype, &gsi2);
-          new_stmt = SSA_NAME_DEF_STMT (new_temp);
-          /* Store vector loads in the corresponding SLP_NODE.  */
-          if (slp)
-            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
-        }
       else
         {
           for (i = 0; i < vec_num; i++)
@@ -5149,6 +5135,17 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
             }
         }
 
+  /* 4. Handle invariant-load.  */
+  if (inv_p && !bb_vinfo)
+    {
+      gimple_stmt_iterator gsi2 = *gsi;
+      gcc_assert (!grouped_load);
+      gsi_next (&gsi2);
+      new_temp = vect_init_vector (stmt, scalar_dest,
+                                   vectype, &gsi2);
+      new_stmt = SSA_NAME_DEF_STMT (new_temp);
+    }
+
   if (negative)
     {
       tree perm_mask = perm_mask_for_reverse (vectype);