Improve match_simplify_replacement in phi-opt

This improves match_simplify_replacement in phi-opt to handle the
case where there is one cheap (non-call) preparation statement in the
middle basic block similar to xor_replacement and others.
This also allows us to remove xor_replacement, which performed the same transformation.

OK?  Bootstrapped and tested on x86_64-linux-gnu with no regressions.

Thanks,
Andrew Pinski

Changes since v1:
v3 - Just minor changes to using gimple_assign_lhs
instead of gimple_lhs and fixing a comment.
v2 - change the check on the preparation statement to
allow only assignments and no calls and only assignments
that feed into the phi.

gcc/ChangeLog:

	PR tree-optimization/25290
	* tree-ssa-phiopt.c (xor_replacement): Delete.
	(tree_ssa_phiopt_worker): Delete use of xor_replacement.
	(match_simplify_replacement): Allow one cheap preparation
	statement that can be moved to before the if.

gcc/testsuite/ChangeLog:

	* gcc.dg/tree-ssa/pr96928-1.c: Fix testcase for now that ~
	happens on the outside of the bit_xor.
This commit is contained in:
Andrew Pinski 2021-06-01 06:48:05 +00:00
parent 61fc01806f
commit c4574d23cb
2 changed files with 54 additions and 114 deletions

View file

@ -1,9 +1,9 @@
/* PR tree-optimization/96928 */
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-phiopt2" } */
/* { dg-options "-O2 -fdump-tree-phiopt2 -fdump-tree-optimized" } */
/* { dg-final { scan-tree-dump-times " = a_\[0-9]*\\\(D\\\) >> " 5 "phiopt2" } } */
/* { dg-final { scan-tree-dump-times " = ~c_\[0-9]*\\\(D\\\);" 1 "phiopt2" } } */
/* { dg-final { scan-tree-dump-times " = ~" 1 "phiopt2" } } */
/* { dg-final { scan-tree-dump-times " = ~" 1 "optimized" } } */
/* { dg-final { scan-tree-dump-times " = \[abc_0-9\\\(\\\)D]* \\\^ " 5 "phiopt2" } } */
/* { dg-final { scan-tree-dump-not "a < 0" "phiopt2" } } */

View file

@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "tree-ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "gimple-pretty-print.h"
@ -63,8 +64,6 @@ static bool minmax_replacement (basic_block, basic_block,
edge, edge, gphi *, tree, tree);
static bool abs_replacement (basic_block, basic_block,
edge, edge, gphi *, tree, tree);
static bool xor_replacement (basic_block, basic_block,
edge, edge, gphi *, tree, tree);
static bool spaceship_replacement (basic_block, basic_block,
edge, edge, gphi *, tree, tree);
static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
@ -352,9 +351,6 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
cfgchanged = true;
else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
cfgchanged = true;
else if (!early_p
&& xor_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
cfgchanged = true;
else if (!early_p
&& cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
e2, phi, arg0,
@ -801,14 +797,51 @@ match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
edge true_edge, false_edge;
gimple_seq seq = NULL;
tree result;
if (!empty_block_p (middle_bb))
return false;
gimple *stmt_to_move = NULL;
/* Special case A ? B : B as this will always simplify to B. */
if (operand_equal_for_phi_arg_p (arg0, arg1))
return false;
/* If the basic block only has a cheap preparation statement,
allow it and move it once the transformation is done. */
if (!empty_block_p (middle_bb))
{
stmt_to_move = last_and_only_stmt (middle_bb);
if (!stmt_to_move)
return false;
if (gimple_vuse (stmt_to_move))
return false;
if (gimple_could_trap_p (stmt_to_move)
|| gimple_has_side_effects (stmt_to_move))
return false;
if (gimple_uses_undefined_value_p (stmt_to_move))
return false;
/* Allow only assignments and no calls.
As const calls don't match any of the above, yet they could
still have some side-effects - they could contain
gimple_could_trap_p statements, like floating point
exceptions or integer division by zero. See PR70586.
FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
should handle this. */
if (!is_gimple_assign (stmt_to_move))
return false;
tree lhs = gimple_assign_lhs (stmt_to_move);
gimple *use_stmt;
use_operand_p use_p;
/* Allow only a statement which feeds into the phi. */
if (!lhs || TREE_CODE (lhs) != SSA_NAME
|| !single_imm_use (lhs, &use_p, &use_stmt)
|| use_stmt != phi)
return false;
}
/* At this point we know we have a GIMPLE_COND with two successors.
One successor is BB, the other successor is an empty block which
falls through into BB.
@ -844,7 +877,17 @@ match_simplify_replacement (basic_block cond_bb, basic_block middle_bb,
return false;
gsi = gsi_last_bb (cond_bb);
if (stmt_to_move)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "statement un-sinked:\n");
print_gimple_stmt (dump_file, stmt_to_move, 0,
TDF_VOPS|TDF_MEMSYMS);
}
gimple_stmt_iterator gsi1 = gsi_for_stmt (stmt_to_move);
gsi_move_before (&gsi1, &gsi);
}
if (seq)
gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
@ -2592,109 +2635,6 @@ abs_replacement (basic_block cond_bb, basic_block middle_bb,
return true;
}
/* Optimize x < 0 ? ~y : y into (x >> (prec-1)) ^ y.

   COND_BB contains the GIMPLE_COND "x OP 0"; MIDDLE_BB is expected to
   hold the single one's-complement assignment; PHI merges the two arms
   into the result.  Returns true (and rewrites the PHI) on success.  */
static bool
xor_replacement (basic_block cond_bb, basic_block middle_bb,
edge e0 ATTRIBUTE_UNUSED, edge e1,
gphi *phi, tree arg0, tree arg1)
{
/* The transformation only makes sense for integral types.  */
if (!INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
return false;
/* OTHER_BLOCK must have only one executable statement which must have the
form arg0 = ~arg1 or arg1 = ~arg0. */
gimple *assign = last_and_only_stmt (middle_bb);
/* If we did not find the proper one's complement assignment, then we cannot
optimize. */
if (assign == NULL)
return false;
/* If we got here, then we have found the only executable statement
in OTHER_BLOCK. If it is anything other than arg0 = ~arg1 or
arg1 = ~arg0, then we cannot optimize. */
if (!is_gimple_assign (assign))
return false;
if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
return false;
tree lhs = gimple_assign_lhs (assign);
tree rhs = gimple_assign_rhs1 (assign);
/* The assignment has to be arg0 = ~arg1 or arg1 = ~arg0. */
if (!(lhs == arg0 && rhs == arg1) && !(lhs == arg1 && rhs == arg0))
return false;
gimple *cond = last_stmt (cond_bb);
tree result = PHI_RESULT (phi);
/* Only relationals comparing arg[01] against zero are interesting. */
enum tree_code cond_code = gimple_cond_code (cond);
if (cond_code != LT_EXPR && cond_code != GE_EXPR)
return false;
/* Make sure the conditional is x OP 0.  The comparison operand must be
a signed SSA name whose precision matches the PHI arguments, so that
the arithmetic right shift below produces a proper sign mask.  */
tree clhs = gimple_cond_lhs (cond);
if (TREE_CODE (clhs) != SSA_NAME
|| !INTEGRAL_TYPE_P (TREE_TYPE (clhs))
|| TYPE_UNSIGNED (TREE_TYPE (clhs))
|| TYPE_PRECISION (TREE_TYPE (clhs)) != TYPE_PRECISION (TREE_TYPE (arg1))
|| !integer_zerop (gimple_cond_rhs (cond)))
return false;
/* We need to know which is the true edge and which is the false
edge so that we know if have xor or inverted xor. */
edge true_edge, false_edge;
extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
/* For GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
will need to invert the result. Similarly for LT_EXPR if
the false edge goes to OTHER_BLOCK. */
edge e;
if (cond_code == GE_EXPR)
e = true_edge;
else
e = false_edge;
bool invert = e->dest == middle_bb;
/* Use a fresh SSA name for the result so the old PHI result can be
replaced via replace_phi_edge_with_variable below.  */
result = duplicate_ssa_name (result, NULL);
gimple_stmt_iterator gsi = gsi_last_bb (cond_bb);
/* Build x >> (prec-1): for a signed x this yields all-ones when x < 0
and zero otherwise, i.e. the mask to XOR with.  */
int prec = TYPE_PRECISION (TREE_TYPE (clhs));
gimple *new_stmt
= gimple_build_assign (make_ssa_name (TREE_TYPE (clhs)), RSHIFT_EXPR, clhs,
build_int_cst (integer_type_node, prec - 1));
gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
/* Convert the mask to the result type if needed.  */
if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (clhs)))
{
new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
NOP_EXPR, gimple_assign_lhs (new_stmt));
gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
}
lhs = gimple_assign_lhs (new_stmt);
/* If the sense was inverted, XOR with ~y instead of y.  */
if (invert)
{
new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
BIT_NOT_EXPR, rhs);
gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
rhs = gimple_assign_lhs (new_stmt);
}
/* Emit result = mask ^ y and make the PHI use it.  */
new_stmt = gimple_build_assign (result, BIT_XOR_EXPR, lhs, rhs);
gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
replace_phi_edge_with_variable (cond_bb, e1, phi, result);
/* Note that we optimized this PHI. */
return true;
}
/* Auxiliary functions to determine the set of memory accesses which
can't trap because they are preceded by accesses to the same memory
portion. We do that for MEM_REFs, so we only need to track