Move statics to threader pass class.

This patch moves all the static functions into the pass class, and
cleans up things a little.  The goal is to shuffle things around such
that we can add debug counters that depend on different threading
passes, but it's a clean-up in its own right.

Tested on x86-64 Linux.

gcc/ChangeLog:

	* tree-ssa-threadbackward.c (BT_NONE): New.
	(BT_SPEED): New.
	(BT_RESOLVE): New.
	(back_threader::back_threader): Add flags.
	Move loop initialization here.
	(back_threader::~back_threader): New.
	(back_threader::find_taken_edge_switch): Change solver and ranger
	to pointers.
	(back_threader::find_taken_edge_cond): Same.
	(back_threader::find_paths_to_names): Same.
	(back_threader::find_paths): Same.
	(back_threader::dump): Same.
	(try_thread_blocks): Merge into thread_blocks.
	(back_threader::thread_blocks): New.
	(do_early_thread_jumps): Merge into thread_blocks.
	(do_thread_jumps): Merge into thread_blocks.
	(back_threader::thread_through_all_blocks): Remove.
This commit is contained in:
Aldy Hernandez 2021-10-29 17:28:41 +02:00
parent 0187c03be3
commit 4e0f56d7af

View file

@@ -69,13 +69,22 @@ private:
const bool m_speed_p;
};
// Back threader flags.
#define BT_NONE 0
// Generate fast code at the expense of code size.
#define BT_SPEED 1
// Resolve unknown SSAs on entry to a threading path. If set, use the
// ranger. If not, assume all ranges on entry to a path are VARYING.
#define BT_RESOLVE 2
class back_threader
{
public:
back_threader (bool speed_p, bool resolve);
void maybe_thread_block (basic_block bb);
bool thread_through_all_blocks (bool may_peel_loop_headers);
back_threader (function *fun, unsigned flags);
~back_threader ();
unsigned thread_blocks ();
private:
void maybe_thread_block (basic_block bb);
void find_paths (basic_block bb, tree name);
edge maybe_register_path ();
bool find_paths_to_names (basic_block bb, bitmap imports);
@@ -89,8 +98,8 @@ private:
back_threader_registry m_registry;
back_threader_profitability m_profit;
gimple_ranger m_ranger;
path_range_query m_solver;
gimple_ranger *m_ranger;
path_range_query *m_solver;
// Current path being analyzed.
auto_vec<basic_block> m_path;
@@ -109,19 +118,35 @@ private:
// Set to TRUE if unknown SSA names along a path should be resolved
// with the ranger. Otherwise, unknown SSA names are assumed to be
// VARYING. Setting to true is more precise but slower.
bool m_resolve;
function *m_fun;
unsigned m_flags;
};
// Used to differentiate unreachable edges, so we may stop the search
// in a given direction.
const edge back_threader::UNREACHABLE_EDGE = (edge) -1;
back_threader::back_threader (bool speed_p, bool resolve)
: m_profit (speed_p),
m_solver (m_ranger, resolve)
back_threader::back_threader (function *fun, unsigned flags)
: m_profit (flags & BT_SPEED)
{
if (flags & BT_SPEED)
loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
else
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
m_fun = fun;
m_flags = flags;
m_ranger = new gimple_ranger;
m_solver = new path_range_query (*m_ranger, flags & BT_RESOLVE);
m_last_stmt = NULL;
m_resolve = resolve;
}
// Tear down in reverse order of construction: the path solver was
// built from *m_ranger, so it is deleted first.  Balances the
// loop_optimizer_init call made in the constructor.
back_threader::~back_threader ()
{
  delete m_solver;
  delete m_ranger;
  loop_optimizer_finalize ();
}
// Register the current path for jump threading if it's profitable to
@@ -186,8 +211,8 @@ back_threader::find_taken_edge_switch (const vec&lt;basic_block&gt; &amp;path,
tree name = gimple_switch_index (sw);
int_range_max r;
m_solver.compute_ranges (path, m_imports);
m_solver.range_of_expr (r, name, sw);
m_solver->compute_ranges (path, m_imports);
m_solver->range_of_expr (r, name, sw);
if (r.undefined_p ())
return UNREACHABLE_EDGE;
@@ -210,10 +235,10 @@ back_threader::find_taken_edge_cond (const vec&lt;basic_block&gt; &amp;path,
{
int_range_max r;
m_solver.compute_ranges (path, m_imports);
m_solver.range_of_stmt (r, cond);
m_solver->compute_ranges (path, m_imports);
m_solver->range_of_stmt (r, cond);
if (m_solver.unreachable_path_p ())
if (m_solver->unreachable_path_p ())
return UNREACHABLE_EDGE;
int_range<2> true_range (boolean_true_node, boolean_true_node);
@@ -381,7 +406,7 @@ back_threader::find_paths_to_names (basic_block bb, bitmap interesting)
// Examine blocks that define or export an interesting SSA,
// since they may compute a range which resolves this path.
if ((def_bb == bb
|| bitmap_bit_p (m_ranger.gori ().exports (bb), i))
|| bitmap_bit_p (m_ranger->gori ().exports (bb), i))
&& m_path.length () > 1)
{
if (maybe_register_path ())
@@ -436,7 +461,7 @@ back_threader::find_paths (basic_block bb, tree name)
bitmap_clear (m_imports);
auto_bitmap interesting;
bitmap_copy (m_imports, m_ranger.gori ().imports (bb));
bitmap_copy (m_imports, m_ranger->gori ().imports (bb));
bitmap_copy (interesting, m_imports);
find_paths_to_names (bb, interesting);
}
@@ -486,14 +511,6 @@ back_threader::maybe_thread_block (basic_block bb)
find_paths (bb, name);
}
// Perform the actual jump threading for all the queued paths.
//
// Thin wrapper delegating to the registry, which holds the candidate
// paths accumulated by maybe_register_path.  Callers use the boolean
// result to decide whether a CFG cleanup pass is needed.
bool
back_threader::thread_through_all_blocks (bool may_peel_loop_headers)
{
  return m_registry.thread_through_all_blocks (may_peel_loop_headers);
}
// Dump a sequence of BBs through the CFG.
DEBUG_FUNCTION void
@@ -517,7 +534,7 @@ debug (const vec &lt;basic_block&gt; &amp;path)
void
back_threader::dump (FILE *out)
{
m_solver.dump (out);
m_solver->dump (out);
fprintf (out, "\nCandidates for pre-computation:\n");
fprintf (out, "===================================\n");
@@ -883,45 +900,24 @@ back_threader_registry::register_path (const vec&lt;basic_block&gt; &amp;m_path,
return true;
}
// Try to thread blocks in FUN. RESOLVE is TRUE when fully resolving
// unknown SSAs. SPEED is TRUE when optimizing for speed.
// Thread all suitable paths in the current function.
//
// Return TRUE if any jump thread paths were registered.
// Return TODO_flags.
static bool
try_thread_blocks (function *fun, bool resolve, bool speed)
unsigned int
back_threader::thread_blocks ()
{
back_threader threader (speed, resolve);
basic_block bb;
FOR_EACH_BB_FN (bb, fun)
{
if (EDGE_COUNT (bb->succs) > 1)
threader.maybe_thread_block (bb);
}
return threader.thread_through_all_blocks (/*peel_loop_headers=*/true);
}
FOR_EACH_BB_FN (bb, m_fun)
if (EDGE_COUNT (bb->succs) > 1)
maybe_thread_block (bb);
static unsigned int
do_early_thread_jumps (function *fun, bool resolve)
{
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
bool changed = m_registry.thread_through_all_blocks (true);
try_thread_blocks (fun, resolve, /*speed=*/false);
if (m_flags & BT_SPEED)
return changed ? TODO_cleanup_cfg : 0;
loop_optimizer_finalize ();
return 0;
}
static unsigned int
do_thread_jumps (function *fun, bool resolve)
{
loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
bool changed = try_thread_blocks (fun, resolve, /*speed=*/true);
loop_optimizer_finalize ();
return changed ? TODO_cleanup_cfg : 0;
return false;
}
namespace {
@@ -983,7 +979,8 @@ public:
}
unsigned int execute (function *fun) override
{
return do_early_thread_jumps (fun, /*resolve=*/false);
back_threader threader (fun, BT_NONE);
return threader.thread_blocks ();
}
};
@@ -1004,7 +1001,8 @@ public:
}
unsigned int execute (function *fun) override
{
return do_thread_jumps (fun, /*resolve=*/false);
back_threader threader (fun, BT_SPEED);
return threader.thread_blocks ();
}
};
@@ -1025,7 +1023,8 @@ public:
}
unsigned int execute (function *fun) override
{
return do_thread_jumps (fun, /*resolve=*/true);
back_threader threader (fun, BT_SPEED | BT_RESOLVE);
return threader.thread_blocks ();
}
};