openmp: Avoid ICEs due to orphaned labels in OpenMP regions [PR99322]

When performing cfg cleanup at the end of the cfg pass, if there are any OpenMP
regions and some basic blocks are unreachable and contain forced labels,
remove_bb moves the labels to the previous bb, but if the two bbs belong to
different OpenMP regions, the label will end up in a different function from the
one where it was assumed to be and checked, e.g. during gimplification or OpenMP
region SESE checking.

The following patch instead places such labels into some bb from the correct
OpenMP region when the previous bb is not in that region.  I think this should
happen very rarely: normally the bbs of each OpenMP region are adjacent before
the cfg pass, and problems usually arise only when the OpenMP regions do not
return, so I hope it isn't fatal that it searches through all bbs on a miss.
If it turns out to be a problem, it can always lazily create some better data
structure and maintain it through bb removals once it reaches that case for the
first time.

2021-03-05  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/99322
	* tree-cfg.c (bb_to_omp_idx): New variable.
	(execute_build_cfg): Release the bb_to_omp_idx vector after
	cleanup_tree_cfg returns.
	(handle_abnormal_edges): Remove bb_to_omp_idx argument, adjust
	for bb_to_omp_idx being a vec<int> instead of pointer to array
	of ints.
	(make_edges): Remove bb_to_omp_idx local variable, don't pass
	it to handle_abnormal_edges, adjust for bb_to_omp_idx being a
	vec<int> instead of pointer to array of ints and don't free/release
	it at the end.
	(remove_bb): When removing a bb and placing forced label somewhere
	else, ensure it is put into the same OpenMP region during cfg
	pass if possible or to entry successor as fallback.  Unregister
	bb from bb_to_omp_idx.

	* c-c++-common/gomp/pr99322.c: New test.
This commit is contained in:
Jakub Jelinek 2021-03-05 21:52:35 +01:00
parent 9e64dd6b3f
commit a3ad6489d3
2 changed files with 74 additions and 20 deletions

View file

@ -0,0 +1,26 @@
/* PR middle-end/99322 */
/* { dg-do compile } */
void foo (void);
void qux (void *);
void
bar (void)
{
  /* A no-return parallel region: the body never falls through, so the
     blocks after the region (including the one holding "lab") become
     unreachable and get removed during cfg cleanup.  */
#pragma omp parallel
for (;;)
for (int i = 0; i < 8; ++i)
foo ();
  /* Forced label in an unreachable block *after* the OpenMP region;
     remove_bb must not migrate it into a bb that belongs to the
     parallel region's child function (this used to ICE, PR99322).  */
{ lab:; }
qux (&&lab);
}
void
baz (void)
{
  /* Take the label's address *before* the no-return parallel region, so
     the forced label "lab" lives in a block that follows the region and
     becomes unreachable once the region is seen to never terminate.  */
qux (&&lab);
#pragma omp parallel
for (;;)
;
  /* Orphaned forced label after the region — exercises the remove_bb
     path that must keep it out of the parallel region (PR99322).  */
lab:;
}

View file

@ -93,6 +93,9 @@ static hash_map<edge, tree> *edge_to_cases;
static bitmap touched_switch_bbs;
/* OpenMP region idxs for blocks during cfg pass. */
static vec<int> bb_to_omp_idx;
/* CFG statistics. */
struct cfg_stats_d
{
@ -372,6 +375,9 @@ execute_build_cfg (void)
dump_scope_blocks (dump_file, dump_flags);
}
cleanup_tree_cfg ();
bb_to_omp_idx.release ();
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
replace_loop_annotate ();
return 0;
@ -724,8 +730,7 @@ get_abnormal_succ_dispatcher (basic_block bb)
if COMPUTED_GOTO is false, otherwise factor the computed gotos. */
static void
handle_abnormal_edges (basic_block *dispatcher_bbs,
basic_block for_bb, int *bb_to_omp_idx,
handle_abnormal_edges (basic_block *dispatcher_bbs, basic_block for_bb,
auto_vec<basic_block> *bbs, bool computed_goto)
{
basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
@ -733,7 +738,7 @@ handle_abnormal_edges (basic_block *dispatcher_bbs,
basic_block bb;
bool inner = false;
if (bb_to_omp_idx)
if (!bb_to_omp_idx.is_empty ())
{
dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
if (bb_to_omp_idx[for_bb->index] != 0)
@ -748,7 +753,7 @@ handle_abnormal_edges (basic_block *dispatcher_bbs,
/* Check if there are any basic blocks that need to have
abnormal edges to this dispatcher. If there are none, return
early. */
if (bb_to_omp_idx == NULL)
if (bb_to_omp_idx.is_empty ())
{
if (bbs->is_empty ())
return;
@ -790,7 +795,7 @@ handle_abnormal_edges (basic_block *dispatcher_bbs,
FOR_EACH_VEC_ELT (*bbs, idx, bb)
{
if (bb_to_omp_idx
if (!bb_to_omp_idx.is_empty ()
&& bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
continue;
@ -820,7 +825,7 @@ handle_abnormal_edges (basic_block *dispatcher_bbs,
/* Create predecessor edges of the dispatcher. */
FOR_EACH_VEC_ELT (*bbs, idx, bb)
{
if (bb_to_omp_idx
if (!bb_to_omp_idx.is_empty ()
&& bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
continue;
make_edge (bb, *dispatcher, EDGE_ABNORMAL);
@ -957,7 +962,6 @@ make_edges (void)
struct omp_region *cur_region = NULL;
auto_vec<basic_block> ab_edge_goto;
auto_vec<basic_block> ab_edge_call;
int *bb_to_omp_idx = NULL;
int cur_omp_region_idx = 0;
/* Create an edge from entry to the first block with executable
@ -971,7 +975,7 @@ make_edges (void)
{
int mer;
if (bb_to_omp_idx)
if (!bb_to_omp_idx.is_empty ())
bb_to_omp_idx[bb->index] = cur_omp_region_idx;
mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
@ -980,8 +984,8 @@ make_edges (void)
else if (mer == 2)
ab_edge_call.safe_push (bb);
if (cur_region && bb_to_omp_idx == NULL)
bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
if (cur_region && bb_to_omp_idx.is_empty ())
bb_to_omp_idx.safe_grow_cleared (n_basic_blocks_for_fn (cfun), true);
}
/* Computed gotos are hell to deal with, especially if there are
@ -1006,7 +1010,7 @@ make_edges (void)
basic_block *dispatcher_bbs = dispatcher_bb_array;
int count = n_basic_blocks_for_fn (cfun);
if (bb_to_omp_idx)
if (!bb_to_omp_idx.is_empty ())
dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);
FOR_EACH_BB_FN (bb, cfun)
@ -1024,12 +1028,12 @@ make_edges (void)
/* Make an edge to every label block that has been marked as a
potential target for a computed goto or a non-local goto. */
if (FORCED_LABEL (target))
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_goto, true);
handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_goto,
true);
if (DECL_NONLOCAL (target))
{
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_call, false);
handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
false);
break;
}
}
@ -1044,17 +1048,15 @@ make_edges (void)
&& ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
|| gimple_call_builtin_p (call_stmt,
BUILT_IN_SETJMP_RECEIVER)))
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_call, false);
handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
false);
}
}
if (bb_to_omp_idx)
if (!bb_to_omp_idx.is_empty ())
XDELETE (dispatcher_bbs);
}
XDELETE (bb_to_omp_idx);
omp_free_regions ();
}
@ -2301,6 +2303,30 @@ remove_bb (basic_block bb)
new_bb = single_succ (new_bb);
gcc_assert (new_bb != bb);
}
if ((unsigned) bb->index < bb_to_omp_idx.length ()
&& ((unsigned) new_bb->index >= bb_to_omp_idx.length ()
|| (bb_to_omp_idx[bb->index]
!= bb_to_omp_idx[new_bb->index])))
{
/* During cfg pass make sure to put orphaned labels
into the right OMP region. */
unsigned int i;
int idx;
new_bb = NULL;
FOR_EACH_VEC_ELT (bb_to_omp_idx, i, idx)
if (i >= NUM_FIXED_BLOCKS
&& idx == bb_to_omp_idx[bb->index]
&& i != (unsigned) bb->index)
{
new_bb = BASIC_BLOCK_FOR_FN (cfun, i);
break;
}
if (new_bb == NULL)
{
new_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (new_bb != bb);
}
}
new_gsi = gsi_after_labels (new_bb);
gsi_remove (&i, false);
gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT);
@ -2319,6 +2345,8 @@ remove_bb (basic_block bb)
}
}
if ((unsigned) bb->index < bb_to_omp_idx.length ())
bb_to_omp_idx[bb->index] = -1;
remove_phi_nodes_and_edges_for_unreachable_block (bb);
bb->il.gimple.seq = NULL;
bb->il.gimple.phi_nodes = NULL;