re PR target/83712 ("Unable to find a register to spill" when compiling for thumb1)
2018-03-09 Vladimir Makarov <vmakarov@redhat.com> PR target/83712 * lra-assigns.c (assign_by_spills): Return a flag of reload assignment failure. Do not process the reload assignment failures. Do not spill other reload pseudos if they have the same reg class. (lra_assign): Add a return arg. Set up from the result of assign_by_spills call. (find_reload_regno_insns, lra_split_hard_reg_for): New functions. * lra-constraints.c (split_reg): Add a new arg. Use it instead of usage_insns if it is not NULL. (spill_hard_reg_in_range): New function. (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg. * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New function prototypes. (lra_assign): Change prototype. * lra.c (lra): Add code to deal with fails by splitting hard reg live ranges. 2018-03-09 Vladimir Makarov <vmakarov@redhat.com> PR target/83712 * gcc.target/arm/pr83712.c: New. From-SVN: r258390
This commit is contained in:
parent
a4017ff772
commit
e0cd6bc009
7 changed files with 315 additions and 115 deletions
|
@ -1,3 +1,23 @@
|
|||
2018-03-09 Vladimir Makarov <vmakarov@redhat.com>
|
||||
|
||||
PR target/83712
|
||||
* lra-assigns.c (assign_by_spills): Return a flag of reload
|
||||
assignment failure. Do not process the reload assignment
|
||||
failures.  Do not spill other reload pseudos if they have the same
|
||||
reg class.
|
||||
(lra_assign): Add a return arg. Set up from the result of
|
||||
assign_by_spills call.
|
||||
(find_reload_regno_insns, lra_split_hard_reg_for): New functions.
|
||||
* lra-constraints.c (split_reg): Add a new arg. Use it instead of
|
||||
usage_insns if it is not NULL.
|
||||
(spill_hard_reg_in_range): New function.
|
||||
(split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg.
|
||||
* lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New
|
||||
function prototypes.
|
||||
(lra_assign): Change prototype.
|
||||
* lra.c (lra): Add code to deal with fails by splitting hard reg
|
||||
live ranges.
|
||||
|
||||
2018-03-09 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
|
||||
|
||||
PR target/83193
|
||||
|
|
|
@ -1346,17 +1346,18 @@ find_all_spills_for (int regno)
|
|||
}
|
||||
}
|
||||
|
||||
/* Assign hard registers to reload pseudos and other pseudos. */
|
||||
static void
|
||||
/* Assign hard registers to reload pseudos and other pseudos. Return
|
||||
true if we were not able to assign hard registers to all reload
|
||||
pseudos. */
|
||||
static bool
|
||||
assign_by_spills (void)
|
||||
{
|
||||
int i, n, nfails, iter, regno, hard_regno, cost;
|
||||
int i, n, nfails, iter, regno, regno2, hard_regno, cost;
|
||||
rtx restore_rtx;
|
||||
rtx_insn *insn;
|
||||
bitmap_head changed_insns, do_not_assign_nonreload_pseudos;
|
||||
unsigned int u, conflict_regno;
|
||||
bitmap_iterator bi;
|
||||
bool reload_p;
|
||||
bool reload_p, fails_p = false;
|
||||
int max_regno = max_reg_num ();
|
||||
|
||||
for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++)
|
||||
|
@ -1399,8 +1400,13 @@ assign_by_spills (void)
|
|||
hard_regno = spill_for (regno, &all_spilled_pseudos, iter == 1);
|
||||
if (hard_regno < 0)
|
||||
{
|
||||
if (reload_p)
|
||||
if (reload_p) {
|
||||
/* Put unassigned reload pseudo first in the
|
||||
array. */
|
||||
regno2 = sorted_pseudos[nfails];
|
||||
sorted_pseudos[nfails++] = regno;
|
||||
sorted_pseudos[i] = regno2;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -1415,61 +1421,9 @@ assign_by_spills (void)
|
|||
bitmap_set_bit (&changed_pseudo_bitmap, regno);
|
||||
}
|
||||
}
|
||||
if (nfails == 0)
|
||||
break;
|
||||
if (iter > 0)
|
||||
if (nfails == 0 || iter > 0)
|
||||
{
|
||||
/* We did not assign hard regs to reload pseudos after two iterations.
|
||||
Either it's an asm and something is wrong with the constraints, or
|
||||
we have run out of spill registers; error out in either case. */
|
||||
bool asm_p = false;
|
||||
bitmap_head failed_reload_insns;
|
||||
|
||||
bitmap_initialize (&failed_reload_insns, ®_obstack);
|
||||
for (i = 0; i < nfails; i++)
|
||||
{
|
||||
regno = sorted_pseudos[i];
|
||||
bitmap_ior_into (&failed_reload_insns,
|
||||
&lra_reg_info[regno].insn_bitmap);
|
||||
/* Assign an arbitrary hard register of regno class to
|
||||
avoid further trouble with this insn. */
|
||||
bitmap_clear_bit (&all_spilled_pseudos, regno);
|
||||
assign_hard_regno
|
||||
(ira_class_hard_regs[regno_allocno_class_array[regno]][0],
|
||||
regno);
|
||||
}
|
||||
EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi)
|
||||
{
|
||||
insn = lra_insn_recog_data[u]->insn;
|
||||
if (asm_noperands (PATTERN (insn)) >= 0)
|
||||
{
|
||||
asm_p = true;
|
||||
error_for_asm (insn,
|
||||
"%<asm%> operand has impossible constraints");
|
||||
/* Avoid further trouble with this insn.
|
||||
For asm goto, instead of fixing up all the edges
|
||||
just clear the template and clear input operands
|
||||
(asm goto doesn't have any output operands). */
|
||||
if (JUMP_P (insn))
|
||||
{
|
||||
rtx asm_op = extract_asm_operands (PATTERN (insn));
|
||||
ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup ("");
|
||||
ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0);
|
||||
ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0);
|
||||
lra_update_insn_regno_info (insn);
|
||||
}
|
||||
else
|
||||
{
|
||||
PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
|
||||
lra_set_insn_deleted (insn);
|
||||
}
|
||||
}
|
||||
else if (!asm_p)
|
||||
{
|
||||
error ("unable to find a register to spill");
|
||||
fatal_insn ("this is the insn:", insn);
|
||||
}
|
||||
}
|
||||
fails_p = nfails != 0;
|
||||
break;
|
||||
}
|
||||
/* This is a very rare event. We can not assign a hard register
|
||||
|
@ -1497,6 +1451,14 @@ assign_by_spills (void)
|
|||
}
|
||||
EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno)
|
||||
{
|
||||
if ((int) conflict_regno >= lra_constraint_new_regno_start
|
||||
/* There is no sense to spill another reload pseudo if
|
||||
it has the same class. */
|
||||
&& regno_allocno_class_array[conflict_regno] == regno_allocno_class_array[regno]
|
||||
&& ! bitmap_bit_p (&lra_inheritance_pseudos, conflict_regno)
|
||||
&& ! bitmap_bit_p (&lra_split_regs, conflict_regno)
|
||||
&& ! bitmap_bit_p (&lra_optional_reload_pseudos, conflict_regno))
|
||||
continue;
|
||||
if ((int) conflict_regno >= lra_constraint_new_regno_start)
|
||||
{
|
||||
sorted_pseudos[nfails++] = conflict_regno;
|
||||
|
@ -1518,7 +1480,6 @@ assign_by_spills (void)
|
|||
update_lives (conflict_regno, true);
|
||||
lra_setup_reg_renumber (conflict_regno, -1, false);
|
||||
}
|
||||
n = nfails;
|
||||
}
|
||||
improve_inheritance (&changed_pseudo_bitmap);
|
||||
bitmap_clear (&non_reload_pseudos);
|
||||
|
@ -1604,9 +1565,9 @@ assign_by_spills (void)
|
|||
bitmap_clear (&best_spill_pseudos_bitmap);
|
||||
bitmap_clear (&spill_pseudos_bitmap);
|
||||
bitmap_clear (&insn_conflict_pseudos);
|
||||
return fails_p;
|
||||
}
|
||||
|
||||
|
||||
/* Entry function to assign hard registers to new reload pseudos
|
||||
starting with LRA_CONSTRAINT_NEW_REGNO_START (by possible spilling
|
||||
of old pseudos) and possibly to the old pseudos. The function adds
|
||||
|
@ -1615,9 +1576,10 @@ assign_by_spills (void)
|
|||
changed allocation.
|
||||
|
||||
Return true if we did not spill any non-reload and non-inheritance
|
||||
pseudos. */
|
||||
pseudos. Set up FAILS_P if we failed to assign hard registers to
|
||||
all reload pseudos. */
|
||||
bool
|
||||
lra_assign (void)
|
||||
lra_assign (bool &fails_p)
|
||||
{
|
||||
int i;
|
||||
unsigned int u;
|
||||
|
@ -1661,7 +1623,7 @@ lra_assign (void)
|
|||
/* Setup insns to process on the next constraint pass. */
|
||||
bitmap_initialize (&changed_pseudo_bitmap, ®_obstack);
|
||||
init_live_reload_and_inheritance_pseudos ();
|
||||
assign_by_spills ();
|
||||
fails_p = assign_by_spills ();
|
||||
finish_live_reload_and_inheritance_pseudos ();
|
||||
bitmap_ior_into (&changed_pseudo_bitmap, &all_spilled_pseudos);
|
||||
no_spills_p = true;
|
||||
|
@ -1707,3 +1669,137 @@ lra_assign (void)
|
|||
return no_spills_p;
|
||||
}
|
||||
|
||||
/* Find start and finish insns for reload pseudo REGNO.  Return true
   if we managed to find the expected insns.  Return false,
   otherwise.

   START and FINISH are output reference parameters; they are only
   meaningful when the function returns true.  */
static bool
find_reload_regno_insns (int regno, rtx_insn * &start, rtx_insn * &finish)
{
  unsigned int uid;
  bitmap_iterator bi;
  int n = 0;
  rtx_insn *prev_insn, *next_insn;
  rtx_insn *start_insn = NULL, *first_insn = NULL, *second_insn = NULL;

  /* Count the insns referring to REGNO and remember the first one
     seen in the bitmap iteration order.  */
  EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
    {
      if (start_insn == NULL)
	start_insn = lra_insn_recog_data[uid]->insn;
      n++;
    }
  /* For reload pseudo we should have at most 3 insns referring for it:
     input/output reload insns and the original insn.  */
  if (n > 3)
    return false;
  if (n > 1)
    {
      /* Walk outward from START_INSN in both directions along the insn
	 chain until the remaining referring insns are located (or the
	 chain is exhausted).  */
      for (prev_insn = PREV_INSN (start_insn),
	     next_insn = NEXT_INSN (start_insn);
	   n != 1 && (prev_insn != NULL || next_insn != NULL); )
	{
	  if (prev_insn != NULL && first_insn == NULL)
	    {
	      if (! bitmap_bit_p (&lra_reg_info[regno].insn_bitmap,
				  INSN_UID (prev_insn)))
		prev_insn = PREV_INSN (prev_insn);
	      else
		{
		  first_insn = prev_insn;
		  n--;
		}
	    }
	  if (next_insn != NULL && second_insn == NULL)
	    {
	      if (! bitmap_bit_p (&lra_reg_info[regno].insn_bitmap,
				  INSN_UID (next_insn)))
		next_insn = NEXT_INSN (next_insn);
	      else
		{
		  second_insn = next_insn;
		  n--;
		}
	    }
	}
      /* Not all referring insns were found on the chain around
	 START_INSN -- give up.  */
      if (n > 1)
	return false;
    }
  start = first_insn != NULL ? first_insn : start_insn;
  finish = second_insn != NULL ? second_insn : start_insn;
  return true;
}
|
||||
|
||||
/* Process reload pseudos which did not get a hard reg, split a hard
   reg live range in live range of a reload pseudo, and then return.
   If we did not split a hard reg live range, report an error.  */
void
lra_split_hard_reg_for (void)
{
  int i, regno, n;
  rtx_insn *insn, *first, *last;
  unsigned int u;
  bitmap_iterator bi;
  int max_regno = max_reg_num ();
  /* We did not assign hard regs to reload pseudos after two
     iterations.  Either it's an asm and something is wrong with the
     constraints, or we have run out of spill registers; error out in
     either case.  */
  bool asm_p = false;
  bitmap_head failed_reload_insns;

  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
	     "\n****** Splitting a hard reg after assignment #%d: ******\n\n",
	     lra_assignment_iter);
  /* Try each still-unassigned reload pseudo: if its referring insns
     form a small contiguous range, attempt to free a hard reg by
     splitting a hard reg live range over that range.  Success means
     another assignment pass can be run, so return immediately.  */
  for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++)
    if (reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0
	&& regno_allocno_class_array[i] != NO_REGS
	&& ! bitmap_bit_p (&non_reload_pseudos, i))
      {
	sorted_pseudos[n++] = i;
	if (! find_reload_regno_insns (i, first, last))
	  continue;
	if (spill_hard_reg_in_range (i, regno_allocno_class_array[i],
				     first, last))
	  return;
      }
  /* No split was possible: collect the insns of all failed reload
     pseudos and diagnose them below.  */
  bitmap_initialize (&failed_reload_insns, &reg_obstack);
  for (i = 0; i < n; i++)
    {
      regno = sorted_pseudos[i];
      bitmap_ior_into (&failed_reload_insns,
		       &lra_reg_info[regno].insn_bitmap);
      /* Assign an arbitrary hard register of the pseudo's class to
	 avoid further trouble with these insns.  */
      lra_setup_reg_renumber (regno, ira_class_hard_regs[regno_allocno_class_array[regno]][0], false);
    }
  EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi)
    {
      insn = lra_insn_recog_data[u]->insn;
      if (asm_noperands (PATTERN (insn)) >= 0)
	{
	  asm_p = true;
	  error_for_asm (insn,
			 "%<asm%> operand has impossible constraints");
	  /* Avoid further trouble with this insn.
	     For asm goto, instead of fixing up all the edges
	     just clear the template and clear input operands
	     (asm goto doesn't have any output operands).  */
	  if (JUMP_P (insn))
	    {
	      rtx asm_op = extract_asm_operands (PATTERN (insn));
	      ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup ("");
	      ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0);
	      ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0);
	      lra_update_insn_regno_info (insn);
	    }
	  else
	    {
	      PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
	      lra_set_insn_deleted (insn);
	    }
	}
      else if (!asm_p)
	{
	  /* Only report the first non-asm failure; later insns are
	     likely consequences of the same register shortage.  */
	  error ("unable to find a register to spill");
	  fatal_insn ("this is the insn:", insn);
	}
    }
}
|
||||
|
|
|
@ -5445,7 +5445,8 @@ lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno)
|
|||
/* Do split transformations for insn INSN, which defines or uses
|
||||
ORIGINAL_REGNO. NEXT_USAGE_INSNS specifies which instruction in
|
||||
the EBB next uses ORIGINAL_REGNO; it has the same form as the
|
||||
"insns" field of usage_insns.
|
||||
"insns" field of usage_insns. If TO is not NULL, we don't use
|
||||
usage_insns, we put restore insns after TO insn.
|
||||
|
||||
The transformations look like:
|
||||
|
||||
|
@ -5467,7 +5468,7 @@ lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno)
|
|||
transformation. */
|
||||
static bool
|
||||
split_reg (bool before_p, int original_regno, rtx_insn *insn,
|
||||
rtx next_usage_insns)
|
||||
rtx next_usage_insns, rtx_insn *to)
|
||||
{
|
||||
enum reg_class rclass;
|
||||
rtx original_reg;
|
||||
|
@ -5600,29 +5601,37 @@ split_reg (bool before_p, int original_regno, rtx_insn *insn,
|
|||
if (!HARD_REGISTER_NUM_P (original_regno)
|
||||
&& mode == PSEUDO_REGNO_MODE (original_regno))
|
||||
lra_copy_reg_equiv (new_regno, original_regno);
|
||||
after_p = usage_insns[original_regno].after_p;
|
||||
lra_reg_info[new_regno].restore_rtx = regno_reg_rtx[original_regno];
|
||||
bitmap_set_bit (&check_only_regs, new_regno);
|
||||
bitmap_set_bit (&check_only_regs, original_regno);
|
||||
bitmap_set_bit (&lra_split_regs, new_regno);
|
||||
for (;;)
|
||||
if (to != NULL)
|
||||
{
|
||||
if (GET_CODE (next_usage_insns) != INSN_LIST)
|
||||
usage_insn = to;
|
||||
after_p = TRUE;
|
||||
}
|
||||
else
|
||||
{
|
||||
after_p = usage_insns[original_regno].after_p;
|
||||
for (;;)
|
||||
{
|
||||
usage_insn = next_usage_insns;
|
||||
break;
|
||||
}
|
||||
usage_insn = XEXP (next_usage_insns, 0);
|
||||
lra_assert (DEBUG_INSN_P (usage_insn));
|
||||
next_usage_insns = XEXP (next_usage_insns, 1);
|
||||
lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
|
||||
true);
|
||||
lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
|
||||
if (lra_dump_file != NULL)
|
||||
{
|
||||
fprintf (lra_dump_file, " Split reuse change %d->%d:\n",
|
||||
original_regno, new_regno);
|
||||
dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));
|
||||
if (GET_CODE (next_usage_insns) != INSN_LIST)
|
||||
{
|
||||
usage_insn = next_usage_insns;
|
||||
break;
|
||||
}
|
||||
usage_insn = XEXP (next_usage_insns, 0);
|
||||
lra_assert (DEBUG_INSN_P (usage_insn));
|
||||
next_usage_insns = XEXP (next_usage_insns, 1);
|
||||
lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
|
||||
true);
|
||||
lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
|
||||
if (lra_dump_file != NULL)
|
||||
{
|
||||
fprintf (lra_dump_file, " Split reuse change %d->%d:\n",
|
||||
original_regno, new_regno);
|
||||
dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));
|
||||
}
|
||||
}
|
||||
}
|
||||
lra_assert (NOTE_P (usage_insn) || NONDEBUG_INSN_P (usage_insn));
|
||||
|
@ -5649,6 +5658,35 @@ split_reg (bool before_p, int original_regno, rtx_insn *insn,
|
|||
return true;
|
||||
}
|
||||
|
||||
/* Split a hard reg for reload pseudo REGNO having RCLASS and living
   in the range [FROM, TO].  Return true if did a split.  Otherwise,
   return false.

   A candidate hard reg must conflict with REGNO (otherwise REGNO
   could simply use it) and must not be referenced by any insn inside
   [FROM, TO], so that its live range can be split around that
   range.  */
bool
spill_hard_reg_in_range (int regno, enum reg_class rclass, rtx_insn *from, rtx_insn *to)
{
  int i, hard_regno;
  int rclass_size;
  rtx_insn *insn;

  lra_assert (from != NULL && to != NULL);
  rclass_size = ira_class_hard_regs_num[rclass];
  for (i = 0; i < rclass_size; i++)
    {
      hard_regno = ira_class_hard_regs[rclass][i];
      /* Skip hard regs that do not conflict with REGNO -- splitting
	 them would not free anything REGNO needs.  */
      if (! TEST_HARD_REG_BIT (lra_reg_info[regno].conflict_hard_regs, hard_regno))
	continue;
      /* Reject the hard reg if any insn in [FROM, TO] refers to it.  */
      for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
	if (bitmap_bit_p (&lra_reg_info[hard_regno].insn_bitmap,
			  INSN_UID (insn)))
	  break;
      if (insn != NEXT_INSN (to))
	continue;
      /* Passing TO makes split_reg place restore insns after TO
	 instead of consulting usage_insns.  */
      if (split_reg (TRUE, hard_regno, from, NULL, to))
	return true;
    }
  return false;
}
|
||||
|
||||
/* Recognize that we need a split transformation for insn INSN, which
|
||||
defines or uses REGNO in its insn biggest MODE (we use it only if
|
||||
REGNO is a hard register). POTENTIAL_RELOAD_HARD_REGS contains
|
||||
|
@ -5676,7 +5714,7 @@ split_if_necessary (int regno, machine_mode mode,
|
|||
|| (GET_CODE (next_usage_insns) == INSN_LIST
|
||||
&& (INSN_UID (XEXP (next_usage_insns, 0)) < max_uid)))
|
||||
&& need_for_split_p (potential_reload_hard_regs, regno + i)
|
||||
&& split_reg (before_p, regno + i, insn, next_usage_insns))
|
||||
&& split_reg (before_p, regno + i, insn, next_usage_insns, NULL))
|
||||
res = true;
|
||||
return res;
|
||||
}
|
||||
|
@ -6454,7 +6492,7 @@ inherit_in_ebb (rtx_insn *head, rtx_insn *tail)
|
|||
head_p = false;
|
||||
}
|
||||
if (split_reg (false, j, bb_note (curr_bb),
|
||||
next_usage_insns))
|
||||
next_usage_insns, NULL))
|
||||
change_p = true;
|
||||
}
|
||||
usage_insns[j].check = 0;
|
||||
|
|
|
@ -356,6 +356,7 @@ extern bool lra_constrain_insn (rtx_insn *);
|
|||
extern bool lra_constraints (bool);
|
||||
extern void lra_constraints_init (void);
|
||||
extern void lra_constraints_finish (void);
|
||||
extern bool spill_hard_reg_in_range (int, enum reg_class, rtx_insn *, rtx_insn *);
|
||||
extern void lra_inheritance (void);
|
||||
extern bool lra_undo_inheritance (void);
|
||||
|
||||
|
@ -389,8 +390,8 @@ extern void lra_setup_reload_pseudo_preferenced_hard_reg (int, int, int);
|
|||
extern int lra_assignment_iter;
|
||||
extern int lra_assignment_iter_after_spill;
|
||||
extern void lra_setup_reg_renumber (int, int, bool);
|
||||
extern bool lra_assign (void);
|
||||
|
||||
extern bool lra_assign (bool &);
|
||||
extern void lra_split_hard_reg_for (void);
|
||||
|
||||
/* lra-coalesce.c: */
|
||||
|
||||
|
|
69
gcc/lra.c
69
gcc/lra.c
|
@ -2460,38 +2460,53 @@ lra (FILE *f)
|
|||
}
|
||||
if (live_p)
|
||||
lra_clear_live_ranges ();
|
||||
/* We need live ranges for lra_assign -- so build them. But
|
||||
don't remove dead insns or change global live info as we
|
||||
can undo inheritance transformations after inheritance
|
||||
pseudo assigning. */
|
||||
lra_create_live_ranges (true, false);
|
||||
live_p = true;
|
||||
/* If we don't spill non-reload and non-inheritance pseudos,
|
||||
there is no sense to run memory-memory move coalescing.
|
||||
If inheritance pseudos were spilled, the memory-memory
|
||||
moves involving them will be removed by pass undoing
|
||||
inheritance. */
|
||||
if (lra_simple_p)
|
||||
lra_assign ();
|
||||
else
|
||||
bool fails_p;
|
||||
do
|
||||
{
|
||||
bool spill_p = !lra_assign ();
|
||||
|
||||
if (lra_undo_inheritance ())
|
||||
live_p = false;
|
||||
if (spill_p)
|
||||
/* We need live ranges for lra_assign -- so build them.
|
||||
But don't remove dead insns or change global live
|
||||
info as we can undo inheritance transformations after
|
||||
inheritance pseudo assigning. */
|
||||
lra_create_live_ranges (true, false);
|
||||
live_p = true;
|
||||
/* If we don't spill non-reload and non-inheritance
|
||||
pseudos, there is no sense to run memory-memory move
|
||||
coalescing. If inheritance pseudos were spilled, the
|
||||
memory-memory moves involving them will be removed by
|
||||
pass undoing inheritance. */
|
||||
if (lra_simple_p)
|
||||
lra_assign (fails_p);
|
||||
else
|
||||
{
|
||||
if (! live_p)
|
||||
{
|
||||
lra_create_live_ranges (true, true);
|
||||
live_p = true;
|
||||
}
|
||||
if (lra_coalesce ())
|
||||
bool spill_p = !lra_assign (fails_p);
|
||||
|
||||
if (lra_undo_inheritance ())
|
||||
live_p = false;
|
||||
if (spill_p && ! fails_p)
|
||||
{
|
||||
if (! live_p)
|
||||
{
|
||||
lra_create_live_ranges (true, true);
|
||||
live_p = true;
|
||||
}
|
||||
if (lra_coalesce ())
|
||||
live_p = false;
|
||||
}
|
||||
if (! live_p)
|
||||
lra_clear_live_ranges ();
|
||||
}
|
||||
if (fails_p)
|
||||
{
|
||||
/* It is a very rare case. It is the last hope to
|
||||
split a hard regno live range for a reload
|
||||
pseudo. */
|
||||
if (live_p)
|
||||
lra_clear_live_ranges ();
|
||||
live_p = false;
|
||||
lra_split_hard_reg_for ();
|
||||
}
|
||||
if (! live_p)
|
||||
lra_clear_live_ranges ();
|
||||
}
|
||||
while (fails_p);
|
||||
}
|
||||
/* Don't clear optional reloads bitmap until all constraints are
|
||||
satisfied as we need to differ them from regular reloads. */
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
2018-03-09 Vladimir Makarov <vmakarov@redhat.com>
|
||||
|
||||
PR target/83712
|
||||
* gcc.target/arm/pr83712.c: New.
|
||||
|
||||
2018-03-09 Richard Biener <rguenther@suse.de>
|
||||
|
||||
PR tree-optimization/84775
|
||||
|
|
25
gcc/testsuite/gcc.target/arm/pr83712.c
Normal file
25
gcc/testsuite/gcc.target/arm/pr83712.c
Normal file
|
@ -0,0 +1,25 @@
|
|||
/* { dg-do compile } */
/* { dg-options "-mfloat-abi=softfp -mthumb -march=armv5t -O2" } */
/* Testcase for PR target/83712 ("unable to find a register to spill"
   when compiling for thumb1).  The code below creates enough register
   pressure under -mthumb -O2 to exercise the fix; do not simplify.  */
#pragma GCC optimize ("-O2")

struct itrm {
  int std_in;
  int std_out;
  int sock_in;
  int sock_out;
};

struct itrm *alloc(void);
void queue_event(struct itrm *itrm, unsigned char *data, int len);

void handle_trm(int std_in, int std_out, int sock_in, int sock_out, int ctl_in, void *init_string, int init_len)
{
  struct itrm *itrm;
  struct itrm ev = { 0, 80, 24, 0 };
  itrm = alloc();
  itrm->std_in = std_in;
  itrm->std_out = std_out;
  itrm->sock_in = sock_in;
  itrm->sock_out = sock_out;
  queue_event(itrm, (unsigned char *)&ev, sizeof(struct itrm));
}
|
Loading…
Add table
Reference in a new issue