Remove TARGET_foo (ix86_tune == PROCESSOR_foo) macros.
gcc/ChangeLog:

	* config/i386/i386-expand.c (decide_alignment): Use newly named
	macro TARGET_CPU_P.
	* config/i386/i386.c (ix86_decompose_address): Likewise.
	(ix86_address_cost): Likewise.
	(ix86_lea_outperforms): Likewise.
	(ix86_avoid_lea_for_addr): Likewise.
	(ix86_add_stmt_cost): Likewise.
	* config/i386/i386.h (TARGET_*): Remove.
	(TARGET_CPU_P): New macro.
	* config/i386/i386.md: Use newly named macro TARGET_CPU_P.
	* config/i386/x86-tune-sched-atom.c (do_reorder_for_imul): Likewise.
	(swap_top_of_ready_list): Likewise.
	(ix86_atom_sched_reorder): Likewise.
	* config/i386/x86-tune-sched-bd.c (ix86_bd_has_dispatch): Likewise.
	* config/i386/x86-tune-sched.c (ix86_adjust_cost): Likewise.
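The mechanical core of the change is the single token-pasting macro added to i386.h: TARGET_CPU_P (CPU) expands to (ix86_tune == PROCESSOR_ ## CPU), so every former TARGET_foo tuning test becomes TARGET_CPU_P (foo) at its use site. The sketch below is a minimal standalone illustration of that expansion, not GCC code; the enum, its values, and the initializer are stand-ins for GCC's real PROCESSOR_* enumerators and the ix86_tune variable.

/* Standalone sketch of the TARGET_CPU_P mechanism; the enum values and
   the ix86_tune initializer below are illustrative stand-ins, not the
   declarations used in GCC.  */
#include <stdio.h>

enum processor_type { PROCESSOR_GENERIC, PROCESSOR_K6, PROCESSOR_BONNELL };

static enum processor_type ix86_tune = PROCESSOR_BONNELL;

/* Same shape as the macro added to i386.h in this commit:
   TARGET_CPU_P (BONNELL) pastes to (ix86_tune == PROCESSOR_BONNELL).  */
#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)

int
main (void)
{
  if (TARGET_CPU_P (BONNELL))   /* replaces the old TARGET_BONNELL test  */
    puts ("tuning for Bonnell");
  if (!TARGET_CPU_P (K6))       /* replaces the old !TARGET_K6 test  */
    puts ("not tuning for K6");
  return 0;
}

Because the preprocessor pastes the CPU name at each use site, no per-CPU macro has to be kept in sync with enum processor_type anymore, which is what lets the commit delete the 45 TARGET_foo definitions from i386.h.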
parent 1751bec027
commit f23881fcf0

7 changed files with 23 additions and 63 deletions
gcc/config/i386/i386-expand.c

@@ -7055,7 +7055,7 @@ decide_alignment (int align,
   desired_align = GET_MODE_SIZE (move_mode);
   /* PentiumPro has special logic triggering for 8 byte aligned blocks.
      copying whole cacheline at once.  */
-  if (TARGET_PENTIUMPRO
+  if (TARGET_CPU_P (PENTIUMPRO)
       && (alg == rep_prefix_4_byte || alg == rep_prefix_1_byte))
     desired_align = 8;
gcc/config/i386/i386.c

@@ -10179,7 +10179,7 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
      Avoid this by transforming to [%esi+0].
      Reload calls address legitimization without cfun defined, so we need
      to test cfun for being non-NULL.  */
-  if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
+  if (TARGET_CPU_P (K6) && cfun && optimize_function_for_speed_p (cfun)
       && base_reg && !index_reg && !disp
       && REGNO (base_reg) == SI_REG)
     disp = const0_rtx;
@@ -10257,7 +10257,7 @@ ix86_address_cost (rtx x, machine_mode, addr_space_t, bool)
      memory address, but I don't have AMD-K6 machine handy to check this
      theory.  */

-  if (TARGET_K6
+  if (TARGET_CPU_P (K6)
       && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
           || (parts.disp && !parts.base && parts.index && parts.scale != 1)
           || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
@@ -14940,7 +14940,7 @@ ix86_lea_outperforms (rtx_insn *insn, unsigned int regno0, unsigned int regno1,
   /* For Atom processors newer than Bonnell, if using a 2-source or
      3-source LEA for non-destructive destination purposes, or due to
      wanting ability to use SCALE, the use of LEA is justified.  */
-  if (!TARGET_BONNELL)
+  if (!TARGET_CPU_P (BONNELL))
     {
       if (has_scale)
         return true;
@@ -15082,7 +15082,7 @@ ix86_avoid_lea_for_addr (rtx_insn *insn, rtx operands[])
      than lea for most processors.  For the processors like BONNELL, if
      the destination register of LEA holds an actual address which will
      be used soon, LEA is better and otherwise ADD is better.  */
-  if (!TARGET_BONNELL
+  if (!TARGET_CPU_P (BONNELL)
       && parts.scale == 1
       && (!parts.disp || parts.disp == const0_rtx)
       && (regno0 == regno1 || regno0 == regno2))
@@ -22387,7 +22387,7 @@ ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count,
     stmt_cost = ix86_builtin_vectorization_cost (kind, vectype, misalign);

   /* Penalize DFmode vector operations for Bonnell.  */
-  if (TARGET_BONNELL && kind == vector_stmt
+  if (TARGET_CPU_P (BONNELL) && kind == vector_stmt
       && vectype && GET_MODE_INNER (TYPE_MODE (vectype)) == DFmode)
     stmt_cost *= 5;  /* FIXME: The value here is arbitrary.  */
@@ -22403,8 +22403,10 @@ ix86_add_stmt_cost (class vec_info *vinfo, void *data, int count,
   /* We need to multiply all vector stmt cost by 1.7 (estimated cost)
      for Silvermont as it has out of order integer pipeline and can execute
      2 scalar instruction per tick, but has in order SIMD pipeline.  */
-  if ((TARGET_SILVERMONT || TARGET_GOLDMONT || TARGET_GOLDMONT_PLUS
-       || TARGET_TREMONT || TARGET_INTEL) && stmt_info && stmt_info->stmt)
+  if ((TARGET_CPU_P (SILVERMONT) || TARGET_CPU_P (GOLDMONT)
+       || TARGET_CPU_P (GOLDMONT_PLUS) || TARGET_CPU_P (TREMONT)
+       || TARGET_CPU_P (INTEL))
+      && stmt_info && stmt_info->stmt)
     {
       tree lhs_op = gimple_get_lhs (stmt_info->stmt);
       if (lhs_op && TREE_CODE (TREE_TYPE (lhs_op)) == INTEGER_TYPE)
gcc/config/i386/i386.h

@@ -263,51 +263,7 @@ extern const struct processor_costs ix86_size_cost;
 #define HAS_LONG_COND_BRANCH 1
 #define HAS_LONG_UNCOND_BRANCH 1

-#define TARGET_386 (ix86_tune == PROCESSOR_I386)
-#define TARGET_486 (ix86_tune == PROCESSOR_I486)
-#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
-#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
-#define TARGET_GEODE (ix86_tune == PROCESSOR_GEODE)
-#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
-#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
-#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
-#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
-#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
-#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
-#define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2)
-#define TARGET_NEHALEM (ix86_tune == PROCESSOR_NEHALEM)
-#define TARGET_SANDYBRIDGE (ix86_tune == PROCESSOR_SANDYBRIDGE)
-#define TARGET_HASWELL (ix86_tune == PROCESSOR_HASWELL)
-#define TARGET_BONNELL (ix86_tune == PROCESSOR_BONNELL)
-#define TARGET_SILVERMONT (ix86_tune == PROCESSOR_SILVERMONT)
-#define TARGET_GOLDMONT (ix86_tune == PROCESSOR_GOLDMONT)
-#define TARGET_GOLDMONT_PLUS (ix86_tune == PROCESSOR_GOLDMONT_PLUS)
-#define TARGET_TREMONT (ix86_tune == PROCESSOR_TREMONT)
-#define TARGET_KNL (ix86_tune == PROCESSOR_KNL)
-#define TARGET_KNM (ix86_tune == PROCESSOR_KNM)
-#define TARGET_SKYLAKE (ix86_tune == PROCESSOR_SKYLAKE)
-#define TARGET_SKYLAKE_AVX512 (ix86_tune == PROCESSOR_SKYLAKE_AVX512)
-#define TARGET_CANNONLAKE (ix86_tune == PROCESSOR_CANNONLAKE)
-#define TARGET_ICELAKE_CLIENT (ix86_tune == PROCESSOR_ICELAKE_CLIENT)
-#define TARGET_ICELAKE_SERVER (ix86_tune == PROCESSOR_ICELAKE_SERVER)
-#define TARGET_CASCADELAKE (ix86_tune == PROCESSOR_CASCADELAKE)
-#define TARGET_TIGERLAKE (ix86_tune == PROCESSOR_TIGERLAKE)
-#define TARGET_COOPERLAKE (ix86_tune == PROCESSOR_COOPERLAKE)
-#define TARGET_SAPPHIRERAPIDS (ix86_tune == PROCESSOR_SAPPHIRERAPIDS)
-#define TARGET_ALDERLAKE (ix86_tune == PROCESSOR_ALDERLAKE)
-#define TARGET_ROCKETLAKE (ix86_tune == PROCESSOR_ROCKETLAKE)
-#define TARGET_INTEL (ix86_tune == PROCESSOR_INTEL)
-#define TARGET_GENERIC (ix86_tune == PROCESSOR_GENERIC)
-#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
-#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
-#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
-#define TARGET_BDVER3 (ix86_tune == PROCESSOR_BDVER3)
-#define TARGET_BDVER4 (ix86_tune == PROCESSOR_BDVER4)
-#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
-#define TARGET_BTVER2 (ix86_tune == PROCESSOR_BTVER2)
-#define TARGET_ZNVER1 (ix86_tune == PROCESSOR_ZNVER1)
-#define TARGET_ZNVER2 (ix86_tune == PROCESSOR_ZNVER2)
-#define TARGET_ZNVER3 (ix86_tune == PROCESSOR_ZNVER3)
+#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)

 /* Feature tests against the various tunings.  */
 enum ix86_tune_indices {
gcc/config/i386/i386.md

@@ -14302,13 +14302,13 @@
     return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
   else if (optimize_function_for_size_p (cfun))
     ;
-  else if (TARGET_GENERIC)
+  else if (TARGET_CPU_P (GENERIC))
     /* tzcnt expands to 'rep bsf' and we can use it even if !TARGET_BMI.  */
     return "rep%; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";

   return "bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
 }
-  "(TARGET_BMI || TARGET_GENERIC)
+  "(TARGET_BMI || TARGET_CPU_P (GENERIC))
    && TARGET_AVOID_FALSE_DEP_FOR_BMI && epilogue_completed
    && optimize_function_for_speed_p (cfun)
    && !reg_mentioned_p (operands[0], operands[1])"
@@ -14324,7 +14324,7 @@
      (if_then_else
        (ior (match_test "TARGET_BMI")
             (and (not (match_test "optimize_function_for_size_p (cfun)"))
-                 (match_test "TARGET_GENERIC")))
+                 (match_test "TARGET_CPU_P (GENERIC)")))
        (const_string "1")
        (const_string "0")))
   (set_attr "mode" "<MODE>")])
@@ -14343,7 +14343,7 @@
 {
   if (TARGET_BMI)
     return "tzcnt{<imodesuffix>}\t{%1, %0|%0, %1}";
-  else if (TARGET_GENERIC)
+  else if (TARGET_CPU_P (GENERIC))
     /* tzcnt expands to 'rep bsf' and we can use it even if !TARGET_BMI.  */
     return "rep%; bsf{<imodesuffix>}\t{%1, %0|%0, %1}";
   else
gcc/config/i386/x86-tune-sched-atom.c

@@ -51,7 +51,7 @@ do_reorder_for_imul (rtx_insn **ready, int n_ready)
   int index = -1;
   int i;

-  if (!TARGET_BONNELL)
+  if (!TARGET_CPU_P (BONNELL))
     return index;

   /* Check that IMUL instruction is on the top of ready list.  */
@@ -131,7 +131,7 @@ swap_top_of_ready_list (rtx_insn **ready, int n_ready)
   int clock2 = -1;
 #define INSN_TICK(INSN) (HID (INSN)->tick)

-  if (!TARGET_SILVERMONT && !TARGET_INTEL)
+  if (!TARGET_CPU_P (SILVERMONT) && !TARGET_CPU_P (INTEL))
     return false;

   if (!NONDEBUG_INSN_P (top))
@@ -204,7 +204,8 @@ ix86_atom_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
   issue_rate = ix86_issue_rate ();

   /* Do reodering for BONNELL/SILVERMONT only.  */
-  if (!TARGET_BONNELL && !TARGET_SILVERMONT && !TARGET_INTEL)
+  if (!TARGET_CPU_P (BONNELL) && !TARGET_CPU_P (SILVERMONT)
+      && !TARGET_CPU_P (INTEL))
     return issue_rate;

   /* Nothing to do if ready list contains only 1 instruction.  */
gcc/config/i386/x86-tune-sched-bd.c

@@ -800,8 +800,9 @@ bool
 ix86_bd_has_dispatch (rtx_insn *insn, int action)
 {
   /* Current implementation of dispatch scheduler models buldozer only.  */
-  if ((TARGET_BDVER1 || TARGET_BDVER2 || TARGET_BDVER3
-       || TARGET_BDVER4) && flag_dispatch_scheduler)
+  if ((TARGET_CPU_P (BDVER1) || TARGET_CPU_P (BDVER2)
+       || TARGET_CPU_P (BDVER3) || TARGET_CPU_P (BDVER4))
+      && flag_dispatch_scheduler)
     switch (action)
       {
       default:
gcc/config/i386/x86-tune-sched.c

@@ -386,7 +386,7 @@ ix86_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
       if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
         loadcost = 3;
       else
-        loadcost = TARGET_ATHLON ? 2 : 0;
+        loadcost = TARGET_CPU_P (ATHLON) ? 2 : 0;

       if (cost >= loadcost)
         cost -= loadcost;