	* config/aarch64/aarch64-protos.h
	(aarch64_gen_atomic_cas): Declare.
	* config/aarch64/aarch64.c (aarch64_expand_compare_and_swap):
	Choose appropriate instruction pattern for the target.
	(aarch64_gen_atomic_cas): New.
	* config/aarch64/atomics.md (UNSPECV_ATOMIC_CAS): New.
	(atomic_compare_and_swap<mode>_1): Rename to
	aarch64_compare_and_swap<mode>.  Fix some indentation.
	(aarch64_compare_and_swap<mode>_lse): New.
	(aarch64_atomic_cas<mode>): New.

From-SVN: r226858
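
A minimal usage sketch (not part of this commit), assuming GCC's __atomic
builtins: a strong compare-and-swap like the one below goes through
aarch64_expand_compare_and_swap, which now selects the single-instruction
LSE path (aarch64_compare_and_swap<mode>_lse) when TARGET_LSE is set,
e.g. with -march=armv8.1-a, and otherwise falls back to the exclusive
load/store pattern that is split after reload.

    #include <stdbool.h>
    #include <stdint.h>

    /* Strong CAS; expands through the compare-and-swap patterns below.  */
    bool
    try_update (uint32_t *p, uint32_t expected, uint32_t desired)
    {
      return __atomic_compare_exchange_n (p, &expected, desired,
					  false /* strong */,
					  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
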
gcc/ChangeLog

@@ -1,3 +1,16 @@
2015-08-13  Matthew Wahab  <matthew.wahab@arm.com>

	* config/aarch64/aarch64-protos.h
	(aarch64_gen_atomic_cas): Declare.
	* config/aarch64/aarch64.c (aarch64_expand_compare_and_swap):
	Choose appropriate instruction pattern for the target.
	(aarch64_gen_atomic_cas): New.
	* config/aarch64/atomics.md (UNSPECV_ATOMIC_CAS): New.
	(atomic_compare_and_swap<mode>_1): Rename to
	aarch64_compare_and_swap<mode>.  Fix some indentation.
	(aarch64_compare_and_swap<mode>_lse): New.
	(aarch64_atomic_cas<mode>): New.

2015-08-13  Matthew Wahab  <matthew.wahab@arm.com>

	* config/aarch64/aarch64.h (AARCH64_ISA_LSE): New.

gcc/config/aarch64/aarch64-protos.h

@@ -362,6 +362,7 @@ rtx aarch64_load_tp (rtx);
void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);
void aarch64_gen_atomic_cas (rtx, rtx, rtx, rtx, rtx);
void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
bool aarch64_gen_adjusted_ldpstp (rtx *, bool, enum machine_mode, RTX_CODE);

gcc/config/aarch64/aarch64.c

@@ -10754,7 +10754,23 @@ aarch64_expand_compare_and_swap (rtx operands[])
{
  rtx bval, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
  machine_mode mode, cmp_mode;
  rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
  typedef rtx (*gen_cas_fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
  int idx;
  gen_cas_fn gen;
  const gen_cas_fn split_cas[] =
  {
    gen_aarch64_compare_and_swapqi,
    gen_aarch64_compare_and_swaphi,
    gen_aarch64_compare_and_swapsi,
    gen_aarch64_compare_and_swapdi
  };
  const gen_cas_fn atomic_cas[] =
  {
    gen_aarch64_compare_and_swapqi_lse,
    gen_aarch64_compare_and_swaphi_lse,
    gen_aarch64_compare_and_swapsi_lse,
    gen_aarch64_compare_and_swapdi_lse
  };

  bval = operands[0];
  rval = operands[1];
@@ -10799,13 +10815,17 @@ aarch64_expand_compare_and_swap (rtx operands[])
  switch (mode)
    {
    case QImode: gen = gen_atomic_compare_and_swapqi_1; break;
    case HImode: gen = gen_atomic_compare_and_swaphi_1; break;
    case SImode: gen = gen_atomic_compare_and_swapsi_1; break;
    case DImode: gen = gen_atomic_compare_and_swapdi_1; break;
    case QImode: idx = 0; break;
    case HImode: idx = 1; break;
    case SImode: idx = 2; break;
    case DImode: idx = 3; break;
    default:
      gcc_unreachable ();
    }
  if (TARGET_LSE)
    gen = atomic_cas[idx];
  else
    gen = split_cas[idx];

  emit_insn (gen (rval, mem, oldval, newval, is_weak, mod_s, mod_f));
@@ -10834,6 +10854,42 @@ aarch64_emit_post_barrier (enum memmodel model)
    }
}

/* Emit an atomic compare-and-swap operation.  RVAL is the destination register
   for the data in memory.  EXPECTED is the value expected to be in memory.
   DESIRED is the value to store to memory.  MEM is the memory location.  MODEL
   is the memory ordering to use.  */

void
aarch64_gen_atomic_cas (rtx rval, rtx mem,
			rtx expected, rtx desired,
			rtx model)
{
  rtx (*gen) (rtx, rtx, rtx, rtx);
  machine_mode mode;

  mode = GET_MODE (mem);

  switch (mode)
    {
    case QImode: gen = gen_aarch64_atomic_casqi; break;
    case HImode: gen = gen_aarch64_atomic_cashi; break;
    case SImode: gen = gen_aarch64_atomic_cassi; break;
    case DImode: gen = gen_aarch64_atomic_casdi; break;
    default:
      gcc_unreachable ();
    }

  /* Move the expected value into the CAS destination register.  */
  emit_insn (gen_rtx_SET (rval, expected));

  /* Emit the CAS.  */
  emit_insn (gen (rval, mem, desired, model));

  /* Compare the expected value with the value loaded by the CAS, to establish
     whether the swap was made.  */
  aarch64_gen_compare_reg (EQ, rval, expected);
}

/* Split a compare and swap pattern.  */

void
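
A small illustrative model (not GCC code) of the three-step sequence that
aarch64_gen_atomic_cas emits above: the CAS instruction both supplies the
comparison value in the destination register and overwrites that register
with the value read from memory, so a separate compare against the original
expected value is needed to recover the "swap succeeded" flag.

    /* Hypothetical C model of the mov/cas/cmp sequence;
       __sync_val_compare_and_swap returns the value that was in *mem,
       just as the CAS destination register does.  */
    static inline int
    cas_model (long *mem, long expected, long desired)
    {
      long rval = expected;                                     /* mov */
      rval = __sync_val_compare_and_swap (mem, rval, desired);  /* cas */
      return rval == expected;                                  /* cmp */
    }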

gcc/config/aarch64/atomics.md

@@ -26,6 +26,7 @@
    UNSPECV_STL			; Represent an atomic store or store-release.
    UNSPECV_ATOMIC_CMPSW	; Represent an atomic compare swap.
    UNSPECV_ATOMIC_EXCHG	; Represent an atomic exchange.
    UNSPECV_ATOMIC_CAS		; Represent an atomic CAS.
    UNSPECV_ATOMIC_OP		; Represent an atomic operation.
])
@@ -45,10 +46,10 @@
  }
)

(define_insn_and_split "atomic_compare_and_swap<mode>_1"
(define_insn_and_split "aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
@@ -57,6 +58,30 @@
       (match_operand:SHORT 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    DONE;
  }
)

(define_insn_and_split "aarch64_compare_and_swap<mode>"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rI")	;; expect
       (match_operand:GPI 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
@@ -70,26 +95,53 @@
  }
)

(define_insn_and_split "atomic_compare_and_swap<mode>_1"
(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_operand:SI 0 "register_operand" "=&r")		;; val out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rI")	;; expect
       (match_operand:GPI 3 "register_operand" "r")		;; desired
    (unspec_volatile:SHORT
      [(match_operand:SI 2 "aarch64_plus_operand" "rI")		;; expected
       (match_operand:SHORT 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))
   (clobber (match_scratch:SI 7 "=&r"))]
  ""
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_split_compare_and_swap (operands);
    aarch64_gen_atomic_cas (operands[0], operands[1],
			    operands[2], operands[3],
			    operands[5]);
    DONE;
  }
)

(define_insn_and_split "aarch64_compare_and_swap<mode>_lse"
  [(set (reg:CC CC_REGNUM)					;; bool out
    (unspec_volatile:CC [(const_int 0)] UNSPECV_ATOMIC_CMPSW))
   (set (match_operand:GPI 0 "register_operand" "=&r")		;; val out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_operand:GPI 2 "aarch64_plus_operand" "rI")	;; expect
       (match_operand:GPI 3 "register_operand" "r")		;; desired
       (match_operand:SI 4 "const_int_operand")			;; is_weak
       (match_operand:SI 5 "const_int_operand")			;; mod_s
       (match_operand:SI 6 "const_int_operand")]		;; mod_f
      UNSPECV_ATOMIC_CMPSW))]
  "TARGET_LSE"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    aarch64_gen_atomic_cas (operands[0], operands[1],
			    operands[2], operands[3],
			    operands[5]);
    DONE;
  }
)
@@ -370,3 +422,54 @@
    return "dmb\\tish";
  }
)

;; ARMv8.1 LSE instructions.

;; Atomic compare-and-swap: HI and smaller modes.

(define_insn "aarch64_atomic_cas<mode>"
  [(set (match_operand:SI 0 "register_operand" "+&r")		;; out
    (zero_extend:SI
      (match_operand:SHORT 1 "aarch64_sync_memory_operand" "+Q"))) ;; memory.
   (set (match_dup 1)
    (unspec_volatile:SHORT
      [(match_dup 0)
       (match_operand:SHORT 2 "register_operand" "r")		;; value.
       (match_operand:SI 3 "const_int_operand" "")]		;; model.
      UNSPECV_ATOMIC_CAS))]
  "TARGET_LSE && reload_completed"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})

;; Atomic compare-and-swap: SI and larger modes.

(define_insn "aarch64_atomic_cas<mode>"
  [(set (match_operand:GPI 0 "register_operand" "+&r")		;; out
    (match_operand:GPI 1 "aarch64_sync_memory_operand" "+Q"))	;; memory.
   (set (match_dup 1)
    (unspec_volatile:GPI
      [(match_dup 0)
       (match_operand:GPI 2 "register_operand" "r")		;; value.
       (match_operand:SI 3 "const_int_operand" "")]		;; model.
      UNSPECV_ATOMIC_CAS))]
  "TARGET_LSE && reload_completed"
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
  if (is_mm_relaxed (model))
    return "cas<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_acquire (model) || is_mm_consume (model))
    return "casa<atomic_sfx>\t%<w>0, %<w>2, %1";
  else if (is_mm_release (model))
    return "casl<atomic_sfx>\t%<w>0, %<w>2, %1";
  else
    return "casal<atomic_sfx>\t%<w>0, %<w>2, %1";
})
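
For illustration only, and not part of this commit: the memory-model
dispatch in the two patterns above maps the C11 orderings onto the four
ARMv8.1 CAS variants (relaxed -> cas, consume/acquire -> casa,
release -> casl, everything else -> casal), with the size suffix and
register width supplied by <atomic_sfx> and <w>.  A sketch using GCC's
__atomic builtins, where the success ordering of each call determines
which variant the LSE path would emit:

    #include <stdint.h>

    void
    cas_orderings (uint32_t *p, uint32_t expected, uint32_t desired)
    {
      uint32_t e;

      e = expected;
      __atomic_compare_exchange_n (p, &e, desired, 0,
				   __ATOMIC_RELAXED, __ATOMIC_RELAXED); /* cas   */
      e = expected;
      __atomic_compare_exchange_n (p, &e, desired, 0,
				   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); /* casa  */
      e = expected;
      __atomic_compare_exchange_n (p, &e, desired, 0,
				   __ATOMIC_RELEASE, __ATOMIC_RELAXED); /* casl  */
      e = expected;
      __atomic_compare_exchange_n (p, &e, desired, 0,
				   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); /* casal */
    }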