aarch64: Reimplement vrshrn* intrinsics using builtins
This patch moves the vrshrn* intrinsics to builtins away from inline asm. It's a bit of code, but it's very similar to the recent vshrn* reimplementation except that we use an unspec rather than standard RTL codes for the functionality. gcc/ChangeLog: * config/aarch64/aarch64-simd-builtins.def (rshrn, rshrn2): Define builtins. * config/aarch64/aarch64-simd.md (aarch64_rshrn<mode>_insn_le): Define. (aarch64_rshrn<mode>_insn_be): Likewise. (aarch64_rshrn<mode>): Likewise. (aarch64_rshrn2<mode>_insn_le): Likewise. (aarch64_rshrn2<mode>_insn_be): Likewise. (aarch64_rshrn2<mode>): Likewise. * config/aarch64/aarch64.md (unspec): Add UNSPEC_RSHRN. * config/aarch64/arm_neon.h (vrshrn_high_n_s16): Reimplement using builtin. (vrshrn_high_n_s32): Likewise. (vrshrn_high_n_s64): Likewise. (vrshrn_high_n_u16): Likewise. (vrshrn_high_n_u32): Likewise. (vrshrn_high_n_u64): Likewise. (vrshrn_n_s16): Likewise. (vrshrn_n_s32): Likewise. (vrshrn_n_s64): Likewise. (vrshrn_n_u16): Likewise. (vrshrn_n_u32): Likewise. (vrshrn_n_u64): Likewise. gcc/testsuite/ChangeLog: * gcc.target/aarch64/narrow_high-intrinsics.c: Adjust rshrn2 assembly scan.
This commit is contained in:
parent
11d4ec5d45
commit
850e5878f8
5 changed files with 171 additions and 151 deletions
|
@ -214,6 +214,12 @@
|
|||
/* Implemented by aarch64_shrn2<mode>. */
|
||||
BUILTIN_VQN (SHIFTACC, shrn2, 0, NONE)
|
||||
|
||||
/* Implemented by aarch64_rshrn<mode>.  */
|
||||
BUILTIN_VQN (SHIFTIMM, rshrn, 0, NONE)
|
||||
|
||||
/* Implemented by aarch64_rshrn2<mode>. */
|
||||
BUILTIN_VQN (SHIFTACC, rshrn2, 0, NONE)
|
||||
|
||||
/* Implemented by aarch64_<su>mlsl<mode>. */
|
||||
BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
|
||||
BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
|
||||
|
|
|
@ -1752,6 +1752,54 @@
|
|||
}
|
||||
)
|
||||
|
||||
;; Little-endian RSHRN: the rounding-narrowed result (UNSPEC_RSHRN of
;; operand 1 by immediate vector operand 2) occupies the low half of the
;; wide destination; operand 3 (required to be zero) fills the high half.
(define_insn "aarch64_rshrn<mode>_insn_le"
|
||||
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
|
||||
(vec_concat:<VNARROWQ2>
|
||||
(unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
|
||||
(match_operand:VQN 2 "aarch64_simd_rshift_imm")
|
||||
] UNSPEC_RSHRN)
|
||||
(match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
|
||||
"TARGET_SIMD && !BYTES_BIG_ENDIAN"
|
||||
"rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
|
||||
[(set_attr "type" "neon_shift_imm_narrow_q")]
|
||||
)
|
||||
|
||||
;; Big-endian RSHRN: identical to the _le pattern except the vec_concat
;; operands are swapped (zero first, narrowed result second) to reflect
;; big-endian lane ordering; the emitted instruction is the same.
(define_insn "aarch64_rshrn<mode>_insn_be"
|
||||
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
|
||||
(vec_concat:<VNARROWQ2>
|
||||
(match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
|
||||
(unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
|
||||
(match_operand:VQN 2 "aarch64_simd_rshift_imm")
|
||||
] UNSPEC_RSHRN)))]
|
||||
"TARGET_SIMD && BYTES_BIG_ENDIAN"
|
||||
"rshrn\\t%0.<Vntype>, %1.<Vtype>, %2"
|
||||
[(set_attr "type" "neon_shift_imm_narrow_q")]
|
||||
)
|
||||
|
||||
;; Expander for the rshrn builtin.  Duplicates the scalar shift amount
;; into a constant vector, emits the endian-appropriate insn into a
;; wide (VNARROWQ2) temporary, then extracts the narrow low part with a
;; subreg so the intrinsic's narrow result type is honoured.
(define_expand "aarch64_rshrn<mode>"
|
||||
[(match_operand:<VNARROWQ> 0 "register_operand")
|
||||
(match_operand:VQN 1 "register_operand")
|
||||
(match_operand:SI 2 "aarch64_simd_shift_imm_offset_<vn_mode>")]
|
||||
"TARGET_SIMD"
|
||||
{
|
||||
operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
|
||||
INTVAL (operands[2]));
|
||||
rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
|
||||
if (BYTES_BIG_ENDIAN)
|
||||
emit_insn (gen_aarch64_rshrn<mode>_insn_be (tmp, operands[1],
|
||||
operands[2], CONST0_RTX (<VNARROWQ>mode)));
|
||||
else
|
||||
emit_insn (gen_aarch64_rshrn<mode>_insn_le (tmp, operands[1],
|
||||
operands[2], CONST0_RTX (<VNARROWQ>mode)));
|
||||
|
|
||||
/* The intrinsic expects a narrow result, so emit a subreg that will get
|
||||
optimized away as appropriate. */
|
||||
emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
|
||||
<VNARROWQ2>mode));
|
||||
DONE;
|
||||
}
|
||||
)
|
||||
|
||||
(define_insn "aarch64_shrn2<mode>_insn_le"
|
||||
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
|
||||
(vec_concat:<VNARROWQ2>
|
||||
|
@ -1795,6 +1843,46 @@
|
|||
}
|
||||
)
|
||||
|
||||
;; Little-endian RSHRN2: operand 1 (tied to the destination, "0") keeps
;; the existing low half; the rounding-narrowed result of operand 2 by
;; immediate vector operand 3 is written to the high half.
(define_insn "aarch64_rshrn2<mode>_insn_le"
|
||||
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
|
||||
(vec_concat:<VNARROWQ2>
|
||||
(match_operand:<VNARROWQ> 1 "register_operand" "0")
|
||||
(unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
|
||||
(match_operand:VQN 3 "aarch64_simd_rshift_imm")] UNSPEC_RSHRN)))]
|
||||
"TARGET_SIMD && !BYTES_BIG_ENDIAN"
|
||||
"rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
|
||||
[(set_attr "type" "neon_shift_imm_narrow_q")]
|
||||
)
|
||||
|
||||
;; Big-endian RSHRN2: same as the _le pattern with the vec_concat
;; operands swapped for big-endian lane ordering; same instruction.
(define_insn "aarch64_rshrn2<mode>_insn_be"
|
||||
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
|
||||
(vec_concat:<VNARROWQ2>
|
||||
(unspec:<VNARROWQ> [(match_operand:VQN 2 "register_operand" "w")
|
||||
(match_operand:VQN 3 "aarch64_simd_rshift_imm")] UNSPEC_RSHRN)
|
||||
(match_operand:<VNARROWQ> 1 "register_operand" "0")))]
|
||||
"TARGET_SIMD && BYTES_BIG_ENDIAN"
|
||||
"rshrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
|
||||
[(set_attr "type" "neon_shift_imm_narrow_q")]
|
||||
)
|
||||
|
||||
;; Expander for the rshrn2 builtin: dup the scalar shift count into a
;; constant vector and dispatch to the endian-appropriate insn.  No
;; subreg dance is needed here since the result is already wide.
(define_expand "aarch64_rshrn2<mode>"
|
||||
[(match_operand:<VNARROWQ2> 0 "register_operand")
|
||||
(match_operand:<VNARROWQ> 1 "register_operand")
|
||||
(match_operand:VQN 2 "register_operand")
|
||||
(match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
|
||||
"TARGET_SIMD"
|
||||
{
|
||||
operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
|
||||
INTVAL (operands[3]));
|
||||
if (BYTES_BIG_ENDIAN)
|
||||
emit_insn (gen_aarch64_rshrn2<mode>_insn_be (operands[0], operands[1],
|
||||
operands[2], operands[3]));
|
||||
else
|
||||
emit_insn (gen_aarch64_rshrn2<mode>_insn_le (operands[0], operands[1],
|
||||
operands[2], operands[3]));
|
||||
DONE;
|
||||
}
|
||||
)
|
||||
|
||||
;; For quads.
|
||||
|
||||
|
|
|
@ -230,6 +230,7 @@
|
|||
UNSPEC_SSP_SYSREG
|
||||
UNSPEC_SP_SET
|
||||
UNSPEC_SP_TEST
|
||||
UNSPEC_RSHRN
|
||||
UNSPEC_RSQRT
|
||||
UNSPEC_RSQRTE
|
||||
UNSPEC_RSQRTS
|
||||
|
|
|
@ -9311,167 +9311,92 @@ vqshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
|
|||
return __builtin_aarch64_sqshrun2_nv2di_uuss (__a, __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_s16(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int16x8_t b_ = (b); \
|
||||
int8x8_t a_ = (a); \
|
||||
int8x16_t result = vcombine_s8 \
|
||||
(a_, vcreate_s8 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow, high part: narrows each lane of __b by a
   rounding right shift of __c and packs the results above __a (replaces
   the deleted "rshrn2" inline-asm macro above with a builtin).  */
__extension__ extern __inline int8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
|
||||
{
|
||||
return __builtin_aarch64_rshrn2v8hi (__a, __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_s32(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int32x4_t b_ = (b); \
|
||||
int16x4_t a_ = (a); \
|
||||
int16x8_t result = vcombine_s16 \
|
||||
(a_, vcreate_s16 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow, high part (32->16 bit lanes); see
   vrshrn_high_n_s16.  Was the "rshrn2" inline-asm macro above.  */
__extension__ extern __inline int16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
|
||||
{
|
||||
return __builtin_aarch64_rshrn2v4si (__a, __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_s64(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int64x2_t b_ = (b); \
|
||||
int32x2_t a_ = (a); \
|
||||
int32x4_t result = vcombine_s32 \
|
||||
(a_, vcreate_s32 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow, high part (64->32 bit lanes); see
   vrshrn_high_n_s16.  Was the "rshrn2" inline-asm macro above.  */
__extension__ extern __inline int32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
|
||||
{
|
||||
return __builtin_aarch64_rshrn2v2di (__a, __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_u16(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint16x8_t b_ = (b); \
|
||||
uint8x8_t a_ = (a); \
|
||||
uint8x16_t result = vcombine_u8 \
|
||||
(a_, vcreate_u8 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_high_n_s16.  The builtin is declared on
   signed types, so the arguments and result are cast; the underlying
   "rshrn2" operation is sign-agnostic.  */
__extension__ extern __inline uint8x16_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
|
||||
{
|
||||
return (uint8x16_t) __builtin_aarch64_rshrn2v8hi ((int8x8_t) __a,
|
||||
(int16x8_t) __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_u32(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint32x4_t b_ = (b); \
|
||||
uint16x4_t a_ = (a); \
|
||||
uint16x8_t result = vcombine_u16 \
|
||||
(a_, vcreate_u16 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_high_n_s32; casts through the signed
   builtin (the "rshrn2" operation is sign-agnostic).  */
__extension__ extern __inline uint16x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
|
||||
{
|
||||
return (uint16x8_t) __builtin_aarch64_rshrn2v4si ((int16x4_t) __a,
|
||||
(int32x4_t) __b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_high_n_u64(a, b, c) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint64x2_t b_ = (b); \
|
||||
uint32x2_t a_ = (a); \
|
||||
uint32x4_t result = vcombine_u32 \
|
||||
(a_, vcreate_u32 \
|
||||
(__AARCH64_UINT64_C (0x0))); \
|
||||
__asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
|
||||
: "+w"(result) \
|
||||
: "w"(b_), "i"(c) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_high_n_s64; casts through the signed
   builtin (the "rshrn2" operation is sign-agnostic).  */
__extension__ extern __inline uint32x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
|
||||
{
|
||||
return (uint32x4_t) __builtin_aarch64_rshrn2v2di ((int32x2_t)__a,
|
||||
(int64x2_t)__b, __c);
|
||||
}
|
||||
|
||||
#define vrshrn_n_s16(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int16x8_t a_ = (a); \
|
||||
int8x8_t result; \
|
||||
__asm__ ("rshrn %0.8b,%1.8h,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow: narrows each 16-bit lane of __a by a
   rounding right shift of __b (replaces the deleted "rshrn" inline-asm
   macro above with a builtin).  */
__extension__ extern __inline int8x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_s16 (int16x8_t __a, const int __b)
|
||||
{
|
||||
return __builtin_aarch64_rshrnv8hi (__a, __b);
|
||||
}
|
||||
|
||||
#define vrshrn_n_s32(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int32x4_t a_ = (a); \
|
||||
int16x4_t result; \
|
||||
__asm__ ("rshrn %0.4h,%1.4s,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow (32->16 bit lanes); see vrshrn_n_s16.  */
__extension__ extern __inline int16x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_s32 (int32x4_t __a, const int __b)
|
||||
{
|
||||
return __builtin_aarch64_rshrnv4si (__a, __b);
|
||||
}
|
||||
|
||||
#define vrshrn_n_s64(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
int64x2_t a_ = (a); \
|
||||
int32x2_t result; \
|
||||
__asm__ ("rshrn %0.2s,%1.2d,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Rounding shift right narrow (64->32 bit lanes); see vrshrn_n_s16.  */
__extension__ extern __inline int32x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_s64 (int64x2_t __a, const int __b)
|
||||
{
|
||||
return __builtin_aarch64_rshrnv2di (__a, __b);
|
||||
}
|
||||
|
||||
#define vrshrn_n_u16(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint16x8_t a_ = (a); \
|
||||
uint8x8_t result; \
|
||||
__asm__ ("rshrn %0.8b,%1.8h,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_n_s16; casts through the signed builtin
   (the "rshrn" operation is sign-agnostic).  */
__extension__ extern __inline uint8x8_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_u16 (uint16x8_t __a, const int __b)
|
||||
{
|
||||
return (uint8x8_t) __builtin_aarch64_rshrnv8hi ((int16x8_t) __a, __b);
|
||||
}
|
||||
|
||||
#define vrshrn_n_u32(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint32x4_t a_ = (a); \
|
||||
uint16x4_t result; \
|
||||
__asm__ ("rshrn %0.4h,%1.4s,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_n_s32; casts through the signed builtin
   (the "rshrn" operation is sign-agnostic).  */
__extension__ extern __inline uint16x4_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_u32 (uint32x4_t __a, const int __b)
|
||||
{
|
||||
return (uint16x4_t) __builtin_aarch64_rshrnv4si ((int32x4_t) __a, __b);
|
||||
}
|
||||
|
||||
#define vrshrn_n_u64(a, b) \
|
||||
__extension__ \
|
||||
({ \
|
||||
uint64x2_t a_ = (a); \
|
||||
uint32x2_t result; \
|
||||
__asm__ ("rshrn %0.2s,%1.2d,%2" \
|
||||
: "=w"(result) \
|
||||
: "w"(a_), "i"(b) \
|
||||
: /* No clobbers */); \
|
||||
result; \
|
||||
})
|
||||
/* Unsigned variant of vrshrn_n_s64; casts through the signed builtin
   (the "rshrn" operation is sign-agnostic).  */
__extension__ extern __inline uint32x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
vrshrn_n_u64 (uint64x2_t __a, const int __b)
|
||||
{
|
||||
return (uint32x2_t) __builtin_aarch64_rshrnv2di ((int64x2_t) __a, __b);
|
||||
}
|
||||
|
||||
__extension__ extern __inline uint32x2_t
|
||||
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
|
||||
|
|
|
@ -111,7 +111,7 @@ ONE (vmovn_high, uint32x4_t, uint32x2_t, uint64x2_t, u64)
|
|||
/* { dg-final { scan-assembler-times "\\taddhn2\\tv" 6} } */
|
||||
/* { dg-final { scan-assembler-times "rsubhn2\\tv" 6} } */
|
||||
/* { dg-final { scan-assembler-times "raddhn2\\tv" 6} } */
|
||||
/* { dg-final { scan-assembler-times "\\trshrn2 v" 6} } */
|
||||
/* { dg-final { scan-assembler-times "\\trshrn2\\tv" 6} } */
|
||||
/* { dg-final { scan-assembler-times "\\tshrn2\\tv" 6} } */
|
||||
/* { dg-final { scan-assembler-times "sqshrun2\\tv" 3} } */
|
||||
/* { dg-final { scan-assembler-times "sqrshrun2\\tv" 3} } */
|
||||
|
|
Loading…
Add table
Reference in a new issue