From b5ecca346f3aead5755f4b5df10d7d8c0cabd773 Mon Sep 17 00:00:00 2001
From: Kyrylo Tkachov <kyrylo.tkachov@arm.com>
Date: Wed, 7 Jun 2023 15:24:46 +0100
Subject: [PATCH] aarch64: Handle ASHIFTRT in patterns for shrn2

Similar to the low-half patterns, we want to match both ashiftrt and
lshiftrt with the truncate for SHRN2.  We reuse the SHIFTRT iterator
and the AARCH64_VALID_SHRN_OP check to help, but because we expand the
high-half patterns by their gen_* names we need to disambiguate all
the different trunc+shift combinations in the pattern name, which
leads to a slight renaming of the builtins.

The AARCH64_VALID_SHRN_OP check on the expander and the define_insns
ensures that no invalid combination ends up getting matched.

Bootstrapped and tested on aarch64-none-linux-gnu and
aarch64_be-none-elf.

gcc/ChangeLog:

	* config/aarch64/aarch64-simd-builtins.def (shrn2_n): Rename
	builtins to...
	(ushrn2_n): ... This.
	(sqshrn2_n): Rename builtins to...
	(sqsshrn2_n): ... This.
	(uqshrn2_n): Rename builtins to...
	(uqushrn2_n): ... This.
	* config/aarch64/arm_neon.h (vqshrn_high_n_s16): Adjust for the
	above.
	(vqshrn_high_n_s32): Likewise.
	(vqshrn_high_n_s64): Likewise.
	(vqshrn_high_n_u16): Likewise.
	(vqshrn_high_n_u32): Likewise.
	(vqshrn_high_n_u64): Likewise.
	(vshrn_high_n_s16): Likewise.
	(vshrn_high_n_s32): Likewise.
	(vshrn_high_n_s64): Likewise.
	(vshrn_high_n_u16): Likewise.
	(vshrn_high_n_u32): Likewise.
	(vshrn_high_n_u64): Likewise.
	* config/aarch64/aarch64-simd.md
	(aarch64_<shrn_op>shrn2_n<mode>_insn_le): Rename to...
	(aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le): ... This.
	Use SHIFTRT iterator and AARCH64_VALID_SHRN_OP check.
	(aarch64_<shrn_op>shrn2_n<mode>_insn_be): Rename to...
	(aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be): ... This.
	Use SHIFTRT iterator and AARCH64_VALID_SHRN_OP check.
	(aarch64_<shrn_op>shrn2_n<mode>): Rename to...
	(aarch64_<shrn_op><sra_op>shrn2_n<mode>): ... This.
	Update expander for the above.
---
 gcc/config/aarch64/aarch64-simd-builtins.def |  8 +++---
 gcc/config/aarch64/aarch64-simd.md           | 28 +++++++++++---------
 gcc/config/aarch64/arm_neon.h                | 24 ++++++++---------
 3 files changed, 31 insertions(+), 29 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 01cd85d64fd..e2b94ad8247 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -263,8 +263,8 @@
   BUILTIN_VQN (SHIFTIMM, shrn_n, 0, NONE)
   BUILTIN_VQN (USHIFTIMM, shrn_n, 0, NONE)
-  BUILTIN_VQN (SHIFT2IMM, shrn2_n, 0, NONE)
-  BUILTIN_VQN (USHIFT2IMM, shrn2_n, 0, NONE)
+  BUILTIN_VQN (SHIFT2IMM, ushrn2_n, 0, NONE)
+  BUILTIN_VQN (USHIFT2IMM, ushrn2_n, 0, NONE)
   BUILTIN_VQN (SHIFTIMM, rshrn_n, 0, NONE)
   BUILTIN_VQN (USHIFTIMM, rshrn_n, 0, NONE)
@@ -480,8 +480,8 @@
   BUILTIN_SD_HSDI (USHIFTIMM, uqrshrn_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqshrun2_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM_UUSS, sqrshrun2_n, 0, NONE)
-  BUILTIN_VQN (SHIFT2IMM, sqshrn2_n, 0, NONE)
-  BUILTIN_VQN (USHIFT2IMM, uqshrn2_n, 0, NONE)
+  BUILTIN_VQN (SHIFT2IMM, sqsshrn2_n, 0, NONE)
+  BUILTIN_VQN (USHIFT2IMM, uqushrn2_n, 0, NONE)
   BUILTIN_VQN (SHIFT2IMM, sqrshrn2_n, 0, NONE)
   BUILTIN_VQN (USHIFT2IMM, uqrshrn2_n, 0, NONE)
   /* Implemented by aarch64_<sur>s<lr>i_n<mode>.  */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index b31c7130708..cd04cbd6f72 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6773,49 +6773,51 @@
 }
 )

-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_le"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")
 	  (ALL_TRUNC:<VNARROWQ>
-	    (<TRUNC_SHIFT>:VQN
+	    (SHIFTRT:VQN
 	      (match_operand:VQN 2 "register_operand" "w")
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
-  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN
+   && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
   "<shrn_op>shrn2\t%0.<V2ntype>, %2.<Vtype>, %3"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )

-(define_insn "aarch64_<shrn_op>shrn2_n<mode>_insn_be"
+(define_insn "aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be"
   [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
 	(vec_concat:<VNARROWQ2>
 	  (ALL_TRUNC:<VNARROWQ>
-	    (<TRUNC_SHIFT>:VQN
+	    (SHIFTRT:VQN
 	      (match_operand:VQN 2 "register_operand" "w")
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
-  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "TARGET_SIMD && BYTES_BIG_ENDIAN
+   && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
   "<shrn_op>shrn2\t%0.<V2ntype>, %2.<Vtype>, %3"
   [(set_attr "type" "neon_shift_imm_narrow_q")]
 )

-(define_expand "aarch64_<shrn_op>shrn2_n<mode>"
+(define_expand "aarch64_<shrn_op><sra_op>shrn2_n<mode>"
   [(match_operand:<VNARROWQ2> 0 "register_operand")
    (match_operand:<VNARROWQ> 1 "register_operand")
    (ALL_TRUNC:<VNARROWQ>
-     (match_operand:VQN 2 "register_operand"))
+     (SHIFTRT:VQN (match_operand:VQN 2 "register_operand")))
    (match_operand:SI 3 "aarch64_simd_shift_imm_offset_<vn_mode>")]
-  "TARGET_SIMD"
+  "TARGET_SIMD && AARCH64_VALID_SHRN_OP (<ALL_TRUNC:CODE>, <SHIFTRT:CODE>)"
   {
     operands[3] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
						 INTVAL (operands[3]));

     if (BYTES_BIG_ENDIAN)
-      emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_be (operands[0],
-				operands[1], operands[2], operands[3]));
+      emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_be (
+		  operands[0], operands[1], operands[2], operands[3]));
     else
-      emit_insn (gen_aarch64_<shrn_op>shrn2_n<mode>_insn_le (operands[0],
-				operands[1], operands[2], operands[3]));
+      emit_insn (gen_aarch64_<shrn_op><sra_op>shrn2_n<mode>_insn_le (
+		  operands[0], operands[1], operands[2], operands[3]));
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d350d9e7c01..0ace1eeddb9 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -5469,42 +5469,42 @@ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv8hi (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv8hi (__a, __b, __c);
 }

 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv4si (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv4si (__a, __b, __c);
 }

 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_sqshrn2_nv2di (__a, __b, __c);
+  return __builtin_aarch64_sqsshrn2_nv2di (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv8hi_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv8hi_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv4si_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv4si_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_uqshrn2_nv2di_uuus (__a, __b, __c);
+  return __builtin_aarch64_uqushrn2_nv2di_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
@@ -5630,42 +5630,42 @@ __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv8hi (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv8hi (__a, __b, __c);
 }

 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv4si (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv4si (__a, __b, __c);
 }

 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv2di (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv2di (__a, __b, __c);
 }

 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv8hi_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv8hi_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv4si_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv4si_uuus (__a, __b, __c);
 }

 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
 {
-  return __builtin_aarch64_shrn2_nv2di_uuus (__a, __b, __c);
+  return __builtin_aarch64_ushrn2_nv2di_uuus (__a, __b, __c);
 }

 __extension__ extern __inline poly8x8_t
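
For reference, a minimal sketch (not taken from the patch or its
testsuite; function names and shift amount are illustrative) of the
kind of C input this change targets.  For signed elements a narrowing
shift reaches RTL as a truncate of an ashiftrt, and before this patch
only the lshiftrt form was matched for the high half, so combine could
not form SHRN2 here:

/* Hypothetical example.  Each iteration computes
   truncate (ashiftrt (src[i], 10)); for shift amounts up to the
   narrow element width the arithmetic and logical shifts agree on
   the bits that survive the truncate, which is why plain TRUNCATE
   is valid with either shift code under AARCH64_VALID_SHRN_OP.  */
#include <stdint.h>

void
narrow_shift (int16_t *restrict dst, const int32_t *restrict src)
{
  for (int i = 0; i < 8; i++)
    dst[i] = (int16_t) (src[i] >> 10);
}

With the patterns above, the vectorized high half of such a loop can
now use shrn2, while the AARCH64_VALID_SHRN_OP check keeps the
saturating combinations honest (sqshrn2 pairing only with ashiftrt,
uqshrn2 only with lshiftrt).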