AArch64: Gate various crypto intrinsics availability based on features
The 64-bit variants of the PMULL{2} instructions and the AES instructions are available only if FEAT_AES is implemented, according to the Arm ARM [1]. Similarly, FEAT_SHA1 and FEAT_SHA256 enable the use of the SHA1 and SHA256 instruction variants. This patch fixes arm_neon.h to reflect feature availability correctly, gating the intrinsics on '+aes' and '+sha2' rather than the ambiguous catch-all '+crypto'.

[1] Arm Architecture Reference Manual (Arm ARM), sections D17.2.61 and C7.2.215.

2022-01-11  Tejas Belagod  <tejas.belagod@arm.com>

gcc/ChangeLog:

	* config/aarch64/arm_neon.h (vmull_p64, vmull_high_p64, vaeseq_u8,
	vaesdq_u8, vaesmcq_u8, vaesimcq_u8): Gate under "nothing+aes".
	(vsha1*_u32, vsha256*_u32): Gate under "nothing+sha2".

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/acle/pmull64.c: New.
	* gcc.target/aarch64/aes-fuse-1.c: Replace '+crypto' with the
	corresponding feature flag based on the intrinsic.
	* gcc.target/aarch64/aes-fuse-2.c: Likewise.
	* gcc.target/aarch64/aes_1.c: Likewise.
	* gcc.target/aarch64/aes_2.c: Likewise.
	* gcc.target/aarch64/aes_xor_combine.c: Likewise.
	* gcc.target/aarch64/sha1_1.c: Likewise.
	* gcc.target/aarch64/sha256_1.c: Likewise.
	* gcc.target/aarch64/target_attr_crypto_ice_1.c: Likewise.
parent 16bd9e14f2
commit 99ea0d7611

10 changed files with 44 additions and 27 deletions
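As an illustration of the user-facing effect of this gating (a sketch, not part of the patch): with the intrinsics now guarded by "+nothing+aes", a caller only has to enable the AES feature, mirroring the "+aes" pragma used by the new pmull64.c test, instead of pulling in the whole "+crypto" group. The helper name pmull_low below is made up for the example; the same idea applies to the SHA1/SHA256 intrinsics, which now only need "+sha2".

/* Illustrative sketch, not part of the commit: after this change, vmull_p64
   only requires the AES feature, so "+aes" (rather than the catch-all
   "+crypto") is enough to make it available.  The helper name pmull_low is
   hypothetical.  Compile with e.g. "gcc -O2 -march=armv8-a -c" on AArch64.  */

#pragma GCC push_options
#pragma GCC target ("+aes")

#include <arm_neon.h>

/* Carry-less multiply of the low 64-bit polynomial lanes of A and B.  */
poly128_t
pmull_low (poly64x2_t a, poly64x2_t b)
{
  return vmull_p64 (vgetq_lane_p64 (a, 0), vgetq_lane_p64 (b, 0));
}

#pragma GCC pop_options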
gcc/config/aarch64/arm_neon.h

@@ -7496,7 +7496,7 @@ vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
 #pragma GCC pop_options
 
 #pragma GCC push_options
-#pragma GCC target ("+nothing+crypto")
+#pragma GCC target ("+nothing+aes")
 /* vaes  */
 
 __extension__ extern __inline uint8x16_t
@@ -7526,6 +7526,22 @@ vaesimcq_u8 (uint8x16_t data)
 {
   return __builtin_aarch64_crypto_aesimcv16qi_uu (data);
 }
 
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+  return
+    __builtin_aarch64_crypto_pmulldi_ppp (__a, __b);
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+  return __builtin_aarch64_crypto_pmullv2di_ppp (__a, __b);
+}
+
 #pragma GCC pop_options
 
 /* vcage  */
@@ -20772,7 +20788,7 @@ vrsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
 }
 
 #pragma GCC push_options
-#pragma GCC target ("+nothing+crypto")
+#pragma GCC target ("+nothing+sha2")
 
 /* vsha1  */
 
@@ -20849,21 +20865,6 @@ vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
						  __w12_15);
 }
 
-__extension__ extern __inline poly128_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vmull_p64 (poly64_t __a, poly64_t __b)
-{
-  return
-    __builtin_aarch64_crypto_pmulldi_ppp (__a, __b);
-}
-
-__extension__ extern __inline poly128_t
-__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
-vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
-{
-  return __builtin_aarch64_crypto_pmullv2di_ppp (__a, __b);
-}
-
 #pragma GCC pop_options
 
 /* vshl */
gcc/testsuite/gcc.target/aarch64/acle/pmull64.c (new file)

@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.2-a" } */
+
+#pragma push_options
+#pragma GCC target ("+aes")
+
+#include "arm_neon.h"
+
+int foo (poly64_t a, poly64_t b)
+{
+  return vgetq_lane_s32 (vreinterpretq_s32_p128 (vmull_p64 (a, b)), 0);
+}
+
+/* { dg-final { scan-assembler "\tpmull\tv" } } */
gcc/testsuite/gcc.target/aarch64/aes-fuse-1.c

@@ -1,6 +1,6 @@
 /* { dg-do compile } */
-/* { dg-options "-O3 -mcpu=cortex-a72+crypto -dp" } */
-/* { dg-additional-options "-march=armv8-a+crypto" { target { aarch64*-*-* } } }*/
+/* { dg-options "-O3 -mcpu=cortex-a72+aes -dp" } */
+/* { dg-additional-options "-march=armv8-a+aes" { target { aarch64*-*-* } } }*/
 
 #include <arm_neon.h>
 
gcc/testsuite/gcc.target/aarch64/aes-fuse-2.c

@@ -1,6 +1,6 @@
 /* { dg-do compile } */
-/* { dg-options "-O3 -mcpu=cortex-a72+crypto -dp" } */
-/* { dg-additional-options "-march=armv8-a+crypto" { target { aarch64*-*-* } } }*/
+/* { dg-options "-O3 -mcpu=cortex-a72+aes -dp" } */
+/* { dg-additional-options "-march=armv8-a+aes" { target { aarch64*-*-* } } }*/
 
 #include <arm_neon.h>
 
gcc/testsuite/gcc.target/aarch64/aes_1.c

@@ -1,6 +1,6 @@
 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+crypto" } */
+/* { dg-options "-march=armv8-a+aes" } */
 
 #include "arm_neon.h"
 
gcc/testsuite/gcc.target/aarch64/aes_2.c

@@ -1,6 +1,6 @@
 
 /* { dg-do compile } */
-/* { dg-options "-O3 -march=armv8-a+crypto" } */
+/* { dg-options "-O3 -march=armv8-a+aes" } */
 
 #include "arm_neon.h"
 
@@ -76,4 +76,6 @@ test7 (uint8x16_t a, uint8x16_t b)
   return result;
 }
 /* { dg-final { scan-assembler-not "mov" } } */
+/* { dg-final { scan-assembler "aesd\tv" } } */
+/* { dg-final { scan-assembler "aese\tv" } } */
 
gcc/testsuite/gcc.target/aarch64/aes_xor_combine.c

@@ -1,5 +1,5 @@
 /* { dg-do compile } */
-/* { dg-options "-O3 -mcpu=cortex-a55+crypto" } */
+/* { dg-options "-O3 -mcpu=cortex-a55+aes" } */
 #include <arm_neon.h>
 
 #define AESE(r, v, key) (r = vaeseq_u8 ((v), (key)));
gcc/testsuite/gcc.target/aarch64/sha1_1.c

@@ -1,6 +1,6 @@
 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+crypto" } */
+/* { dg-options "-march=armv8-a+sha2" } */
 
 #include "arm_neon.h"
 
gcc/testsuite/gcc.target/aarch64/sha256_1.c

@@ -1,6 +1,6 @@
 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+crypto" } */
+/* { dg-options "-march=armv8-a+sha2" } */
 
 #include "arm_neon.h"
 
gcc/testsuite/gcc.target/aarch64/target_attr_crypto_ice_1.c

@@ -6,7 +6,7 @@
 /* Unless we do something about re-laying out the SIMD builtin types
    this testcase ICEs during expansion of the crypto builtin.  */
 
-__attribute__ ((target ("cpu=cortex-a57+crypto")))
+__attribute__ ((target ("cpu=cortex-a57+sha2")))
 uint32x4_t
 test_vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
 {