aarch64: Remove architecture dependencies from intrinsics

Many intrinsics currently depend on both an architecture version and a
feature, despite the corresponding instructions being available within
GCC at lower architecture versions.

LLVM has already removed these explicit architecture version
dependencies; this patch does the same for GCC. Note that +fp16 does not
imply +simd, so we need to add an explicit +simd for the Neon fp16
intrinsics.

Binutils did not previously support all of these architecture+feature
combinations, but this problem is already reachable from GCC.  For
example, compiling the test gcc.target/aarch64/usadv16qi-dotprod.c
with -O3 -march=armv8-a+dotprod has resulted in an assembler error since
GCC 10.  This is fixed in Binutils 2.41.

This patch retains explicit architecture version dependencies for
features that do not currently have a separate feature flag.

gcc/ChangeLog:

	* config/aarch64/aarch64.h (TARGET_MEMTAG): Remove armv8.5
	dependency.
	* config/aarch64/arm_acle.h: Remove unnecessary armv8.x
	dependencies from target pragmas.
	* config/aarch64/arm_fp16.h (target): Likewise.
	* config/aarch64/arm_neon.h (target): Likewise.

gcc/testsuite/ChangeLog:

	* gcc.target/aarch64/feature-bf16-backport.c: New test.
	* gcc.target/aarch64/feature-dotprod-backport.c: New test.
	* gcc.target/aarch64/feature-fp16-backport.c: New test.
	* gcc.target/aarch64/feature-fp16-scalar-backport.c: New test.
	* gcc.target/aarch64/feature-fp16fml-backport.c: New test.
	* gcc.target/aarch64/feature-i8mm-backport.c: New test.
	* gcc.target/aarch64/feature-memtag-backport.c: New test.
	* gcc.target/aarch64/feature-sha3-backport.c: New test.
	* gcc.target/aarch64/feature-sm4-backport.c: New test.
This commit is contained in:
Andrew Carlotti 2023-03-07 14:37:00 +00:00
parent 8c79b49cd4
commit 73d3bc3481
13 changed files with 100 additions and 10 deletions

View file

@ -292,7 +292,7 @@ enum class aarch64_feature : unsigned char {
#define TARGET_RNG (AARCH64_ISA_RNG)
/* Memory Tagging instructions optional to Armv8.5 enabled through +memtag. */
#define TARGET_MEMTAG (AARCH64_ISA_V8_5A && AARCH64_ISA_MEMTAG)
#define TARGET_MEMTAG (AARCH64_ISA_MEMTAG)
/* I8MM instructions are enabled through +i8mm. */
#define TARGET_I8MM (AARCH64_ISA_I8MM)

View file

@ -292,7 +292,7 @@ __rndrrs (uint64_t *__res)
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target ("arch=armv8.5-a+memtag")
#pragma GCC target ("+nothing+memtag")
#define __arm_mte_create_random_tag(__ptr, __u64_mask) \
__builtin_aarch64_memtag_irg(__ptr, __u64_mask)

View file

@ -30,7 +30,7 @@
#include <stdint.h>
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+fp16")
#pragma GCC target ("+nothing+fp16")
typedef __fp16 float16_t;

View file

@ -25590,7 +25590,7 @@ __INTERLEAVE_LIST (zip)
#include "arm_fp16.h"
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+fp16")
#pragma GCC target ("+nothing+simd+fp16")
/* ARMv8.2-A FP16 one operand vector intrinsics. */
@ -26753,7 +26753,7 @@ vminnmvq_f16 (float16x8_t __a)
/* AdvSIMD Dot Product intrinsics. */
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+dotprod")
#pragma GCC target ("+nothing+dotprod")
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@ -26844,7 +26844,7 @@ vdotq_laneq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b, const int __index)
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+sm4")
#pragma GCC target ("+nothing+sm4")
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@ -26911,7 +26911,7 @@ vsm4ekeyq_u32 (uint32x4_t __a, uint32x4_t __b)
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+sha3")
#pragma GCC target ("+nothing+sha3")
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@ -27547,7 +27547,7 @@ vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+fp16fml")
#pragma GCC target ("+nothing+fp16fml")
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@ -27856,7 +27856,7 @@ vrnd64xq_f64 (float64x2_t __a)
#include "arm_bf16.h"
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+bf16")
#pragma GCC target ("+nothing+bf16")
__extension__ extern __inline bfloat16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@ -28535,7 +28535,7 @@ vst4q_lane_bf16 (bfloat16_t *__ptr, bfloat16x8x4_t __val, const int __lane)
/* AdvSIMD 8-bit Integer Matrix Multiply (I8MM) intrinsics. */
#pragma GCC push_options
#pragma GCC target ("arch=armv8.2-a+i8mm")
#pragma GCC target ("+nothing+i8mm")
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+bf16" } */
#include <arm_neon.h>
/* Widening bfloat16 multiply-accumulate via vbfmlalbq_f32; the
   scan-assembler directive below expects a bfmlalb instruction.  */
float32x4_t bar (float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmlalbq_f32 (r, a, b);
}
/* { dg-final { scan-assembler {\tbfmlalb\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+dotprod" } */
#include <arm_neon.h>
/* Unsigned dot product via vdotq_u32; the scan-assembler directive
   below expects a udot instruction.  */
uint32x4_t bar (uint32x4_t r, uint8x16_t a, uint8x16_t b) {
  return vdotq_u32 (r, a, b);
}
/* { dg-final { scan-assembler {\tudot\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+fp16" } */
#include <arm_neon.h>
/* Vector half-precision add via vaddq_f16; the scan-assembler
   directive below expects an fadd instruction.  */
float16x8_t bar (float16x8_t a, float16x8_t b) {
  return vaddq_f16 (a, b);
}
/* { dg-final { scan-assembler {\tfadd\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+fp16+nosimd" } */
#include <arm_fp16.h>
/* Scalar half-precision add via vaddh_f16 (compiled with +nosimd);
   the scan-assembler directive below expects an fadd instruction.  */
float16_t bar (float16_t a, float16_t b) {
  return vaddh_f16 (a, b);
}
/* { dg-final { scan-assembler {\tfadd\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+fp16fml" } */
#include <arm_neon.h>
/* FP16 fused multiply-accumulate long (high half) via
   vfmlalq_high_f16; the scan-assembler directive below expects an
   fmlal2 instruction.  */
float32x4_t bar (float32x4_t r, float16x8_t a, float16x8_t b) {
  return vfmlalq_high_f16 (r, a, b);
}
/* { dg-final { scan-assembler {\tfmlal2\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+i8mm" } */
#include <arm_neon.h>
/* Signed 8-bit integer matrix multiply-accumulate via vmmlaq_s32; the
   scan-assembler directive below expects an smmla instruction.  */
int32x4_t bar (int32x4_t r, int8x16_t a, int8x16_t b) {
  return vmmlaq_s32 (r, a, b);
}
/* { dg-final { scan-assembler {\tsmmla\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+memtag" } */
#include <arm_acle.h>
/* Exercise __arm_mte_create_random_tag so the scan-assembler directive
   below can verify that an irg instruction is emitted with plain
   -march=armv8-a+memtag.  */
int *bar (int *src) {
  /* NOTE(review): the mask was originally written 2<<16-1, which parses
     as 2 << (16 - 1) == 0x10000 because '-' binds tighter than '<<'.
     The parentheses below make that precedence explicit without changing
     the value; confirm whether (2 << 16) - 1 was actually intended.  */
  return __arm_mte_create_random_tag (src, 2 << (16 - 1));
}
/* { dg-final { scan-assembler {\tirg\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+sha3" } */
#include <arm_neon.h>
/* SHA512 hash step via vsha512hq_u64; the scan-assembler directive
   below expects a sha512h instruction.  */
uint64x2_t bar (uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  return vsha512hq_u64 (a, b, c);
}
/* { dg-final { scan-assembler {\tsha512h\t} } } */

View file

@ -0,0 +1,10 @@
/* { dg-do compile } */
/* { dg-additional-options "-march=armv8-a+sm4" } */
#include <arm_neon.h>
/* SM3 TT1A step (lane 2) via vsm3tt1aq_u32; the scan-assembler
   directive below expects an sm3tt1a instruction.  */
uint32x4_t bar (uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  return vsm3tt1aq_u32 (a, b, c, 2);
}
/* { dg-final { scan-assembler {\tsm3tt1a\t} } } */