AVX10.2 ymm rounding: Support vcvtph2p{s,d,sx} and vcvtph2{,u}{dq,qq} intrins

gcc/ChangeLog:

	* config/i386/avx10_2roundingintrin.h: New intrins.
	* config/i386/i386-builtin-types.def: Add new DEF_FUNCTION_TYPE.
	* config/i386/i386-builtin.def (BDESC): Add new builtins.
	* config/i386/i386-expand.cc (ix86_expand_round_builtin): Handle
	V8SF_FTYPE_V8HF_V8SF_UQI_INT, V8SI_FTYPE_V8HF_V8SI_UQI_INT,
	V4DF_FTYPE_V8HF_V4DF_UQI_INT, V4DI_FTYPE_V8HF_V4DI_UQI_INT.
	* config/i386/sse.md:
	(avx512fp16_float_extend_ph<mode>2<mask_name><round_saeonly_name>):
	Add condition check.
	(avx512fp16_vcvtph2<sseintconvertsignprefix><sseintconvert>_<mode>
	<mask_name><round_name>):
	Ditto.
	(avx512fp16_float_extend_ph<mode>2<mask_name>): Extend round saeonly.
	(vcvtph2ps256<mask_name>): Ditto.
	* config/i386/subst.md
	(round_saeonly_applied): New condition.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx-1.c: Add new builtin test.
	* gcc.target/i386/sse-13.c: Ditto.
	* gcc.target/i386/sse-14.c: Ditto.
	* gcc.target/i386/sse-22.c: Add new macro test.
	* gcc.target/i386/sse-23.c: Ditto.
	* gcc.target/i386/avx10_2-rounding-1.c: Add test.
Author: Hu, Lin1 (2024-08-19 10:08:56 +08:00), committed by Haochen Jiang
parent 508ac49e1a
commit 6f2eac53b6
12 changed files with 529 additions and 9 deletions
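
For reference, a minimal usage sketch of the new intrinsics (not part of
the patch; it assumes a compiler carrying this change with AVX10.2
256-bit support enabled, e.g. -mavx10.2-256):

#include <immintrin.h>

/* FP16 -> int32 with an explicit rounding mode.  The immediate matches
   the "11" used in the tests below:
   _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC.  */
__m256i
cvtph_epi32_rz (__m128h a)
{
  return _mm256_cvt_roundph_epi32 (a, _MM_FROUND_TO_ZERO
				      | _MM_FROUND_NO_EXC);
}

/* FP16 -> double widens exactly (every _Float16 value is representable
   as a double), so vcvtph2pd takes only SAE (suppress-all-exceptions),
   not a rounding mode.  */
__m256d
cvtph_pd_sae (__m128h a)
{
  return _mm256_cvt_roundph_pd (a, _MM_FROUND_NO_EXC);
}

/* Masked form: lanes whose mask bit is clear keep the value from src.  */
__m256i
cvtph_epi32_masked (__m256i src, __mmask8 m, __m128h a)
{
  return _mm256_mask_cvt_roundph_epi32 (src, m, a,
					_MM_FROUND_TO_NEAREST_INT
					| _MM_FROUND_NO_EXC);
}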

gcc/config/i386/avx10_2roundingintrin.h

@@ -486,6 +486,246 @@ _mm256_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_epi32 (__m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
(__v8si)
_mm256_setzero_si256 (),
(__mmask8) -1,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_epi32 (__m256i __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
(__v8si) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_epi32 (__mmask8 __U, __m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) __A,
(__v8si)
_mm256_setzero_si256 (),
(__mmask8) __U,
__R);
}
extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_pd (__m128h __A, const int __R)
{
return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) -1,
__R);
}
extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_pd (__m256d __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
(__v4df) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256d
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_pd (__mmask8 __U, __m128h __A, const int __R)
{
return (__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) __A,
(__v4df)
_mm256_setzero_pd (),
(__mmask8) __U,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_ps (__m128h __A, const int __R)
{
return
(__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
(__v8sf)
_mm256_undefined_ps (),
(__mmask8) -1,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_ps (__m256 __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
(__v8sf) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_ps (__mmask8 __U, __m128h __A, const int __R)
{
return (__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) __A,
(__v8sf)
_mm256_setzero_ps (),
(__mmask8) __U,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvtx_roundph_ps (__m128h __A, const int __R)
{
return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
(__v8sf)
_mm256_setzero_ps (),
(__mmask8) -1,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvtx_roundph_ps (__m256 __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
(__v8sf) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvtx_roundph_ps (__mmask8 __U, __m128h __A, const int __R)
{
return (__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) __A,
(__v8sf)
_mm256_setzero_ps (),
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_epi64 (__m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
(__v4di)
_mm256_setzero_si256 (),
(__mmask8) -1,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_epi64 (__m256i __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
(__v4di) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_epi64 (__mmask8 __U, __m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) __A,
(__v4di)
_mm256_setzero_si256 (),
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_epu32 (__m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
(__v8si)
_mm256_setzero_si256 (),
(__mmask8) -1,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_epu32 (__m256i __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
(__v8si) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_epu32 (__mmask8 __U, __m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) __A,
(__v8si)
_mm256_setzero_si256 (),
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_cvt_roundph_epu64 (__m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
(__v4di)
_mm256_setzero_si256 (),
(__mmask8) -1,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_cvt_roundph_epu64 (__m256i __W, __mmask8 __U, __m128h __A,
const int __R)
{
return (__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
(__v4di) __W,
(__mmask8) __U,
__R);
}
extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_cvt_roundph_epu64 (__mmask8 __U, __m128h __A, const int __R)
{
return
(__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) __A,
(__v4di)
_mm256_setzero_si256 (),
(__mmask8) __U,
__R);
}
#else
#define _mm256_add_round_pd(A, B, R) \
((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
@@ -755,6 +995,150 @@ _mm256_maskz_cvt_roundpd_epu64 (__mmask8 __U, __m256d __A, const int __R)
(_mm256_setzero_si256 ()),\
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_epi32(A, R) \
((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
(__v8si) \
(_mm256_setzero_si256 ()),\
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_epi32(W, U, A, R) \
((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
(__v8si) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_epi32(U, A, R) \
((__m256i) __builtin_ia32_vcvtph2dq256_mask_round ((__v8hf) (A), \
(__v8si) \
(_mm256_setzero_si256 ()),\
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_pd(A, R) \
((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
(__v4df) \
(_mm256_setzero_pd ()), \
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_pd(W, U, A, R) \
((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
(__v4df) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_pd(U, A, R) \
((__m256d) __builtin_ia32_vcvtph2pd256_mask_round ((__v8hf) (A), \
(__v4df) \
(_mm256_setzero_pd ()), \
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_ps(A, R) \
((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
(__v8sf) \
(_mm256_undefined_ps ()), \
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_ps(W, U, A, R) \
((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
(__v8sf) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_ps(U, A, R) \
((__m256) __builtin_ia32_vcvtph2ps256_mask_round ((__v8hf) (A), \
(__v8sf) \
(_mm256_setzero_ps ()), \
(__mmask8) (U), \
(R)))
#define _mm256_cvtx_roundph_ps(A, R) \
((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
(__v8sf) \
(_mm256_setzero_ps ()), \
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvtx_roundph_ps(W, U, A, R) \
((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
(__v8sf) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvtx_roundph_ps(U, A, R) \
((__m256) __builtin_ia32_vcvtph2psx256_mask_round ((__v8hf) (A), \
(__v8sf) \
(_mm256_setzero_ps ()), \
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_epi64(A, R) \
((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
(__v4di) \
(_mm256_setzero_si256 ()),\
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_epi64(W, U, A, R) \
((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
(__v4di) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_epi64(U, A, R) \
((__m256i) __builtin_ia32_vcvtph2qq256_mask_round ((__v8hf) (A), \
(__v4di) \
(_mm256_setzero_si256 ()),\
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_epu32(A, R) \
((__m256i) \
__builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
(__v8si) \
(_mm256_setzero_si256 ()), \
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_epu32(W, U, A, R) \
((__m256i) __builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
(__v8si) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_epu32(U, A, R) \
((__m256i) \
__builtin_ia32_vcvtph2udq256_mask_round ((__v8hf) (A), \
(__v8si) \
(_mm256_setzero_si256 ()), \
(__mmask8) (U), \
(R)))
#define _mm256_cvt_roundph_epu64(A, R) \
((__m256i) \
__builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
(__v4di) \
(_mm256_setzero_si256 ()), \
(__mmask8) (-1), \
(R)))
#define _mm256_mask_cvt_roundph_epu64(W, U, A, R) \
((__m256i) __builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
(__v4di) (W), \
(__mmask8) (U), \
(R)))
#define _mm256_maskz_cvt_roundph_epu64(U, A, R) \
((__m256i) \
__builtin_ia32_vcvtph2uqq256_mask_round ((__v8hf) (A), \
(__v4di) \
(_mm256_setzero_si256 ()), \
(__mmask8) (U), \
(R)))
#endif
#ifdef __DISABLE_AVX10_2_256__

gcc/config/i386/i386-builtin-types.def

@@ -1427,3 +1427,7 @@ DEF_FUNCTION_TYPE (V8HF, V4DF, V8HF, UQI, INT)
DEF_FUNCTION_TYPE (V4SF, V4DF, V4SF, UQI, INT)
DEF_FUNCTION_TYPE (V4SI, V4DF, V4SI, UQI, INT)
DEF_FUNCTION_TYPE (V4DI, V4DF, V4DI, UQI, INT)
DEF_FUNCTION_TYPE (V8SI, V8HF, V8SI, UQI, INT)
DEF_FUNCTION_TYPE (V4DF, V8HF, V4DF, UQI, INT)
DEF_FUNCTION_TYPE (V8SF, V8HF, V8SF, UQI, INT)
DEF_FUNCTION_TYPE (V4DI, V8HF, V4DI, UQI, INT)

gcc/config/i386/i386-builtin.def

@@ -3333,6 +3333,13 @@ BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx_cvtpd2dq256_mask_round, "__
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fix_notruncv4dfv4di2_mask_round, "__builtin_ia32_cvtpd2qq256_mask_round", IX86_BUILTIN_CVTPD2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fixuns_notruncv4dfv4si2_mask_round, "__builtin_ia32_cvtpd2udq256_mask_round", IX86_BUILTIN_CVTPD2UDQ256_MASK_ROUND, UNKNOWN, (int) V4SI_FTYPE_V4DF_V4SI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_fixuns_notruncv4dfv4di2_mask_round, "__builtin_ia32_cvtpd2uqq256_mask_round", IX86_BUILTIN_CVTPD2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V4DF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2dq_v8si_mask_round, "__builtin_ia32_vcvtph2dq256_mask_round", IX86_BUILTIN_VCVTPH2DQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv4df2_mask_round, "__builtin_ia32_vcvtph2pd256_mask_round", IX86_BUILTIN_VCVTPH2PD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V8HF_V4DF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_vcvtph2ps256_mask_round, "__builtin_ia32_vcvtph2ps256_mask_round", IX86_BUILTIN_VCVTPH2PS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8HF_V8SF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_float_extend_phv8sf2_mask_round, "__builtin_ia32_vcvtph2psx256_mask_round", IX86_BUILTIN_VCVTPH2PSX256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8HF_V8SF_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2qq_v4di_mask_round, "__builtin_ia32_vcvtph2qq256_mask_round", IX86_BUILTIN_VCVTPH2QQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2udq_v8si_mask_round, "__builtin_ia32_vcvtph2udq256_mask_round", IX86_BUILTIN_VCVTPH2UDQ256_MASK_ROUND, UNKNOWN, (int) V8SI_FTYPE_V8HF_V8SI_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512fp16_vcvtph2uqq_v4di_mask_round, "__builtin_ia32_vcvtph2uqq256_mask_round", IX86_BUILTIN_VCVTPH2UQQ256_MASK_ROUND, UNKNOWN, (int) V4DI_FTYPE_V8HF_V4DI_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)

gcc/config/i386/i386-expand.cc

@@ -12402,6 +12402,10 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case V8DF_FTYPE_V8SF_V8DF_QI_INT:
case V16SF_FTYPE_V16HI_V16SF_HI_INT:
case V8SF_FTYPE_V8SI_V8SF_UQI_INT:
case V8SF_FTYPE_V8HF_V8SF_UQI_INT:
case V8SI_FTYPE_V8HF_V8SI_UQI_INT:
case V4DF_FTYPE_V8HF_V4DF_UQI_INT:
case V4DI_FTYPE_V8HF_V4DI_UQI_INT:
case V4DI_FTYPE_V4DF_V4DI_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_INT:
case V4SI_FTYPE_V4DF_V4SI_UQI_INT:

gcc/config/i386/sse.md

@@ -7368,7 +7368,7 @@
 	 (unspec:VI248_AVX512VL
 	   [(match_operand:<ssePHmode> 1 "<round_nimm_predicate>" "<round_constraint>")]
 	   UNSPEC_US_FIX_NOTRUNC))]
-  "TARGET_AVX512FP16"
+  "TARGET_AVX512FP16 && <round_mode_condition>"
   "vcvtph2<sseintconvertsignprefix><sseintconvert>\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -7756,7 +7756,7 @@
   [(set (match_operand:VF48H_AVX512VL 0 "register_operand" "=v")
 	(float_extend:VF48H_AVX512VL
 	  (match_operand:<ssePHmode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
-  "TARGET_AVX512FP16"
+  "TARGET_AVX512FP16 && <round_saeonly_mode_condition>"
   "vcvtph2<castmode><ph2pssuffix>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
@@ -7779,14 +7779,14 @@
     }
 })
 
-(define_insn "avx512fp16_float_extend_ph<mode>2<mask_name>"
+(define_insn "avx512fp16_float_extend_ph<mode>2<mask_name><round_saeonly_name>"
   [(set (match_operand:VF4_128_8_256 0 "register_operand" "=v")
 	(float_extend:VF4_128_8_256
 	  (vec_select:V4HF
 	    (match_operand:V8HF 1 "register_operand" "v")
 	    (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))]
-  "TARGET_AVX512FP16 && TARGET_AVX512VL"
-  "vcvtph2<castmode><ph2pssuffix>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %q1}"
+  "TARGET_AVX512FP16 && TARGET_AVX512VL && <round_saeonly_mode_condition>"
+  "vcvtph2<castmode><ph2pssuffix>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %q1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "evex")
    (set_attr "mode" "<sseinsnmode>")])
@@ -28778,12 +28778,13 @@
    (set_attr "prefix" "vex")
    (set_attr "mode" "V8SF")])
 
-(define_insn "vcvtph2ps256<mask_name>"
+(define_insn "vcvtph2ps256<mask_name><round_saeonly_name>"
   [(set (match_operand:V8SF 0 "register_operand" "=v")
-	(unspec:V8SF [(match_operand:V8HI 1 "nonimmediate_operand" "vm")]
+	(unspec:V8SF [(match_operand:V8HI 1 "<round_saeonly_nimm_scalar_predicate>" "<round_saeonly_constraint>")]
 		     UNSPEC_VCVTPH2PS))]
-  "TARGET_F16C || TARGET_AVX512VL"
-  "vcvtph2ps\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
+  "(TARGET_F16C || TARGET_AVX512VL)
+   && (!<round_saeonly_applied> || TARGET_AVX10_2_256)"
+  "vcvtph2ps\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
   [(set_attr "type" "ssecvt")
    (set_attr "prefix" "vex")
    (set_attr "btver2_decode" "double")

gcc/config/i386/subst.md

@@ -272,6 +272,7 @@
|| <MODE>mode == V4DImode
|| <MODE>mode == V8SImode
|| <MODE>mode == V16HFmode)))")
(define_subst_attr "round_saeonly_applied" "round_saeonly" "false" "true")
(define_subst "round_saeonly"
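
Note on the new subst attribute: per its definition just above, it
expands to "false" in the original pattern and to "true" once the
round_saeonly subst has been applied.  The vcvtph2ps256 condition in
sse.md therefore keeps the plain F16C/AVX512VL forms available and
requires AVX10.2 only for the SAE-annotated form.  A hedged sketch of
the user-visible effect (not from the patch; function names are
illustrative):

#include <immintrin.h>

/* Plain conversion: available with F16C or AVX512VL alone.  */
__m256
plain_cvt (__m128i a)
{
  return _mm256_cvtph_ps (a);
}

/* SAE form: the variant the round_saeonly_applied check gates behind
   AVX10.2.  Argument type follows the header hunk above.  */
__m256
sae_cvt (__m128h a)
{
  return _mm256_cvt_roundph_ps (a, _MM_FROUND_NO_EXC);
}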

gcc/testsuite/gcc.target/i386/avx-1.c

@@ -857,6 +857,13 @@
#define __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
#include <wmmintrin.h>
#include <immintrin.h>

gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c

@@ -39,6 +39,27 @@
/* { dg-final { scan-assembler-times "vcvtpd2uqq\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtpd2uqq\[ \\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtpd2uqq\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2dq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2dq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2dq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2pd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2pd\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2pd\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2ps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2ps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2ps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2psx\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2psx\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\{\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2psx\[ \\t\]+\{sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2qq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2qq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2qq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2udq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2udq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2udq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rn-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vcvtph2uqq\[ \\t\]+\{rz-sae\}\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
@@ -125,3 +146,39 @@ avx10_2_test_5 (void)
xi = _mm256_mask_cvt_roundpd_epu64 (xi, m8, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
xi = _mm256_maskz_cvt_roundpd_epu64 (m8, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
void extern
avx10_2_test_6 (void)
{
xi = _mm256_cvt_roundph_epi32 (hxh, 4);
xi = _mm256_mask_cvt_roundph_epi32 (xi, m8, hxh, 8);
xi = _mm256_maskz_cvt_roundph_epi32 (m8, hxh, 11);
xi = _mm256_cvt_roundph_epi64 (hxh, 4);
xi = _mm256_mask_cvt_roundph_epi64 (xi, m8, hxh, 8);
xi = _mm256_maskz_cvt_roundph_epi64 (m8, hxh, 11);
xi = _mm256_cvt_roundph_epu32 (hxh, 4);
xi = _mm256_mask_cvt_roundph_epu32 (xi, m8, hxh, 8);
xi = _mm256_maskz_cvt_roundph_epu32 (m8, hxh, 11);
xi = _mm256_cvt_roundph_epu64 (hxh, 4);
xi = _mm256_mask_cvt_roundph_epu64 (xi, m8, hxh, 8);
xi = _mm256_maskz_cvt_roundph_epu64 (m8, hxh, 11);
}
void extern
avx10_2_test_7 (void)
{
xd = _mm256_cvt_roundph_pd (hxh, 4);
xd = _mm256_mask_cvt_roundph_pd (xd, m8, hxh, 8);
xd = _mm256_maskz_cvt_roundph_pd (m8, hxh, 8);
x = _mm256_cvt_roundph_ps (hxh, _MM_FROUND_NO_EXC);
x = _mm256_mask_cvt_roundph_ps (x, 4, hxh, _MM_FROUND_NO_EXC);
x = _mm256_maskz_cvt_roundph_ps (6, hxh, _MM_FROUND_NO_EXC);
x = _mm256_cvtx_roundph_ps (hxh, 4);
x = _mm256_mask_cvtx_roundph_ps (x, m8, hxh, 8);
x = _mm256_maskz_cvtx_roundph_ps (m8, hxh, 8);
}
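
(Decoding the bare rounding immediates in avx10_2_test_6/7 above:
4 is _MM_FROUND_CUR_DIRECTION, 8 is _MM_FROUND_NO_EXC, and 11 is
_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC, i.e. 3 | 8.)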

gcc/testsuite/gcc.target/i386/sse-13.c

@@ -864,5 +864,12 @@
#define __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
#include <x86intrin.h>

gcc/testsuite/gcc.target/i386/sse-14.c

@@ -1029,6 +1029,12 @@ test_1 (_mm256_cvt_roundpd_epi32, __m128i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epi64, __m256i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epu32, __m128i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epu64, __m256i, __m256d, 9)
test_1 (_mm256_cvt_roundph_epi32, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_pd, __m256d, __m128h, 8)
test_1 (_mm256_cvt_roundph_ps, __m256, __m128i, 8)
test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1040,6 +1046,13 @@ test_2 (_mm256_maskz_cvt_roundpd_epi32, __m128i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epi64, __m256i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epu32, __m128i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epu64, __m256i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundph_epi32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_pd, __m256d, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_ps, __m256, __mmask8, __m128i, 8)
test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1054,6 +1067,13 @@ test_3 (_mm256_mask_cvt_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epu32, __m128i, __m128i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epi64, __m256i, __m256i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epu64, __m256i, __m256i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundph_epi32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_pd, __m256d, __m256d, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_ps, __m256, __m256, __mmask8, __m128i, 8)
test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)

gcc/testsuite/gcc.target/i386/sse-22.c

@@ -1070,6 +1070,13 @@ test_1 (_mm256_cvt_roundpd_epi32, __m128i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epi64, __m256i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epu32, __m128i, __m256d, 9)
test_1 (_mm256_cvt_roundpd_epu64, __m256i, __m256d, 9)
test_1 (_mm256_cvt_roundph_epi32, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_pd, __m256d, __m128h, 8)
test_1 (_mm256_cvt_roundph_ps, __m256, __m128i, 8)
test_1 (_mm256_cvtx_roundph_ps, __m256, __m128h, 8)
test_1 (_mm256_cvt_roundph_epi64, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu32, __m256i, __m128h, 8)
test_1 (_mm256_cvt_roundph_epu64, __m256i, __m128h, 8)
test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9)
test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8)
test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9)
@@ -1081,6 +1088,13 @@ test_2 (_mm256_maskz_cvt_roundpd_epi32, __m128i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epi64, __m256i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epu32, __m128i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundpd_epu64, __m256i, __mmask8, __m256d, 9)
test_2 (_mm256_maskz_cvt_roundph_epi32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_pd, __m256d, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_ps, __m256, __mmask8, __m128i, 8)
test_2 (_mm256_maskz_cvtx_roundph_ps, __m256, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epi64, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu32, __m256i, __mmask8, __m128h, 8)
test_2 (_mm256_maskz_cvt_roundph_epu64, __m256i, __mmask8, __m128h, 8)
test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8)
test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8)
test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8)
@@ -1095,6 +1109,13 @@ test_3 (_mm256_mask_cvt_roundpd_epi32, __m128i, __m128i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epu32, __m128i, __m128i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epi64, __m256i, __m256i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundpd_epu64, __m256i, __m256i, __mmask8, __m256d, 9)
test_3 (_mm256_mask_cvt_roundph_epi32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_pd, __m256d, __m256d, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_ps, __m256, __m256, __mmask8, __m128i, 8)
test_3 (_mm256_mask_cvtx_roundph_ps, __m256, __m256, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epi64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu32, __m256i, __m256i, __mmask8, __m128h, 8)
test_3 (_mm256_mask_cvt_roundph_epu64, __m256i, __m256i, __mmask8, __m128h, 8)
test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8)
test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8)
test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8)

gcc/testsuite/gcc.target/i386/sse-23.c

@@ -839,6 +839,13 @@
#define __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, D) __builtin_ia32_cvtpd2uqq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2dq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2pd256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2ps256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2psx256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2qq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2udq256_mask_round(A, B, C, 8)
#define __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, D) __builtin_ia32_vcvtph2uqq256_mask_round(A, B, C, 8)
#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,sha,xsavec,xsaves,clflushopt,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,vpclmulqdq,pconfig,wbnoinvd,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avxifma,avxvnniint8,avxneconvert,cmpccxadd,amx-fp16,prefetchi,raoint,amx-complex,avxvnniint16,sm3,sha512,sm4,avx10.2-512")