From e1139c5b0daa6bfdb0d8d5df2fc7f23a2840af25 Mon Sep 17 00:00:00 2001 From: Haochen Jiang <haochen.jiang@intel.com> Date: Mon, 24 Mar 2025 14:24:33 +0800 Subject: [PATCH] Revert "AVX10.2 ymm rounding: Support vadd{s,d,h} and vcmp{s,d,h} intrins" This reverts commit e22e3af1954469c40b139b7cfa8e7708592f4bfd. --- gcc/config.gcc | 3 +- gcc/config/i386/avx10_2roundingintrin.h | 337 ------------------ gcc/config/i386/i386-builtin-types.def | 6 - gcc/config/i386/i386-builtin.def | 6 - gcc/config/i386/i386-expand.cc | 6 - gcc/config/i386/immintrin.h | 2 - gcc/config/i386/sse.md | 116 +++--- gcc/config/i386/subst.md | 32 +- gcc/testsuite/gcc.target/i386/avx-1.c | 8 - .../gcc.target/i386/avx10_2-rounding-1.c | 64 ---- gcc/testsuite/gcc.target/i386/sse-13.c | 8 - gcc/testsuite/gcc.target/i386/sse-14.c | 17 - gcc/testsuite/gcc.target/i386/sse-22.c | 17 - gcc/testsuite/gcc.target/i386/sse-23.c | 8 - 14 files changed, 69 insertions(+), 561 deletions(-) delete mode 100644 gcc/config/i386/avx10_2roundingintrin.h delete mode 100644 gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c diff --git a/gcc/config.gcc b/gcc/config.gcc index a518e976b82..f7f2002a45f 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -450,8 +450,7 @@ i[34567]86-*-* | x86_64-*-*) avxvnniint8intrin.h avxneconvertintrin.h cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h - sm3intrin.h sha512intrin.h sm4intrin.h - usermsrintrin.h avx10_2roundingintrin.h + sm3intrin.h sha512intrin.h sm4intrin.h usermsrintrin.h avx10_2mediaintrin.h avx10_2-512mediaintrin.h avx10_2convertintrin.h avx10_2-512convertintrin.h avx10_2bf16intrin.h avx10_2-512bf16intrin.h diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h deleted file mode 100644 index 9d6a49742ae..00000000000 --- a/gcc/config/i386/avx10_2roundingintrin.h +++ /dev/null @@ -1,337 +0,0 @@ -/* Copyright (C) 2024-2025 Free Software Foundation, Inc. - - This file is part of GCC. - - GCC is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - GCC is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - <http://www.gnu.org/licenses/>. */ - -#ifndef _IMMINTRIN_H_INCLUDED -#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
-#endif - -#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED -#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED - -#ifndef __AVX10_2_256__ -#pragma GCC push_options -#pragma GCC target("avx10.2-256") -#define __DISABLE_AVX10_2_256__ -#endif /* __AVX10_2_256__ */ - -#ifdef __OPTIMIZE__ -extern __inline __m256d -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R) -{ - return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_undefined_pd (), - (__mmask8) -1, - __R); -} - -extern __inline __m256d -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A, - __m256d __B, const int __R) -{ - return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U, - __R); -} - -extern __inline __m256d -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B, - const int __R) -{ - return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U, - __R); -} - -extern __inline __m256h -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R) -{ - return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A, - (__v16hf) __B, - (__v16hf) - _mm256_undefined_ph (), - (__mmask16) -1, - __R); -} - -extern __inline __m256h -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A, - __m256h __B, const int __R) -{ - return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A, - (__v16hf) __B, - (__v16hf) __W, - (__mmask16) __U, - __R); -} - -extern __inline __m256h -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B, - const int __R) -{ - return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A, - (__v16hf) __B, - (__v16hf) - _mm256_setzero_ph (), - (__mmask16) __U, - __R); -} - -extern __inline __m256 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R) -{ - return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_undefined_ps (), - (__mmask8) -1, - __R); -} - -extern __inline __m256 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B, - const int __R) -{ - return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U, - __R); -} - -extern __inline __m256 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B, - const int __R) -{ - return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U, - __R); -} - -extern __inline __mmask8 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C, - const int __R) -{ - return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A, - (__v4df) __B, - __C, - (__mmask8) -1, - __R); -} - -extern __inline __mmask8 -__attribute__ ((__gnu_inline__, 
__always_inline__, __artificial__)) -_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B, - const int __C, const int __R) -{ - return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A, - (__v4df) __B, - __C, - (__mmask8) __U, - __R); -} - -extern __inline __mmask16 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C, - const int __R) -{ - return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A, - (__v16hf) __B, - __C, - (__mmask16) -1, - __R); -} - -extern __inline __mmask16 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B, - const int __C, const int __R) -{ - return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A, - (__v16hf) __B, - __C, - (__mmask16) __U, - __R); -} - -extern __inline __mmask8 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R) -{ - return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A, - (__v8sf) __B, - __C, - (__mmask8) -1, - __R); -} - -extern __inline __mmask8 -__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) -_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B, - const int __C, const int __R) -{ - return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A, - (__v8sf) __B, - __C, - (__mmask8) __U, - __R); -} -#else -#define _mm256_add_round_pd(A, B, R) \ - ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \ - (__v4df) (B), \ - (__v4df) \ - (_mm256_undefined_pd ()), \ - (__mmask8) (-1), \ - (R))) - -#define _mm256_mask_add_round_pd(W, U, A, B, R) \ - ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \ - (__v4df) (B), \ - (__v4df) (W), \ - (__mmask8) (U), \ - (R))) - -#define _mm256_maskz_add_round_pd(U, A, B, R) \ - ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \ - (__v4df) (B), \ - (__v4df) \ - (_mm256_setzero_pd ()), \ - (__mmask8) (U), \ - (R))) - -#define _mm256_add_round_ph(A, B, R) \ - ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \ - (__v16hf) (B), \ - (__v16hf) \ - (_mm256_undefined_ph ()), \ - (__mmask16) (-1), \ - (R))) - -#define _mm256_mask_add_round_ph(W, U, A, B, R) \ - ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \ - (__v16hf) (B), \ - (__v16hf) (W), \ - (__mmask16) (U), \ - (R))) - -#define _mm256_maskz_add_round_ph(U, A, B, R) \ - ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \ - (__v16hf) (B), \ - (__v16hf) \ - (_mm256_setzero_ph ()), \ - (__mmask16) (U), \ - (R))) - -#define _mm256_add_round_ps(A, B, R) \ - ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \ - (__v8sf) (B), \ - (__v8sf) \ - (_mm256_undefined_ps ()), \ - (__mmask8) (-1), \ - (R))) - -#define _mm256_mask_add_round_ps(W, U, A, B, R) \ - ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \ - (__v8sf) (B), \ - (__v8sf) (W), \ - (__mmask8) (U), \ - (R))) - -#define _mm256_maskz_add_round_ps(U, A, B, R)\ - ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \ - (__v8sf) (B), \ - (__v8sf) \ - (_mm256_setzero_ps ()), \ - (__mmask8) (U), \ - (R))) - -#define _mm256_cmp_round_pd_mask(A, B, C, R) \ - ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \ - (__v4df) (B), \ - (C), \ - (__mmask8) (-1), \ - (R))) - -#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \ - ((__mmask8) 
__builtin_ia32_cmppd256_mask_round ((__v4df) (A), \ - (__v4df) (B), \ - (C), \ - (__mmask8) (U), \ - (R))) - -#define _mm256_cmp_round_ph_mask(A, B, C, R) \ - ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \ - (__v16hf) (B), \ - (C), \ - (__mmask16) (-1), \ - (R))) - -#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \ - ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \ - (__v16hf) (B), \ - (C), \ - (__mmask16) (U), \ - (R))) - -#define _mm256_cmp_round_ps_mask(A, B, C, R) \ - ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \ - (__v8sf) (B), \ - (C), \ - (__mmask8) (-1), \ - (R))) - -#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \ - ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \ - (__v8sf) (B), \ - (C), \ - (__mmask8) (U), \ - (R))) -#endif - -#ifdef __DISABLE_AVX10_2_256__ -#undef __DISABLE_AVX10_2_256__ -#pragma GCC pop_options -#endif /* __DISABLE_AVX10_2_256__ */ - -#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */ diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def index 1974a35c9ae..64bde021d11 100644 --- a/gcc/config/i386/i386-builtin-types.def +++ b/gcc/config/i386/i386-builtin-types.def @@ -1416,12 +1416,6 @@ DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI) DEF_FUNCTION_TYPE (VOID, UINT64, UINT64) # AVX10.2 builtins -DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT) -DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT) -DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT) -DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT) -DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT) -DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT) DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI, INT) DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI) DEF_FUNCTION_TYPE (V16HF, V8SF, V8SF, V16HF, UHI) diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def index cce2e9568af..712083b7435 100644 --- a/gcc/config/i386/i386-builtin.def +++ b/gcc/config/i386/i386-builtin.def @@ -3660,12 +3660,6 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, " BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT) /* AVX10.2. 
*/ -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT) -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT) -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT) -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT) -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT) -BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT) BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvt2ps2phx_v32hf_mask_round, "__builtin_ia32_vcvt2ps2phx512_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT) BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask_round, "__builtin_ia32_cvtph2ibs512_mask_round", IX86_BUILTIN_CVTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT) BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask_round, "__builtin_ia32_cvtph2iubs512_mask_round", IX86_BUILTIN_CVTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT) diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc index e0e357ce53b..bceffa05c96 100644 --- a/gcc/config/i386/i386-expand.cc +++ b/gcc/config/i386/i386-expand.cc @@ -12766,14 +12766,11 @@ ix86_expand_round_builtin (const struct builtin_description *d, case INT_FTYPE_V4SF_V4SF_INT_INT: case INT_FTYPE_V2DF_V2DF_INT_INT: return ix86_expand_sse_comi_round (d, exp, target, true); - case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT: case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT: case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT: case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT: case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT: - case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT: case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT: - case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT: case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT: case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT: case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT: @@ -12798,11 +12795,8 @@ ix86_expand_round_builtin (const struct builtin_description *d, nargs = 5; break; case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT: - case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT: case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT: case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT: - case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT: - case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT: case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT: case USI_FTYPE_V32HF_V32HF_INT_USI_INT: case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT: diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h index 6907a2c0b3a..c30a4e036d6 100644 --- a/gcc/config/i386/immintrin.h +++ b/gcc/config/i386/immintrin.h @@ -146,8 +146,6 @@ #include <sm4intrin.h> -#include <avx10_2roundingintrin.h> - #include <avx10_2mediaintrin.h> #include <avx10_2-512mediaintrin.h> diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md index 4dbe60e3cf0..0ded0d5f9e5 100644 --- a/gcc/config/i386/sse.md +++ 
b/gcc/config/i386/sse.md @@ -2649,7 +2649,7 @@ (plusminus:VF_BHSD (match_operand:VF_BHSD 1 "") (match_operand:VF_BHSD 2 "")))] - "TARGET_SSE && && " + "TARGET_SSE && && " "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*3" @@ -2658,7 +2658,7 @@ (match_operand:VFH 1 "" "0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && ix86_binary_operator_ok (, mode, operands) - && && " + && && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" @@ -2738,7 +2738,7 @@ (mult:VF_BHSD (match_operand:VF_BHSD 1 "") (match_operand:VF_BHSD 2 "")))] - "TARGET_SSE && && " + "TARGET_SSE && && " "ix86_fixup_binary_operands_no_copy (MULT, mode, operands);") (define_insn "*mul3" @@ -2747,7 +2747,7 @@ (match_operand:VFH 1 "" "%0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands) - && && " + && && " "@ mul\t{%2, %0|%0, %2} vmul\t{%2, %1, %0|%0, %1, %2}" @@ -2895,7 +2895,7 @@ (div:VFH (match_operand:VFH 1 "register_operand" "0,v") (match_operand:VFH 2 "" "xBm,")))] - "TARGET_SSE && && " + "TARGET_SSE && && " "@ div\t{%2, %0|%0, %2} vdiv\t{%2, %1, %0|%0, %1, %2}" @@ -3061,7 +3061,7 @@ (define_insn "_sqrt2" [(set (match_operand:VFH 0 "register_operand" "=x,v") (sqrt:VFH (match_operand:VFH 1 "" "xBm,")))] - "TARGET_SSE && && " + "TARGET_SSE && && " "@ sqrt\t{%1, %0|%0, %1} vsqrt\t{%1, %0|%0, %1}" @@ -3277,7 +3277,7 @@ (match_operand:VFH 1 "") (match_operand:VFH 2 "")))] "TARGET_SSE && - && " + && " { if (!flag_finite_math_only || flag_signed_zeros) { @@ -3305,7 +3305,7 @@ "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2])) && - && " + && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" @@ -3392,7 +3392,7 @@ IEEE_MAXMIN))] "TARGET_SSE && - && " + && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" @@ -4391,7 +4391,7 @@ (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "") (match_operand:SI 3 "" "n")] UNSPEC_PCMP))] - "TARGET_AVX512F && " + "TARGET_AVX512F && " "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") @@ -5897,7 +5897,7 @@ (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512F && " + "TARGET_AVX512F && " { emit_insn (gen_fma_fmadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -5939,7 +5939,7 @@ (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v") (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfmadd132\t{%2, %3, %0|%0, %3, %2} vfmadd213\t{%3, %2, %0|%0, %2, %3} @@ -5980,7 +5980,7 @@ (match_operand:VFH_AVX512VL 3 "" "v,")) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] - "TARGET_AVX512F && " + "TARGET_AVX512F && " "@ vfmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" @@ -6027,7 +6027,7 @@ (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512F && " + "TARGET_AVX512F && " { emit_insn (gen_fma_fmsub__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -6042,7 +6042,7 @@ (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfmsub132\t{%2, %3, %0|%0, %3, %2} vfmsub213\t{%3, %2, %0|%0, %2, %3} @@ -6103,7 +6103,7 @@ (match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0"))) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] - "TARGET_AVX512F 
&& " + "TARGET_AVX512F && " "vfmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "prefix" "evex") @@ -6133,7 +6133,7 @@ (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512F && " + "TARGET_AVX512F && " { emit_insn (gen_fma_fnmadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -6148,7 +6148,7 @@ (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v")) (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfnmadd132\t{%2, %3, %0|%0, %3, %2} vfnmadd213\t{%3, %2, %0|%0, %2, %3} @@ -6191,7 +6191,7 @@ (match_operand:VFH_AVX512VL 3 "" "v,")) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] - "TARGET_AVX512F && " + "TARGET_AVX512F && " "@ vfnmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" @@ -6209,7 +6209,7 @@ (match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0")) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] - "TARGET_AVX512F && " + "TARGET_AVX512F && " "vfnmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "prefix" "evex") @@ -6240,7 +6240,7 @@ (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512F && " + "TARGET_AVX512F && " { emit_insn (gen_fma_fnmsub__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -6256,7 +6256,7 @@ (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfnmsub132\t{%2, %3, %0|%0, %3, %2} vfnmsub213\t{%3, %2, %0|%0, %2, %3} @@ -6301,7 +6301,7 @@ (match_operand:VFH_AVX512VL 3 "" "v,"))) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] - "TARGET_AVX512F && " + "TARGET_AVX512F && " "@ vfnmsub132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" @@ -6418,7 +6418,7 @@ (match_operand:VFH_AVX512VL 2 "" ",v,") (match_operand:VFH_AVX512VL 3 "" "v,,0")] UNSPEC_FMADDSUB))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfmaddsub132\t{%2, %3, %0|%0, %3, %2} vfmaddsub213\t{%3, %2, %0|%0, %2, %3} @@ -6488,7 +6488,7 @@ (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "" "v,,0"))] UNSPEC_FMADDSUB))] - "TARGET_AVX512F && && " + "TARGET_AVX512F && && " "@ vfmsubadd132\t{%2, %3, %0|%0, %3, %2} vfmsubadd213\t{%3, %2, %0|%0, %2, %3} @@ -7057,7 +7057,7 @@ (match_operand:VHF_AVX512VL 2 "") (match_operand:VHF_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx op0, op1, dest; if () @@ -7087,7 +7087,7 @@ (match_operand:VHF_AVX512VL 2 "") (match_operand:VHF_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { emit_insn (gen_fma_fmaddc__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -7101,7 +7101,7 @@ (match_operand:VHF_AVX512VL 2 "") (match_operand:VHF_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx op0, op1, dest; if () @@ -7133,7 +7133,7 @@ (match_operand:VHF_AVX512VL 2 "") (match_operand:VHF_AVX512VL 3 "") (match_operand: 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { emit_insn (gen_fma_fcmaddc__maskz_1 ( operands[0], operands[1], operands[2], operands[3], @@ -7157,7 +7157,7 @@ (match_operand:VHF_AVX512VL 2 
"" "") (match_operand:VHF_AVX512VL 3 "" "0")] UNSPEC_COMPLEX_F_C_MA))] - "TARGET_AVX512FP16 && && " + "TARGET_AVX512FP16 && && " "v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "prefix" "evex") @@ -7295,7 +7295,7 @@ (unspec: [(match_operand: 4 "register_operand" "Yk")] UNSPEC_COMPLEX_MASK)))] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " "v\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "prefix" "evex") @@ -7315,7 +7315,7 @@ [(match_operand:VHF_AVX512VL 1 "" "v") (match_operand:VHF_AVX512VL 2 "" "")] UNSPEC_COMPLEX_F_C_MUL))] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { if (TARGET_DEST_FALSE_DEP_FOR_GLC && ) @@ -7332,7 +7332,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz ( operands[0], operands[1], operands[2], operands[3], @@ -7346,7 +7346,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx op0, op1, dest; @@ -7376,7 +7376,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz ( operands[0], operands[1], operands[2], operands[3], @@ -7390,7 +7390,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx op0, op1, dest; @@ -7420,7 +7420,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx dest, op0, op1; @@ -7450,7 +7450,7 @@ (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] - "TARGET_AVX512FP16 && " + "TARGET_AVX512FP16 && " { rtx dest, op0, op1; @@ -8621,7 +8621,7 @@ [(set (match_operand:VF1 0 "register_operand" "=x,v") (float:VF1 (match_operand: 1 "" "xBm,")))] - "TARGET_SSE2 && && " + "TARGET_SSE2 && && " "@ cvtdq2ps\t{%1, %0|%0, %1} vcvtdq2ps\t{%1, %0|%0, %1}" @@ -8700,7 +8700,7 @@ [(set (match_operand:VI8_256_512 0 "register_operand" "=v") (unspec:VI8_256_512 [(match_operand: 1 "nonimmediate_operand" "")] UNSPEC_FIX_NOTRUNC))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvtps2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -8723,7 +8723,7 @@ [(set (match_operand:VI8_256_512 0 "register_operand" "=v") (unspec:VI8_256_512 [(match_operand: 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvtps2uqq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9168,7 +9168,7 @@ [(set (match_operand: 0 "register_operand" "=v") (any_float: (match_operand:VI8_256_512 1 "nonimmediate_operand" "")))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvtqq2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9669,7 +9669,7 @@ (unspec: [(match_operand:VF2_AVX512VL 1 "" "")] UNSPEC_VCVTT_U))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvttpd2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9679,7 +9679,7 @@ [(set (match_operand: 0 "register_operand" "=v") (any_fix: (match_operand:VF2_AVX512VL 1 "" "")))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvttpd2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") 
(set_attr "prefix" "evex") @@ -9690,7 +9690,7 @@ (unspec: [(match_operand:VF2_AVX512VL 1 "" "")] UNSPEC_FIX_NOTRUNC))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvtpd2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9701,7 +9701,7 @@ (unspec: [(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvtpd2uqq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9712,7 +9712,7 @@ (unspec:VI8_256_512 [(match_operand: 1 "" "")] UNSPEC_VCVTT_U))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvttps2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -9722,7 +9722,7 @@ [(set (match_operand:VI8_256_512 0 "register_operand" "=v") (any_fix:VI8_256_512 (match_operand: 1 "" "")))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " "vcvttps2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -10200,7 +10200,7 @@ [(set (match_operand:VF2_512_256 0 "register_operand" "=v") (float_extend:VF2_512_256 (match_operand: 1 "" "")))] - "TARGET_AVX && && " + "TARGET_AVX && && " "vcvtps2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_vex") @@ -29792,7 +29792,7 @@ (match_operand:VF_AVX512VL 2 "" "") (match_operand:SI 3 "const_0_to_15_operand")] UNSPEC_RANGE))] - "TARGET_AVX512DQ && " + "TARGET_AVX512DQ && " { if (TARGET_DEST_FALSE_DEP_FOR_GLC && @@ -31686,7 +31686,7 @@ (match_operand: 2 "" "")) (float_truncate: (match_operand: 1 "register_operand" "v"))))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvt2ps2phx\t{%2, %1, %0|%0, %1, %2}") (define_mode_attr ssebvecmode @@ -32478,7 +32478,7 @@ (unspec: [(match_operand:VHF_AVX10_2 1 "" "")] UNSPEC_CVT_PH_IBS_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvtph2ibs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32493,7 +32493,7 @@ (unspec: [(match_operand:VHF_AVX10_2 1 "" "")] UNSPEC_CVTT_PH_IBS_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvttph2ibs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32508,7 +32508,7 @@ (unspec: [(match_operand:VF1_AVX10_2 1 "" "")] UNSPEC_CVT_PS_IBS_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvtps2ibs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32523,7 +32523,7 @@ (unspec: [(match_operand:VF1_AVX10_2 1 "" "")] UNSPEC_CVTT_PS_IBS_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvttps2ibs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32542,7 +32542,7 @@ (unspec: [(match_operand:VF1_VF2_AVX10_2 1 "" "")] UNSPEC_SAT_CVT_DS_SIGN_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvtt2dqs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32553,7 +32553,7 @@ (unspec: [(match_operand:VF2_AVX10_2 1 "" "")] UNSPEC_SAT_CVT_DS_SIGN_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvttpd2qqs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") @@ -32564,7 +32564,7 @@ (unspec:VI8_AVX10_2 [(match_operand: 1 "" "")] UNSPEC_SAT_CVT_DS_SIGN_ITER))] - "TARGET_AVX10_2_256 && " + "TARGET_AVX10_2_256 && " "vcvttps2qqs\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") diff --git a/gcc/config/i386/subst.md b/gcc/config/i386/subst.md index 88d16869fcc..c30b274822e 100644 --- a/gcc/config/i386/subst.md +++ b/gcc/config/i386/subst.md @@ 
-205,17 +205,11 @@ (define_subst_attr "bcst_round_nimm_predicate" "round" "bcst_vector_operand" "register_operand") (define_subst_attr "round_nimm_scalar_predicate" "round" "nonimmediate_operand" "register_operand") (define_subst_attr "round_prefix" "round" "vex" "evex") -(define_subst_attr "round_mode_condition" "round" "1" "((<MODE>mode == V16SFmode - || <MODE>mode == V8DFmode - || <MODE>mode == V8DImode - || <MODE>mode == V16SImode - || <MODE>mode == V32HFmode) - || (TARGET_AVX10_2_256 - && (<MODE>mode == V8SFmode - || <MODE>mode == V4DFmode - || <MODE>mode == V4DImode - || <MODE>mode == V8SImode - || <MODE>mode == V16HFmode)))") +(define_subst_attr "round_mode512bit_condition" "round" "1" "(<MODE>mode == V16SFmode + || <MODE>mode == V8DFmode + || <MODE>mode == V8DImode + || <MODE>mode == V16SImode + || <MODE>mode == V32HFmode)") (define_subst_attr "round_modev4sf_condition" "round" "1" "(<MODE>mode == V4SFmode)") (define_subst_attr "round_codefor" "round" "*" "") @@ -256,17 +250,11 @@ (define_subst_attr "round_saeonly_constraint2" "round_saeonly" "m" "v") (define_subst_attr "round_saeonly_nimm_predicate" "round_saeonly" "vector_operand" "register_operand") (define_subst_attr "round_saeonly_nimm_scalar_predicate" "round_saeonly" "nonimmediate_operand" "register_operand") -(define_subst_attr "round_saeonly_mode_condition" "round_saeonly" "1" "((<MODE>mode == V16SFmode - || <MODE>mode == V8DFmode - || <MODE>mode == V8DImode - || <MODE>mode == V16SImode - || <MODE>mode == V32HFmode) - || (TARGET_AVX10_2_256 - && (<MODE>mode == V8SFmode - || <MODE>mode == V4DFmode - || <MODE>mode == V4DImode - || <MODE>mode == V8SImode - || <MODE>mode == V16HFmode)))") +(define_subst_attr "round_saeonly_mode512bit_condition" "round_saeonly" "1" "(<MODE>mode == V16SFmode + || <MODE>mode == V8DFmode + || <MODE>mode == V8DImode + || <MODE>mode == V16SImode + || <MODE>mode == V32HFmode)") (define_subst "round_saeonly" diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c index b4f290d8230..33ef076a47a 100644 --- a/gcc/testsuite/gcc.target/i386/avx-1.c +++ b/gcc/testsuite/gcc.target/i386/avx-1.c @@ -842,14 +842,6 @@ /* sm3intrin.h */ #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1) -/* avx10_2roundingintrin.h */ -#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8) - /* avx10_2-512mediaintrin.h */ #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1) #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E) diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c deleted file mode 100644 index f1f143c1f2c..00000000000 --- a/gcc/testsuite/gcc.target/i386/avx10_2-rounding-1.c +++ /dev/null @@ -1,64 +0,0 @@ -/* { dg-do compile } */ -/* { dg-options "-O2 -march=x86-64-v3 -mavx10.2-256" } */ -/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddpd\[ 
\\t\]+\[^\n\]*\{rd-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddpd\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddph\[ \\t\]+\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\{%k\[0-9\]\}\{z\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rn-sae\}\[^\{\n\]*%ymm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{ru-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vaddps\[ \\t\]+\[^\n\]*\{rz-sae\}\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmppd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\\\$3\[^\n\r]*\{sae\}\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmpph\[ \\t\]+\[^\{\n\]*\\\$4\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%ymm\[0-9\]+\[^\n\r]*%k\[0-9\]\{%k\[0-9\]\}\[^\n\r]*(?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */ -/* { dg-final { scan-assembler-times "vcmpps\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%ymm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */ - -#include <immintrin.h> - -volatile __m256 x; -volatile __m256d xd; -volatile __m256h xh; -volatile __mmask8 m8; -volatile __mmask16 m16; -volatile __mmask32 m32; - -void extern -avx10_2_test_1 (void) -{ - xd = _mm256_add_round_pd (xd, xd, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); - xd = _mm256_mask_add_round_pd (xd, m8, xd, xd, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); - xd = _mm256_maskz_add_round_pd (m8, xd, xd, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); - - xh = _mm256_add_round_ph (xh, xh, 8); - xh = _mm256_mask_add_round_ph (xh, m32, xh, xh, 8); - xh = _mm256_maskz_add_round_ph (m32, xh, xh, 11); - - x = _mm256_add_round_ps (x, x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); - x = _mm256_mask_add_round_ps (x, m16, x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); - x = _mm256_maskz_add_round_ps (m16, x, x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); - - m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); - m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); - - m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8); - m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4); - - m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); - m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); -} - -void extern -avx10_2_test_2 (void) -{ - m8 = _mm256_cmp_round_pd_mask (xd, xd, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); - m8 = _mm256_mask_cmp_round_pd_mask (m8, xd, xd, _CMP_FALSE_OQ, 
_MM_FROUND_NO_EXC); - - m16 = _mm256_cmp_round_ph_mask (xh, xh, 3, 8); - m16 = _mm256_mask_cmp_round_ph_mask (m16, xh, xh, 4, 4); - - m8 = _mm256_cmp_round_ps_mask (x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); - m8 = _mm256_mask_cmp_round_ps_mask (m8, x, x, _CMP_FALSE_OQ, _MM_FROUND_NO_EXC); -} diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c index 74d9664d8ed..84a825de383 100644 --- a/gcc/testsuite/gcc.target/i386/sse-13.c +++ b/gcc/testsuite/gcc.target/i386/sse-13.c @@ -849,14 +849,6 @@ /* sm3intrin.h */ #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1) -/* avx10_2roundingintrin.h */ -#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8) - /* avx10_2-512mediaintrin.h */ #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1) #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E) diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c index 1a285653c36..3b1825037b0 100644 --- a/gcc/testsuite/gcc.target/i386/sse-14.c +++ b/gcc/testsuite/gcc.target/i386/sse-14.c @@ -1020,23 +1020,6 @@ test_2 (_mm512_gf2p8affine_epi64_epi8, __m512i, __m512i, __m512i, 1) /* sm3intrin.h */ test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1) -/* avx10_2roundingintrin.h */ -test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9) -test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8) -test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9) -test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8) -test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8) -test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8) -test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9) -test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8) -test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9) -test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8) -test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8) -test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8) -test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9) -test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8) -test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9) - /* avx10_2-512mediaintrin.h */ test_2 (_mm512_mpsadbw_epu8, __m512i, __m512i, __m512i, 1) test_3 (_mm512_maskz_mpsadbw_epu8, __m512i, __mmask32, __m512i, __m512i, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c index cbfbb13d75a..c1400366160 100644 --- a/gcc/testsuite/gcc.target/i386/sse-22.c +++ b/gcc/testsuite/gcc.target/i386/sse-22.c @@ -1061,23 +1061,6 @@ test_1 ( __bextri_u64, unsigned long long, unsigned long long, 
1) /* sm3intrin.h */ test_3 (_mm_sm3rnds2_epi32, __m128i, __m128i, __m128i, __m128i, 1) -/* avx10_2roundingintrin.h */ -test_2 (_mm256_add_round_pd, __m256d, __m256d, __m256d, 9) -test_2 (_mm256_add_round_ph, __m256h, __m256h, __m256h, 8) -test_2 (_mm256_add_round_ps, __m256, __m256, __m256, 9) -test_2x (_mm256_cmp_round_pd_mask, __mmask8, __m256d, __m256d, 1, 8) -test_2x (_mm256_cmp_round_ph_mask, __mmask16, __m256h, __m256h, 1, 8) -test_2x (_mm256_cmp_round_ps_mask, __mmask8, __m256, __m256, 1, 8) -test_3 (_mm256_maskz_add_round_pd, __m256d, __mmask8, __m256d, __m256d, 9) -test_3 (_mm256_maskz_add_round_ph, __m256h, __mmask16, __m256h, __m256h, 8) -test_3 (_mm256_maskz_add_round_ps, __m256, __mmask8, __m256, __m256, 9) -test_3x (_mm256_mask_cmp_round_pd_mask, __mmask8, __mmask8, __m256d, __m256d, 1, 8) -test_3x (_mm256_mask_cmp_round_ph_mask, __mmask16, __mmask16, __m256h, __m256h, 1, 8) -test_3x (_mm256_mask_cmp_round_ps_mask, __mmask8, __mmask8, __m256, __m256, 1, 8) -test_4 (_mm256_mask_add_round_pd, __m256d, __m256d, __mmask8, __m256d, __m256d, 9) -test_4 (_mm256_mask_add_round_ph, __m256h, __m256h, __mmask16, __m256h, __m256h, 8) -test_4 (_mm256_mask_add_round_ps, __m256, __m256, __mmask8, __m256, __m256, 9) - /* avx10_2-512mediaintrin.h */ test_2 (_mm512_mpsadbw_epu8, __m512i, __m512i, __m512i, 1) test_3 (_mm512_maskz_mpsadbw_epu8, __m512i, __mmask32, __m512i, __m512i, 1) diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c index 8b263a8195f..e38ee99e2fc 100644 --- a/gcc/testsuite/gcc.target/i386/sse-23.c +++ b/gcc/testsuite/gcc.target/i386/sse-23.c @@ -824,14 +824,6 @@ /* sm3intrin.h */ #define __builtin_ia32_vsm3rnds2(A, B, C, D) __builtin_ia32_vsm3rnds2 (A, B, C, 1) -/* avx10_2roundingintrin.h */ -#define __builtin_ia32_addpd256_mask_round(A, B, C, D, E) __builtin_ia32_addpd256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addph256_mask_round(A, B, C, D, E) __builtin_ia32_addph256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_addps256_mask_round(A, B, C, D, E) __builtin_ia32_addps256_mask_round(A, B, C, D, 8) -#define __builtin_ia32_cmppd256_mask_round(A, B, C, D, E) __builtin_ia32_cmppd256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpph256_mask_round(A, B, C, D, E) __builtin_ia32_cmpph256_mask_round(A, B, 1, D, 8) -#define __builtin_ia32_cmpps256_mask_round(A, B, C, D, E) __builtin_ia32_cmpps256_mask_round(A, B, 1, D, 8) - /* avx10_2-512mediaintrin.h */ #define __builtin_ia32_mpsadbw512(A, B, C) __builtin_ia32_mpsadbw512 (A, B, 1) #define __builtin_ia32_mpsadbw512_mask(A, B, C, D, E) __builtin_ia32_mpsadbw512_mask (A, B, 1, D, E)
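Context for this revert: the deleted avx10_2roundingintrin.h above was the only source of 256-bit explicit-rounding intrinsics, so user code written against them stops compiling once this patch lands. A minimal before/after sketch of the impact (hypothetical user code, not part of the patch; add_rne_old/add_rne_new are illustrative names, and the "before" function assumes a compiler built without this revert, using -mavx10.2-256):

#include <immintrin.h>

/* Before this revert: add four doubles with round-to-nearest and
   exception suppression requested per call, via the now-removed
   _mm256_add_round_pd from avx10_2roundingintrin.h.  */
__m256d
add_rne_old (__m256d a, __m256d b)
{
  return _mm256_add_round_pd (a, b,
                              _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}

/* After this revert: no per-call rounding argument exists for ymm
   operands; the plain AVX add uses the ambient MXCSR rounding mode.  */
__m256d
add_rne_new (__m256d a, __m256d b)
{
  return _mm256_add_pd (a, b);
}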