diff --git a/gcc/config.gcc b/gcc/config.gcc
index a518e976b82..f7f2002a45f 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -450,8 +450,7 @@ i[34567]86-*-* | x86_64-*-*)
avxvnniint8intrin.h avxneconvertintrin.h
cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
- sm3intrin.h sha512intrin.h sm4intrin.h
- usermsrintrin.h avx10_2roundingintrin.h
+ sm3intrin.h sha512intrin.h sm4intrin.h usermsrintrin.h
avx10_2mediaintrin.h avx10_2-512mediaintrin.h
avx10_2convertintrin.h avx10_2-512convertintrin.h
avx10_2bf16intrin.h avx10_2-512bf16intrin.h
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
deleted file mode 100644
index 9d6a49742ae..00000000000
--- a/gcc/config/i386/avx10_2roundingintrin.h
+++ /dev/null
@@ -1,337 +0,0 @@
-/* Copyright (C) 2024-2025 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3, or (at your option)
- any later version.
-
- GCC is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- Under Section 7 of GPL version 3, you are granted additional
- permissions described in the GCC Runtime Library Exception, version
- 3.1, as published by the Free Software Foundation.
-
- You should have received a copy of the GNU General Public License and
- a copy of the GCC Runtime Library Exception along with this program;
- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _IMMINTRIN_H_INCLUDED
-#error "Never use directly; include instead."
-#endif
-
-#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
-
-#ifndef __AVX10_2_256__
-#pragma GCC push_options
-#pragma GCC target("avx10.2-256")
-#define __DISABLE_AVX10_2_256__
-#endif /* __AVX10_2_256__ */
-
-#ifdef __OPTIMIZE__
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_undefined_pd (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B, const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256d
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
- const int __R)
-{
- return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_undefined_ph (),
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
- __m256h __B, const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf) __W,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256h
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
- const int __R)
-{
- return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- (__v16hf)
- _mm256_setzero_ph (),
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_undefined_ps (),
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __m256
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
- const int __R)
-{
- return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
- const int __R)
-{
- return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
- const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
- (__v4df) __B,
- __C,
- (__mmask8) __U,
- __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
- const int __R)
-{
- return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- __C,
- (__mmask16) -1,
- __R);
-}
-
-extern __inline __mmask16
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
- const int __C, const int __R)
-{
- return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
- (__v16hf) __B,
- __C,
- (__mmask16) __U,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__mmask8) -1,
- __R);
-}
-
-extern __inline __mmask8
-__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
-_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
- const int __C, const int __R)
-{
- return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
- (__v8sf) __B,
- __C,
- (__mmask8) __U,
- __R);
-}
-#else
-#define _mm256_add_round_pd(A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_undefined_pd ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_pd(W, U, A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_pd(U, A, B, R) \
- ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (__v4df) \
- (_mm256_setzero_pd ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_add_round_ph(A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_undefined_ph ()), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_ph(W, U, A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) (W), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_ph(U, A, B, R) \
- ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (__v16hf) \
- (_mm256_setzero_ph ()), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_add_round_ps(A, B, R) \
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_undefined_ps ()), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_add_round_ps(W, U, A, B, R) \
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) (W), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_maskz_add_round_ps(U, A, B, R)\
- ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (__v8sf) \
- (_mm256_setzero_ps ()), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cmp_round_pd_mask(A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
- (__v4df) (B), \
- (C), \
- (__mmask8) (U), \
- (R)))
-
-#define _mm256_cmp_round_ph_mask(A, B, C, R) \
- ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (C), \
- (__mmask16) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
- ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
- (__v16hf) (B), \
- (C), \
- (__mmask16) (U), \
- (R)))
-
-#define _mm256_cmp_round_ps_mask(A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__mmask8) (-1), \
- (R)))
-
-#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
- ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
- (__v8sf) (B), \
- (C), \
- (__mmask8) (U), \
- (R)))
-#endif
-
-#ifdef __DISABLE_AVX10_2_256__
-#undef __DISABLE_AVX10_2_256__
-#pragma GCC pop_options
-#endif /* __DISABLE_AVX10_2_256__ */
-
-#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
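For context, the sketch below shows the style of caller the deleted header
used to support.  The function is a hypothetical example, not code from this
patch; the 512-bit AVX512F forms such as _mm512_add_round_pd are unaffected.

/* Hypothetical caller: 256-bit embedded rounding via the deleted header.  */
#include <immintrin.h>

__m256d
add_round_nearest (__m256d a, __m256d b)
{
  /* Round to nearest even, with floating-point exceptions suppressed.  */
  return _mm256_add_round_pd (a, b,
			      _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}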
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 1974a35c9ae..64bde021d11 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1416,12 +1416,6 @@ DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)

# AVX10.2 builtins
-DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
-DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
-DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
-DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
-DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
-DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI, INT)
DEF_FUNCTION_TYPE (V32HF, V16SF, V16SF, V32HF, USI)
DEF_FUNCTION_TYPE (V16HF, V8SF, V8SF, V16HF, UHI)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index cce2e9568af..712083b7435 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3660,12 +3660,6 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)

/* AVX10.2.  */
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
-BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvt2ps2phx_v32hf_mask_round, "__builtin_ia32_vcvt2ps2phx512_mask_round", IX86_BUILTIN_VCVT2PS2PHX_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V16SF_V16SF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2ibsv32hf_mask_round, "__builtin_ia32_cvtph2ibs512_mask_round", IX86_BUILTIN_CVTPH2IBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX10_2_512, CODE_FOR_avx10_2_cvtph2iubsv32hf_mask_round, "__builtin_ia32_cvtph2iubs512_mask_round", IX86_BUILTIN_CVTPH2IUBS512_MASK_ROUND, UNKNOWN, (int) V32HI_FTYPE_V32HF_V32HI_USI_INT)
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index e0e357ce53b..bceffa05c96 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -12766,14 +12766,11 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case INT_FTYPE_V4SF_V4SF_INT_INT:
case INT_FTYPE_V2DF_V2DF_INT_INT:
return ix86_expand_sse_comi_round (d, exp, target, true);
- case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
- case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
- case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12798,11 +12795,8 @@ ix86_expand_round_builtin (const struct builtin_description *d,
nargs = 5;
break;
case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
- case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
- case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
- case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
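The deleted case labels encode builtin signatures.  As a sketch of the naming
convention (parameter names here are invented, not from GCC),
V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT described the removed add builtin:

/* V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT corresponds to a prototype like:  */
__v4df __builtin_ia32_addpd256_mask_round (__v4df a, __v4df b,
					   __v4df merge_src, __mmask8 mask,
					   int rounding);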
diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
index 6907a2c0b3a..c30a4e036d6 100644
--- a/gcc/config/i386/immintrin.h
+++ b/gcc/config/i386/immintrin.h
@@ -146,8 +146,6 @@
#include <usermsrintrin.h>

-#include <avx10_2roundingintrin.h>
-
#include <avx10_2mediaintrin.h>

#include <avx10_2-512mediaintrin.h>

diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 4dbe60e3cf0..0ded0d5f9e5 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -2649,7 +2649,7 @@
	(plusminus:VF_BHSD
	  (match_operand:VF_BHSD 1 "<round_nimm_predicate>")
	  (match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
  "ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")

(define_insn "*<insn><mode>3<mask_name><round_name>"
@@ -2658,7 +2658,7 @@
(match_operand:VFH 1 "" "0,v")
(match_operand:VFH 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (, mode, operands)
- && && "
+ && && "
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -2738,7 +2738,7 @@
	(mult:VF_BHSD
	  (match_operand:VF_BHSD 1 "<round_nimm_predicate>")
	  (match_operand:VF_BHSD 2 "<round_nimm_predicate>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
  "ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")

(define_insn "*mul<mode>3<mask_name><round_name>"
@@ -2747,7 +2747,7 @@
(match_operand:VFH 1 "" "%0,v")
(match_operand:VFH 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands)
- && && "
+ && && "
"@
mul\t{%2, %0|%0, %2}
vmul\t{%2, %1, %0|%0, %1, %2}"
@@ -2895,7 +2895,7 @@
	(div:VFH
	  (match_operand:VFH 1 "register_operand" "0,v")
	  (match_operand:VFH 2 "<round_nimm_predicate>" "xBm,<round_constraint>")))]
-  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_SSE && <mask_mode512bit_condition> && <round_mode512bit_condition>"
  "@
   div<ssemodesuffix>\t{%2, %0|%0, %2}
   vdiv<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
@@ -3061,7 +3061,7 @@
(define_insn "_sqrt2"
[(set (match_operand:VFH 0 "register_operand" "=x,v")
(sqrt:VFH (match_operand:VFH 1 "" "xBm,")))]
- "TARGET_SSE && && "
+ "TARGET_SSE && && "
"@
sqrt\t{%1, %0|%0, %1}
vsqrt\t{%1, %0|%0, %1}"
@@ -3277,7 +3277,7 @@
(match_operand:VFH 1 "")
(match_operand:VFH 2 "")))]
"TARGET_SSE &&
- && "
+ && "
{
if (!flag_finite_math_only || flag_signed_zeros)
{
@@ -3305,7 +3305,7 @@
"TARGET_SSE
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))
&&
- && "
+ && "
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -3392,7 +3392,7 @@
	  IEEE_MAXMIN))]
  "TARGET_SSE
   && <mask_mode512bit_condition>
-   && <round_saeonly_mode_condition>"
+   && <round_saeonly_mode512bit_condition>"
  "@
   <ieee_maxmin><ssemodesuffix>\t{%2, %0|%0, %2}
   v<ieee_maxmin><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
@@ -4391,7 +4391,7 @@
	   (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "<round_saeonly_constraint>")
	   (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
	  UNSPEC_PCMP))]
-  "TARGET_AVX512F && <round_saeonly_mode_condition>"
+  "TARGET_AVX512F && <round_saeonly_mode512bit_condition>"
  "vcmp<ssemodesuffix>\t{%3, <round_saeonly_mask_scalar_merge_op4>%2, %1, %0<mask_scalar_merge_operand4>|%0<mask_scalar_merge_operand4>, %1, %2<round_saeonly_mask_scalar_merge_op4>, %3}"
  [(set_attr "type" "ssecmp")
   (set_attr "length_immediate" "1")
@@ -5897,7 +5897,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fmadd__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -5939,7 +5939,7 @@
	  (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v")
	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
	  (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
  "@
   vfmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
   vfmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -5980,7 +5980,7 @@
(match_operand:VFH_AVX512VL 3 "" "v,"))
(match_dup 1)
(match_operand: 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"@
vfmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -6027,7 +6027,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fmsub__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -6042,7 +6042,7 @@
	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
	  (neg:VFH_SF_AVX512VL
	    (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
  "@
   vfmsub132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
   vfmsub213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6103,7 +6103,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand: 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"vfmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6133,7 +6133,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fnmadd__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -6148,7 +6148,7 @@
	    (match_operand:VFH_SF_AVX512VL 1 "<bcst_round_nimm_predicate>" "%0,0,v"))
	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
	  (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0")))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"
  "@
   vfnmadd132<ssemodesuffix>\t{<round_sd_mask_op4>%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2<round_sd_mask_op4>}
   vfnmadd213<ssemodesuffix>\t{<round_sd_mask_op4>%3, %2, %0<sd_mask_op4>|%0<sd_mask_op4>, %2, %3<round_sd_mask_op4>}
@@ -6191,7 +6191,7 @@
(match_operand:VFH_AVX512VL 3 "" "v,"))
(match_dup 1)
(match_operand: 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"@
vfnmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfnmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -6209,7 +6209,7 @@
(match_operand:VFH_AVX512VL 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand: 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"vfnmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -6240,7 +6240,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fnmsub__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -6256,7 +6256,7 @@
	  (match_operand:VFH_SF_AVX512VL 2 "<bcst_round_nimm_predicate>" "<bcst_round_constraint>,v,<bcst_round_constraint>")
	  (neg:VFH_SF_AVX512VL
	    (match_operand:VFH_SF_AVX512VL 3 "<bcst_round_nimm_predicate>" "v,<bcst_round_constraint>,0"))))]
-  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode_condition>"
+  "TARGET_AVX512F && <sd_mask_mode512bit_condition> && <round_mode512bit_condition>"