diff --git a/gcc/config.gcc b/gcc/config.gcc
index a36dd1bcbc6..2c0f4518638 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -452,7 +452,7 @@ i[34567]86-*-* | x86_64-*-*)
cmpccxaddintrin.h amxfp16intrin.h prfchiintrin.h
raointintrin.h amxcomplexintrin.h avxvnniint16intrin.h
sm3intrin.h sha512intrin.h sm4intrin.h
- usermsrintrin.h"
+ usermsrintrin.h avx10_2roundingintrin.h"
;;
ia64-*-*)
extra_headers=ia64intrin.h
diff --git a/gcc/config/i386/avx10_2roundingintrin.h b/gcc/config/i386/avx10_2roundingintrin.h
new file mode 100644
index 00000000000..5698ed05c1d
--- /dev/null
+++ b/gcc/config/i386/avx10_2roundingintrin.h
@@ -0,0 +1,337 @@
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ . */
+
+#ifndef _IMMINTRIN_H_INCLUDED
+#error "Never use <avx10_2roundingintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+#define _AVX10_2ROUNDINGINTRIN_H_INCLUDED
+
+#ifndef __AVX10_2_256__
+#pragma GCC push_options
+#pragma GCC target("avx10.2-256")
+#define __DISABLE_AVX10_2_256__
+#endif /* __AVX10_2_256__ */
+
+#ifdef __OPTIMIZE__
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_pd (__m256d __A, __m256d __B, const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_undefined_pd (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_pd (__m256d __W, __mmask8 __U, __m256d __A,
+ __m256d __B, const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256d
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_pd (__mmask8 __U, __m256d __A, __m256d __B,
+ const int __R)
+{
+ return (__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df)
+ _mm256_setzero_pd (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ph (__m256h __A, __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_undefined_ph (),
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ph (__m256h __W, __mmask16 __U, __m256h __A,
+ __m256h __B, const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf) __W,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ph (__mmask16 __U, __m256h __A, __m256h __B,
+ const int __R)
+{
+ return (__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ (__v16hf)
+ _mm256_setzero_ph (),
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_add_round_ps (__m256 __A, __m256 __B, const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_undefined_ps (),
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_add_round_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __W,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __m256
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_maskz_add_round_ps (__mmask8 __U, __m256 __A, __m256 __B,
+ const int __R)
+{
+ return (__m256) __builtin_ia32_addps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf)
+ _mm256_setzero_ps (),
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_pd_mask (__m256d __A, __m256d __B, const int __C,
+ const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_pd_mask (__mmask8 __U, __m256d __A, __m256d __B,
+ const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) __A,
+ (__v4df) __B,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ph_mask (__m256h __A, __m256h __B, const int __C,
+ const int __R)
+{
+ return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ __C,
+ (__mmask16) -1,
+ __R);
+}
+
+extern __inline __mmask16
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ph_mask (__mmask16 __U, __m256h __A, __m256h __B,
+ const int __C, const int __R)
+{
+ return (__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) __A,
+ (__v16hf) __B,
+ __C,
+ (__mmask16) __U,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_cmp_round_ps_mask (__m256 __A, __m256 __B, const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ __C,
+ (__mmask8) -1,
+ __R);
+}
+
+extern __inline __mmask8
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_mask_cmp_round_ps_mask (__mmask8 __U, __m256 __A, __m256 __B,
+ const int __C, const int __R)
+{
+ return (__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) __A,
+ (__v8sf) __B,
+ __C,
+ (__mmask8) __U,
+ __R);
+}
+#else
+#define _mm256_add_round_pd(A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_undefined_pd ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_pd(W, U, A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_pd(U, A, B, R) \
+ ((__m256d) __builtin_ia32_addpd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (__v4df) \
+ (_mm256_setzero_pd ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_add_round_ph(A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_undefined_ph ()), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_ph(W, U, A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) (W), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_ph(U, A, B, R) \
+ ((__m256h) __builtin_ia32_addph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (__v16hf) \
+ (_mm256_setzero_ph ()), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_add_round_ps(A, B, R) \
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_undefined_ps ()), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_add_round_ps(W, U, A, B, R) \
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) (W), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_maskz_add_round_ps(U, A, B, R)\
+ ((__m256) __builtin_ia32_addps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (__v8sf) \
+ (_mm256_setzero_ps ()), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cmp_round_pd_mask(A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_pd_mask(U, A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmppd256_mask_round ((__v4df) (A), \
+ (__v4df) (B), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+
+#define _mm256_cmp_round_ph_mask(A, B, C, R) \
+ ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (C), \
+ (__mmask16) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_ph_mask(U, A, B, C, R) \
+ ((__mmask16) __builtin_ia32_cmpph256_mask_round ((__v16hf) (A), \
+ (__v16hf) (B), \
+ (C), \
+ (__mmask16) (U), \
+ (R)))
+
+#define _mm256_cmp_round_ps_mask(A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (C), \
+ (__mmask8) (-1), \
+ (R)))
+
+#define _mm256_mask_cmp_round_ps_mask(U, A, B, C, R) \
+ ((__mmask8) __builtin_ia32_cmpps256_mask_round ((__v8sf) (A), \
+ (__v8sf) (B), \
+ (C), \
+ (__mmask8) (U), \
+ (R)))
+#endif
+
+#ifdef __DISABLE_AVX10_2_256__
+#undef __DISABLE_AVX10_2_256__
+#pragma GCC pop_options
+#endif /* __DISABLE_AVX10_2_256__ */
+
+#endif /* _AVX10_2ROUNDINGINTRIN_H_INCLUDED */
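
[Illustrative note, not part of the patch: a minimal usage sketch of the new 256-bit embedded-rounding intrinsics added by avx10_2roundingintrin.h. It assumes a compiler built with this series and the corresponding -mavx10.2-256 (or -mavx10.2) option; the rounding and compare constants (_MM_FROUND_TO_ZERO, _MM_FROUND_NO_EXC, _CMP_LT_OQ) come from the existing x86 headers.]

/* Compile with: gcc -O2 -mavx10.2-256 demo.c  (hypothetical setup) */
#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  __m256d a = _mm256_set1_pd (1.5);
  __m256d b = _mm256_set1_pd (2.25);

  /* Embedded rounding control: round toward zero, exceptions suppressed.  */
  __m256d sum = _mm256_add_round_pd (a, b,
				     _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);

  /* Compare with SAE; _CMP_LT_OQ is the usual ordered "less than" predicate.  */
  __mmask8 m = _mm256_cmp_round_pd_mask (a, b, _CMP_LT_OQ, _MM_FROUND_NO_EXC);

  double out[4];
  _mm256_storeu_pd (out, sum);
  printf ("sum[0] = %f, mask = 0x%x\n", out[0], (unsigned) m);
  return 0;
}
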
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 5e1245174a3..f32abfd9d70 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -1413,3 +1413,11 @@ DEF_FUNCTION_TYPE (V4DI, V4DI, V4DI, V2DI)
# USER_MSR builtins
DEF_FUNCTION_TYPE (VOID, UINT64, UINT64)
+
+# AVX10.2 builtins
+DEF_FUNCTION_TYPE (V4DF, V4DF, V4DF, V4DF, UQI, INT)
+DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI, INT)
+DEF_FUNCTION_TYPE (V8SF, V8SF, V8SF, V8SF, UQI, INT)
+DEF_FUNCTION_TYPE (UQI, V4DF, V4DF, INT, UQI, INT)
+DEF_FUNCTION_TYPE (UHI, V16HF, V16HF, INT, UHI, INT)
+DEF_FUNCTION_TYPE (UQI, V8SF, V8SF, INT, UQI, INT)
diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
index e876e7f5cbe..a7c0884e2ab 100644
--- a/gcc/config/i386/i386-builtin.def
+++ b/gcc/config/i386/i386-builtin.def
@@ -3318,6 +3318,14 @@ BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fcmulcsh_v8hf_mask_ro
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_round, "__builtin_ia32_vfmulcsh_round", IX86_BUILTIN_VFMULCSH_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_fmulcsh_v8hf_mask_round, "__builtin_ia32_vfmulcsh_mask_round", IX86_BUILTIN_VFMULCSH_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+/* AVX10.2. */
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv4df3_mask_round, "__builtin_ia32_addpd256_mask_round", IX86_BUILTIN_ADDPD256_MASK_ROUND, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv16hf3_mask_round, "__builtin_ia32_addph256_mask_round", IX86_BUILTIN_ADDPH256_MASK_ROUND, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_addv8sf3_mask_round, "__builtin_ia32_addps256_mask_round", IX86_BUILTIN_ADDPS256_MASK_ROUND, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv4df3_mask_round, "__builtin_ia32_cmppd256_mask_round", IX86_BUILTIN_CMPPD256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V4DF_V4DF_INT_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv16hf3_mask_round, "__builtin_ia32_cmpph256_mask_round", IX86_BUILTIN_CMPPH256_MASK_ROUND, UNKNOWN, (int) UHI_FTYPE_V16HF_V16HF_INT_UHI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX10_2_256, CODE_FOR_avx512vl_cmpv8sf3_mask_round, "__builtin_ia32_cmpps256_mask_round", IX86_BUILTIN_CMPPS256_MASK_ROUND, UNKNOWN, (int) UQI_FTYPE_V8SF_V8SF_INT_UQI_INT)
+
BDESC_END (ROUND_ARGS, MULTI_ARG)
/* FMA4 and XOP. */
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index ed546eeed6b..07bd4174cd6 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -12417,11 +12417,14 @@ ix86_expand_round_builtin (const struct builtin_description *d,
case INT_FTYPE_V4SF_V4SF_INT_INT:
case INT_FTYPE_V2DF_V2DF_INT_INT:
return ix86_expand_sse_comi_round (d, exp, target);
+ case V4DF_FTYPE_V4DF_V4DF_V4DF_UQI_INT:
case V8DF_FTYPE_V8DF_V8DF_V8DF_UQI_INT:
case V2DF_FTYPE_V2DF_V2DF_V2DF_UQI_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_UQI_INT:
case V4SF_FTYPE_V8HF_V4SF_V4SF_UQI_INT:
+ case V8SF_FTYPE_V8SF_V8SF_V8SF_UQI_INT:
case V16SF_FTYPE_V16SF_V16SF_V16SF_HI_INT:
+ case V16HF_FTYPE_V16HF_V16HF_V16HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_UHI_INT:
case V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT:
case V2DF_FTYPE_V8HF_V2DF_V2DF_UQI_INT:
@@ -12445,8 +12448,11 @@ ix86_expand_round_builtin (const struct builtin_description *d,
nargs = 5;
break;
case UQI_FTYPE_V8DF_V8DF_INT_UQI_INT:
+ case UQI_FTYPE_V4DF_V4DF_INT_UQI_INT:
case UQI_FTYPE_V2DF_V2DF_INT_UQI_INT:
case UHI_FTYPE_V16SF_V16SF_INT_UHI_INT:
+ case UHI_FTYPE_V16HF_V16HF_INT_UHI_INT:
+ case UQI_FTYPE_V8SF_V8SF_INT_UQI_INT:
case UQI_FTYPE_V4SF_V4SF_INT_UQI_INT:
case USI_FTYPE_V32HF_V32HF_INT_USI_INT:
case UQI_FTYPE_V8HF_V8HF_INT_UQI_INT:
diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
index b1d4cbf9ecd..80357d563ee 100644
--- a/gcc/config/i386/immintrin.h
+++ b/gcc/config/i386/immintrin.h
@@ -138,4 +138,6 @@
 #include <usermsrintrin.h>
+#include <avx10_2roundingintrin.h>
+
#endif /* _IMMINTRIN_H_INCLUDED */
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index f0d94bba4e7..297207f2855 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -2459,7 +2459,7 @@
(plusminus:VFH
(match_operand:VFH 1 "")
(match_operand:VFH 2 "")))]
- "TARGET_SSE && && "
+ "TARGET_SSE && && "
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
@@ -2468,7 +2468,7 @@
(match_operand:VFH 1 "" "0,v")
(match_operand:VFH 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (, mode, operands)
- && && "
+ && && "
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -2548,7 +2548,7 @@
(mult:VFH
(match_operand:VFH 1 "")
(match_operand:VFH 2 "")))]
- "TARGET_SSE && && "
+ "TARGET_SSE && && "
"ix86_fixup_binary_operands_no_copy (MULT, mode, operands);")
(define_insn "*mul3"
@@ -2557,7 +2557,7 @@
(match_operand:VFH 1 "" "%0,v")
(match_operand:VFH 2 "" "xBm,")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands)
- && && "
+ && && "
"@
mul\t{%2, %0|%0, %2}
vmul\t{%2, %1, %0|%0, %1, %2}"
@@ -2685,7 +2685,7 @@
(div:VFH
(match_operand:VFH 1 "register_operand" "0,v")
(match_operand:VFH 2 "" "xBm,")))]
- "TARGET_SSE && && "
+ "TARGET_SSE && && "
"@
div\t{%2, %0|%0, %2}
vdiv\t{%2, %1, %0|%0, %1, %2}"
@@ -2851,7 +2851,7 @@
(define_insn "_sqrt2"
[(set (match_operand:VFH 0 "register_operand" "=x,v")
(sqrt:VFH (match_operand:VFH 1 "" "xBm,")))]
- "TARGET_SSE && && "
+ "TARGET_SSE && && "
"@
sqrt\t{%1, %0|%0, %1}
vsqrt\t{%1, %0|%0, %1}"
@@ -3067,7 +3067,7 @@
(match_operand:VFH 1 "")
(match_operand:VFH 2 "")))]
"TARGET_SSE &&
- && "
+ && "
{
if (!flag_finite_math_only || flag_signed_zeros)
{
@@ -3095,7 +3095,7 @@
"TARGET_SSE
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))
   && <mask_mode512bit_condition>
-   && <round_saeonly_mode512bit_condition>"
+   && <round_saeonly_mode_condition>"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -3182,7 +3182,7 @@
IEEE_MAXMIN))]
"TARGET_SSE
   && <mask_mode512bit_condition>
-   && <round_saeonly_mode512bit_condition>"
+   && <round_saeonly_mode_condition>"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -4142,7 +4142,7 @@
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "")
(match_operand:SI 3 "" "n")]
UNSPEC_PCMP))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
@@ -5638,7 +5638,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fmadd__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -5680,7 +5680,7 @@
(match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v")
(match_operand:VFH_SF_AVX512VL 2 "" ",v,")
(match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))]
- "TARGET_AVX512F && && "
+ "TARGET_AVX512F && && "
"@
vfmadd132\t{%2, %3, %0|%0, %3, %2}
vfmadd213\t{%3, %2, %0|%0, %2, %3}
@@ -5721,7 +5721,7 @@
(match_operand:VFH_AVX512VL 3 "" "v,"))
(match_dup 1)
(match_operand: 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"@
vfmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -5768,7 +5768,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fmsub__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -5783,7 +5783,7 @@
(match_operand:VFH_SF_AVX512VL 2 "" ",v,")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))]
- "TARGET_AVX512F && && "
+ "TARGET_AVX512F && && "
"@
vfmsub132\t{%2, %3, %0|%0, %3, %2}
vfmsub213\t{%3, %2, %0|%0, %2, %3}
@@ -5844,7 +5844,7 @@
(match_operand:VFH_AVX512VL 3 "register_operand" "0")))
(match_dup 3)
(match_operand: 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"vfmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -5874,7 +5874,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fnmadd__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -5889,7 +5889,7 @@
(match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v"))
(match_operand:VFH_SF_AVX512VL 2 "" ",v,")
(match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))]
- "TARGET_AVX512F && && "
+ "TARGET_AVX512F && && "
"@
vfnmadd132\t{%2, %3, %0|%0, %3, %2}
vfnmadd213\t{%3, %2, %0|%0, %2, %3}
@@ -5932,7 +5932,7 @@
(match_operand:VFH_AVX512VL 3 "" "v,"))
(match_dup 1)
(match_operand: 4 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"@
vfnmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2}
vfnmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}"
@@ -5950,7 +5950,7 @@
(match_operand:VFH_AVX512VL 3 "register_operand" "0"))
(match_dup 3)
(match_operand: 4 "register_operand" "Yk")))]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
"vfnmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -5981,7 +5981,7 @@
(match_operand:VFH_AVX512VL 2 "")
(match_operand:VFH_AVX512VL 3 "")
(match_operand: 4 "register_operand")]
- "TARGET_AVX512F && "
+ "TARGET_AVX512F && "
{
emit_insn (gen_fma_fnmsub__maskz_1 (
operands[0], operands[1], operands[2], operands[3],
@@ -5997,7 +5997,7 @@
(match_operand:VFH_SF_AVX512VL 2 "" ",v,")
(neg:VFH_SF_AVX512VL
(match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))]
- "TARGET_AVX512F && && "
+ "TARGET_AVX512F && && "
"@
vfnmsub132\t{