[clang] [Headers][X86] Allow FMA intrinsics to be used in constexpr (PR #156385)
via cfe-commits
cfe-commits at lists.llvm.org
Mon Sep 1 19:28:59 PDT 2025
github-actions[bot] wrote:
:warning: The C/C++ code formatter clang-format found issues in your code. :warning:
You can test this locally with the following command:
```bash
git-clang-format --diff origin/main HEAD --extensions c,h -- clang/lib/Headers/avx512fintrin.h clang/lib/Headers/avx512vlintrin.h clang/test/CodeGen/X86/avx512f-builtins.c clang/test/CodeGen/X86/avx512vl-builtins.c
```
:warning:
If you are using a stacked PR workflow, the reproduction instructions above might
return results for more than one PR in the stack. You can limit the results by
changing `origin/main` to the base branch or commit you want to compare against,
as in the sketch below.
:warning:
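For example, in a stacked workflow you might compare against the PR's immediate parent instead of `origin/main`; a minimal sketch, where `origin/my-parent-pr` is a hypothetical branch name standing in for your actual base:

```bash
# Compare against a hypothetical parent branch instead of origin/main:
git-clang-format --diff origin/my-parent-pr HEAD --extensions c,h -- \
  clang/lib/Headers/avx512fintrin.h clang/lib/Headers/avx512vlintrin.h \
  clang/test/CodeGen/X86/avx512f-builtins.c clang/test/CodeGen/X86/avx512vl-builtins.c
```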
View the diff from clang-format here:
```diff
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index aab9a83a6..7d66d83bf 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -2499,137 +2499,124 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-(__v8df)(__m512d)(C), \
(__mmask8)(U), (int)(R)))
-
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
- return (__m512d) __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, (__v8df) __C);
+_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C) {
+ return (__m512d)__builtin_elementwise_fma((__v8df)__A, (__v8df)__B,
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
+_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) __A);
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)__A);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
+_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) __C);
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
+_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) _mm512_setzero_pd());
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
- return (__m512d) __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, -(__v8df) __C);
+_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C) {
+ return (__m512d)__builtin_elementwise_fma((__v8df)__A, (__v8df)__B,
+ -(__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
+_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) __A);
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)__A);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
+_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) __C);
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
+_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma((__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) _mm512_setzero_pd());
+ (__mmask8)__U,
+ __builtin_elementwise_fma((__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
- return (__m512d)__builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, (__v8df) __C);
+_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C) {
+ return (__m512d)__builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B,
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
+_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) __A);
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)__A);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
+_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) __C);
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
+_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, (__v8df) __C),
- (__v8df) _mm512_setzero_pd());
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, (__v8df)__C),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
- return (__m512d) __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, -(__v8df) __C);
+_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C) {
+ return (__m512d)__builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B,
+ -(__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
+_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) __A);
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)__A);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
+_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) __C);
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
+_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) {
return (__m512d)__builtin_ia32_selectpd_512(
- (__mmask8) __U,
- __builtin_elementwise_fma(-(__v8df) __A, (__v8df) __B, -(__v8df) __C),
- (__v8df) _mm512_setzero_pd());
+ (__mmask8)__U,
+ __builtin_elementwise_fma(-(__v8df)__A, (__v8df)__B, -(__v8df)__C),
+ (__v8df)_mm512_setzero_pd());
}
#define _mm512_fmadd_round_ps(A, B, C, R) \
@@ -2715,134 +2702,121 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-(__v16sf)(__m512)(C), \
(__mmask16)(U), (int)(R)))
-
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512 ) __builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, (__v16sf)__C);
+_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)__A);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, -(__v16sf)__C);
+_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B,
+ -(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)__A);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma((__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, (__v16sf)__C);
+_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)__A);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, (__v16sf)__C),
(__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, -(__v16sf)__C);
+_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B,
+ -(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)__A);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR
-_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
- return (__m512) __builtin_ia32_selectps_512(
+_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) {
+ return (__m512)__builtin_ia32_selectps_512(
(__mmask16)__U,
__builtin_elementwise_fma(-(__v16sf)__A, (__v16sf)__B, -(__v16sf)__C),
(__v16sf)_mm512_setzero_ps());
@@ -3092,8 +3066,6 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
(__v8df)(__m512d)(C), \
(__mmask8)(U), (int)(R)))
-
-
#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
@@ -3140,14 +3112,12 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
(__v8df)(__m512d)(C), \
(__mmask8)(U), (int)(R)))
-
#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
(__mmask16)(U), (int)(R)))
-
#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-(__v8df)(__m512d)(B), \
@@ -3161,7 +3131,6 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
(__v8df)(__m512d)(C), \
(__mmask8)(U), (int)(R)))
-
#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-(__v16sf)(__m512)(B), \
@@ -3175,8 +3144,6 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
(__v16sf)(__m512)(C), \
(__mmask16)(U), (int)(R)))
-
-
/* Vector permutations */
static __inline __m512i __DEFAULT_FN_ATTRS512
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 5df3dcc91..bc23d32b6 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -903,8 +903,7 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
(__mmask8)(m)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
+_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
@@ -912,8 +911,7 @@ _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
+_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
@@ -921,8 +919,7 @@ _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
+_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
@@ -930,8 +927,7 @@ _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
+_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
@@ -939,8 +935,7 @@ _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
+_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
@@ -948,8 +943,7 @@ _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
+_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
@@ -957,8 +951,7 @@ _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
+_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
@@ -966,8 +959,7 @@ _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
+_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, -(__v2df)__C),
@@ -975,8 +967,7 @@ _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
+_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
@@ -984,8 +975,7 @@ _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
+_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
@@ -993,8 +983,7 @@ _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
@@ -1002,8 +991,7 @@ _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
+_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
@@ -1011,8 +999,7 @@ _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
@@ -1020,8 +1007,7 @@ _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
+_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
@@ -1029,8 +1015,7 @@ _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
@@ -1038,8 +1023,7 @@ _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
-{
+_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, -(__v4df)__C),
@@ -1047,8 +1031,7 @@ _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
+_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
@@ -1056,8 +1039,7 @@ _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
+_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
@@ -1065,8 +1047,7 @@ _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
+_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
@@ -1074,8 +1055,7 @@ _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
+_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
@@ -1083,8 +1063,7 @@ _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
+_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
@@ -1092,8 +1071,7 @@ _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
+_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
@@ -1101,8 +1079,7 @@ _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
+_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
@@ -1110,8 +1087,7 @@ _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
+_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
@@ -1119,8 +1095,7 @@ _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
+_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
@@ -1128,8 +1103,7 @@ _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
+_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
@@ -1137,8 +1111,7 @@ _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
@@ -1146,8 +1119,7 @@ _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
+_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
@@ -1155,8 +1127,7 @@ _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
@@ -1164,8 +1135,7 @@ _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
+_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
@@ -1173,8 +1143,7 @@ _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
@@ -1182,8 +1151,7 @@ _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
-{
+_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
@@ -1191,8 +1159,7 @@ _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
+_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
__builtin_ia32_vfmaddsubpd ((__v2df) __A,
(__v2df) __B,
@@ -1392,8 +1359,7 @@ _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
+_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
@@ -1401,8 +1367,7 @@ _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
+_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
@@ -1410,8 +1375,7 @@ _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
+_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
@@ -1419,8 +1383,7 @@ _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
+_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
@@ -1468,8 +1431,7 @@ _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
+_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, (__v2df)__C),
@@ -1477,8 +1439,7 @@ _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
+_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, (__v4df)__C),
@@ -1486,8 +1447,7 @@ _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
+_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C),
@@ -1495,8 +1455,7 @@ _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
+_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, (__v8sf)__C),
@@ -1504,8 +1463,7 @@ _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
-{
+_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
@@ -1513,8 +1471,7 @@ _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
-{
+_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) {
return (__m128d)__builtin_ia32_selectpd_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
@@ -1522,8 +1479,7 @@ _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
-{
+_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
@@ -1531,8 +1487,7 @@ _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
}
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
-{
+_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) {
return (__m256d)__builtin_ia32_selectpd_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
@@ -1540,8 +1495,7 @@ _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
-{
+_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
@@ -1549,8 +1503,7 @@ _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
-{
+_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) {
return (__m128)__builtin_ia32_selectps_128(
(__mmask8)__U,
__builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
@@ -1558,8 +1511,7 @@ _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
-{
+_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
@@ -1567,8 +1519,7 @@ _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
}
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
-{
+_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) {
return (__m256)__builtin_ia32_selectps_256(
(__mmask8)__U,
__builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
```
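If the suggested changes look right, one way to apply them locally is to regenerate the same diff and pipe it into `git apply` (a sketch, assuming the diff applies cleanly to your checkout):

```bash
# Re-run the formatter and apply its suggested edits to the working tree:
git-clang-format --diff origin/main HEAD --extensions c,h -- \
  clang/lib/Headers/avx512fintrin.h clang/lib/Headers/avx512vlintrin.h \
  clang/test/CodeGen/X86/avx512f-builtins.c clang/test/CodeGen/X86/avx512vl-builtins.c \
  | git apply
```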
https://github.com/llvm/llvm-project/pull/156385
More information about the cfe-commits mailing list