[clang] 96400ae - Recommit "[FPEnv][X86] Platform-specific builtin constrained FP enablement"
Craig Topper via cfe-commits
cfe-commits at lists.llvm.org
Thu Feb 6 16:55:45 PST 2020
Author: Craig Topper
Date: 2020-02-06T16:54:35-08:00
New Revision: 96400ae2a45c5038ebb4f012f90ffc6dfb30369f
URL: https://github.com/llvm/llvm-project/commit/96400ae2a45c5038ebb4f012f90ffc6dfb30369f
DIFF: https://github.com/llvm/llvm-project/commit/96400ae2a45c5038ebb4f012f90ffc6dfb30369f.diff
LOG: Recommit "[FPEnv][X86] Platform-specific builtin constrained FP enablement"
With REQUIRES: x86-registered-target added to the tests.
Also remove some unneeded FIXMEs, but add a FIXME for bad IR
generation for FMADDSUB/FMSUBADD with constrained FP.
Original patch by Kevin P. Neal
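For context, a minimal illustration of what this patch changes (the wrapper
name sqrt4 is hypothetical; the IR in the comments follows the CHECK lines
in the new tests below, with the rounding metadata left unspecified):

  #include <immintrin.h>

  __m128 sqrt4(__m128 x) {
    // Default lowering:
    //   call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
    // With -ffp-exception-behavior=strict:
    //   call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(
    //       <4 x float> %x, metadata !<rounding>, metadata !"fpexcept.strict")
    return _mm_sqrt_ps(x);
  }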
Added:
clang/test/CodeGen/avx512f-builtins-constrained.c
clang/test/CodeGen/fma-builtins-constrained.c
clang/test/CodeGen/sse-builtins-constrained.c
Modified:
clang/lib/CodeGen/CGBuiltin.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 44947b4b1d64..7e0c53126914 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10094,8 +10094,14 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
} else {
llvm::Type *Ty = A->getType();
- Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
- Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
+ Function *FMA;
+ if (CGF.Builder.getIsFPConstrained()) {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
+ } else {
+ FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Res = CGF.Builder.CreateCall(FMA, {A, B, C});
+ }

if (IsAddSub) {
// Negate even elts in C using a mask.
@@ -10104,8 +10110,14 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + (i % 2) * NumElts;

+ // FIXME: This code isn't exception safe for constrained FP. We need to
+ // suppress exceptions on the unselected elements.
Value *NegC = CGF.Builder.CreateFNeg(C);
- Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
+ Value *FMSub;
+ if (CGF.Builder.getIsFPConstrained())
+ FMSub = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, NegC} );
+ else
+ FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
}
}
@@ -10164,6 +10176,10 @@ EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
Intrinsic::x86_avx512_vfmadd_f64;
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
+ } else if (CGF.Builder.getIsFPConstrained()) {
+ Function *FMA = CGF.CGM.getIntrinsic(
+ Intrinsic::experimental_constrained_fma, Ops[0]->getType());
+ Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
} else {
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
@@ -11892,8 +11908,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, {A});
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, {A});
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, {A});
+ }
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtsd_round_mask:
@@ -11908,8 +11931,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
- A = Builder.CreateCall(F, A);
+ Function *F;
+ if (Builder.getIsFPConstrained()) {
+ F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ A->getType());
+ A = Builder.CreateConstrainedFPCall(F, A);
+ } else {
+ F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, A);
+ }
Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
@@ -11931,8 +11961,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
- Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
- return Builder.CreateCall(F, Ops[0]);
+ if (Builder.getIsFPConstrained()) {
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+ Ops[0]->getType());
+ return Builder.CreateConstrainedFPCall(F, Ops[0]);
+ } else {
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
+ return Builder.CreateCall(F, Ops[0]);
+ }
}
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
diff --git a/clang/test/CodeGen/avx512f-builtins-constrained.c b/clang/test/CodeGen/avx512f-builtins-constrained.c
new file mode 100644
index 000000000000..f4a9697f9ca7
--- /dev/null
+++ b/clang/test/CodeGen/avx512f-builtins-constrained.c
@@ -0,0 +1,127 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -fms-compatibility -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -S -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -ffp-exception-behavior=strict -S -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+#include <immintrin.h>
+
+__m512d test_mm512_sqrt_pd(__m512d a)
+{
+ // COMMON-LABEL: test_mm512_sqrt_pd
+ // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtpd
+ return _mm512_sqrt_pd(a);
+}
+
+__m512d test_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+ // COMMON-LABEL: test_mm512_mask_sqrt_pd
+ // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtpd
+ // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+ return _mm512_mask_sqrt_pd (__W,__U,__A);
+}
+
+__m512d test_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
+{
+ // COMMON-LABEL: test_mm512_maskz_sqrt_pd
+ // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+ // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtpd
+ // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}}
+ return _mm512_maskz_sqrt_pd (__U,__A);
+}
+
+__m512 test_mm512_sqrt_ps(__m512 a)
+{
+ // COMMON-LABEL: test_mm512_sqrt_ps
+ // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtps
+ return _mm512_sqrt_ps(a);
+}
+
+__m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
+{
+ // COMMON-LABEL: test_mm512_mask_sqrt_ps
+ // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtps
+ // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
+ // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+ return _mm512_mask_sqrt_ps( __W, __U, __A);
+}
+
+__m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
+{
+ // COMMON-LABEL: test_mm512_maskz_sqrt_ps
+ // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+ // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtps
+ // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
+ // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}}
+ return _mm512_maskz_sqrt_ps(__U ,__A);
+}
+
+__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
+ // COMMON-LABEL: test_mm_mask_sqrt_sd
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
+ // CONSTRAINED-NEXT: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtsd
+ // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+ // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
+ return _mm_mask_sqrt_sd(__W,__U,__A,__B);
+}
+
+__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){
+ // COMMON-LABEL: test_mm_maskz_sqrt_sd
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
+ // CONSTRAINED-NEXT: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtsd
+ // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+ // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
+ // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
+ return _mm_maskz_sqrt_sd(__U,__A,__B);
+}
+
+__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
+ // COMMON-LABEL: test_mm_mask_sqrt_ss
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
+ // CONSTRAINED-NEXT: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtss
+ // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+ // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
+ return _mm_mask_sqrt_ss(__W,__U,__A,__B);
+}
+
+__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
+ // COMMON-LABEL: test_mm_maskz_sqrt_ss
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
+ // CONSTRAINED-NEXT: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vsqrtss
+ // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+ // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+ // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
+ // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
+ return _mm_maskz_sqrt_ss(__U,__A,__B);
+}
diff --git a/clang/test/CodeGen/fma-builtins-constrained.c b/clang/test/CodeGen/fma-builtins-constrained.c
new file mode 100644
index 000000000000..fe5b946fe640
--- /dev/null
+++ b/clang/test/CodeGen/fma-builtins-constrained.c
@@ -0,0 +1,359 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O -emit-llvm -o - | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -ffp-exception-behavior=strict -O -emit-llvm -o - | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O -S -o - | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM --check-prefix=CHECK-ASM-UNCONSTRAINED %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O -ffp-exception-behavior=strict -S -o - | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM --check-prefix=CHECK-ASM-CONSTRAINED %s
+
+#include <immintrin.h>
+
+__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmadd_ps
+ // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213ps
+ return _mm_fmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmadd_pd
+ // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213pd
+ return _mm_fmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmadd_ss
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+ // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213ss
+ // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+ return _mm_fmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmadd_sd
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+ // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213sd
+ // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+ return _mm_fmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmsub_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213ps
+ return _mm_fmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmsub_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213pd
+ return _mm_fmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmsub_ss
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+ // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213ss
+ // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+ return _mm_fmsub_ss(a, b, c);
+}
+
+__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmsub_sd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+ // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213sd
+ // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+ return _mm_fmsub_sd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fnmadd_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213ps
+ return _mm_fnmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fnmadd_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213pd
+ return _mm_fnmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fnmadd_ss
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+ // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213ss
+ // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+ return _mm_fnmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fnmadd_sd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+ // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213sd
+ // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+ return _mm_fnmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fnmsub_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
+ // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213ps
+ return _mm_fnmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fnmsub_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
+ // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213pd
+ return _mm_fnmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fnmsub_ss
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+ // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+ // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213ss
+ // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+ return _mm_fnmsub_ss(a, b, c);
+}
+
+__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fnmsub_sd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+ // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+ // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213sd
+ // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+ return _mm_fnmsub_sd(a, b, c);
+}
+
+__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmaddsub_ps
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmaddsub213ps
+ // CHECK-ASM-CONSTRAINED-NOT: vfmaddsub213ps
+ // COMMONIR: shufflevector <4 x float> [[SUB]], <4 x float> [[ADD]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ return _mm_fmaddsub_ps(a, b, c);
+}
+
+__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmaddsub_pd
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmaddsub213pd
+ // CHECK-ASM-CONSTRAINED-NOT: vfmaddsub213pd
+ // COMMONIR: shufflevector <2 x double> [[SUB]], <2 x double> [[ADD]], <2 x i32> <i32 0, i32 3>
+ return _mm_fmaddsub_pd(a, b, c);
+}
+
+__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) {
+ // COMMON-LABEL: test_mm_fmsubadd_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmsubadd213ps
+ // CHECK-ASM-CONSTRAINED-NOT: vfmsubadd213ps
+ // COMMONIR: shufflevector <4 x float> [[ADD]], <4 x float> [[SUB]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ return _mm_fmsubadd_ps(a, b, c);
+}
+
+__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) {
+ // COMMON-LABEL: test_mm_fmsubadd_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmsubadd213pd
+ // CHECK-ASM-CONSTRAINED-NOT: vfmsubadd213pd
+ // COMMONIR: shufflevector <2 x double> [[ADD]], <2 x double> [[SUB]], <2 x i32> <i32 0, i32 3>
+ return _mm_fmsubadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fmadd_ps
+ // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213ps
+ return _mm256_fmadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fmadd_pd
+ // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmadd213pd
+ return _mm256_fmadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fmsub_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+ // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213ps
+ return _mm256_fmsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fmsub_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+ // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfmsub213pd
+ return _mm256_fmsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fnmadd_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+ // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213ps
+ return _mm256_fnmadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fnmadd_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+ // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmadd213pd
+ return _mm256_fnmadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fnmsub_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <8 x float> %{{.*}}
+ // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213ps
+ return _mm256_fnmsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fnmsub_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+ // COMMONIR: [[NEG2:%.+]] = fneg <4 x double> %{{.+}}
+ // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM: vfnmsub213pd
+ return _mm256_fnmsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fmaddsub_ps
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmaddsub213ps
+ // CHECK-ASM-CONSTRAINED-NOT: vfmaddsub213ps
+ // COMMONIR: shufflevector <8 x float> [[SUB]], <8 x float> [[ADD]], <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ return _mm256_fmaddsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fmaddsub_pd
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]], metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmaddsub213pd
+ // CHECK-ASM-CONSTRAINED-NOT: vfmaddsub213pd
+ // COMMONIR: shufflevector <4 x double> [[SUB]], <4 x double> [[ADD]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ return _mm256_fmaddsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) {
+ // COMMON-LABEL: test_mm256_fmsubadd_ps
+ // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmsubadd213ps
+ // CHECK-ASM-CONSTRAINED-NOT: vfmsubadd213ps
+ // COMMONIR: shufflevector <8 x float> [[ADD]], <8 x float> [[SUB]], <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ return _mm256_fmsubadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) {
+ // COMMON-LABEL: test_mm256_fmsubadd_pd
+ // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+ // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
+ // CONSTRAINED: [[SUB:%.+]] = tail call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]], metadata !{{.*}})
+ // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+ // CONSTRAINED: [[ADD:%.+]] = tail call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+ // CHECK-ASM-UNCONSTRAINED: vfmsubadd213pd
+ // CHECK-ASM-CONSTRAINED-NOT: vfmsubadd213pd
+ // COMMONIR: shufflevector <4 x double> [[ADD]], <4 x double> [[SUB]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ return _mm256_fmsubadd_pd(a, b, c);
+}
diff --git a/clang/test/CodeGen/sse-builtins-constrained.c b/clang/test/CodeGen/sse-builtins-constrained.c
new file mode 100644
index 000000000000..5769fff1a99e
--- /dev/null
+++ b/clang/test/CodeGen/sse-builtins-constrained.c
@@ -0,0 +1,27 @@
+// REQUIRES: x86-registered-target
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +sse -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=UNCONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +sse -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=CONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +sse -S %s -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +sse -ffp-exception-behavior=strict -S %s -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
+
+
+#include <immintrin.h>
+
+__m128 test_mm_sqrt_ps(__m128 x) {
+ // COMMON-LABEL: test_mm_sqrt_ps
+ // UNCONSTRAINED: call <4 x float> @llvm.sqrt.v4f32(<4 x float> {{.*}})
+ // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> {{.*}}, metadata !{{.*}})
+ // CHECK-ASM: sqrtps
+ return _mm_sqrt_ps(x);
+}
+
+__m128 test_sqrt_ss(__m128 x) {
+ // COMMON-LABEL: test_sqrt_ss
+ // COMMONIR: extractelement <4 x float> {{.*}}, i64 0
+ // UNCONSTRAINED: call float @llvm.sqrt.f32(float {{.*}})
+ // CONSTRAINED: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !{{.*}})
+ // CHECK-ASM: sqrtss
+ // COMMONIR: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0
+ return _mm_sqrt_ss(x);
+}
+
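A note on the FMADDSUB/FMSUBADD FIXME above (a sketch of the problem, not of
the eventual fix): even in strict mode the lowering evaluates a constrained
FMA and a constrained FMSUB across all vector lanes and then shuffles the two
results together, so the lanes the shuffle discards can still raise spurious
FP exceptions. Roughly, following the CHECK lines in
fma-builtins-constrained.c (the wrapper name fmaddsub4 is hypothetical):

  #include <immintrin.h>

  __m128 fmaddsub4(__m128 a, __m128 b, __m128 c) {
    // Strict-mode IR, abbreviated:
    //   %add  = call <4 x float> @llvm.experimental.constrained.fma.v4f32(%a, %b, %c, ...)
    //   %negc = fneg <4 x float> %c
    //   %sub  = call <4 x float> @llvm.experimental.constrained.fma.v4f32(%a, %b, %negc, ...)
    //   %res  = shufflevector <4 x float> %sub, <4 x float> %add,
    //                         <4 x i32> <i32 0, i32 5, i32 2, i32 7>
    // Lanes 1 and 3 of %sub and lanes 0 and 2 of %add are discarded by the
    // shuffle, but the exceptions they may raise are not suppressed.
    return _mm_fmaddsub_ps(a, b, c);
  }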