[clang] ad0e03f - Revert "[FPEnv][X86] Platform-specific builtin constrained FP enablement"

Craig Topper via cfe-commits cfe-commits at lists.llvm.org
Thu Feb 6 16:56:07 PST 2020


Recommitted at 96400ae2a45c5038ebb4f012f90ffc6dfb30369f

~Craig


On Thu, Feb 6, 2020 at 4:23 PM Craig Topper <craig.topper at gmail.com> wrote:

> You can't generate assembly from a frontend test unless you also put
> "REQUIRES: x86-registered-target" in the test.
>
> ~Craig
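
In practice that means a frontend test that checks assembly output needs a
header along these lines -- a minimal sketch adapted from the removed
sse-builtins-constrained.c quoted further down; the RUN line here is
illustrative, not the exact one that was in tree:

    // REQUIRES: x86-registered-target
    // RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +sse -S -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM

    #include <immintrin.h>

    __m128 test_mm_sqrt_ps(__m128 x) {
      // CHECK-ASM: sqrtps
      return _mm_sqrt_ps(x);
    }

Without the REQUIRES line, lit still runs the -S RUN lines on builders that
don't build the X86 backend, which is what produces the "unable to create
target" error in the revert message below.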
>
>
> On Thu, Feb 6, 2020 at 4:17 PM Kevin P. Neal via cfe-commits <
> cfe-commits at lists.llvm.org> wrote:
>
>>
>> Author: Kevin P. Neal
>> Date: 2020-02-06T19:17:14-05:00
>> New Revision: ad0e03fd4c8066843f4138e44661ee0287ceb631
>>
>> URL:
>> https://github.com/llvm/llvm-project/commit/ad0e03fd4c8066843f4138e44661ee0287ceb631
>> DIFF:
>> https://github.com/llvm/llvm-project/commit/ad0e03fd4c8066843f4138e44661ee0287ceb631.diff
>>
>> LOG: Revert "[FPEnv][X86] Platform-specific builtin constrained FP
>> enablement"
>>
>> This reverts commit 208470dd5d0a46bc3c24b66489b687eda4954262.
>>
>> Tests fail:
>> error: unable to create target: 'No available targets are compatible with
>> triple "x86_64-apple-darwin"'
>>
>> This happens on the clang-hexagon-elf and clang-cmake-armv7-quick bots.
>>
>> If anyone has any suggestions as to why, I'm all ears.
>>
>> Differential Revision: https://reviews.llvm.org/D73570
>>
>> Revert "[FPEnv][X86] Speculative fix for failures introduced by
>> eda495426."
>>
>> This reverts commit 80e17e5fcc09dc5baa940022e6988fcb08c5d92d.
>>
>> The speculative fix didn't solve the test failures on Hexagon, ARMv6, and
>> MSVC AArch64.
>>
>> Added:
>>
>>
>> Modified:
>>     clang/lib/CodeGen/CGBuiltin.cpp
>>
>> Removed:
>>     clang/test/CodeGen/avx512f-builtins-constrained.c
>>     clang/test/CodeGen/fma-builtins-constrained.c
>>     clang/test/CodeGen/sse-builtins-constrained.c
>>
>>
>>
>> ################################################################################
>> diff  --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
>> index ca41413ae278..44947b4b1d64 100644
>> --- a/clang/lib/CodeGen/CGBuiltin.cpp
>> +++ b/clang/lib/CodeGen/CGBuiltin.cpp
>> @@ -10094,14 +10094,8 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
>>      Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
>>    } else {
>>      llvm::Type *Ty = A->getType();
>> -    Function *FMA;
>> -    if (CGF.Builder.getIsFPConstrained()) {
>> -      FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
>> -      Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
>> -    } else {
>> -      FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
>> -      Res = CGF.Builder.CreateCall(FMA, {A, B, C});
>> -    }
>> +    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
>> +    Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
>>
>>      if (IsAddSub) {
>>        // Negate even elts in C using a mask.
>> @@ -10111,11 +10105,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
>>          Indices[i] = i + (i % 2) * NumElts;
>>
>>        Value *NegC = CGF.Builder.CreateFNeg(C);
>> -      Value *FMSub;
>> -      if (CGF.Builder.getIsFPConstrained())
>> -        FMSub = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, NegC} );
>> -      else
>> -        FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
>> +      Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
>>        Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
>>      }
>>    }
>> @@ -10174,10 +10164,6 @@ EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
>>                          Intrinsic::x86_avx512_vfmadd_f64;
>>      Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
>>                                   {Ops[0], Ops[1], Ops[2], Ops[4]});
>> -  } else if (CGF.Builder.getIsFPConstrained()) {
>> -    Function *FMA = CGF.CGM.getIntrinsic(
>> -        Intrinsic::experimental_constrained_fma, Ops[0]->getType());
>> -    Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
>>    } else {
>>      Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
>>      Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
>> @@ -11906,15 +11892,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
>>    case X86::BI__builtin_ia32_sqrtss:
>>    case X86::BI__builtin_ia32_sqrtsd: {
>>      Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
>> -    Function *F;
>> -    if (Builder.getIsFPConstrained()) {
>> -      F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
>> -                           A->getType());
>> -      A = Builder.CreateConstrainedFPCall(F, {A});
>> -    } else {
>> -      F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
>> -      A = Builder.CreateCall(F, {A});
>> -    }
>> +    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
>> +    A = Builder.CreateCall(F, {A});
>>      return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
>>    }
>>    case X86::BI__builtin_ia32_sqrtsd_round_mask:
>> @@ -11929,15 +11908,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
>>        return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
>>      }
>>      Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
>> -    Function *F;
>> -    if (Builder.getIsFPConstrained()) {
>> -      F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
>> -                           A->getType());
>> -      A = Builder.CreateConstrainedFPCall(F, A);
>> -    } else {
>> -      F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
>> -      A = Builder.CreateCall(F, A);
>> -    }
>> +    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
>> +    A = Builder.CreateCall(F, A);
>>      Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
>>      A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
>>      return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
>> @@ -11959,14 +11931,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
>>          return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
>>        }
>>      }
>> -    if (Builder.getIsFPConstrained()) {
>> -      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
>> -                                     Ops[0]->getType());
>> -      return Builder.CreateConstrainedFPCall(F, Ops[0]);
>> -    } else {
>> -      Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
>> -      return Builder.CreateCall(F, Ops[0]);
>> -    }
>> +    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
>> +    return Builder.CreateCall(F, Ops[0]);
>>    }
>>    case X86::BI__builtin_ia32_pabsb128:
>>    case X86::BI__builtin_ia32_pabsw128:
>>
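
The change being reverted followed the same pattern at each affected call
site in CGBuiltin.cpp above: when the IRBuilder is in constrained
floating-point mode, emit the experimental constrained intrinsic through
CreateConstrainedFPCall instead of the plain intrinsic. A simplified sketch
of that pattern, assuming the usual clang CodeGen context (the helper name
is made up for illustration; it is not the exact reverted code):

    // Emit either llvm.sqrt or llvm.experimental.constrained.sqrt for a
    // scalar operand, depending on the builder's constrained-FP mode.
    static llvm::Value *emitMaybeConstrainedSqrt(clang::CodeGen::CodeGenFunction &CGF,
                                                 llvm::Value *A) {
      if (CGF.Builder.getIsFPConstrained()) {
        llvm::Function *F = CGF.CGM.getIntrinsic(
            llvm::Intrinsic::experimental_constrained_sqrt, A->getType());
        return CGF.Builder.CreateConstrainedFPCall(F, {A});
      }
      llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::sqrt, A->getType());
      return CGF.Builder.CreateCall(F, {A});
    }

The revert drops those branches and unconditionally emits the plain
intrinsics again; the deleted tests below checked for the constrained
variants when -ffp-exception-behavior=strict was passed.
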
>> diff  --git a/clang/test/CodeGen/avx512f-builtins-constrained.c b/clang/test/CodeGen/avx512f-builtins-constrained.c
>> deleted file mode 100644
>> index 75fad73d60f6..000000000000
>> --- a/clang/test/CodeGen/avx512f-builtins-constrained.c
>> +++ /dev/null
>> @@ -1,126 +0,0 @@
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -ffreestanding %s
>> -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -emit-llvm -o -
>> -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR
>> --check-prefix=UNCONSTRAINED %s
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -fms-extensions -fms-compatibility
>> -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f
>> -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON
>> --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -ffreestanding %s
>> -triple=x86_64-unknown-linux-gnu -target-feature +avx512f
>> -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck
>> --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -fms-compatibility -ffreestanding %s
>> -triple=x86_64-unknown-linux-gnu -target-feature +avx512f
>> -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck
>> --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -ffreestanding %s
>> -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -S -o - -Wall
>> -Werror | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
>> -// RUN: %clang_cc1 -fexperimental-new-pass-manager
>> -flax-vector-conversions=none -ffreestanding %s
>> -triple=x86_64-unknown-linux-gnu -target-feature +avx512f
>> -ffp-exception-behavior=strict -S -o - -Wall -Werror | FileCheck
>> --check-prefix=COMMON --check-prefix=CHECK-ASM %s
>> -
>> -#include <immintrin.h>
>> -
>> -__m512d test_mm512_sqrt_pd(__m512d a)
>> -{
>> -  // COMMON-LABEL: test_mm512_sqrt_pd
>> -  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double>
>> %{{.*}})
>> -  // CONSTRAINED: call <8 x double>
>> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtpd
>> -  return _mm512_sqrt_pd(a);
>> -}
>> -
>> -__m512d test_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
>> -{
>> -  // COMMON-LABEL: test_mm512_mask_sqrt_pd
>> -  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double>
>> %{{.*}})
>> -  // CONSTRAINED: call <8 x double>
>> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtpd
>> -  // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x
>> double> %{{.*}}
>> -  return _mm512_mask_sqrt_pd (__W,__U,__A);
>> -}
>> -
>> -__m512d test_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
>> -{
>> -  // COMMON-LABEL: test_mm512_maskz_sqrt_pd
>> -  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double>
>> %{{.*}})
>> -  // CONSTRAINED: call <8 x double>
>> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtpd
>> -  // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x
>> double> {{.*}}
>> -  return _mm512_maskz_sqrt_pd (__U,__A);
>> -}
>> -
>> -__m512 test_mm512_sqrt_ps(__m512 a)
>> -{
>> -  // COMMON-LABEL: test_mm512_sqrt_ps
>> -  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float>
>> %{{.*}})
>> -  // CONSTRAINED: call <16 x float>
>> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtps
>> -  return _mm512_sqrt_ps(a);
>> -}
>> -
>> -__m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
>> -{
>> -  // COMMON-LABEL: test_mm512_mask_sqrt_ps
>> -  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float>
>> %{{.*}})
>> -  // CONSTRAINED: call <16 x float>
>> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtps
>> -  // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
>> -  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x
>> float> %{{.*}}
>> -  return _mm512_mask_sqrt_ps( __W, __U, __A);
>> -}
>> -
>> -__m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
>> -{
>> -  // COMMON-LABEL: test_mm512_maskz_sqrt_ps
>> -  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float>
>> %{{.*}})
>> -  // CONSTRAINED: call <16 x float>
>> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: vsqrtps
>> -  // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
>> -  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x
>> float> {{.*}}
>> -  return _mm512_maskz_sqrt_ps(__U ,__A);
>> -}
>> -
>> -__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A,
>> __m128d __B){
>> -  // COMMON-LABEL: test_mm_mask_sqrt_sd
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
>> -  // CONSTRAINED-NEXT: call double
>> @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vsqrtsd
>> -  // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
>> -  // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}},
>> i64 0
>> -  return _mm_mask_sqrt_sd(__W,__U,__A,__B);
>> -}
>> -
>> -__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){
>> -  // COMMON-LABEL: test_mm_maskz_sqrt_sd
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
>> -  // CONSTRAINED-NEXT: call double
>> @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vsqrtsd
>> -  // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
>> -  // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}},
>> i64 0
>> -  return _mm_maskz_sqrt_sd(__U,__A,__B);
>> -}
>> -
>> -__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128
>> __B){
>> -  // COMMON-LABEL: test_mm_mask_sqrt_ss
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
>> -  // CONSTRAINED-NEXT: call float
>> @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vsqrtss
>> -  // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
>> -  // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64
>> 0
>> -  return _mm_mask_sqrt_ss(__W,__U,__A,__B);
>> -}
>> -
>> -__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
>> -  // COMMON-LABEL: test_mm_maskz_sqrt_ss
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
>> -  // CONSTRAINED-NEXT: call float
>> @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vsqrtss
>> -  // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
>> -  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
>> -  // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
>> -  // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64
>> 0
>> -  return _mm_maskz_sqrt_ss(__U,__A,__B);
>> -}
>>
>> diff  --git a/clang/test/CodeGen/fma-builtins-constrained.c b/clang/test/CodeGen/fma-builtins-constrained.c
>> deleted file mode 100644
>> index 8a3ce821a568..000000000000
>> --- a/clang/test/CodeGen/fma-builtins-constrained.c
>> +++ /dev/null
>> @@ -1,352 +0,0 @@
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +fma -O -emit-llvm -o - | FileCheck --check-prefix=COMMON
>> --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +fma -ffp-exception-behavior=strict -O -emit-llvm -o - |
>> FileCheck --check-prefix=COMMON --check-prefix=COMMONIR
>> --check-prefix=CONSTRAINED %s
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +fma -O -S -o - | FileCheck --check-prefix=COMMON
>> --check-prefix=CHECK-ASM %s
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +fma -O -ffp-exception-behavior=strict -S -o - | FileCheck
>> --check-prefix=COMMON --check-prefix=CHECK-ASM %s
>> -
>> -// FIXME: Several of these tests are broken when constrained.
>> -
>> -#include <immintrin.h>
>> -
>> -__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmadd_ps
>> -  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
>> -  // CONSTRAINED: call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213ps
>> -  return _mm_fmadd_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmadd_pd
>> -  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
>> -  // CONSTRAINED: call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213pd
>> -  return _mm_fmadd_pd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmadd_ss
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float
>> %{{.*}}, float %{{.*}})
>> -  // CONSTRAINED: call float
>> @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float
>> %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213ss
>> -  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
>> -  return _mm_fmadd_ss(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmadd_sd
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double
>> %{{.*}}, double %{{.*}})
>> -  // CONSTRAINED: call double
>> @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}},
>> double %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213sd
>> -  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
>> -  return _mm_fmadd_sd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmsub_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
>> -  // CONSTRAINED: call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213ps
>> -  return _mm_fmsub_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmsub_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
>> -  // CONSTRAINED: call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213pd
>> -  return _mm_fmsub_pd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmsub_ss
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float
>> %{{.*}}, float %{{.*}})
>> -  // CONSTRAINED: call float
>> @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float
>> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213ss
>> -  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
>> -  return _mm_fmsub_ss(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmsub_sd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double
>> %{{.*}}, double %{{.*}})
>> -  // CONSTRAINED: call double
>> @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}},
>> double %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213sd
>> -  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
>> -  return _mm_fmsub_sd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fnmadd_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
>> -  // CONSTRAINED: call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213ps
>> -  return _mm_fnmadd_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fnmadd_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
>> -  // CONSTRAINED: call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213pd
>> -  return _mm_fnmadd_pd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fnmadd_ss
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float
>> %{{.*}}, float %{{.*}})
>> -  // CONSTRAINED: call float
>> @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float
>> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213ss
>> -  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
>> -  return _mm_fnmadd_ss(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fnmadd_sd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double
>> %{{.*}}, double %{{.*}})
>> -  // CONSTRAINED: call double
>> @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}},
>> double %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213sd
>> -  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
>> -  return _mm_fnmadd_sd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fnmsub_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
>> -  // CONSTRAINED: call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213ps
>> -  return _mm_fnmsub_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fnmsub_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
>> -  // CONSTRAINED: call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213pd
>> -  return _mm_fnmsub_pd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fnmsub_ss
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float
>> %{{.*}}, float %{{.*}})
>> -  // CONSTRAINED: call float
>> @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float
>> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213ss
>> -  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
>> -  return _mm_fnmsub_ss(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fnmsub_sd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
>> -  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double
>> %{{.*}}, double %{{.*}})
>> -  // CONSTRAINED: call double
>> @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}},
>> double %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213sd
>> -  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
>> -  return _mm_fnmsub_sd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmaddsub_ps
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x float>
>> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x float>
>> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmaddsub213ps
>> -  // COMMONIR: shufflevector <4 x float> [[SUB]], <4 x float> [[ADD]],
>> <4 x i32> <i32 0, i32 5, i32 2, i32 7>
>> -  return _mm_fmaddsub_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmaddsub_pd
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <2 x double>
>> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <2 x double>
>> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmaddsub213pd
>> -  // COMMONIR: shufflevector <2 x double> [[SUB]], <2 x double> [[ADD]],
>> <2 x i32> <i32 0, i32 3>
>> -  return _mm_fmaddsub_pd(a, b, c);
>> -}
>> -
>> -__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) {
>> -  // COMMON-LABEL: test_mm_fmsubadd_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x float>
>> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float>
>> [[NEG]]
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x float>
>> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <4 x float>
>> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float>
>> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsubadd213ps
>> -  // COMMONIR: shufflevector <4 x float> [[ADD]], <4 x float> [[SUB]],
>> <4 x i32> <i32 0, i32 5, i32 2, i32 7>
>> -  return _mm_fmsubadd_ps(a, b, c);
>> -}
>> -
>> -__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) {
>> -  // COMMON-LABEL: test_mm_fmsubadd_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <2 x double>
>> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <2 x double>
>> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <2 x double>
>> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double>
>> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsubadd213pd
>> -  // COMMONIR: shufflevector <2 x double> [[ADD]], <2 x double> [[SUB]],
>> <2 x i32> <i32 0, i32 3>
>> -  return _mm_fmsubadd_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fmadd_ps
>> -  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
>> -  // CONSTRAINED: call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213ps
>> -  return _mm256_fmadd_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fmadd_pd
>> -  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
>> -  // CONSTRAINED: call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: vfmadd213pd
>> -  return _mm256_fmadd_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fmsub_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
>> -  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
>> -  // CONSTRAINED: call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213ps
>> -  return _mm256_fmsub_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fmsub_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
>> -  // CONSTRAINED: call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsub213pd
>> -  return _mm256_fmsub_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fnmadd_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
>> -  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
>> -  // CONSTRAINED: call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213ps
>> -  return _mm256_fnmadd_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fnmadd_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
>> -  // CONSTRAINED: call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmadd213pd
>> -  return _mm256_fnmadd_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fnmsub_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <8 x float> %{{.*}}
>> -  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
>> -  // CONSTRAINED: call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213ps
>> -  return _mm256_fnmsub_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fnmsub_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
>> -  // COMMONIR: [[NEG2:%.+]] = fneg <4 x double> %{{.+}}
>> -  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
>> -  // CONSTRAINED: call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfnmsub213pd
>> -  return _mm256_fnmsub_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fmaddsub_ps
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <8 x float>
>> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}})
>> -  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <8 x float>
>> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmaddsub213ps
>> -  // COMMONIR: shufflevector <8 x float> [[SUB]], <8 x float> [[ADD]],
>> <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
>> -  return _mm256_fmaddsub_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fmaddsub_pd
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x double>
>> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x double>
>> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double>
>> %{{.*}})
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmaddsub213pd
>> -  // COMMONIR: shufflevector <4 x double> [[SUB]], <4 x double> [[ADD]],
>> <4 x i32> <i32 0, i32 5, i32 2, i32 7>
>> -  return _mm256_fmaddsub_pd(a, b, c);
>> -}
>> -
>> -__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) {
>> -  // COMMON-LABEL: test_mm256_fmsubadd_ps
>> -  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <8 x float>
>> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <8 x float>
>> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <8 x float>
>> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float>
>> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsubadd213ps
>> -  // COMMONIR: shufflevector <8 x float> [[ADD]], <8 x float> [[SUB]],
>> <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
>> -  return _mm256_fmsubadd_ps(a, b, c);
>> -}
>> -
>> -__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) {
>> -  // COMMON-LABEL: test_mm256_fmsubadd_pd
>> -  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
>> -  // UNCONSTRAINED: [[SUB:%.+]] = tail call <4 x double>
>> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double>
>> [[NEG]])
>> -  // CONSTRAINED: [[SUB:%.+]] = tail call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> [[NEG]], metadata !{{.*}})
>> -  // UNCONSTRAINED: [[ADD:%.+]] = tail call <4 x double>
>> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double>
>> %{{.*}})
>> -  // CONSTRAINED: [[ADD:%.+]] = tail call <4 x double>
>> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double>
>> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
>> -  // FIXME-CHECK-ASM: vfmsubadd213pd
>> -  // COMMONIR: shufflevector <4 x double> [[ADD]], <4 x double> [[SUB]],
>> <4 x i32> <i32 0, i32 5, i32 2, i32 7>
>> -  return _mm256_fmsubadd_pd(a, b, c);
>> -}
>>
>> diff  --git a/clang/test/CodeGen/sse-builtins-constrained.c b/clang/test/CodeGen/sse-builtins-constrained.c
>> deleted file mode 100644
>> index af8e6e3a5dd7..000000000000
>> --- a/clang/test/CodeGen/sse-builtins-constrained.c
>> +++ /dev/null
>> @@ -1,26 +0,0 @@
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +sse -emit-llvm -o - -Wall -Werror | FileCheck %s
>> --check-prefix=UNCONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +sse -ffp-exception-behavior=strict -emit-llvm -o - -Wall
>> -Werror | FileCheck %s --check-prefix=CONSTRAINED --check-prefix=COMMON
>> --check-prefix=COMMONIR
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +sse -S %s -o - -Wall -Werror | FileCheck %s
>> --check-prefix=CHECK-ASM --check-prefix=COMMON
>> -// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu
>> -target-feature +sse -ffp-exception-behavior=strict -S %s -o - -Wall
>> -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
>> -
>> -
>> -#include <immintrin.h>
>> -
>> -__m128 test_mm_sqrt_ps(__m128 x) {
>> -  // COMMON-LABEL: test_mm_sqrt_ps
>> -  // UNCONSTRAINED: call <4 x float> @llvm.sqrt.v4f32(<4 x float> {{.*}})
>> -  // CONSTRAINED: call <4 x float>
>> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> {{.*}}, metadata
>> !{{.*}})
>> -  // CHECK-ASM: sqrtps
>> -  return _mm_sqrt_ps(x);
>> -}
>> -
>> -__m128 test_sqrt_ss(__m128 x) {
>> -  // COMMON-LABEL: test_sqrt_ss
>> -  // COMMONIR: extractelement <4 x float> {{.*}}, i64 0
>> -  // UNCONSTRAINED: call float @llvm.sqrt.f32(float {{.*}})
>> -  // CONSTRAINED: call float
>> @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !{{.*}})
>> -  // CHECK-ASM: sqrtss
>> -  // COMMONIR: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0
>> -  return _mm_sqrt_ss(x);
>> -}
>> -
>>
>>
>>
>>
>