[llvm] r346137 - [NFCI][FPEnv] Split constrained intrinsic tests

Cameron McInally via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 5 07:28:11 PST 2018


Author: mcinally
Date: Mon Nov  5 07:28:10 2018
New Revision: 346137

URL: http://llvm.org/viewvc/llvm-project?rev=346137&view=rev
Log:
[NFCI][FPEnv] Split constrained intrinsic tests

The constrained intrinsic tests have grown in number. Split off
the FMA tests into their own file to reduce double coverage.

Differential Revision: https://reviews.llvm.org/D53932


Added:
    llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
Modified:
    llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll

Added: llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll?rev=346137&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics-fma.ll Mon Nov  5 07:28:10 2018
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s
+
+define <1 x float> @constrained_vector_fma_v1f32() {
+; CHECK-LABEL: constrained_vector_fma_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <1 x float> @llvm.experimental.constrained.fma.v1f32(
+           <1 x float> <float 0.5>,
+           <1 x float> <float 2.5>,
+           <1 x float> <float 4.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <1 x float> %fma
+}
+
+define <2 x double> @constrained_vector_fma_v2f64() {
+; CHECK-LABEL: constrained_vector_fma_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm1 = [1.5E+0,5.0E-1]
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [3.5E+0,2.5E+0]
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
+           <2 x double> <double 1.5, double 0.5>,
+           <2 x double> <double 3.5, double 2.5>,
+           <2 x double> <double 5.5, double 4.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <2 x double> %fma
+}
+
+define <3 x float> @constrained_vector_fma_v3f32() {
+; CHECK-LABEL: constrained_vector_fma_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm0 * xmm3) + mem
+; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0],xmm3[0],xmm2[2,3]
+; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <3 x float> @llvm.experimental.constrained.fma.v3f32(
+           <3 x float> <float 2.5, float 1.5, float 0.5>,
+           <3 x float> <float 5.5, float 4.5, float 3.5>,
+           <3 x float> <float 8.5, float 7.5, float 6.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <3 x float> %fma
+}
+
+define <3 x double> @constrained_vector_fma_v3f64() {
+; CHECK-LABEL: constrained_vector_fma_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [2.5E+0,1.5E+0]
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm2 = [5.5E+0,4.5E+0]
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <3 x double> @llvm.experimental.constrained.fma.v3f64(
+           <3 x double> <double 2.5, double 1.5, double 0.5>,
+           <3 x double> <double 5.5, double 4.5, double 3.5>,
+           <3 x double> <double 8.5, double 7.5, double 6.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <3 x double> %fma
+}
+
+define <4 x double> @constrained_vector_fma_v4f64() {
+; CHECK-LABEL: constrained_vector_fma_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <4 x double> @llvm.experimental.constrained.fma.v4f64(
+           <4 x double> <double 3.5, double 2.5, double 1.5, double 0.5>,
+           <4 x double> <double 7.5, double 6.5, double 5.5, double 4.5>,
+           <4 x double> <double 11.5, double 10.5, double 9.5, double 8.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <4 x double> %fma
+}
+
+define <4 x float> @constrained_vector_fma_v4f32() {
+; CHECK-LABEL: constrained_vector_fma_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
+; CHECK-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
+           <4 x float> <float 3.5, float 2.5, float 1.5, float 0.5>,
+           <4 x float> <float 7.5, float 6.5, float 5.5, float 4.5>,
+           <4 x float> <float 11.5, float 10.5, float 9.5, float 8.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <4 x float> %fma
+}
+
+define <8 x float> @constrained_vector_fma_v8f32() {
+; CHECK-LABEL: constrained_vector_fma_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1,7.5E+0,6.5E+0,5.5E+0,4.5E+0]
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0,1.15E+1,1.05E+1,9.5E+0,8.5E+0]
+; CHECK-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
+; CHECK-NEXT:    retq
+entry:
+  %fma = call <8 x float> @llvm.experimental.constrained.fma.v8f32(
+           <8 x float> <float 3.5, float 2.5, float 1.5, float 0.5,
+                        float 7.5, float 6.5, float 5.5, float 4.5>,
+           <8 x float> <float 7.5, float 6.5, float 5.5, float 4.5,
+                        float 11.5, float 10.5, float 9.5, float 8.5>,
+           <8 x float> <float 11.5, float 10.5, float 9.5, float 8.5,
+                        float 15.5, float 14.5, float 13.5, float 12.5>,
+           metadata !"round.dynamic",
+           metadata !"fpexcept.strict")
+  ret <8 x float> %fma
+}
+
+; Single width declarations
+declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
+
+; Scalar width declarations
+declare <1 x float> @llvm.experimental.constrained.fma.v1f32(<1 x float>, <1 x float>, <1 x float>, metadata, metadata)
+
+; Illegal width declarations
+declare <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float>, <3 x float>, <3 x float>, metadata, metadata)
+declare <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double>, <3 x double>, <3 x double>, metadata, metadata)
+
+; Double width declarations
+declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
+declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)

Modified: llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll?rev=346137&r1=346136&r2=346137&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll Mon Nov  5 07:28:10 2018
@@ -1,19 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck --check-prefix=COMMON --check-prefix=NO-FMA --check-prefix=FMACALL64 --check-prefix=FMACALL32 %s
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck -check-prefix=COMMON --check-prefix=HAS-FMA --check-prefix=FMA64 --check-prefix=FMA32 %s
+; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
 
 define <1 x float> @constrained_vector_fdiv_v1f32() {
-; NO-FMA-LABEL: constrained_vector_fdiv_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    divss {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fdiv_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vdivss {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fdiv_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    divss {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %div = call <1 x float> @llvm.experimental.constrained.fdiv.v1f32(
            <1 x float> <float 1.000000e+00>,
@@ -24,17 +17,11 @@ entry:
 }
 
 define <2 x double> @constrained_vector_fdiv_v2f64() {
-; NO-FMA-LABEL: constrained_vector_fdiv_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
-; NO-FMA-NEXT:    divpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fdiv_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
-; HAS-FMA-NEXT:    vdivpd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fdiv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
+; CHECK-NEXT:    divpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
            <2 x double> <double 1.000000e+00, double 2.000000e+00>,
@@ -45,31 +32,18 @@ entry:
 }
 
 define <3 x float> @constrained_vector_fdiv_v3f32() {
-; NO-FMA-LABEL: constrained_vector_fdiv_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    divss %xmm1, %xmm2
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    divss %xmm1, %xmm0
-; NO-FMA-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    divss %xmm1, %xmm3
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; NO-FMA-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fdiv_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vdivss %xmm0, %xmm1, %xmm1
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vdivss %xmm0, %xmm2, %xmm2
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vdivss %xmm0, %xmm3, %xmm0
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fdiv_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    divss %xmm1, %xmm2
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    divss %xmm1, %xmm0
+; CHECK-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; CHECK-NEXT:    divss %xmm1, %xmm3
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT:    retq
 entry:
   %div = call <3 x float> @llvm.experimental.constrained.fdiv.v3f32(
            <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
@@ -80,26 +54,17 @@ entry:
 }
 
 define <3 x double> @constrained_vector_fdiv_v3f64() {
-; NO-FMA-LABEL: constrained_vector_fdiv_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
-; NO-FMA-NEXT:    divpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    divsd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movapd %xmm0, %xmm1
-; NO-FMA-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fdiv_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vdivsd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm1 = [1.0E+0,2.0E+0]
-; HAS-FMA-NEXT:    vdivpd {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fdiv_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
+; CHECK-NEXT:    divpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    divsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    retq
 entry:
   %div = call <3 x double> @llvm.experimental.constrained.fdiv.v3f64(
            <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
@@ -110,20 +75,14 @@ entry:
 }
 
 define <4 x double> @constrained_vector_fdiv_v4f64() {
-; NO-FMA-LABEL: constrained_vector_fdiv_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
-; NO-FMA-NEXT:    divpd %xmm2, %xmm0
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
-; NO-FMA-NEXT:    divpd %xmm2, %xmm1
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fdiv_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
-; HAS-FMA-NEXT:    vdivpd {{.*}}(%rip), %ymm0, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fdiv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
+; CHECK-NEXT:    divpd %xmm2, %xmm0
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
+; CHECK-NEXT:    divpd %xmm2, %xmm1
+; CHECK-NEXT:    retq
 entry:
   %div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
            <4 x double> <double 1.000000e+00, double 2.000000e+00,
@@ -136,27 +95,16 @@ entry:
 }
 
 define <1 x float> @constrained_vector_frem_v1f32() {
-; NO-FMA-LABEL: constrained_vector_frem_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmodf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_frem_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmodf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_frem_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmodf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rem = call <1 x float> @llvm.experimental.constrained.frem.v1f32(
            <1 x float> <float 1.000000e+00>,
@@ -167,39 +115,22 @@ entry:
 }
 
 define <2 x double> @constrained_vector_frem_v2f64() {
-; NO-FMA-LABEL: constrained_vector_frem_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_frem_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_frem_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rem = call <2 x double> @llvm.experimental.constrained.frem.v2f64(
            <2 x double> <double 1.000000e+00, double 2.000000e+00>,
@@ -210,52 +141,29 @@ entry:
 }
 
 define <3 x float> @constrained_vector_frem_v3f32() {
-; NO-FMA-LABEL: constrained_vector_frem_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmodf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmodf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmodf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_frem_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmodf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmodf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmodf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_frem_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmodf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmodf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmodf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rem = call <3 x float> @llvm.experimental.constrained.frem.v3f32(
            <3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
@@ -266,54 +174,30 @@ entry:
 }
 
 define <3 x double> @constrained_vector_frem_v3f64() {
-; NO-FMA-LABEL: constrained_vector_frem_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_frem_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_frem_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rem = call <3 x double> @llvm.experimental.constrained.frem.v3f64(
            <3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
@@ -324,62 +208,34 @@ entry:
 }
 
 define <4 x double> @constrained_vector_frem_v4f64() {
-; NO-FMA-LABEL: constrained_vector_frem_v4f64:
-; NO-FMA:       # %bb.0:
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmod
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_frem_v4f64:
-; HAS-FMA:       # %bb.0:
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmod
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_frem_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmod
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
   %rem = call <4 x double> @llvm.experimental.constrained.frem.v4f64(
            <4 x double> <double 1.000000e+00, double 2.000000e+00,
                          double 3.000000e+00, double 4.000000e+00>,
@@ -391,17 +247,11 @@ define <4 x double> @constrained_vector_
 }
 
 define <1 x float> @constrained_vector_fmul_v1f32() {
-; NO-FMA-LABEL: constrained_vector_fmul_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    mulss {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fmul_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fmul_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %mul = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(
            <1 x float> <float 0x7FF0000000000000>,
@@ -412,17 +262,11 @@ entry:
 }
 
 define <2 x double> @constrained_vector_fmul_v2f64() {
-; NO-FMA-LABEL: constrained_vector_fmul_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    mulpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fmul_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vmulpd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fmul_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
            <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
@@ -433,27 +277,17 @@ entry:
 }
 
 define <3 x float> @constrained_vector_fmul_v3f32() {
-; NO-FMA-LABEL: constrained_vector_fmul_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    mulss %xmm1, %xmm2
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    mulss %xmm1, %xmm0
-; NO-FMA-NEXT:    mulss {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; NO-FMA-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fmul_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm1
-; HAS-FMA-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm2
-; HAS-FMA-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fmul_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    mulss %xmm1, %xmm2
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    mulss %xmm1, %xmm0
+; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT:    retq
 entry:
   %mul = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(
            <3 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000,
@@ -465,26 +299,17 @@ entry:
 }
 
 define <3 x double> @constrained_vector_fmul_v3f64() {
-; NO-FMA-LABEL: constrained_vector_fmul_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    mulpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    mulsd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movapd %xmm0, %xmm1
-; NO-FMA-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fmul_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmulsd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vmulpd {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fmul_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    mulsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    retq
 entry:
   %mul = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(
            <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -496,19 +321,13 @@ entry:
 }
 
 define <4 x double> @constrained_vector_fmul_v4f64() {
-; NO-FMA-LABEL: constrained_vector_fmul_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
-; NO-FMA-NEXT:    mulpd %xmm1, %xmm0
-; NO-FMA-NEXT:    mulpd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fmul_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vmulpd {{.*}}(%rip), %ymm0, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fmul_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
+; CHECK-NEXT:    mulpd %xmm1, %xmm0
+; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    retq
 entry:
   %mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
            <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -521,17 +340,11 @@ entry:
 }
 
 define <1 x float> @constrained_vector_fadd_v1f32() {
-; NO-FMA-LABEL: constrained_vector_fadd_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    addss {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fadd_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fadd_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(
            <1 x float> <float 0x7FF0000000000000>,
@@ -542,17 +355,11 @@ entry:
 }
 
 define <2 x double> @constrained_vector_fadd_v2f64() {
-; NO-FMA-LABEL: constrained_vector_fadd_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    addpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fadd_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vaddpd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fadd_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
            <2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
@@ -563,28 +370,17 @@ entry:
 }
 
 define <3 x float> @constrained_vector_fadd_v3f32() {
-; NO-FMA-LABEL: constrained_vector_fadd_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    xorps %xmm1, %xmm1
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    addss %xmm2, %xmm1
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    addss %xmm2, %xmm0
-; NO-FMA-NEXT:    addss {{.*}}(%rip), %xmm2
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; NO-FMA-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fadd_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; HAS-FMA-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm2
-; HAS-FMA-NEXT:    vaddss {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fadd_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorps %xmm1, %xmm1
+; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    addss %xmm2, %xmm1
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    addss %xmm2, %xmm0
+; CHECK-NEXT:    addss {{.*}}(%rip), %xmm2
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT:    retq
 entry:
   %add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
            <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
@@ -596,26 +392,17 @@ entry:
 }
 
 define <3 x double> @constrained_vector_fadd_v3f64() {
-; NO-FMA-LABEL: constrained_vector_fadd_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    addpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    xorpd %xmm1, %xmm1
-; NO-FMA-NEXT:    addsd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movapd %xmm0, %xmm1
-; NO-FMA-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fadd_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vaddsd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fadd_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-NEXT:    addsd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    retq
 entry:
   %add = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(
            <3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -627,19 +414,13 @@ entry:
 }
 
 define <4 x double> @constrained_vector_fadd_v4f64() {
-; NO-FMA-LABEL: constrained_vector_fadd_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
-; NO-FMA-NEXT:    addpd %xmm1, %xmm0
-; NO-FMA-NEXT:    addpd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fadd_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fadd_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
+; CHECK-NEXT:    addpd %xmm1, %xmm0
+; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    retq
 entry:
   %add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
            <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -652,17 +433,11 @@ entry:
 }
 
 define <1 x float> @constrained_vector_fsub_v1f32() {
-; NO-FMA-LABEL: constrained_vector_fsub_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    subss {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fsub_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsubss {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fsub_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    subss {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %sub = call <1 x float> @llvm.experimental.constrained.fsub.v1f32(
            <1 x float> <float 0x7FF0000000000000>,
@@ -673,17 +448,11 @@ entry:
 }
 
 define <2 x double> @constrained_vector_fsub_v2f64() {
-; NO-FMA-LABEL: constrained_vector_fsub_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; NO-FMA-NEXT:    subpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fsub_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vsubpd {{.*}}(%rip), %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fsub_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
            <2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
@@ -694,29 +463,18 @@ entry:
 }
 
 define <3 x float> @constrained_vector_fsub_v3f32() {
-; NO-FMA-LABEL: constrained_vector_fsub_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    xorps %xmm0, %xmm0
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movaps %xmm1, %xmm2
-; NO-FMA-NEXT:    subss %xmm0, %xmm2
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    subss {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    subss {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; NO-FMA-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fsub_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsubss %xmm0, %xmm1, %xmm0
-; HAS-FMA-NEXT:    vsubss {{.*}}(%rip), %xmm1, %xmm2
-; HAS-FMA-NEXT:    vsubss {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fsub_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorps %xmm0, %xmm0
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movaps %xmm1, %xmm2
+; CHECK-NEXT:    subss %xmm0, %xmm2
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    subss {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    subss {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT:    retq
 entry:
   %sub = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(
            <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
@@ -728,28 +486,18 @@ entry:
 }
 
 define <3 x double> @constrained_vector_fsub_v3f64() {
-; NO-FMA-LABEL: constrained_vector_fsub_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    xorpd %xmm0, %xmm0
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    subsd %xmm0, %xmm1
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; NO-FMA-NEXT:    subpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movapd %xmm0, %xmm1
-; NO-FMA-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fsub_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vsubpd {{.*}}(%rip), %xmm1, %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fsub_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorpd %xmm0, %xmm0
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    subsd %xmm0, %xmm1
+; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    retq
 entry:
   %sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(
            <3 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
@@ -761,19 +509,13 @@ entry:
 }
 
 define <4 x double> @constrained_vector_fsub_v4f64() {
-; NO-FMA-LABEL: constrained_vector_fsub_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
-; NO-FMA-NEXT:    movapd %xmm1, %xmm0
-; NO-FMA-NEXT:    subpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    subpd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fsub_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308]
-; HAS-FMA-NEXT:    vsubpd {{.*}}(%rip), %ymm0, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_fsub_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; CHECK-NEXT:    movapd %xmm1, %xmm0
+; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    retq
 entry:
   %sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
            <4 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
@@ -785,365 +527,12 @@ entry:
   ret <4 x double> %sub
 }
 
-define <1 x float> @constrained_vector_fma_v1f32() {
-; NO-FMA-LABEL: constrained_vector_fma_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <1 x float> @llvm.experimental.constrained.fma.v1f32(
-           <1 x float> <float 0.5>,
-           <1 x float> <float 2.5>,
-           <1 x float> <float 4.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <1 x float> %fma
-}
-
-define <2 x double> @constrained_vector_fma_v2f64() {
-; NO-FMA-LABEL: constrained_vector_fma_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm1 = [1.5E+0,5.0E-1]
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [3.5E+0,2.5E+0]
-; HAS-FMA-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
-           <2 x double> <double 1.5, double 0.5>,
-           <2 x double> <double 3.5, double 2.5>,
-           <2 x double> <double 5.5, double 4.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <2 x double> %fma
-}
-
-define <3 x float> @constrained_vector_fma_v3f32() {
-; NO-FMA-LABEL: constrained_vector_fma_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm0 * xmm3) + mem
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0],xmm3[0],xmm2[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <3 x float> @llvm.experimental.constrained.fma.v3f32(
-           <3 x float> <float 2.5, float 1.5, float 0.5>,
-           <3 x float> <float 5.5, float 4.5, float 3.5>,
-           <3 x float> <float 8.5, float 7.5, float 6.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <3 x float> %fma
-}
-
-define <3 x double> @constrained_vector_fma_v3f64() {
-; NO-FMA-LABEL: constrained_vector_fma_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm0 = [2.5E+0,1.5E+0]
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} xmm2 = [5.5E+0,4.5E+0]
-; HAS-FMA-NEXT:    vfmadd213pd {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm0
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <3 x double> @llvm.experimental.constrained.fma.v3f64(
-           <3 x double> <double 2.5, double 1.5, double 0.5>,
-           <3 x double> <double 5.5, double 4.5, double 3.5>,
-           <3 x double> <double 8.5, double 7.5, double 6.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <3 x double> %fma
-}
-
-define <4 x double> @constrained_vector_fma_v4f64() {
-; NO-FMA-LABEL: constrained_vector_fma_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; NO-FMA-NEXT:    callq fma
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
-; HAS-FMA-NEXT:    vmovapd {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
-; HAS-FMA-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <4 x double> @llvm.experimental.constrained.fma.v4f64(
-           <4 x double> <double 3.5, double 2.5, double 1.5, double 0.5>,
-           <4 x double> <double 7.5, double 6.5, double 5.5, double 4.5>,
-           <4 x double> <double 11.5, double 10.5, double 9.5, double 8.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <4 x double> %fma
-}
-
-define <4 x float> @constrained_vector_fma_v4f32() {
-; NO-FMA-LABEL: constrained_vector_fma_v4f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v4f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovaps {{.*#+}} xmm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
-; HAS-FMA-NEXT:    vmovaps {{.*#+}} xmm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
-; HAS-FMA-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
-           <4 x float> <float 3.5, float 2.5, float 1.5, float 0.5>,
-           <4 x float> <float 7.5, float 6.5, float 5.5, float 4.5>,
-           <4 x float> <float 11.5, float 10.5, float 9.5, float 8.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <4 x float> %fma
-}
-
-define <8 x float> @constrained_vector_fma_v8f32() {
-; NO-FMA-LABEL: constrained_vector_fma_v8f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $56, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 64
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaf
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $56, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_fma_v8f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovaps {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1,7.5E+0,6.5E+0,5.5E+0,4.5E+0]
-; HAS-FMA-NEXT:    vmovaps {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0,1.15E+1,1.05E+1,9.5E+0,8.5E+0]
-; HAS-FMA-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
-; HAS-FMA-NEXT:    retq
-entry:
-  %fma = call <8 x float> @llvm.experimental.constrained.fma.v8f32(
-           <8 x float> <float 3.5, float 2.5, float 1.5, float 0.5,
-                        float 7.5, float 6.5, float 5.5, float 4.5>,
-           <8 x float> <float 7.5, float 6.5, float 5.5, float 4.5,
-                        float 11.5, float 10.5, float 9.5, float 8.5>,
-           <8 x float> <float 11.5, float 10.5, float 9.5, float 8.5,
-                        float 15.5, float 14.5, float 13.5, float 12.5>,
-           metadata !"round.dynamic",
-           metadata !"fpexcept.strict")
-  ret <8 x float> %fma
-}
-
 define <1 x float> @constrained_vector_sqrt_v1f32() {
-; NO-FMA-LABEL: constrained_vector_sqrt_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    sqrtss %xmm0, %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sqrt_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sqrt_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    sqrtss %xmm0, %xmm0
+; CHECK-NEXT:    retq
 entry:
   %sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
                               <1 x float> <float 42.0>,
@@ -1153,15 +542,10 @@ entry:
 }
 
 define <2 x double> @constrained_vector_sqrt_v2f64() {
-; NO-FMA-LABEL: constrained_vector_sqrt_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sqrt_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vsqrtpd {{.*}}(%rip), %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sqrt_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
                               <2 x double> <double 42.0, double 42.1>,
@@ -1171,29 +555,17 @@ entry:
 }
 
 define <3 x float> @constrained_vector_sqrt_v3f32() {
-; NO-FMA-LABEL: constrained_vector_sqrt_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    sqrtss %xmm0, %xmm1
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    sqrtss %xmm0, %xmm0
-; NO-FMA-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    sqrtss %xmm2, %xmm2
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; NO-FMA-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sqrt_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vsqrtss %xmm2, %xmm2, %xmm2
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sqrt_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    sqrtss %xmm0, %xmm1
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    sqrtss %xmm0, %xmm0
+; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT:    sqrtss %xmm2, %xmm2
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT:    retq
 entry:
   %sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1203,24 +575,16 @@ entry:
 }
 
 define <3 x double> @constrained_vector_sqrt_v3f64() {
-; NO-FMA-LABEL: constrained_vector_sqrt_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    sqrtsd %xmm0, %xmm1
-; NO-FMA-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movapd %xmm0, %xmm1
-; NO-FMA-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    fldl -{{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sqrt_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vsqrtpd {{.*}}(%rip), %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sqrt_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    sqrtsd %xmm0, %xmm1
+; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movapd %xmm0, %xmm1
+; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    retq
 entry:
   %sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1230,17 +594,12 @@ entry:
 }
 
 define <4 x double> @constrained_vector_sqrt_v4f64() {
-; NO-FMA-LABEL: constrained_vector_sqrt_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
-; NO-FMA-NEXT:    sqrtpd {{.*}}(%rip), %xmm1
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sqrt_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vsqrtpd {{.*}}(%rip), %ymm0
-; HAS-FMA-NEXT:    retq
-entry:
+; CHECK-LABEL: constrained_vector_sqrt_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm1
+; CHECK-NEXT:    retq
+ entry:
   %sqrt = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
                               <4 x double> <double 42.0, double 42.1,
                                             double 42.2, double 42.3>,
@@ -1250,27 +609,16 @@ entry:
 }
 
 define <1 x float> @constrained_vector_pow_v1f32() {
-; NO-FMA-LABEL: constrained_vector_pow_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq powf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_pow_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq powf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_pow_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq powf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32(
                              <1 x float> <float 42.0>,
@@ -1281,39 +629,22 @@ entry:
 }
 
 define <2 x double> @constrained_vector_pow_v2f64() {
-; NO-FMA-LABEL: constrained_vector_pow_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_pow_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_pow_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64(
                              <2 x double> <double 42.1, double 42.2>,
@@ -1324,52 +655,29 @@ entry:
 }
 
 define <3 x float> @constrained_vector_pow_v3f32() {
-; NO-FMA-LABEL: constrained_vector_pow_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq powf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq powf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq powf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_pow_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq powf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq powf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq powf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_pow_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq powf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq powf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq powf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32(
                              <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1380,54 +688,30 @@ entry:
 }
 
 define <3 x double> @constrained_vector_pow_v3f64() {
-; NO-FMA-LABEL: constrained_vector_pow_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_pow_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_pow_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1438,62 +722,34 @@ entry:
 }
 
 define <4 x double> @constrained_vector_pow_v4f64() {
-; NO-FMA-LABEL: constrained_vector_pow_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq pow
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_pow_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq pow
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_pow_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq pow
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64(
                              <4 x double> <double 42.1, double 42.2,
@@ -1506,27 +762,16 @@ entry:
 }
 
 define <1 x float> @constrained_vector_powi_v1f32() {
-; NO-FMA-LABEL: constrained_vector_powi_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powisf2
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_powi_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powisf2
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_powi_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powisf2
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32(
                               <1 x float> <float 42.0>,
@@ -1537,39 +782,22 @@ entry:
 }
 
 define <2 x double> @constrained_vector_powi_v2f64() {
-; NO-FMA-LABEL: constrained_vector_powi_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_powi_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_powi_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64(
                               <2 x double> <double 42.1, double 42.2>,
@@ -1580,52 +808,29 @@ entry:
 }
 
 define <3 x float> @constrained_vector_powi_v3f32() {
-; NO-FMA-LABEL: constrained_vector_powi_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powisf2
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powisf2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powisf2
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_powi_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powisf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powisf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powisf2
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_powi_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powisf2
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powisf2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powisf2
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1636,54 +841,30 @@ entry:
 }
 
 define <3 x double> @constrained_vector_powi_v3f64() {
-; NO-FMA-LABEL: constrained_vector_powi_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_powi_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_powi_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1694,62 +875,34 @@ entry:
 }
 
 define <4 x double> @constrained_vector_powi_v4f64() {
-; NO-FMA-LABEL: constrained_vector_powi_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movl $3, %edi
-; NO-FMA-NEXT:    callq __powidf2
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_powi_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    movl $3, %edi
-; HAS-FMA-NEXT:    callq __powidf2
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_powi_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movl $3, %edi
+; CHECK-NEXT:    callq __powidf2
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64(
                               <4 x double> <double 42.1, double 42.2,
@@ -1761,25 +914,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_sin_v1f32() {
-; NO-FMA-LABEL: constrained_vector_sin_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq sinf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sin_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq sinf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sin_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq sinf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
                              <1 x float> <float 42.0>,
@@ -1789,35 +932,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_sin_v2f64() {
-; NO-FMA-LABEL: constrained_vector_sin_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sin_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sin_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
                              <2 x double> <double 42.0, double 42.1>,
@@ -1827,46 +955,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_sin_v3f32() {
-; NO-FMA-LABEL: constrained_vector_sin_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq sinf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq sinf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq sinf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sin_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq sinf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq sinf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq sinf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sin_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq sinf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq sinf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq sinf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1876,48 +984,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_sin_v3f64() {
-; NO-FMA-LABEL: constrained_vector_sin_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sin_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sin_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1927,54 +1014,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_sin_v4f64() {
-; NO-FMA-LABEL: constrained_vector_sin_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq sin
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_sin_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq sin
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_sin_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq sin
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64(
                              <4 x double> <double 42.0, double 42.1,
@@ -1985,25 +1048,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_cos_v1f32() {
-; NO-FMA-LABEL: constrained_vector_cos_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq cosf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_cos_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq cosf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_cos_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq cosf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
                              <1 x float> <float 42.0>,
@@ -2013,35 +1066,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_cos_v2f64() {
-; NO-FMA-LABEL: constrained_vector_cos_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_cos_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_cos_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
                              <2 x double> <double 42.0, double 42.1>,
@@ -2051,46 +1089,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_cos_v3f32() {
-; NO-FMA-LABEL: constrained_vector_cos_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq cosf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq cosf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq cosf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_cos_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq cosf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq cosf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq cosf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_cos_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq cosf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq cosf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq cosf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2100,48 +1118,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_cos_v3f64() {
-; NO-FMA-LABEL: constrained_vector_cos_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_cos_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_cos_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -2151,54 +1148,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_cos_v4f64() {
-; NO-FMA-LABEL: constrained_vector_cos_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq cos
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_cos_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq cos
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_cos_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq cos
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64(
                              <4 x double> <double 42.0, double 42.1,
@@ -2209,25 +1182,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_exp_v1f32() {
-; NO-FMA-LABEL: constrained_vector_exp_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq expf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq expf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq expf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
                              <1 x float> <float 42.0>,
@@ -2237,35 +1200,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_exp_v2f64() {
-; NO-FMA-LABEL: constrained_vector_exp_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
                              <2 x double> <double 42.0, double 42.1>,
@@ -2275,46 +1223,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_exp_v3f32() {
-; NO-FMA-LABEL: constrained_vector_exp_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq expf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq expf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq expf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq expf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq expf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq expf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq expf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq expf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq expf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2324,48 +1252,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_exp_v3f64() {
-; NO-FMA-LABEL: constrained_vector_exp_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -2375,54 +1282,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_exp_v4f64() {
-; NO-FMA-LABEL: constrained_vector_exp_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64(
                              <4 x double> <double 42.0, double 42.1,
@@ -2433,25 +1316,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_exp2_v1f32() {
-; NO-FMA-LABEL: constrained_vector_exp2_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq exp2f
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp2_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq exp2f
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp2_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq exp2f
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
                              <1 x float> <float 42.0>,
@@ -2461,35 +1334,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_exp2_v2f64() {
-; NO-FMA-LABEL: constrained_vector_exp2_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp2_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp2_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
                               <2 x double> <double 42.1, double 42.0>,
@@ -2499,46 +1357,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_exp2_v3f32() {
-; NO-FMA-LABEL: constrained_vector_exp2_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq exp2f
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq exp2f
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq exp2f
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp2_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq exp2f
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq exp2f
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq exp2f
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp2_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq exp2f
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq exp2f
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq exp2f
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2548,48 +1386,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_exp2_v3f64() {
-; NO-FMA-LABEL: constrained_vector_exp2_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp2_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp2_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -2599,54 +1416,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_exp2_v4f64() {
-; NO-FMA-LABEL: constrained_vector_exp2_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq exp2
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_exp2_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq exp2
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_exp2_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq exp2
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64(
                               <4 x double> <double 42.1, double 42.2,
@@ -2657,25 +1450,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_log_v1f32() {
-; NO-FMA-LABEL: constrained_vector_log_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq logf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq logf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq logf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
                              <1 x float> <float 42.0>,
@@ -2685,35 +1468,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_log_v2f64() {
-; NO-FMA-LABEL: constrained_vector_log_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
                              <2 x double> <double 42.0, double 42.1>,
@@ -2723,46 +1491,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_log_v3f32() {
-; NO-FMA-LABEL: constrained_vector_log_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq logf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq logf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq logf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq logf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq logf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq logf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq logf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq logf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq logf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2772,48 +1520,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_log_v3f64() {
-; NO-FMA-LABEL: constrained_vector_log_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -2823,54 +1550,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_log_v4f64() {
-; NO-FMA-LABEL: constrained_vector_log_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log = call <4 x double> @llvm.experimental.constrained.log.v4f64(
                              <4 x double> <double 42.0, double 42.1,
@@ -2881,25 +1584,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_log10_v1f32() {
-; NO-FMA-LABEL: constrained_vector_log10_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log10f
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log10_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log10f
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log10_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log10f
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
                              <1 x float> <float 42.0>,
@@ -2909,35 +1602,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_log10_v2f64() {
-; NO-FMA-LABEL: constrained_vector_log10_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log10_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log10_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
                                <2 x double> <double 42.0, double 42.1>,
@@ -2947,46 +1625,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_log10_v3f32() {
-; NO-FMA-LABEL: constrained_vector_log10_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log10f
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log10f
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log10f
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log10_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log10f
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log10f
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log10f
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log10_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log10f
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log10f
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log10f
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2996,48 +1654,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_log10_v3f64() {
-; NO-FMA-LABEL: constrained_vector_log10_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log10_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log10_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -3047,54 +1684,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_log10_v4f64() {
-; NO-FMA-LABEL: constrained_vector_log10_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log10
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log10_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log10
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log10_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log10
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64(
                                <4 x double> <double 42.0, double 42.1,
@@ -3105,25 +1718,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_log2_v1f32() {
-; NO-FMA-LABEL: constrained_vector_log2_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log2f
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log2_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log2f
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log2_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log2f
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
                              <1 x float> <float 42.0>,
@@ -3133,35 +1736,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_log2_v2f64() {
-; NO-FMA-LABEL: constrained_vector_log2_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log2_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log2_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
                               <2 x double> <double 42.0, double 42.1>,
@@ -3171,46 +1759,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_log2_v3f32() {
-; NO-FMA-LABEL: constrained_vector_log2_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log2f
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log2f
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq log2f
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log2_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log2f
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log2f
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq log2f
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log2_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log2f
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log2f
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq log2f
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -3220,48 +1788,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_log2_v3f64() {
-; NO-FMA-LABEL: constrained_vector_log2_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log2_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log2_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -3271,54 +1818,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_log2_v4f64() {
-; NO-FMA-LABEL: constrained_vector_log2_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq log2
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_log2_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    callq log2
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_log2_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq log2
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64(
                               <4 x double> <double 42.0, double 42.1,
@@ -3329,21 +1852,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_rint_v1f32() {
-; NO-FMA-LABEL: constrained_vector_rint_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq rintf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_rint_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_rint_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq rintf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
                              <1 x float> <float 42.0>,
@@ -3353,25 +1870,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_rint_v2f64() {
-; NO-FMA-LABEL: constrained_vector_rint_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_rint_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vroundpd $4, {{.*}}(%rip), %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_rint_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
                         <2 x double> <double 42.1, double 42.0>,
@@ -3381,39 +1893,27 @@ entry:
 }
 
 define <3 x float> @constrained_vector_rint_v3f32() {
-; NO-FMA-LABEL: constrained_vector_rint_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq rintf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq rintf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq rintf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_rint_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; HAS-FMA-NEXT:    retq
-entry:
+; CHECK-LABEL: constrained_vector_rint_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq rintf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq rintf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq rintf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+entry:
   %rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
                               metadata !"round.dynamic",
@@ -3422,35 +1922,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_rint_v3f64() {
-; NO-FMA-LABEL: constrained_vector_rint_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_rint_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vroundsd $4, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vroundpd $4, {{.*}}(%rip), %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_rint_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -3460,35 +1952,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_rint_v4f64() {
-; NO-FMA-LABEL: constrained_vector_rint_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq rint
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_rint_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vroundpd $4, {{.*}}(%rip), %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_rint_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq rint
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
                         <4 x double> <double 42.1, double 42.2,
@@ -3499,21 +1986,15 @@ entry:
 }
 
 define <1 x float> @constrained_vector_nearbyint_v1f32() {
-; NO-FMA-LABEL: constrained_vector_nearbyint_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq nearbyintf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_nearbyint_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $12, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_nearbyint_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq nearbyintf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
                                <1 x float> <float 42.0>,
@@ -3523,25 +2004,20 @@ entry:
 }
 
 define <2 x double> @constrained_vector_nearbyint_v2f64() {
-; NO-FMA-LABEL: constrained_vector_nearbyint_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_nearbyint_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vroundpd $12, {{.*}}(%rip), %xmm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_nearbyint_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
                                 <2 x double> <double 42.1, double 42.0>,
@@ -3551,38 +2027,26 @@ entry:
 }
 
 define <3 x float> @constrained_vector_nearbyint_v3f32() {
-; NO-FMA-LABEL: constrained_vector_nearbyint_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq nearbyintf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq nearbyintf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq nearbyintf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_nearbyint_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $12, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $12, %xmm1, %xmm1, %xmm1
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vroundss $12, %xmm2, %xmm2, %xmm2
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_nearbyint_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq nearbyintf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq nearbyintf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq nearbyintf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
                               <3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -3592,35 +2056,27 @@ entry:
 }
 
 define <3 x double> @constrained_vector_nearby_v3f64() {
-; NO-FMA-LABEL: constrained_vector_nearby_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_nearby_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vroundsd $12, %xmm0, %xmm0, %xmm0
-; HAS-FMA-NEXT:    vroundpd $12, {{.*}}(%rip), %xmm1
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_nearby_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
                           <3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -3630,35 +2086,30 @@ entry:
 }
 
 define <4 x double> @constrained_vector_nearbyint_v4f64() {
-; NO-FMA-LABEL: constrained_vector_nearbyint_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    callq nearbyint
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_nearbyint_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    vroundpd $12, {{.*}}(%rip), %ymm0
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_nearbyint_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    callq nearbyint
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
                                 <4 x double> <double 42.1, double 42.2,
@@ -3669,27 +2120,16 @@ entry:
 }
 
 define <1 x float> @constrained_vector_maxnum_v1f32() {
-; NO-FMA-LABEL: constrained_vector_maxnum_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaxf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_maxnum_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmaxf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_maxnum_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmaxf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
                                <1 x float> <float 42.0>, <1 x float> <float 41.0>,
@@ -3699,39 +2139,22 @@ entry:
 }
 
 define <2 x double> @constrained_vector_maxnum_v2f64() {
-; NO-FMA-LABEL: constrained_vector_maxnum_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_maxnum_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_maxnum_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
                                 <2 x double> <double 43.0, double 42.0>,
@@ -3742,52 +2165,29 @@ entry:
 }
 
 define <3 x float> @constrained_vector_maxnum_v3f32() {
-; NO-FMA-LABEL: constrained_vector_maxnum_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaxf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaxf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fmaxf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_maxnum_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmaxf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmaxf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fmaxf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_maxnum_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmaxf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmaxf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fmaxf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32(
                               <3 x float> <float 43.0, float 44.0, float 45.0>,
@@ -3798,54 +2198,30 @@ entry:
 }
 
 define <3 x double> @constrained_vector_max_v3f64() {
-; NO-FMA-LABEL: constrained_vector_max_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_max_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_max_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64(
                           <3 x double> <double 43.0, double 44.0, double 45.0>,
@@ -3856,62 +2232,34 @@ entry:
 }
 
 define <4 x double> @constrained_vector_maxnum_v4f64() {
-; NO-FMA-LABEL: constrained_vector_maxnum_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmax
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_maxnum_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmax
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_maxnum_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmax
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(
                                 <4 x double> <double 44.0, double 45.0,
@@ -3924,28 +2272,17 @@ entry:
 }
 
 define <1 x float> @constrained_vector_minnum_v1f32() {
-; NO-FMA-LABEL: constrained_vector_minnum_v1f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    pushq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 16
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fminf
-; NO-FMA-NEXT:    popq %rax
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_minnum_v1f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    pushq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 16
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fminf
-; HAS-FMA-NEXT:    popq %rax
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
-entry:
+; CHECK-LABEL: constrained_vector_minnum_v1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fminf
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+ entry:
   %min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
                                <1 x float> <float 42.0>, <1 x float> <float 41.0>,
                                metadata !"round.dynamic",
@@ -3954,39 +2291,22 @@ entry:
 }
 
 define <2 x double> @constrained_vector_minnum_v2f64() {
-; NO-FMA-LABEL: constrained_vector_minnum_v2f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_minnum_v2f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 32
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    addq $24, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_minnum_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
                                 <2 x double> <double 43.0, double 42.0>,
@@ -3997,52 +2317,29 @@ entry:
 }
 
 define <3 x float> @constrained_vector_minnum_v3f32() {
-; NO-FMA-LABEL: constrained_vector_minnum_v3f32:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fminf
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fminf
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; NO-FMA-NEXT:    callq fminf
-; NO-FMA-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
-; NO-FMA-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm1, %xmm0
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_minnum_v3f32:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fminf
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fminf
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; HAS-FMA-NEXT:    callq fminf
-; HAS-FMA-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; HAS-FMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
-; HAS-FMA-NEXT:    vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_minnum_v3f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fminf
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fminf
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT:    callq fminf
+; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps %xmm1, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32(
                               <3 x float> <float 43.0, float 44.0, float 45.0>,
@@ -4052,56 +2349,33 @@ entry:
   ret <3 x float> %min
 }
 
-define <3 x double> @constrained_vector_min_v3f64() {entry:
-; NO-FMA-LABEL: constrained_vector_min_v3f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 32
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    fldl {{[0-9]+}}(%rsp)
-; NO-FMA-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
-; NO-FMA-NEXT:    # xmm1 = mem[0],zero
-; NO-FMA-NEXT:    addq $24, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_min_v3f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 64
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    vzeroupper
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; HAS-FMA-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; HAS-FMA-NEXT:    addq $56, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
-  %min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64(
+define <3 x double> @constrained_vector_min_v3f64() {
+; CHECK-LABEL: constrained_vector_min_v3f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
+; CHECK-NEXT:    # xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
+; CHECK-NEXT:    # xmm1 = mem[0],zero
+; CHECK-NEXT:    addq $24, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+entry:
+ %min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64(
                           <3 x double> <double 43.0, double 44.0, double 45.0>,
                           <3 x double> <double 40.0, double 41.0, double 42.0>,
                           metadata !"round.dynamic",
@@ -4110,62 +2384,34 @@ define <3 x double> @constrained_vector_
 }
 
 define <4 x double> @constrained_vector_minnum_v4f64() {
-; NO-FMA-LABEL: constrained_vector_minnum_v4f64:
-; NO-FMA:       # %bb.0: # %entry
-; NO-FMA-NEXT:    subq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 48
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; NO-FMA-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; NO-FMA-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; NO-FMA-NEXT:    callq fmin
-; NO-FMA-NEXT:    movaps %xmm0, %xmm1
-; NO-FMA-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; NO-FMA-NEXT:    # xmm1 = xmm1[0],mem[0]
-; NO-FMA-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
-; NO-FMA-NEXT:    addq $40, %rsp
-; NO-FMA-NEXT:    .cfi_def_cfa_offset 8
-; NO-FMA-NEXT:    retq
-;
-; HAS-FMA-LABEL: constrained_vector_minnum_v4f64:
-; HAS-FMA:       # %bb.0: # %entry
-; HAS-FMA-NEXT:    subq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 48
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; HAS-FMA-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; HAS-FMA-NEXT:    callq fmin
-; HAS-FMA-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    # xmm0 = xmm0[0],mem[0]
-; HAS-FMA-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
-; HAS-FMA-NEXT:    addq $40, %rsp
-; HAS-FMA-NEXT:    .cfi_def_cfa_offset 8
-; HAS-FMA-NEXT:    retq
+; CHECK-LABEL: constrained_vector_minnum_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    callq fmin
+; CHECK-NEXT:    movaps %xmm0, %xmm1
+; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(
                                 <4 x double> <double 44.0, double 45.0,
@@ -4183,8 +2429,6 @@ declare <2 x double> @llvm.experimental.
 declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
-declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.pow.v2f64(<2 x double>, <2 x double>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.powi.v2f64(<2 x double>, i32, metadata, metadata)
@@ -4206,7 +2450,6 @@ declare <1 x float> @llvm.experimental.c
 declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.fdiv.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.frem.v1f32(<1 x float>, <1 x float>, metadata, metadata)
-declare <1 x float> @llvm.experimental.constrained.fma.v1f32(<1 x float>, <1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.sqrt.v1f32(<1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.pow.v1f32(<1 x float>, <1 x float>, metadata, metadata)
 declare <1 x float> @llvm.experimental.constrained.powi.v1f32(<1 x float>, i32, metadata, metadata)
@@ -4233,8 +2476,6 @@ declare <3 x float> @llvm.experimental.c
 declare <3 x double> @llvm.experimental.constrained.fdiv.v3f64(<3 x double>, <3 x double>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.frem.v3f32(<3 x float>, <3 x float>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.frem.v3f64(<3 x double>, <3 x double>, metadata, metadata)
-declare <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float>, <3 x float>, <3 x float>, metadata, metadata)
-declare <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double>, <3 x double>, <3 x double>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.sqrt.v3f32(<3 x float>, metadata, metadata)
 declare <3 x double> @llvm.experimental.constrained.sqrt.v3f64(<3 x double>, metadata, metadata)
 declare <3 x float> @llvm.experimental.constrained.pow.v3f32(<3 x float>, <3 x float>, metadata, metadata)
@@ -4270,8 +2511,6 @@ declare <4 x double> @llvm.experimental.
 declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.frem.v4f64(<4 x double>, <4 x double>, metadata, metadata)
-declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
-declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.pow.v4f64(<4 x double>, <4 x double>, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.powi.v4f64(<4 x double>, i32, metadata, metadata)




More information about the llvm-commits mailing list