[llvm] r327225 - [X86] Add comments to the end of FMA3 instructions to make the operation clear

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 10 13:30:46 PST 2018


Author: ctopper
Date: Sat Mar 10 13:30:46 2018
New Revision: 327225

URL: http://llvm.org/viewvc/llvm-project?rev=327225&view=rev
Log:
[X86] Add comments to the end of FMA3 instructions to make the operation clear

Summary:
There are three different operand orders for FMA3 instructions (132, 213, and 231), so figuring out the exact operation being performed requires a lot of thought.
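As a quick reference (Intel-syntax operand order; the AT&T-syntax tests below list the operands in reverse), the three FMADD forms compute:

    vfmadd132 dst, src2, src3    dst = (dst  * src3) + src2
    vfmadd213 dst, src2, src3    dst = (src2 * dst ) + src3
    vfmadd231 dst, src2, src3    dst = (src2 * src3) + dst

The FMSUB, FNMADD, FNMSUB, FMADDSUB and FMSUBADD variants use the same operand orders and differ only in how the product and accumulator are combined.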

This patch adds a comment to the end of the assembly line to print the exact operation.
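For example, for a masked vfmadd132ps (taken from the updated avx512vl tests below), the printed line now ends with a comment spelling out the multiply operands and the accumulator, roughly:

    vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1}    ## ymm0 = (ymm0 * ymm1) + ymm2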

I think I've got all the instructions in here except the ones with built-in rounding.

I didn't update all the tests, but I assume we can pick them up as tests are regenerated in the future.

Reviewers: spatel, v_klochkov, RKSimon

Reviewed By: spatel

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D44345

Modified:
    llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll
    llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll
    llvm/trunk/test/CodeGen/X86/fma-schedule.ll
    llvm/trunk/test/CodeGen/X86/fma.ll
    llvm/trunk/test/CodeGen/X86/recip-fastmath.ll
    llvm/trunk/test/CodeGen/X86/recip-fastmath2.ll

Modified: llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp (original)
+++ llvm/trunk/lib/Target/X86/InstPrinter/X86InstComments.cpp Sat Mar 10 13:30:46 2018
@@ -160,6 +160,46 @@ using namespace llvm;
   CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
   CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
 
+#define CASE_AVX512_FMA(Inst, suf)                \
+  CASE_AVX512_INS_COMMON(Inst, Z, suf)            \
+  CASE_AVX512_INS_COMMON(Inst, Z256, suf)         \
+  CASE_AVX512_INS_COMMON(Inst, Z128, suf)
+
+#define CASE_FMA(Inst, suf)                       \
+  CASE_AVX512_FMA(Inst, suf)                      \
+  CASE_AVX_INS_COMMON(Inst, , suf)                \
+  CASE_AVX_INS_COMMON(Inst, Y, suf)
+
+#define CASE_FMA_PACKED_REG(Inst)                 \
+  CASE_FMA(Inst##PD, r)                           \
+  CASE_FMA(Inst##PS, r)
+
+#define CASE_FMA_PACKED_MEM(Inst)                 \
+  CASE_FMA(Inst##PD, m)                           \
+  CASE_FMA(Inst##PS, m)                           \
+  CASE_AVX512_FMA(Inst##PD, mb)                   \
+  CASE_AVX512_FMA(Inst##PS, mb)
+
+#define CASE_FMA_SCALAR_REG(Inst)                 \
+  CASE_AVX_INS_COMMON(Inst##SD, , r)              \
+  CASE_AVX_INS_COMMON(Inst##SS, , r)              \
+  CASE_AVX_INS_COMMON(Inst##SD, , r_Int)          \
+  CASE_AVX_INS_COMMON(Inst##SS, , r_Int)          \
+  CASE_AVX_INS_COMMON(Inst##SD, Z, r)             \
+  CASE_AVX_INS_COMMON(Inst##SS, Z, r)             \
+  CASE_AVX512_INS_COMMON(Inst##SD, Z, r_Int)      \
+  CASE_AVX512_INS_COMMON(Inst##SS, Z, r_Int)
+
+#define CASE_FMA_SCALAR_MEM(Inst)                 \
+  CASE_AVX_INS_COMMON(Inst##SD, , m)              \
+  CASE_AVX_INS_COMMON(Inst##SS, , m)              \
+  CASE_AVX_INS_COMMON(Inst##SD, , m_Int)          \
+  CASE_AVX_INS_COMMON(Inst##SS, , m_Int)          \
+  CASE_AVX_INS_COMMON(Inst##SD, Z, m)             \
+  CASE_AVX_INS_COMMON(Inst##SS, Z, m)             \
+  CASE_AVX512_INS_COMMON(Inst##SD, Z, m_Int)      \
+  CASE_AVX512_INS_COMMON(Inst##SS, Z, m_Int)
+
 static unsigned getVectorRegSize(unsigned RegNo) {
   if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
     return 512;
@@ -232,6 +272,249 @@ static void printMasking(raw_ostream &OS
     OS << " {z}";
 }
 
+static bool printFMA3Comments(const MCInst *MI, raw_ostream &OS,
+                              const char *(*getRegName)(unsigned)) {
+  const char *Mul1Name = nullptr, *Mul2Name = nullptr, *AccName = nullptr;
+  unsigned NumOperands = MI->getNumOperands();
+  bool RegForm = false;
+  bool Negate = false;
+  StringRef AccStr = "+";
+
+  // The operands for FMA instructions without rounding fall into two forms.
+  //  dest, src1, src2, src3
+  //  dest, src1, mask, src2, src3
+  // Where src3 is either a register or 5 memory address operands. So to find
+  // dest and src1 we can index from the front. To find src2 and src3 we can
+  // index from the end by taking into account memory vs register form when
+  // finding src2.
+
+  switch (MI->getOpcode()) {
+  default:
+    return false;
+  CASE_FMA_PACKED_REG(FMADD132)
+  CASE_FMA_SCALAR_REG(FMADD132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADD132)
+  CASE_FMA_SCALAR_MEM(FMADD132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    break;
+
+  CASE_FMA_PACKED_REG(FMADD213)
+  CASE_FMA_SCALAR_REG(FMADD213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADD213)
+  CASE_FMA_SCALAR_MEM(FMADD213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    break;
+
+  CASE_FMA_PACKED_REG(FMADD231)
+  CASE_FMA_SCALAR_REG(FMADD231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADD231)
+  CASE_FMA_SCALAR_MEM(FMADD231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUB132)
+  CASE_FMA_SCALAR_REG(FMSUB132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUB132)
+  CASE_FMA_SCALAR_MEM(FMSUB132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUB213)
+  CASE_FMA_SCALAR_REG(FMSUB213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUB213)
+  CASE_FMA_SCALAR_MEM(FMSUB213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUB231)
+  CASE_FMA_SCALAR_REG(FMSUB231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUB231)
+  CASE_FMA_SCALAR_MEM(FMSUB231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    break;
+
+  CASE_FMA_PACKED_REG(FNMADD132)
+  CASE_FMA_SCALAR_REG(FNMADD132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMADD132)
+  CASE_FMA_SCALAR_MEM(FNMADD132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FNMADD213)
+  CASE_FMA_SCALAR_REG(FNMADD213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMADD213)
+  CASE_FMA_SCALAR_MEM(FNMADD213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FNMADD231)
+  CASE_FMA_SCALAR_REG(FNMADD231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMADD231)
+  CASE_FMA_SCALAR_MEM(FNMADD231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FNMSUB132)
+  CASE_FMA_SCALAR_REG(FNMSUB132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMSUB132)
+  CASE_FMA_SCALAR_MEM(FNMSUB132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FNMSUB213)
+  CASE_FMA_SCALAR_REG(FNMSUB213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMSUB213)
+  CASE_FMA_SCALAR_MEM(FNMSUB213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FNMSUB231)
+  CASE_FMA_SCALAR_REG(FNMSUB231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FNMSUB231)
+  CASE_FMA_SCALAR_MEM(FNMSUB231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-";
+    Negate = true;
+    break;
+
+  CASE_FMA_PACKED_REG(FMADDSUB132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADDSUB132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "+/-";
+    break;
+
+  CASE_FMA_PACKED_REG(FMADDSUB213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADDSUB213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "+/-";
+    break;
+
+  CASE_FMA_PACKED_REG(FMADDSUB231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMADDSUB231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    AccStr = "+/-";
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUBADD132)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUBADD132)
+    AccName = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul1Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-/+";
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUBADD213)
+    AccName = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUBADD213)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    Mul2Name = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-/+";
+    break;
+
+  CASE_FMA_PACKED_REG(FMSUBADD231)
+    Mul2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+  CASE_FMA_PACKED_MEM(FMSUBADD231)
+    Mul1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
+    AccName = getRegName(MI->getOperand(1).getReg());
+    AccStr = "-/+";
+    break;
+  }
+
+  const char *DestName = getRegName(MI->getOperand(0).getReg());
+
+  if (!Mul1Name) Mul1Name = "mem";
+  if (!Mul2Name) Mul2Name = "mem";
+  if (!AccName)  AccName = "mem";
+
+  OS << DestName << " = ";
+  // TODO: Print masking information?
+
+  if (Negate)
+    OS << '-';
+
+  OS << '(' << Mul1Name << " * " << Mul2Name << ") " << AccStr << ' '
+     << AccName;
+
+  return true;
+}
+
+
 //===----------------------------------------------------------------------===//
 // Top Level Entrypoint
 //===----------------------------------------------------------------------===//
@@ -248,6 +531,9 @@ bool llvm::EmitAnyX86InstComments(const
   unsigned NumOperands = MI->getNumOperands();
   bool RegForm = false;
 
+  if (printFMA3Comments(MI, OS, getRegName))
+    return true;
+
   switch (MI->getOpcode()) {
   default:
     // Not an instruction for which we can decode comments.

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Sat Mar 10 13:30:46 2018
@@ -3986,10 +3986,10 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm3
+; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm4 {%k1}
+; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm5
 ; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5
 ; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
@@ -4013,10 +4013,10 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm3
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm4 {%k1}
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm5
 ; CHECK-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5
 ; CHECK-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
@@ -4041,7 +4041,7 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
+; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
 ; CHECK-NEXT:    retq
@@ -4057,7 +4057,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-NEXT:    retq
   %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 3)
@@ -4070,10 +4070,10 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfmadd231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm5
 ; CHECK-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4097,10 +4097,10 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfmadd231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm5
 ; CHECK-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4123,7 +4123,7 @@ define void @fmadd_ss_mask_memfold(float
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
+; CHECK-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %a.val = load float, float* %a
@@ -4150,7 +4150,7 @@ define void @fmadd_ss_maskz_memfold(floa
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; CHECK-NEXT:    vmovss %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %a.val = load float, float* %a
@@ -4177,7 +4177,7 @@ define void @fmadd_sd_mask_memfold(doubl
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
+; CHECK-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %a.val = load double, double* %a
@@ -4200,7 +4200,7 @@ define void @fmadd_sd_maskz_memfold(doub
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; CHECK-NEXT:    vmovlpd %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
   %a.val = load double, double* %a
@@ -4224,10 +4224,10 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm5
 ; CHECK-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4251,10 +4251,10 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm5
 ; CHECK-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4278,10 +4278,10 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfnmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfnmsub231sd {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
 ; CHECK-NEXT:    vmovapd %xmm2, %xmm5
 ; CHECK-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4305,10 +4305,10 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfnmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT:    vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
 ; CHECK-NEXT:    kmovw %edi, %k1
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT:    vfnmsub231ss {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
 ; CHECK-NEXT:    vmovaps %xmm2, %xmm5
 ; CHECK-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
 ; CHECK-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
@@ -4330,7 +4330,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
+; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %q = load float, float* %ptr_b
@@ -4343,7 +4343,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
+; CHECK-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
 ; CHECK-NEXT:    retq
   %q = load float, float* %ptr_b
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -4357,7 +4357,7 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    kmovw %eax, %k1
-; CHECK-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq
   %q = load float, float* %ptr_b
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0

Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Sat Mar 10 13:30:46 2018
@@ -4092,6 +4092,7 @@ define <8 x float> @test_mask_vfmadd256_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
   ret <8 x float> %res
@@ -4104,6 +4105,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
   ret <4 x float> %res
@@ -4116,6 +4118,7 @@ define <4 x double> @test_mask_fmadd256_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask)
   ret <4 x double> %res
@@ -4128,6 +4131,7 @@ define <2 x double> @test_mask_fmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask)
   ret <2 x double> %res
@@ -4139,7 +4143,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4156,7 +4162,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4173,7 +4181,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa8,0xca]
+; CHECK-NEXT:    ## xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4188,7 +4198,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4205,7 +4217,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4222,7 +4236,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd213pd %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa8,0xca]
+; CHECK-NEXT:    ## ymm1 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4237,7 +4253,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4254,7 +4272,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4271,7 +4291,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfmadd213ps %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa8,0xca]
+; CHECK-NEXT:    ## xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4286,7 +4308,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4303,7 +4327,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4320,7 +4346,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfmadd213ps %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa8,0xca]
+; CHECK-NEXT:    ## ymm1 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4338,7 +4366,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4356,7 +4386,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4373,7 +4405,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4390,7 +4424,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4406,6 +4442,7 @@ define <8 x float> @test_mask_vfnmadd256
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
   ret <8 x float> %res
@@ -4418,6 +4455,7 @@ define <4 x float> @test_mask_vfnmadd128
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
   ret <4 x float> %res
@@ -4430,6 +4468,7 @@ define <4 x double> @test_mask_vfnmadd25
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
   ret <4 x double> %res
@@ -4442,6 +4481,7 @@ define <2 x double> @test_mask_vfnmadd12
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
   ret <2 x double> %res
@@ -4454,6 +4494,7 @@ define <8 x float> @test_mask_vfnmsub256
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) nounwind
   ret <8 x float> %res
@@ -4466,6 +4507,7 @@ define <4 x float> @test_mask_vfnmsub128
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
   ret <4 x float> %res
@@ -4478,6 +4520,7 @@ define <4 x double> @test_mask_vfnmsub25
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
   ret <4 x double> %res
@@ -4490,6 +4533,7 @@ define <2 x double> @test_mask_vfnmsub12
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
   ret <2 x double> %res
@@ -4502,7 +4546,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4519,7 +4565,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
+; CHECK-NEXT:    ## xmm2 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4534,7 +4582,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4551,7 +4601,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
+; CHECK-NEXT:    ## ymm2 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4566,7 +4618,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4583,7 +4637,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) - xmm2
 ; CHECK-NEXT:    vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
+; CHECK-NEXT:    ## xmm2 = -(xmm0 * xmm1) - xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4598,7 +4654,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4615,7 +4673,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfnmsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) - ymm2
 ; CHECK-NEXT:    vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
+; CHECK-NEXT:    ## ymm2 = -(ymm0 * ymm1) - ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4630,7 +4690,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfnmadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4645,7 +4707,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfnmadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4660,7 +4724,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xda]
+; CHECK-NEXT:    ## xmm3 = -(xmm0 * xmm3) + xmm2
 ; CHECK-NEXT:    vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
+; CHECK-NEXT:    ## xmm0 = -(xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4675,7 +4741,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xda]
+; CHECK-NEXT:    ## ymm3 = -(ymm0 * ymm3) + ymm2
 ; CHECK-NEXT:    vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
+; CHECK-NEXT:    ## ymm0 = -(ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4691,6 +4759,7 @@ define <8 x float> @test_mask_fmaddsub25
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask)
   ret <8 x float> %res
@@ -4703,6 +4772,7 @@ define <4 x float> @test_mask_fmaddsub12
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask)
   ret <4 x float> %res
@@ -4715,6 +4785,7 @@ define <4 x double> @test_mask_vfmaddsub
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
   ret <4 x double> %res
@@ -4727,6 +4798,7 @@ define <2 x double> @test_mask_vfmaddsub
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
   ret <2 x double> %res
@@ -4738,7 +4810,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4755,7 +4829,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4772,7 +4848,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub213pd %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0xa6,0xca]
+; CHECK-NEXT:    ## xmm1 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4787,7 +4865,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4804,7 +4884,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4821,7 +4903,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub213pd %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0xa6,0xca]
+; CHECK-NEXT:    ## ymm1 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4836,7 +4920,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4853,7 +4939,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4870,7 +4958,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) +/- xmm2
 ; CHECK-NEXT:    vfmaddsub213ps %xmm2, %xmm0, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0xa6,0xca]
+; CHECK-NEXT:    ## xmm1 = (xmm0 * xmm1) +/- xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4885,7 +4975,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4902,7 +4994,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4919,7 +5013,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmaddsub213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) +/- ymm2
 ; CHECK-NEXT:    vfmaddsub213ps %ymm2, %ymm0, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0xa6,0xca]
+; CHECK-NEXT:    ## ymm1 = (ymm0 * ymm1) +/- ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -4936,7 +5032,9 @@ define <2 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
 ; CHECK-NEXT:    vfmsubadd213pd %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) -/+ xmm2
 ; CHECK-NEXT:    vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) -/+ xmm2
 ; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
@@ -4953,7 +5051,9 @@ define <4 x double>@test_int_x86_avx512_
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
 ; CHECK-NEXT:    vfmsubadd213pd %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) -/+ ymm2
 ; CHECK-NEXT:    vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) -/+ ymm2
 ; CHECK-NEXT:    vaddpd %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
@@ -4970,7 +5070,9 @@ define <4 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
 ; CHECK-NEXT:    vfmsubadd213ps %xmm2, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xda]
+; CHECK-NEXT:    ## xmm3 = (xmm0 * xmm3) -/+ xmm2
 ; CHECK-NEXT:    vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
+; CHECK-NEXT:    ## xmm2 = (xmm0 * xmm1) -/+ xmm2
 ; CHECK-NEXT:    vaddps %xmm3, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
@@ -4987,7 +5089,9 @@ define <8 x float>@test_int_x86_avx512_m
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vmovaps %ymm1, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
 ; CHECK-NEXT:    vfmsubadd213ps %ymm2, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xda]
+; CHECK-NEXT:    ## ymm3 = (ymm0 * ymm3) -/+ ymm2
 ; CHECK-NEXT:    vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
+; CHECK-NEXT:    ## ymm2 = (ymm0 * ymm1) -/+ ymm2
 ; CHECK-NEXT:    vaddps %ymm3, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
@@ -5002,6 +5106,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
   ret <4 x float> %res
@@ -5011,6 +5116,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK-LABEL: test_mask_vfmadd128_ps_rz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
   ret <4 x float> %res
@@ -5021,6 +5127,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x float>, <4 x float>* %ptr_a2
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
@@ -5032,6 +5139,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 8
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
@@ -5042,6 +5150,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x float>, <4 x float>* %ptr_a2
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
@@ -5052,6 +5161,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
   %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
@@ -5063,6 +5173,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %q = load float, float* %ptr_a2
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -5078,6 +5189,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %q = load float, float* %ptr_a2, align 4
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -5092,6 +5204,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %q = load float, float* %ptr_a2
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -5106,6 +5219,7 @@ define <4 x float> @test_mask_vfmadd128_
 ; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %q = load float, float* %ptr_a2, align 4
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -5121,6 +5235,7 @@ define <2 x double> @test_mask_vfmadd128
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1]
+; CHECK-NEXT:    ## xmm0 = (xmm0 * xmm1) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
   ret <2 x double> %res
@@ -5130,6 +5245,7 @@ define <2 x double> @test_mask_vfmadd128
 ; CHECK-LABEL: test_mask_vfmadd128_pd_rz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
   ret <2 x double> %res
@@ -5140,6 +5256,7 @@ define <2 x double> @test_mask_vfmadd128
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <2 x double>, <2 x double>* %ptr_a2
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
@@ -5150,6 +5267,7 @@ define <2 x double> @test_mask_vfmadd128
 ; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
+; CHECK-NEXT:    ## xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <2 x double>, <2 x double>* %ptr_a2
   %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
@@ -5161,6 +5279,7 @@ define <4 x double> @test_mask_vfmadd256
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT:    vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1]
+; CHECK-NEXT:    ## ymm0 = (ymm0 * ymm1) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
   ret <4 x double> %res
@@ -5170,6 +5289,7 @@ define <4 x double> @test_mask_vfmadd256
 ; CHECK-LABEL: test_mask_vfmadd256_pd_rz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
   ret <4 x double> %res
@@ -5180,6 +5300,7 @@ define <4 x double> @test_mask_vfmadd256
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
+; CHECK-NEXT:    ## ymm0 = (ymm1 * ymm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x double>, <4 x double>* %ptr_a2
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
@@ -5190,6 +5311,7 @@ define <4 x double> @test_mask_vfmadd256
 ; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz:
 ; CHECK:       ## %bb.0:
 ; CHECK-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
+; CHECK-NEXT:    ## ymm0 = (ymm1 * ymm0) + mem
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %a2 = load <4 x double>, <4 x double>* %ptr_a2
   %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind

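(Aside, not part of the patch: the "+/-" and "-/+" shorthand in the new vfmaddsub/vfmsubadd comments above stands for alternating add and subtract across lanes. A minimal C++ sketch of that behaviour follows; the lane/sign pairing is taken from the Intel SDM, where FMADDSUB subtracts in even-numbered lanes and adds in odd-numbered lanes, and FMSUBADD is the reverse.)

#include <array>
#include <cstddef>
#include <cstdio>

// Scalar model of FMADDSUB: r[i] = a[i]*b[i] - c[i] in even lanes ("-"),
// r[i] = a[i]*b[i] + c[i] in odd lanes ("+").  Swap the two arms for FMSUBADD.
template <std::size_t N>
std::array<double, N> fmaddsub(const std::array<double, N> &a,
                               const std::array<double, N> &b,
                               const std::array<double, N> &c) {
  std::array<double, N> r{};
  for (std::size_t i = 0; i < N; ++i)
    r[i] = (i % 2) ? a[i] * b[i] + c[i]    // odd lane:  add
                   : a[i] * b[i] - c[i];   // even lane: subtract
  return r;
}

int main() {
  std::array<double, 2> a{2.0, 2.0}, b{3.0, 3.0}, c{1.0, 1.0};
  std::array<double, 2> r = fmaddsub(a, b, c);  // expect {5.0, 7.0}
  std::printf("%f %f\n", r[0], r[1]);
}
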
Modified: llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-commute-x86.ll Sat Mar 10 13:30:46 2018
@@ -42,7 +42,7 @@ define <4 x float> @test_x86_fmadd_baa_p
 ; FMA-LABEL: test_x86_fmadd_baa_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfmadd132ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -52,7 +52,7 @@ define <4 x float> @test_x86_fmadd_aba_p
 ; FMA-LABEL: test_x86_fmadd_aba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfmadd231ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -62,7 +62,7 @@ define <4 x float> @test_x86_fmadd_bba_p
 ; FMA-LABEL: test_x86_fmadd_bba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
-; FMA-NEXT:    vfmadd213ps (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -73,7 +73,7 @@ define <8 x float> @test_x86_fmadd_baa_p
 ; FMA-LABEL: test_x86_fmadd_baa_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfmadd132ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -83,7 +83,7 @@ define <8 x float> @test_x86_fmadd_aba_p
 ; FMA-LABEL: test_x86_fmadd_aba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfmadd231ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -93,7 +93,7 @@ define <8 x float> @test_x86_fmadd_bba_p
 ; FMA-LABEL: test_x86_fmadd_bba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
-; FMA-NEXT:    vfmadd213ps (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm0 * ymm0) + mem
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -136,7 +136,7 @@ define <2 x double> @test_x86_fmadd_baa_
 ; FMA-LABEL: test_x86_fmadd_baa_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfmadd132pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -146,7 +146,7 @@ define <2 x double> @test_x86_fmadd_aba_
 ; FMA-LABEL: test_x86_fmadd_aba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfmadd231pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -156,7 +156,7 @@ define <2 x double> @test_x86_fmadd_bba_
 ; FMA-LABEL: test_x86_fmadd_bba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
-; FMA-NEXT:    vfmadd213pd (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm0 * xmm0) + mem
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -167,7 +167,7 @@ define <4 x double> @test_x86_fmadd_baa_
 ; FMA-LABEL: test_x86_fmadd_baa_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfmadd132pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -177,7 +177,7 @@ define <4 x double> @test_x86_fmadd_aba_
 ; FMA-LABEL: test_x86_fmadd_aba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfmadd231pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -187,7 +187,7 @@ define <4 x double> @test_x86_fmadd_bba_
 ; FMA-LABEL: test_x86_fmadd_bba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
-; FMA-NEXT:    vfmadd213pd (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm0 * ymm0) + mem
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -231,7 +231,7 @@ define <4 x float> @test_x86_fnmadd_baa_
 ; FMA-LABEL: test_x86_fnmadd_baa_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfnmadd132ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -241,7 +241,7 @@ define <4 x float> @test_x86_fnmadd_aba_
 ; FMA-LABEL: test_x86_fnmadd_aba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfnmadd231ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -251,7 +251,7 @@ define <4 x float> @test_x86_fnmadd_bba_
 ; FMA-LABEL: test_x86_fnmadd_bba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
-; FMA-NEXT:    vfnmadd213ps (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm0 * xmm0) + mem
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -262,7 +262,7 @@ define <8 x float> @test_x86_fnmadd_baa_
 ; FMA-LABEL: test_x86_fnmadd_baa_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfnmadd132ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -272,7 +272,7 @@ define <8 x float> @test_x86_fnmadd_aba_
 ; FMA-LABEL: test_x86_fnmadd_aba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfnmadd231ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -282,7 +282,7 @@ define <8 x float> @test_x86_fnmadd_bba_
 ; FMA-LABEL: test_x86_fnmadd_bba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
-; FMA-NEXT:    vfnmadd213ps (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm0 * ymm0) + mem
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -325,7 +325,7 @@ define <2 x double> @test_x86_fnmadd_baa
 ; FMA-LABEL: test_x86_fnmadd_baa_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfnmadd132pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -335,7 +335,7 @@ define <2 x double> @test_x86_fnmadd_aba
 ; FMA-LABEL: test_x86_fnmadd_aba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfnmadd231pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -345,7 +345,7 @@ define <2 x double> @test_x86_fnmadd_bba
 ; FMA-LABEL: test_x86_fnmadd_bba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
-; FMA-NEXT:    vfnmadd213pd (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm0 * xmm0) + mem
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -356,7 +356,7 @@ define <4 x double> @test_x86_fnmadd_baa
 ; FMA-LABEL: test_x86_fnmadd_baa_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfnmadd132pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -366,7 +366,7 @@ define <4 x double> @test_x86_fnmadd_aba
 ; FMA-LABEL: test_x86_fnmadd_aba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfnmadd231pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -376,7 +376,7 @@ define <4 x double> @test_x86_fnmadd_bba
 ; FMA-LABEL: test_x86_fnmadd_bba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
-; FMA-NEXT:    vfnmadd213pd (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm0 * ymm0) + mem
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -419,7 +419,7 @@ define <4 x float> @test_x86_fmsub_baa_p
 ; FMA-LABEL: test_x86_fmsub_baa_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfmsub132ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -429,7 +429,7 @@ define <4 x float> @test_x86_fmsub_aba_p
 ; FMA-LABEL: test_x86_fmsub_aba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfmsub231ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -439,7 +439,7 @@ define <4 x float> @test_x86_fmsub_bba_p
 ; FMA-LABEL: test_x86_fmsub_bba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
-; FMA-NEXT:    vfmsub213ps (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm0 * xmm0) - mem
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -450,7 +450,7 @@ define <8 x float> @test_x86_fmsub_baa_p
 ; FMA-LABEL: test_x86_fmsub_baa_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfmsub132ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -460,7 +460,7 @@ define <8 x float> @test_x86_fmsub_aba_p
 ; FMA-LABEL: test_x86_fmsub_aba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfmsub231ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -470,7 +470,7 @@ define <8 x float> @test_x86_fmsub_bba_p
 ; FMA-LABEL: test_x86_fmsub_bba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
-; FMA-NEXT:    vfmsub213ps (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm0 * ymm0) - mem
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -513,7 +513,7 @@ define <2 x double> @test_x86_fmsub_baa_
 ; FMA-LABEL: test_x86_fmsub_baa_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfmsub132pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -523,7 +523,7 @@ define <2 x double> @test_x86_fmsub_aba_
 ; FMA-LABEL: test_x86_fmsub_aba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfmsub231pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -533,7 +533,7 @@ define <2 x double> @test_x86_fmsub_bba_
 ; FMA-LABEL: test_x86_fmsub_bba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
-; FMA-NEXT:    vfmsub213pd (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm0 * xmm0) - mem
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -544,7 +544,7 @@ define <4 x double> @test_x86_fmsub_baa_
 ; FMA-LABEL: test_x86_fmsub_baa_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfmsub132pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -554,7 +554,7 @@ define <4 x double> @test_x86_fmsub_aba_
 ; FMA-LABEL: test_x86_fmsub_aba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfmsub231pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -564,7 +564,7 @@ define <4 x double> @test_x86_fmsub_bba_
 ; FMA-LABEL: test_x86_fmsub_bba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
-; FMA-NEXT:    vfmsub213pd (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm0 * ymm0) - mem
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -608,7 +608,7 @@ define <4 x float> @test_x86_fnmsub_baa_
 ; FMA-LABEL: test_x86_fnmsub_baa_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfnmsub132ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %b, <4 x float> %a, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -618,7 +618,7 @@ define <4 x float> @test_x86_fnmsub_aba_
 ; FMA-LABEL: test_x86_fnmsub_aba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %xmm0
-; FMA-NEXT:    vfnmsub231ps (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -628,7 +628,7 @@ define <4 x float> @test_x86_fnmsub_bba_
 ; FMA-LABEL: test_x86_fnmsub_bba_ps:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %xmm0
-; FMA-NEXT:    vfnmsub213ps (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm0 * xmm0) - mem
 ; FMA-NEXT:    retq
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %b, <4 x float> %b, <4 x float> %a) nounwind
   ret <4 x float> %res
@@ -639,7 +639,7 @@ define <8 x float> @test_x86_fnmsub_baa_
 ; FMA-LABEL: test_x86_fnmsub_baa_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfnmsub132ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %b, <8 x float> %a, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -649,7 +649,7 @@ define <8 x float> @test_x86_fnmsub_aba_
 ; FMA-LABEL: test_x86_fnmsub_aba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rcx), %ymm0
-; FMA-NEXT:    vfnmsub231ps (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -659,7 +659,7 @@ define <8 x float> @test_x86_fnmsub_bba_
 ; FMA-LABEL: test_x86_fnmsub_bba_ps_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovaps (%rdx), %ymm0
-; FMA-NEXT:    vfnmsub213ps (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm0 * ymm0) - mem
 ; FMA-NEXT:    retq
   %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %b, <8 x float> %b, <8 x float> %a) nounwind
   ret <8 x float> %res
@@ -702,7 +702,7 @@ define <2 x double> @test_x86_fnmsub_baa
 ; FMA-LABEL: test_x86_fnmsub_baa_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfnmsub132pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %b, <2 x double> %a, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -712,7 +712,7 @@ define <2 x double> @test_x86_fnmsub_aba
 ; FMA-LABEL: test_x86_fnmsub_aba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %xmm0
-; FMA-NEXT:    vfnmsub231pd (%rdx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm0
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -722,7 +722,7 @@ define <2 x double> @test_x86_fnmsub_bba
 ; FMA-LABEL: test_x86_fnmsub_bba_pd:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %xmm0
-; FMA-NEXT:    vfnmsub213pd (%rcx), %xmm0, %xmm0
+; FMA-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm0 * xmm0) - mem
 ; FMA-NEXT:    retq
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %b, <2 x double> %b, <2 x double> %a) nounwind
   ret <2 x double> %res
@@ -733,7 +733,7 @@ define <4 x double> @test_x86_fnmsub_baa
 ; FMA-LABEL: test_x86_fnmsub_baa_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfnmsub132pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %b, <4 x double> %a, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -743,7 +743,7 @@ define <4 x double> @test_x86_fnmsub_aba
 ; FMA-LABEL: test_x86_fnmsub_aba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rcx), %ymm0
-; FMA-NEXT:    vfnmsub231pd (%rdx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm0
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res
@@ -753,7 +753,7 @@ define <4 x double> @test_x86_fnmsub_bba
 ; FMA-LABEL: test_x86_fnmsub_bba_pd_y:
 ; FMA:       # %bb.0:
 ; FMA-NEXT:    vmovapd (%rdx), %ymm0
-; FMA-NEXT:    vfnmsub213pd (%rcx), %ymm0, %ymm0
+; FMA-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm0 * ymm0) - mem
 ; FMA-NEXT:    retq
   %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %b, <4 x double> %b, <4 x double> %a) nounwind
   ret <4 x double> %res

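(Aside, not part of the patch: the comments added above encode the three FMA3 operand orders that the 132/213/231 suffixes select. A small, self-contained C++ sketch of the mapping is below; the slot names dst/src2/src3 follow the Intel manual, and the AT&T assembly in the diff lists those operands in reverse order.)

// fma132: dst = (dst  * src3) + src2
// fma213: dst = (src2 * dst ) + src3
// fma231: dst = (src2 * src3) + dst
// The fmsub/fnmadd/fnmsub variants only change the signs:
//   fmsub = (a*b) - c,  fnmadd = -(a*b) + c,  fnmsub = -(a*b) - c.
#include <cstdio>

static double fma132(double dst, double src2, double src3) { return dst * src3 + src2; }
static double fma213(double dst, double src2, double src3) { return src2 * dst + src3; }
static double fma231(double dst, double src2, double src3) { return src2 * src3 + dst; }

int main() {
  double a = 2.0, b = 3.0, c = 4.0;
  // All three compute a*b + c once the inputs are placed in the right slots.
  std::printf("%f %f %f\n", fma132(a, c, b), fma213(a, b, c), fma231(c, a, b));
}
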
Modified: llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-intrinsics-x86.ll Sat Mar 10 13:30:46 2018
@@ -8,11 +8,13 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ss:
@@ -20,6 +22,7 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -29,12 +32,14 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xa9,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa9,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -43,6 +48,7 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa9,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
   ret <4 x float> %res
@@ -53,11 +59,13 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_sd:
@@ -65,6 +73,7 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -74,12 +83,14 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_bac_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_bac_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa9,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = (xmm0 * xmm1) + xmm2
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -88,6 +99,7 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa9,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
   ret <2 x double> %res
@@ -98,11 +110,13 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps:
@@ -110,6 +124,7 @@ define <4 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -120,11 +135,13 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd:
@@ -132,6 +149,7 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -142,11 +160,13 @@ define <8 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_ps_256:
@@ -154,6 +174,7 @@ define <8 x float> @test_x86_fma_vfmadd_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -164,11 +185,13 @@ define <4 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmadd_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmadd_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmadd_pd_256:
@@ -176,6 +199,7 @@ define <4 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa8,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res
@@ -187,11 +211,13 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xab,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xab,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ss:
@@ -199,6 +225,7 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -208,12 +235,14 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xab,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = (xmm0 * xmm1) - xmm2
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xab,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = (xmm0 * xmm1) - xmm2
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -222,6 +251,7 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xab,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
   ret <4 x float> %res
@@ -232,11 +262,13 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xab,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_sd:
@@ -244,6 +276,7 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -253,12 +286,14 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_bac_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xab,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = (xmm0 * xmm1) - xmm2
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_bac_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xab,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = (xmm0 * xmm1) - xmm2
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -267,6 +302,7 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xab,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsub.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
   ret <2 x double> %res
@@ -277,11 +313,13 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps:
@@ -289,6 +327,7 @@ define <4 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -299,11 +338,13 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd:
@@ -311,6 +352,7 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -321,11 +363,13 @@ define <8 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) - ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) - ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_ps_256:
@@ -333,6 +377,7 @@ define <8 x float> @test_x86_fma_vfmsub_
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -343,11 +388,13 @@ define <4 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsub_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) - ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsub_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xaa,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) - ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsub_pd_256:
@@ -355,6 +402,7 @@ define <4 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xaa,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res
@@ -366,11 +414,13 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xad,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xad,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ss:
@@ -378,6 +428,7 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -387,12 +438,14 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xad,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = -(xmm0 * xmm1) + xmm2
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xad,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = -(xmm0 * xmm1) + xmm2
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -401,6 +454,7 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xad,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
   ret <4 x float> %res
@@ -411,11 +465,13 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xad,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_sd:
@@ -423,6 +479,7 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -432,12 +489,14 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_bac_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xad,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = -(xmm0 * xmm1) + xmm2
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_bac_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xad,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = -(xmm0 * xmm1) + xmm2
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -446,6 +505,7 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xad,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
   ret <2 x double> %res
@@ -456,11 +516,13 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xac,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps:
@@ -468,6 +530,7 @@ define <4 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xac,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -478,11 +541,13 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) + xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd:
@@ -490,6 +555,7 @@ define <2 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xac,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -500,11 +566,13 @@ define <8 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xac,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = -(ymm1 * ymm0) + ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = -(ymm1 * ymm0) + ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_ps_256:
@@ -512,6 +580,7 @@ define <8 x float> @test_x86_fma_vfnmadd
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xac,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = -(ymm1 * ymm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -522,11 +591,13 @@ define <4 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmadd_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = -(ymm1 * ymm0) + ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmadd_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = -(ymm1 * ymm0) + ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmadd_pd_256:
@@ -534,6 +605,7 @@ define <4 x double> @test_x86_fma_vfnmad
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xac,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = -(ymm1 * ymm0) + mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res
@@ -545,11 +617,13 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xaf,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ss:
@@ -557,6 +631,7 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -566,12 +641,14 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_ss:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ss %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0x79,0xaf,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = -(xmm0 * xmm1) - xmm2
 ; CHECK-FMA-NEXT:    vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_ss:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ss %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaf,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = -(xmm0 * xmm1) - xmm2
 ; CHECK-AVX512VL-NEXT:    vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -580,6 +657,7 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ss (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xaf,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ss(<4 x float> %a1, <4 x float> %a0, <4 x float> %a2)
   ret <4 x float> %res
@@ -590,11 +668,13 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xaf,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_sd:
@@ -602,6 +682,7 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x0a]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x01]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -611,12 +692,14 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_bac_sd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213sd %xmm2, %xmm0, %xmm1 # encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
+; CHECK-FMA-NEXT:    # xmm1 = -(xmm0 * xmm1) - xmm2
 ; CHECK-FMA-NEXT:    vmovapd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_bac_sd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213sd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaf,0xca]
+; CHECK-AVX512VL-NEXT:    # xmm1 = -(xmm0 * xmm1) - xmm2
 ; CHECK-AVX512VL-NEXT:    vmovapd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
@@ -625,6 +708,7 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213sd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xaf,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.sd(<2 x double> %a1, <2 x double> %a0, <2 x double> %a2)
   ret <2 x double> %res
@@ -635,11 +719,13 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xae,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps:
@@ -647,6 +733,7 @@ define <4 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xae,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfnmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -657,11 +744,13 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = -(xmm1 * xmm0) - xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd:
@@ -669,6 +758,7 @@ define <2 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xae,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = -(xmm1 * xmm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfnmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -679,11 +769,13 @@ define <8 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xae,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = -(ymm1 * ymm0) - ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = -(ymm1 * ymm0) - ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_ps_256:
@@ -691,6 +783,7 @@ define <8 x float> @test_x86_fma_vfnmsub
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xae,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = -(ymm1 * ymm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -701,11 +794,13 @@ define <4 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-LABEL: test_x86_fma_vfnmsub_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = -(ymm1 * ymm0) - ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfnmsub_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = -(ymm1 * ymm0) - ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfnmsub_pd_256:
@@ -713,6 +808,7 @@ define <4 x double> @test_x86_fma_vfnmsu
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfnmsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xae,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = -(ymm1 * ymm0) - mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res
@@ -724,11 +820,13 @@ define <4 x float> @test_x86_fma_vfmadds
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) +/- xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) +/- xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps:
@@ -736,6 +834,7 @@ define <4 x float> @test_x86_fma_vfmadds
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) +/- mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -746,11 +845,13 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) +/- xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) +/- xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd:
@@ -758,6 +859,7 @@ define <2 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) +/- mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -768,11 +870,13 @@ define <8 x float> @test_x86_fma_vfmadds
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) +/- ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) +/- ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_ps_256:
@@ -780,6 +884,7 @@ define <8 x float> @test_x86_fma_vfmadds
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) +/- mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -790,11 +895,13 @@ define <4 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-LABEL: test_x86_fma_vfmaddsub_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) +/- ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmaddsub_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) +/- ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmaddsub_pd_256:
@@ -802,6 +909,7 @@ define <4 x double> @test_x86_fma_vfmadd
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmaddsub213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa6,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) +/- mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res
@@ -813,11 +921,13 @@ define <4 x float> @test_x86_fma_vfmsuba
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps:
@@ -825,6 +935,7 @@ define <4 x float> @test_x86_fma_vfmsuba
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %xmm1 # encoding: [0xc5,0xf8,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %xmm0 # encoding: [0xc5,0xf8,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213ps (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0x71,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x float> @llvm.x86.fma.vfmsubadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   ret <4 x float> %res
@@ -835,11 +946,13 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
+; CHECK-FMA-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ xmm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ xmm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd:
@@ -847,6 +960,7 @@ define <2 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %xmm1 # encoding: [0xc5,0xf9,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %xmm0 # encoding: [0xc5,0xf9,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213pd (%r8), %xmm1, %xmm0 # encoding: [0xc4,0xc2,0xf1,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT:    # xmm0 = (xmm1 * xmm0) -/+ mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <2 x double> @llvm.x86.fma.vfmsubadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   ret <2 x double> %res
@@ -857,11 +971,13 @@ define <8 x float> @test_x86_fma_vfmsuba
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_ps_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_ps_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_ps_256:
@@ -869,6 +985,7 @@ define <8 x float> @test_x86_fma_vfmsuba
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rcx), %ymm1 # encoding: [0xc5,0xfc,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovaps (%rdx), %ymm0 # encoding: [0xc5,0xfc,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213ps (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0x75,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x float> @llvm.x86.fma.vfmsubadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   ret <8 x float> %res
@@ -879,11 +996,13 @@ define <4 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-LABEL: test_x86_fma_vfmsubadd_pd_256:
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
+; CHECK-FMA-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ ymm2
 ; CHECK-FMA-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-AVX512VL-LABEL: test_x86_fma_vfmsubadd_pd_256:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa7,0xc2]
+; CHECK-AVX512VL-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ ymm2
 ; CHECK-AVX512VL-NEXT:    retq # encoding: [0xc3]
 ;
 ; CHECK-FMA-WIN-LABEL: test_x86_fma_vfmsubadd_pd_256:
@@ -891,6 +1010,7 @@ define <4 x double> @test_x86_fma_vfmsub
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rcx), %ymm1 # encoding: [0xc5,0xfd,0x28,0x09]
 ; CHECK-FMA-WIN-NEXT:    vmovapd (%rdx), %ymm0 # encoding: [0xc5,0xfd,0x28,0x02]
 ; CHECK-FMA-WIN-NEXT:    vfmsubadd213pd (%r8), %ymm1, %ymm0 # encoding: [0xc4,0xc2,0xf5,0xa7,0x00]
+; CHECK-FMA-WIN-NEXT:    # ymm0 = (ymm1 * ymm0) -/+ mem
 ; CHECK-FMA-WIN-NEXT:    retq # encoding: [0xc3]
   %res = call <4 x double> @llvm.x86.fma.vfmsubadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   ret <4 x double> %res

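(Note for readers of the diff: the scheduling test below exercises all three operand orders, 132/213/231, and the new trailing comments spell out the exact formula each one computes. As a minimal, illustrative C sketch of those formulas only — fma132/fma213/fma231 are hypothetical helper names, and dst/src2/src3 follow the Intel operand order, which the AT&T listings in the diff print in reverse:

    #include <math.h>
    #include <stdio.h>

    /* Each helper mirrors one of the printed comments:
       132: dst = (dst  * src3) + src2
       213: dst = (src2 * dst ) + src3
       231: dst = (src2 * src3) + dst  */
    static double fma132(double dst, double src2, double src3) { return fma(dst, src3, src2); }
    static double fma213(double dst, double src2, double src3) { return fma(src2, dst, src3); }
    static double fma231(double dst, double src2, double src3) { return fma(src2, src3, dst); }

    int main(void) {
      double dst = 2.0, src2 = 3.0, src3 = 5.0;
      printf("132: %g\n", fma132(dst, src2, src3)); /* (2*5)+3 = 13 */
      printf("213: %g\n", fma213(dst, src2, src3)); /* (3*2)+5 = 11 */
      printf("231: %g\n", fma231(dst, src2, src3)); /* (3*5)+2 = 17 */
      return 0;
    }

Compile with -lm; the three results differ, which is exactly why the printed comments are useful when reading the checks below.)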
Modified: llvm/trunk/test/CodeGen/X86/fma-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma-schedule.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma-schedule.ll Sat Mar 10 13:30:46 2018
@@ -15,84 +15,84 @@ define void @test_vfmaddpd_128(<2 x doub
 ; GENERIC-LABEL: test_vfmaddpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [11:0.50]
+; KNL-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132pd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231pd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmadd132pd $2, $1, $0 \0A\09 vfmadd213pd $2, $1, $0 \0A\09 vfmadd231pd $2, $1, $0 \0A\09 vfmadd132pd $3, $1, $0 \0A\09 vfmadd213pd $3, $1, $0 \0A\09 vfmadd231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -103,12 +103,12 @@ define void @test_vfmaddpd_256(<4 x doub
 ; GENERIC-LABEL: test_vfmaddpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -116,12 +116,12 @@ define void @test_vfmaddpd_256(<4 x doub
 ; HASWELL-LABEL: test_vfmaddpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -129,12 +129,12 @@ define void @test_vfmaddpd_256(<4 x doub
 ; BROADWELL-LABEL: test_vfmaddpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -142,12 +142,12 @@ define void @test_vfmaddpd_256(<4 x doub
 ; SKYLAKE-LABEL: test_vfmaddpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -155,24 +155,24 @@ define void @test_vfmaddpd_256(<4 x doub
 ; KNL-LABEL: test_vfmaddpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; KNL-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -180,12 +180,12 @@ define void @test_vfmaddpd_256(<4 x doub
 ; ZNVER1-LABEL: test_vfmaddpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132pd {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231pd {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -197,84 +197,84 @@ define void @test_vfmaddps_128(<4 x floa
 ; GENERIC-LABEL: test_vfmaddps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [11:0.50]
+; KNL-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231ps {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmadd132ps $2, $1, $0 \0A\09 vfmadd213ps $2, $1, $0 \0A\09 vfmadd231ps $2, $1, $0 \0A\09 vfmadd132ps $3, $1, $0 \0A\09 vfmadd213ps $3, $1, $0 \0A\09 vfmadd231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -285,12 +285,12 @@ define void @test_vfmaddps_256(<8 x floa
 ; GENERIC-LABEL: test_vfmaddps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -298,12 +298,12 @@ define void @test_vfmaddps_256(<8 x floa
 ; HASWELL-LABEL: test_vfmaddps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -311,12 +311,12 @@ define void @test_vfmaddps_256(<8 x floa
 ; BROADWELL-LABEL: test_vfmaddps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -324,12 +324,12 @@ define void @test_vfmaddps_256(<8 x floa
 ; SKYLAKE-LABEL: test_vfmaddps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -337,24 +337,24 @@ define void @test_vfmaddps_256(<8 x floa
 ; KNL-LABEL: test_vfmaddps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; KNL-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -362,12 +362,12 @@ define void @test_vfmaddps_256(<8 x floa
 ; ZNVER1-LABEL: test_vfmaddps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * mem) + ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231ps {{.*#+}} ymm0 = (ymm1 * mem) + ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -379,84 +379,84 @@ define void @test_vfmaddsd_128(<2 x doub
 ; GENERIC-LABEL: test_vfmaddsd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmadd132sd $2, $1, $0 \0A\09 vfmadd213sd $2, $1, $0 \0A\09 vfmadd231sd $2, $1, $0 \0A\09 vfmadd132sd $3, $1, $0 \0A\09 vfmadd213sd $3, $1, $0 \0A\09 vfmadd231sd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -467,84 +467,84 @@ define void @test_vfmaddss_128(<4 x floa
 ; GENERIC-LABEL: test_vfmaddss_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddss_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddss_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddss_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddss_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddss_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddss_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmadd132ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd213ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmadd231ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmadd132ss $2, $1, $0 \0A\09 vfmadd213ss $2, $1, $0 \0A\09 vfmadd231ss $2, $1, $0 \0A\09 vfmadd132ss $3, $1, $0 \0A\09 vfmadd213ss $3, $1, $0 \0A\09 vfmadd231ss $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -559,84 +559,84 @@ define void @test_vfmaddsubpd_128(<2 x d
 ; GENERIC-LABEL: test_vfmaddsubpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmaddsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub231pd {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmaddsub132pd $2, $1, $0 \0A\09 vfmaddsub213pd $2, $1, $0 \0A\09 vfmaddsub231pd $2, $1, $0 \0A\09 vfmaddsub132pd $3, $1, $0 \0A\09 vfmaddsub213pd $3, $1, $0 \0A\09 vfmaddsub231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -647,12 +647,12 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; GENERIC-LABEL: test_vfmaddsubpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -660,12 +660,12 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; HASWELL-LABEL: test_vfmaddsubpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -673,12 +673,12 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; BROADWELL-LABEL: test_vfmaddsubpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -686,12 +686,12 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; SKYLAKE-LABEL: test_vfmaddsubpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -699,24 +699,24 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; KNL-LABEL: test_vfmaddsubpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -724,12 +724,12 @@ define void @test_vfmaddsubpd_256(<4 x d
 ; ZNVER1-LABEL: test_vfmaddsubpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmaddsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub231pd {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -741,84 +741,84 @@ define void @test_vfmaddsubps_128(<4 x f
 ; GENERIC-LABEL: test_vfmaddsubps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmaddsubps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmaddsubps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmaddsubps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmaddsubps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [11:0.50]
+; KNL-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [10:0.50]
+; SKX-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmaddsubps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmaddsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) +/- xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) +/- xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * mem) +/- xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub231ps {{.*#+}} xmm0 = (xmm1 * mem) +/- xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmaddsub132ps $2, $1, $0 \0A\09 vfmaddsub213ps $2, $1, $0 \0A\09 vfmaddsub231ps $2, $1, $0 \0A\09 vfmaddsub132ps $3, $1, $0 \0A\09 vfmaddsub213ps $3, $1, $0 \0A\09 vfmaddsub231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -829,12 +829,12 @@ define void @test_vfmaddsubps_256(<8 x f
 ; GENERIC-LABEL: test_vfmaddsubps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -842,12 +842,12 @@ define void @test_vfmaddsubps_256(<8 x f
 ; HASWELL-LABEL: test_vfmaddsubps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -855,12 +855,12 @@ define void @test_vfmaddsubps_256(<8 x f
 ; BROADWELL-LABEL: test_vfmaddsubps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -868,12 +868,12 @@ define void @test_vfmaddsubps_256(<8 x f
 ; SKYLAKE-LABEL: test_vfmaddsubps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -881,24 +881,24 @@ define void @test_vfmaddsubps_256(<8 x f
 ; KNL-LABEL: test_vfmaddsubps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; KNL-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmaddsubps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [11:0.50]
+; SKX-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -906,12 +906,12 @@ define void @test_vfmaddsubps_256(<8 x f
 ; ZNVER1-LABEL: test_vfmaddsubps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmaddsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmaddsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmaddsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) +/- ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) +/- ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * mem) +/- ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmaddsub231ps {{.*#+}} ymm0 = (ymm1 * mem) +/- ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -927,84 +927,84 @@ define void @test_vfmsubaddpd_128(<2 x d
 ; GENERIC-LABEL: test_vfmsubaddpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsubadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd231pd {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsubadd132pd $2, $1, $0 \0A\09 vfmsubadd213pd $2, $1, $0 \0A\09 vfmsubadd231pd $2, $1, $0 \0A\09 vfmsubadd132pd $3, $1, $0 \0A\09 vfmsubadd213pd $3, $1, $0 \0A\09 vfmsubadd231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -1015,12 +1015,12 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; GENERIC-LABEL: test_vfmsubaddpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -1028,12 +1028,12 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; HASWELL-LABEL: test_vfmsubaddpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -1041,12 +1041,12 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; BROADWELL-LABEL: test_vfmsubaddpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -1054,12 +1054,12 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; SKYLAKE-LABEL: test_vfmsubaddpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -1067,24 +1067,24 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; KNL-LABEL: test_vfmsubaddpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1092,12 +1092,12 @@ define void @test_vfmsubaddpd_256(<4 x d
 ; ZNVER1-LABEL: test_vfmsubaddpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsubadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd231pd {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1109,84 +1109,84 @@ define void @test_vfmsubaddps_128(<4 x f
 ; GENERIC-LABEL: test_vfmsubaddps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubaddps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubaddps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubaddps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubaddps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [11:0.50]
+; KNL-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [10:0.50]
+; SKX-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubaddps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsubadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm2) -/+ xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * xmm2) -/+ xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * mem) -/+ xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd231ps {{.*#+}} xmm0 = (xmm1 * mem) -/+ xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsubadd132ps $2, $1, $0 \0A\09 vfmsubadd213ps $2, $1, $0 \0A\09 vfmsubadd231ps $2, $1, $0 \0A\09 vfmsubadd132ps $3, $1, $0 \0A\09 vfmsubadd213ps $3, $1, $0 \0A\09 vfmsubadd231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -1197,12 +1197,12 @@ define void @test_vfmsubaddps_256(<8 x f
 ; GENERIC-LABEL: test_vfmsubaddps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -1210,12 +1210,12 @@ define void @test_vfmsubaddps_256(<8 x f
 ; HASWELL-LABEL: test_vfmsubaddps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -1223,12 +1223,12 @@ define void @test_vfmsubaddps_256(<8 x f
 ; BROADWELL-LABEL: test_vfmsubaddps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -1236,12 +1236,12 @@ define void @test_vfmsubaddps_256(<8 x f
 ; SKYLAKE-LABEL: test_vfmsubaddps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -1249,24 +1249,24 @@ define void @test_vfmsubaddps_256(<8 x f
 ; KNL-LABEL: test_vfmsubaddps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; KNL-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubaddps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [11:0.50]
+; SKX-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1274,12 +1274,12 @@ define void @test_vfmsubaddps_256(<8 x f
 ; ZNVER1-LABEL: test_vfmsubaddps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsubadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsubadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsubadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) -/+ ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * ymm2) -/+ ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * mem) -/+ ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsubadd231ps {{.*#+}} ymm0 = (ymm1 * mem) -/+ ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1295,84 +1295,84 @@ define void @test_vfmsubpd_128(<2 x doub
 ; GENERIC-LABEL: test_vfmsubpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [11:0.50]
+; KNL-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; SKX-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132pd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231pd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsub132pd $2, $1, $0 \0A\09 vfmsub213pd $2, $1, $0 \0A\09 vfmsub231pd $2, $1, $0 \0A\09 vfmsub132pd $3, $1, $0 \0A\09 vfmsub213pd $3, $1, $0 \0A\09 vfmsub231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -1383,12 +1383,12 @@ define void @test_vfmsubpd_256(<4 x doub
 ; GENERIC-LABEL: test_vfmsubpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -1396,12 +1396,12 @@ define void @test_vfmsubpd_256(<4 x doub
 ; HASWELL-LABEL: test_vfmsubpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -1409,12 +1409,12 @@ define void @test_vfmsubpd_256(<4 x doub
 ; BROADWELL-LABEL: test_vfmsubpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -1422,12 +1422,12 @@ define void @test_vfmsubpd_256(<4 x doub
 ; SKYLAKE-LABEL: test_vfmsubpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -1435,24 +1435,24 @@ define void @test_vfmsubpd_256(<4 x doub
 ; KNL-LABEL: test_vfmsubpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; KNL-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; SKX-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1460,12 +1460,12 @@ define void @test_vfmsubpd_256(<4 x doub
 ; ZNVER1-LABEL: test_vfmsubpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132pd {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231pd {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1477,84 +1477,84 @@ define void @test_vfmsubps_128(<4 x floa
 ; GENERIC-LABEL: test_vfmsubps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [11:0.50]
+; HASWELL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [11:0.50]
+; KNL-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; SKX-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132ps {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231ps {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsub132ps $2, $1, $0 \0A\09 vfmsub213ps $2, $1, $0 \0A\09 vfmsub231ps $2, $1, $0 \0A\09 vfmsub132ps $3, $1, $0 \0A\09 vfmsub213ps $3, $1, $0 \0A\09 vfmsub231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -1565,12 +1565,12 @@ define void @test_vfmsubps_256(<8 x floa
 ; GENERIC-LABEL: test_vfmsubps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -1578,12 +1578,12 @@ define void @test_vfmsubps_256(<8 x floa
 ; HASWELL-LABEL: test_vfmsubps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; HASWELL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -1591,12 +1591,12 @@ define void @test_vfmsubps_256(<8 x floa
 ; BROADWELL-LABEL: test_vfmsubps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -1604,12 +1604,12 @@ define void @test_vfmsubps_256(<8 x floa
 ; SKYLAKE-LABEL: test_vfmsubps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -1617,24 +1617,24 @@ define void @test_vfmsubps_256(<8 x floa
 ; KNL-LABEL: test_vfmsubps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; KNL-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [11:0.50]
+; SKX-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -1642,12 +1642,12 @@ define void @test_vfmsubps_256(<8 x floa
 ; ZNVER1-LABEL: test_vfmsubps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132ps {{.*#+}} ymm0 = (ymm0 * mem) - ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231ps {{.*#+}} ymm0 = (ymm1 * mem) - ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -1659,84 +1659,84 @@ define void @test_vfmsubsd_128(<2 x doub
 ; GENERIC-LABEL: test_vfmsubsd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubsd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubsd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubsd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubsd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; KNL-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubsd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; SKX-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubsd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132sd {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231sd {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsub132sd $2, $1, $0 \0A\09 vfmsub213sd $2, $1, $0 \0A\09 vfmsub231sd $2, $1, $0 \0A\09 vfmsub132sd $3, $1, $0 \0A\09 vfmsub213sd $3, $1, $0 \0A\09 vfmsub231sd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -1747,84 +1747,84 @@ define void @test_vfmsubss_128(<4 x floa
 ; GENERIC-LABEL: test_vfmsubss_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfmsubss_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfmsubss_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfmsubss_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfmsubss_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [10:0.50]
+; KNL-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfmsubss_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [9:0.50]
+; SKX-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfmsubss_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfmsub132ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub213ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfmsub231ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfmsub132ss {{.*#+}} xmm0 = (xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfmsub231ss {{.*#+}} xmm0 = (xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfmsub132ss $2, $1, $0 \0A\09 vfmsub213ss $2, $1, $0 \0A\09 vfmsub231ss $2, $1, $0 \0A\09 vfmsub132ss $3, $1, $0 \0A\09 vfmsub213ss $3, $1, $0 \0A\09 vfmsub231ss $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -1839,84 +1839,84 @@ define void @test_vfnmaddpd_128(<2 x dou
 ; GENERIC-LABEL: test_vfnmaddpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmaddpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmaddpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmaddpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmaddpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [11:0.50]
+; KNL-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmaddpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132pd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231pd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmadd132pd $2, $1, $0 \0A\09 vfnmadd213pd $2, $1, $0 \0A\09 vfnmadd231pd $2, $1, $0 \0A\09 vfnmadd132pd $3, $1, $0 \0A\09 vfnmadd213pd $3, $1, $0 \0A\09 vfnmadd231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -1927,12 +1927,12 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; GENERIC-LABEL: test_vfnmaddpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -1940,12 +1940,12 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; HASWELL-LABEL: test_vfnmaddpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -1953,12 +1953,12 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; BROADWELL-LABEL: test_vfnmaddpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -1966,12 +1966,12 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; SKYLAKE-LABEL: test_vfnmaddpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -1979,24 +1979,24 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; KNL-LABEL: test_vfnmaddpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; KNL-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2004,12 +2004,12 @@ define void @test_vfnmaddpd_256(<4 x dou
 ; ZNVER1-LABEL: test_vfnmaddpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132pd {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231pd {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2021,84 +2021,84 @@ define void @test_vfnmaddps_128(<4 x flo
 ; GENERIC-LABEL: test_vfnmaddps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmaddps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [11:0.50]
+; HASWELL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmaddps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmaddps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmaddps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [11:0.50]
+; KNL-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmaddps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132ps {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231ps {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmadd132ps $2, $1, $0 \0A\09 vfnmadd213ps $2, $1, $0 \0A\09 vfnmadd231ps $2, $1, $0 \0A\09 vfnmadd132ps $3, $1, $0 \0A\09 vfnmadd213ps $3, $1, $0 \0A\09 vfnmadd231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -2109,12 +2109,12 @@ define void @test_vfnmaddps_256(<8 x flo
 ; GENERIC-LABEL: test_vfnmaddps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -2122,12 +2122,12 @@ define void @test_vfnmaddps_256(<8 x flo
 ; HASWELL-LABEL: test_vfnmaddps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; HASWELL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -2135,12 +2135,12 @@ define void @test_vfnmaddps_256(<8 x flo
 ; BROADWELL-LABEL: test_vfnmaddps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -2148,12 +2148,12 @@ define void @test_vfnmaddps_256(<8 x flo
 ; SKYLAKE-LABEL: test_vfnmaddps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -2161,24 +2161,24 @@ define void @test_vfnmaddps_256(<8 x flo
 ; KNL-LABEL: test_vfnmaddps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; KNL-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2186,12 +2186,12 @@ define void @test_vfnmaddps_256(<8 x flo
 ; ZNVER1-LABEL: test_vfnmaddps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) + ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) + ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132ps {{.*#+}} ymm0 = -(ymm0 * mem) + ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231ps {{.*#+}} ymm0 = -(ymm1 * mem) + ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2203,84 +2203,84 @@ define void @test_vfnmaddsd_128(<2 x dou
 ; GENERIC-LABEL: test_vfnmaddsd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmaddsd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmaddsd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmaddsd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmaddsd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddsd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmaddsd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132sd {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231sd {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmadd132sd $2, $1, $0 \0A\09 vfnmadd213sd $2, $1, $0 \0A\09 vfnmadd231sd $2, $1, $0 \0A\09 vfnmadd132sd $3, $1, $0 \0A\09 vfnmadd213sd $3, $1, $0 \0A\09 vfnmadd231sd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -2291,84 +2291,84 @@ define void @test_vfnmaddss_128(<4 x flo
 ; GENERIC-LABEL: test_vfnmaddss_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmaddss_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmaddss_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmaddss_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmaddss_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmaddss_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmaddss_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmadd132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmadd132ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd213ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmadd231ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) + xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) + xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmadd132ss {{.*#+}} xmm0 = -(xmm0 * mem) + xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmadd231ss {{.*#+}} xmm0 = -(xmm1 * mem) + xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmadd132ss $2, $1, $0 \0A\09 vfnmadd213ss $2, $1, $0 \0A\09 vfnmadd231ss $2, $1, $0 \0A\09 vfnmadd132ss $3, $1, $0 \0A\09 vfnmadd213ss $3, $1, $0 \0A\09 vfnmadd231ss $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -2383,84 +2383,84 @@ define void @test_vfnmsubpd_128(<2 x dou
 ; GENERIC-LABEL: test_vfnmsubpd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsubpd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsubpd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsubpd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsubpd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [11:0.50]
+; KNL-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubpd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; SKX-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsubpd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231pd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231pd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132pd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231pd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmsub132pd $2, $1, $0 \0A\09 vfnmsub213pd $2, $1, $0 \0A\09 vfnmsub231pd $2, $1, $0 \0A\09 vfnmsub132pd $3, $1, $0 \0A\09 vfnmsub213pd $3, $1, $0 \0A\09 vfnmsub231pd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -2471,12 +2471,12 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; GENERIC-LABEL: test_vfnmsubpd_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -2484,12 +2484,12 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; HASWELL-LABEL: test_vfnmsubpd_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -2497,12 +2497,12 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; BROADWELL-LABEL: test_vfnmsubpd_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -2510,12 +2510,12 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; SKYLAKE-LABEL: test_vfnmsubpd_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -2523,24 +2523,24 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; KNL-LABEL: test_vfnmsubpd_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; KNL-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubpd_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; SKX-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2548,12 +2548,12 @@ define void @test_vfnmsubpd_256(<4 x dou
 ; ZNVER1-LABEL: test_vfnmsubpd_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231pd %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231pd (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132pd {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231pd {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2565,84 +2565,84 @@ define void @test_vfnmsubps_128(<4 x flo
 ; GENERIC-LABEL: test_vfnmsubps_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsubps_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; HASWELL-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [11:0.50]
+; HASWELL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [11:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsubps_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsubps_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsubps_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
-; KNL-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [11:0.50]
+; KNL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [11:0.50]
+; KNL-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [11:0.50]
+; KNL-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [11:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubps_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; SKX-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; SKX-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; SKX-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsubps_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231ps (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132ps {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231ps {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmsub132ps $2, $1, $0 \0A\09 vfnmsub213ps $2, $1, $0 \0A\09 vfnmsub231ps $2, $1, $0 \0A\09 vfnmsub132ps $3, $1, $0 \0A\09 vfnmsub213ps $3, $1, $0 \0A\09 vfnmsub231ps $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
@@ -2653,12 +2653,12 @@ define void @test_vfnmsubps_256(<8 x flo
 ; GENERIC-LABEL: test_vfnmsubps_256:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    vzeroupper # sched: [100:0.33]
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
@@ -2666,12 +2666,12 @@ define void @test_vfnmsubps_256(<8 x flo
 ; HASWELL-LABEL: test_vfnmsubps_256:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; HASWELL-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; HASWELL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -2679,12 +2679,12 @@ define void @test_vfnmsubps_256(<8 x flo
 ; BROADWELL-LABEL: test_vfnmsubps_256:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; BROADWELL-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; BROADWELL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    vzeroupper # sched: [4:1.00]
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
@@ -2692,12 +2692,12 @@ define void @test_vfnmsubps_256(<8 x flo
 ; SKYLAKE-LABEL: test_vfnmsubps_256:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
@@ -2705,24 +2705,24 @@ define void @test_vfnmsubps_256(<8 x flo
 ; KNL-LABEL: test_vfnmsubps_256:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; KNL-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; KNL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; KNL-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; KNL-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubps_256:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [11:0.50]
+; SKX-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [11:0.50]
+; SKX-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [11:0.50]
+; SKX-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [11:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    vzeroupper # sched: [4:1.00]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -2730,12 +2730,12 @@ define void @test_vfnmsubps_256(<8 x flo
 ; ZNVER1-LABEL: test_vfnmsubps_256:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231ps (%rdi), %ymm1, %ymm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * ymm2) - ymm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * ymm2) - ymm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132ps {{.*#+}} ymm0 = -(ymm0 * mem) - ymm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231ps {{.*#+}} ymm0 = -(ymm1 * mem) - ymm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
@@ -2747,84 +2747,84 @@ define void @test_vfnmsubsd_128(<2 x dou
 ; GENERIC-LABEL: test_vfnmsubsd_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsubsd_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsubsd_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsubsd_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsubsd_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; KNL-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubsd_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; SKX-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsubsd_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231sd %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231sd (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132sd {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231sd {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmsub132sd $2, $1, $0 \0A\09 vfnmsub213sd $2, $1, $0 \0A\09 vfnmsub231sd $2, $1, $0 \0A\09 vfnmsub132sd $3, $1, $0 \0A\09 vfnmsub213sd $3, $1, $0 \0A\09 vfnmsub231sd $3, $1, $0", "x,x,x,*m"(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> *%a3) nounwind
@@ -2835,84 +2835,84 @@ define void @test_vfnmsubss_128(<4 x flo
 ; GENERIC-LABEL: test_vfnmsubss_128:
 ; GENERIC:       # %bb.0:
 ; GENERIC-NEXT:    #APP
-; GENERIC-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; GENERIC-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; GENERIC-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; GENERIC-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; GENERIC-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; GENERIC-NEXT:    #NO_APP
 ; GENERIC-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vfnmsubss_128:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    #APP
-; HASWELL-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; HASWELL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; HASWELL-NEXT:    #NO_APP
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; BROADWELL-LABEL: test_vfnmsubss_128:
 ; BROADWELL:       # %bb.0:
 ; BROADWELL-NEXT:    #APP
-; BROADWELL-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; BROADWELL-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; BROADWELL-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; BROADWELL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; BROADWELL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; BROADWELL-NEXT:    #NO_APP
 ; BROADWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKYLAKE-LABEL: test_vfnmsubss_128:
 ; SKYLAKE:       # %bb.0:
 ; SKYLAKE-NEXT:    #APP
-; SKYLAKE-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.50]
-; SKYLAKE-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKYLAKE-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.50]
+; SKYLAKE-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; SKYLAKE-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKYLAKE-NEXT:    #NO_APP
 ; SKYLAKE-NEXT:    retq # sched: [7:1.00]
 ;
 ; KNL-LABEL: test_vfnmsubss_128:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    #APP
-; KNL-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [10:0.50]
+; KNL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; KNL-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [10:0.50]
+; KNL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [10:0.50]
+; KNL-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [10:0.50]
 ; KNL-NEXT:    #NO_APP
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: test_vfnmsubss_128:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    #APP
-; SKX-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [9:0.50]
+; SKX-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [4:0.33]
+; SKX-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [9:0.50]
+; SKX-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [9:0.50]
+; SKX-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [9:0.50]
 ; SKX-NEXT:    #NO_APP
 ; SKX-NEXT:    retq # sched: [7:1.00]
 ;
 ; ZNVER1-LABEL: test_vfnmsubss_128:
 ; ZNVER1:       # %bb.0:
 ; ZNVER1-NEXT:    #APP
-; ZNVER1-NEXT:    vfnmsub132ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub213ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub231ss %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; ZNVER1-NEXT:    vfnmsub132ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub213ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vfnmsub231ss (%rdi), %xmm1, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * xmm2) - xmm1 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * xmm2) - xmm0 sched: [5:0.50]
+; ZNVER1-NEXT:    vfnmsub132ss {{.*#+}} xmm0 = -(xmm0 * mem) - xmm1 sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) - mem sched: [12:0.50]
+; ZNVER1-NEXT:    vfnmsub231ss {{.*#+}} xmm0 = -(xmm1 * mem) - xmm0 sched: [12:0.50]
 ; ZNVER1-NEXT:    #NO_APP
 ; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void asm "vfnmsub132ss $2, $1, $0 \0A\09 vfnmsub213ss $2, $1, $0 \0A\09 vfnmsub231ss $2, $1, $0 \0A\09 vfnmsub132ss $3, $1, $0 \0A\09 vfnmsub213ss $3, $1, $0 \0A\09 vfnmsub231ss $3, $1, $0", "x,x,x,*m"(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> *%a3) nounwind
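
A quick note on the three operand orders, since the regenerated CHECK lines above lean on them: for the AT&T-syntax form "vfnmsub<order>sd %xmm2, %xmm1, %xmm0", the 132/213/231 suffix only changes which operand feeds the multiply and which one is subtracted, exactly as the new end-of-line comments spell out. The C sketch below is illustration only and not part of the patch; the parameter names simply mirror the register names, and plain C arithmetic rounds the multiply and the subtract separately whereas the real instruction fuses them, so this shows operand order rather than bit-exact semantics.

    /* Illustration only: the operation each new comment describes for
       "vfnmsub<order>sd %xmm2, %xmm1, %xmm0" (parameter names mirror the
       registers; C rounds twice, the instruction rounds once). */
    static double vfnmsub132sd(double xmm0, double xmm1, double xmm2) {
      return -(xmm0 * xmm2) - xmm1;  /* xmm0 = -(xmm0 * xmm2) - xmm1 */
    }
    static double vfnmsub213sd(double xmm0, double xmm1, double xmm2) {
      return -(xmm1 * xmm0) - xmm2;  /* xmm0 = -(xmm1 * xmm0) - xmm2 */
    }
    static double vfnmsub231sd(double xmm0, double xmm1, double xmm2) {
      return -(xmm1 * xmm2) - xmm0;  /* xmm0 = -(xmm1 * xmm2) - xmm0 */
    }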

Modified: llvm/trunk/test/CodeGen/X86/fma.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fma.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fma.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fma.ll Sat Mar 10 13:30:46 2018
@@ -17,6 +17,7 @@ define float @test_f32(float %a, float %
 ; FMA32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x0c]
 ; FMA32-NEXT:    ## xmm1 = mem[0],zero,zero,zero
 ; FMA32-NEXT:    vfmadd213ss {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0x79,0xa9,0x4c,0x24,0x10]
+; FMA32-NEXT:    ## xmm1 = (xmm0 * xmm1) + mem
 ; FMA32-NEXT:    vmovss %xmm1, (%esp) ## encoding: [0xc5,0xfa,0x11,0x0c,0x24]
 ; FMA32-NEXT:    flds (%esp) ## encoding: [0xd9,0x04,0x24]
 ; FMA32-NEXT:    popl %eax ## encoding: [0x58]
@@ -31,6 +32,7 @@ define float @test_f32(float %a, float %
 ; FMA64-LABEL: test_f32:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; FMA64-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f32:
@@ -42,11 +44,13 @@ define float @test_f32(float %a, float %
 ; AVX512-LABEL: test_f32:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; AVX512-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f32:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa9,0xc2]
+; AVX512VL-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call float @llvm.fma.f32(float %a, float %b, float %c)
@@ -62,6 +66,7 @@ define double @test_f64(double %a, doubl
 ; FMA32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x18]
 ; FMA32-NEXT:    ## xmm1 = mem[0],zero
 ; FMA32-NEXT:    vfmadd213sd {{[0-9]+}}(%esp), %xmm0, %xmm1 ## encoding: [0xc4,0xe2,0xf9,0xa9,0x4c,0x24,0x20]
+; FMA32-NEXT:    ## xmm1 = (xmm0 * xmm1) + mem
 ; FMA32-NEXT:    vmovsd %xmm1, (%esp) ## encoding: [0xc5,0xfb,0x11,0x0c,0x24]
 ; FMA32-NEXT:    fldl (%esp) ## encoding: [0xdd,0x04,0x24]
 ; FMA32-NEXT:    addl $12, %esp ## encoding: [0x83,0xc4,0x0c]
@@ -76,6 +81,7 @@ define double @test_f64(double %a, doubl
 ; FMA64-LABEL: test_f64:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; FMA64-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
 ; FMACALL64-LABEL: test_f64:
@@ -87,11 +93,13 @@ define double @test_f64(double %a, doubl
 ; AVX512-LABEL: test_f64:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; AVX512-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_f64:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213sd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa9,0xc2]
+; AVX512VL-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call double @llvm.fma.f64(double %a, double %b, double %c)
@@ -236,21 +244,168 @@ define <4 x float> @test_v4f32(<4 x floa
 ; FMA32-LABEL: test_v4f32:
 ; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; FMA32-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v4f32:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    subl $108, %esp ## encoding: [0x83,0xec,0x6c]
+; FMACALL32-NEXT:    vmovaps %xmm2, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x54,0x24,0x40]
+; FMACALL32-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x4c,0x24,0x30]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vextractps $2, %xmm2, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x54,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vextractps $2, %xmm1, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x4c,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x60]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x54]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x1c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x54]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x18]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x14]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x10]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x1c]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x18,0x10]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x14,0x20]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x10,0x30]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; FMACALL32-NEXT:    addl $108, %esp ## encoding: [0x83,0xc4,0x6c]
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v4f32:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; FMA64-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v4f32:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $88, %rsp ## encoding: [0x48,0x83,0xec,0x58]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x30]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    shufps $231, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1,2,3]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x30]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x04,0x24]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x30]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x30]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x0c,0x24]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
+; FMACALL64-NEXT:    addq $88, %rsp ## encoding: [0x48,0x83,0xc4,0x58]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v4f32:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; AVX512-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v4f32:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2]
+; AVX512VL-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c)
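
The FMACALL32/FMACALL64 blocks added above are the pre-existing libcall lowering that the regenerated test now prints in full: on the run lines without FMA instructions, each lane of the <4 x float> fma is extracted, passed to fmaf, and the results are packed back together. A minimal C sketch of what that per-lane expansion computes, illustration only; the struct and function names here are made up for the example.

    #include <math.h>

    typedef struct { float v[4]; } v4f32;   /* stand-in for <4 x float> */

    /* One fmaf libcall per lane, matching the four "calll _fmaf" sites above. */
    static v4f32 fma_v4f32_libcall(v4f32 a, v4f32 b, v4f32 c) {
      v4f32 r;
      for (int i = 0; i < 4; ++i)
        r.v[i] = fmaf(a.v[i], b.v[i], c.v[i]);
      return r;
    }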
@@ -261,21 +416,334 @@ define <8 x float> @test_v8f32(<8 x floa
 ; FMA32-LABEL: test_v8f32:
 ; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; FMA32-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v8f32:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    subl $316, %esp ## encoding: [0x81,0xec,0x3c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## imm = 0x13C
+; FMACALL32-NEXT:    vmovups %ymm2, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x94,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovups %ymm1, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x8c,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups %ymm0, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm2, %xmm3 ## encoding: [0xc4,0xe3,0x7d,0x19,0xd3,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm3, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x5c,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $2, %xmm3, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x5c,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm1, %xmm2 ## encoding: [0xc4,0xe3,0x7d,0x19,0xca,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm2, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x54,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $2, %xmm2, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x54,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm1 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc1,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x4c,0x24,0x40]
+; FMACALL32-NEXT:    vextractps $2, %xmm1, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x0c,0x24,0x02]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xb4,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xa8,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x9c,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x90,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x84,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x78]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x40]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x3c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x78]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x38]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x84,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x34]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x90,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x30]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x9c,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x2c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xa8,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x28]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xb4,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x24]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x20]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x3c]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x38,0x10]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x34,0x20]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x30,0x30]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x2c]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x28,0x10]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x24,0x20]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x20,0x30]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1,2],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; FMACALL32-NEXT:    addl $316, %esp ## encoding: [0x81,0xc4,0x3c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## imm = 0x13C
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v8f32:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; FMA64-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v8f32:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $136, %rsp ## encoding: [0x48,0x81,0xec,0x88,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps %xmm5, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x6c,0x24,0x50]
+; FMACALL64-NEXT:    movaps %xmm4, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x24,0x24]
+; FMACALL64-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x5c,0x24,0x40]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x60]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps %xmm2, %xmm1 ## encoding: [0x0f,0x28,0xca]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm1 ## encoding: [0x0f,0xc6,0xca,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1],xmm2[2,3]
+; FMACALL64-NEXT:    movaps %xmm4, %xmm2 ## encoding: [0x0f,0x28,0xd4]
+; FMACALL64-NEXT:    shufps $231, %xmm4, %xmm2 ## encoding: [0x0f,0xc6,0xd4,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1],xmm4[2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x44,0x24,0x20]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x70]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x10]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x4c,0x24,0x70]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    shufps $231, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x50]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x50]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x04,0x24]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x50]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x50]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x0c,0x24]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    addq $136, %rsp ## encoding: [0x48,0x81,0xc4,0x88,0x00,0x00,0x00]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v8f32:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; AVX512-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v8f32:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2]
+; AVX512VL-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c)
@@ -290,25 +758,626 @@ define <16 x float> @test_v16f32(<16 x f
 ; FMA32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
 ; FMA32-NEXT:    subl $32, %esp ## encoding: [0x83,0xec,0x20]
 ; FMA32-NEXT:    vfmadd213ps 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0x45,0x08]
+; FMA32-NEXT:    ## ymm0 = (ymm2 * ymm0) + mem
 ; FMA32-NEXT:    vfmadd213ps 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0x4d,0x28]
+; FMA32-NEXT:    ## ymm1 = (ymm3 * ymm1) + mem
 ; FMA32-NEXT:    movl %ebp, %esp ## encoding: [0x89,0xec]
 ; FMA32-NEXT:    popl %ebp ## encoding: [0x5d]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v16f32:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    pushl %ebp ## encoding: [0x55]
+; FMACALL32-NEXT:    movl %esp, %ebp ## encoding: [0x89,0xe5]
+; FMACALL32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
+; FMACALL32-NEXT:    subl $448, %esp ## encoding: [0x81,0xec,0xc0,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## imm = 0x1C0
+; FMACALL32-NEXT:    vmovaps %ymm3, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x5c,0x24,0x60]
+; FMACALL32-NEXT:    vmovaps %ymm2, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x94,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm3, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xd8,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm1, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc8,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x54,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x48,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x3c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x30,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x24,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x18,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x0c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xf4,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xe8,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x60]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x02]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $2, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x02]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x50]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x01]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $1, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x01]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xdc,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x08]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xfa,0x11,0x44,0x24,0x04]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovss %xmm0, (%esp) ## encoding: [0xc5,0xfa,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x08,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x44,0x24,0x04,0x03]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractps $3, %xmm0, (%esp) ## encoding: [0xc4,0xe3,0x79,0x17,0x04,0x24,0x03]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x2c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xdc,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x28]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x50]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x24]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x60]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x20]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x1c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xe8,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x18]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xf4,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x14]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x10]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x0c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x4c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x18,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x48]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x24,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x44]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x30,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x40]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x3c,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x3c]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x48,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x38]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x54,0x01,0x00,0x00]
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x34]
+; FMACALL32-NEXT:    calll _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstps {{[0-9]+}}(%esp) ## encoding: [0xd9,0x5c,0x24,0x30]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfa,0x10,0x44,0x24,0x2c]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x28,0x10]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x24,0x20]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0x44,0x24,0x20,0x30]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0,1,2],mem[0]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x1c]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x18,0x10]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x14,0x20]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x10,0x30]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1,2],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x4c]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x48,0x10]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x44,0x20]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc4,0xe3,0x71,0x21,0x4c,0x24,0x40,0x30]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0,1,2],mem[0]
+; FMACALL32-NEXT:    vmovss {{[0-9]+}}(%esp), %xmm2 ## encoding: [0xc5,0xfa,0x10,0x54,0x24,0x3c]
+; FMACALL32-NEXT:    ## xmm2 = mem[0],zero,zero,zero
+; FMACALL32-NEXT:    vinsertps $16, {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc4,0xe3,0x69,0x21,0x54,0x24,0x38,0x10]
+; FMACALL32-NEXT:    ## xmm2 = xmm2[0],mem[0],xmm2[2,3]
+; FMACALL32-NEXT:    vinsertps $32, {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc4,0xe3,0x69,0x21,0x54,0x24,0x34,0x20]
+; FMACALL32-NEXT:    ## xmm2 = xmm2[0,1],mem[0],xmm2[3]
+; FMACALL32-NEXT:    vinsertps $48, {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc4,0xe3,0x69,0x21,0x54,0x24,0x30,0x30]
+; FMACALL32-NEXT:    ## xmm2 = xmm2[0,1,2],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1 ## encoding: [0xc4,0xe3,0x75,0x18,0xca,0x01]
+; FMACALL32-NEXT:    movl %ebp, %esp ## encoding: [0x89,0xec]
+; FMACALL32-NEXT:    popl %ebp ## encoding: [0x5d]
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v16f32:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213ps %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0x6d,0xa8,0xc4]
+; FMA64-NEXT:    ## ymm0 = (ymm2 * ymm0) + ymm4
 ; FMA64-NEXT:    vfmadd213ps %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0x65,0xa8,0xcd]
+; FMA64-NEXT:    ## ymm1 = (ymm3 * ymm1) + ymm5
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v16f32:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $168, %rsp ## encoding: [0x48,0x81,0xec,0xa8,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0xbc,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x74,0x24,0x20]
+; FMACALL64-NEXT:    movaps %xmm5, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x2c,0x24]
+; FMACALL64-NEXT:    movaps %xmm4, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x64,0x24,0x10]
+; FMACALL64-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x5c,0x24,0x70]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x40]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps %xmm4, %xmm1 ## encoding: [0x0f,0x28,0xcc]
+; FMACALL64-NEXT:    shufps $231, %xmm4, %xmm1 ## encoding: [0x0f,0xc6,0xcc,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1],xmm4[2,3]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x60]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x44,0x24,0x60]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x84,0x24,0x90,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x60]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x8c,0x24,0x90,0x00,0x00,0x00]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    shufps $231, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x44,0x24,0x10]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x4c,0x24,0x50]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    shufps $231, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x04,0x24]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x0c,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x70]
+; FMACALL64-NEXT:    shufps $231, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe7]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $231, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe7]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[3,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $231, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe7]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[3,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x70]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    unpcklps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x14,0x44,0x24,0x20]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x70]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x70]
+; FMACALL64-NEXT:    shufps $229, %xmm0, %xmm0 ## encoding: [0x0f,0xc6,0xc0,0xe5]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $229, %xmm1, %xmm1 ## encoding: [0x0f,0xc6,0xc9,0xe5]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1,2,3]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    shufps $229, %xmm2, %xmm2 ## encoding: [0x0f,0xc6,0xd2,0xe5]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1,2,3]
+; FMACALL64-NEXT:    callq _fmaf ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fmaf-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    unpcklps %xmm0, %xmm1 ## encoding: [0x0f,0x14,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FMACALL64-NEXT:    unpcklpd {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Folded Reload
+; FMACALL64-NEXT:    ## encoding: [0x66,0x0f,0x14,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL64-NEXT:    movaps %xmm1, %xmm3 ## encoding: [0x0f,0x28,0xd9]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x60]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    addq $168, %rsp ## encoding: [0x48,0x81,0xc4,0xa8,0x00,0x00,0x00]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v16f32:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
+; AVX512-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v16f32:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213ps %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0x75,0x48,0xa8,0xc2]
+; AVX512VL-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <16 x float> @llvm.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c)
@@ -319,21 +1388,95 @@ define <2 x double> @test_v2f64(<2 x dou
 ; FMA32-LABEL: test_v2f64:
 ; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; FMA32-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v2f64:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    subl $108, %esp ## encoding: [0x83,0xec,0x6c]
+; FMACALL32-NEXT:    vmovaps %xmm2, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x54,0x24,0x30]
+; FMACALL32-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x4c,0x24,0x40]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vmovlps %xmm2, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x54,0x24,0x10]
+; FMACALL32-NEXT:    vmovlhps %xmm1, %xmm0, %xmm2 ## encoding: [0xc5,0xf8,0x16,0xd1]
+; FMACALL32-NEXT:    ## xmm2 = xmm0[0],xmm1[0]
+; FMACALL32-NEXT:    vmovups %xmm2, (%esp) ## encoding: [0xc5,0xf8,0x11,0x14,0x24]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovapd {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x28,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x50]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x44,0x24,0x40]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x28]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x20]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x28]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x16,0x44,0x24,0x20]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    addl $108, %esp ## encoding: [0x83,0xc4,0x6c]
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v2f64:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; FMA64-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v2f64:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $72, %rsp ## encoding: [0x48,0x83,0xec,0x48]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x20]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x04,0x24]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x20]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps %xmm1, %xmm0 ## encoding: [0x0f,0x28,0xc1]
+; FMACALL64-NEXT:    addq $72, %rsp ## encoding: [0x48,0x83,0xc4,0x48]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v2f64:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; AVX512-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v2f64:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %xmm2, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2]
+; AVX512VL-NEXT:    ## xmm0 = (xmm1 * xmm0) + xmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c)
@@ -344,21 +1487,183 @@ define <4 x double> @test_v4f64(<4 x dou
 ; FMA32-LABEL: test_v4f64:
 ; FMA32:       ## %bb.0: ## %entry
 ; FMA32-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; FMA32-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v4f64:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    subl $252, %esp ## encoding: [0x81,0xec,0xfc,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups %ymm2, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups %ymm1, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x8c,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovups %ymm0, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x11,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm2, %xmm3 ## encoding: [0xc4,0xe3,0x7d,0x19,0xd3,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm3, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x5c,0x24,0x70]
+; FMACALL32-NEXT:    vmovlps %xmm3, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x5c,0x24,0x10]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm1, %xmm2 ## encoding: [0xc4,0xe3,0x7d,0x19,0xca,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm2, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x54,0x24,0x50]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm1 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc1,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x4c,0x24,0x60]
+; FMACALL32-NEXT:    vmovlhps %xmm2, %xmm1, %xmm0 ## encoding: [0xc5,0xf0,0x16,0xc2]
+; FMACALL32-NEXT:    ## xmm0 = xmm1[0],xmm2[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x44]
+; FMACALL32-NEXT:    vmovupd {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfd,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x38]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovups {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x10,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpcklpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x14,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovapd {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x28,0x44,0x24,0x70]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x60]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x44,0x24,0x50]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x30]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x38]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x28]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x44]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x20]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x18]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x30]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x16,0x44,0x24,0x28]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x20]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x4c,0x24,0x18]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; FMACALL32-NEXT:    addl $252, %esp ## encoding: [0x81,0xc4,0xfc,0x00,0x00,0x00]
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v4f64:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; FMA64-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v4f64:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $120, %rsp ## encoding: [0x48,0x83,0xec,0x78]
+; FMACALL64-NEXT:    movaps %xmm5, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x6c,0x24,0x40]
+; FMACALL64-NEXT:    movaps %xmm4, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x24,0x24]
+; FMACALL64-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x5c,0x24,0x30]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x60]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movaps %xmm2, %xmm1 ## encoding: [0x0f,0x28,0xca]
+; FMACALL64-NEXT:    movaps %xmm4, %xmm2 ## encoding: [0x0f,0x28,0xd4]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x60]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x40]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x54,0x24,0x40]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x10]
+; FMACALL64-NEXT:    addq $120, %rsp ## encoding: [0x48,0x83,0xc4,0x78]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v4f64:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; AVX512-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v4f64:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %ymm2, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2]
+; AVX512VL-NEXT:    ## ymm0 = (ymm1 * ymm0) + ymm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c)
@@ -373,25 +1678,339 @@ define <8 x double> @test_v8f64(<8 x dou
 ; FMA32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
 ; FMA32-NEXT:    subl $32, %esp ## encoding: [0x83,0xec,0x20]
 ; FMA32-NEXT:    vfmadd213pd 8(%ebp), %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0x45,0x08]
+; FMA32-NEXT:    ## ymm0 = (ymm2 * ymm0) + mem
 ; FMA32-NEXT:    vfmadd213pd 40(%ebp), %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0x4d,0x28]
+; FMA32-NEXT:    ## ymm1 = (ymm3 * ymm1) + mem
 ; FMA32-NEXT:    movl %ebp, %esp ## encoding: [0x89,0xec]
 ; FMA32-NEXT:    popl %ebp ## encoding: [0x5d]
 ; FMA32-NEXT:    retl ## encoding: [0xc3]
 ;
+; FMACALL32-LABEL: test_v8f64:
+; FMACALL32:       ## %bb.0: ## %entry
+; FMACALL32-NEXT:    pushl %ebp ## encoding: [0x55]
+; FMACALL32-NEXT:    movl %esp, %ebp ## encoding: [0x89,0xe5]
+; FMACALL32-NEXT:    andl $-32, %esp ## encoding: [0x83,0xe4,0xe0]
+; FMACALL32-NEXT:    subl $384, %esp ## encoding: [0x81,0xec,0x80,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## imm = 0x180
+; FMACALL32-NEXT:    vmovaps %ymm3, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x9c,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps %ymm2, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x94,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x8c,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%esp) ## 32-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x29,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm3, %xmm2 ## encoding: [0xc4,0xe3,0x7d,0x19,0xda,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm2, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x94,0x24,0x40,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm1, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc8,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x84,0x24,0x50,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovlhps %xmm2, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x16,0xc2]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],xmm2[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x94,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovapd 40(%ebp), %ymm0 ## encoding: [0xc5,0xfd,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0x88,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 40(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x28]
+; FMACALL32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpcklpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x14,0x84,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovapd %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x29,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm1 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc1,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm1, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x8c,0x24,0x30,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vextractf128 $1, %ymm0, %xmm0 ## encoding: [0xc4,0xe3,0x7d,0x19,0xc0,0x01]
+; FMACALL32-NEXT:    vmovaps %xmm0, {{[0-9]+}}(%esp) ## 16-byte Spill
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x29,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vunpckhpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x15,0xc1]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],xmm1[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xbc,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x30]
+; FMACALL32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x44,0x24,0x20]
+; FMACALL32-NEXT:    vunpcklpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x14,0x84,0x24,0x30,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x30]
+; FMACALL32-NEXT:    vmovapd 8(%ebp), %ymm0 ## encoding: [0xc5,0xfd,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpt {{[0-9]+}}(%esp) ## 10-byte Folded Spill
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x7c,0x24,0x20]
+; FMACALL32-NEXT:    vmovaps 8(%ebp), %ymm0 ## encoding: [0xc5,0xfc,0x28,0x45,0x08]
+; FMACALL32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf8,0x13,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %ymm0 ## 32-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xfc,0x28,0x84,0x24,0xe0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    vunpcklpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x14,0x84,0x24,0x00,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    vmovapd {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x28,0x84,0x24,0x60,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vmovhpd %xmm0, {{[0-9]+}}(%esp) ## encoding: [0xc5,0xf9,0x17,0x44,0x24,0x10]
+; FMACALL32-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0 ## 16-byte Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf8,0x28,0x84,0x24,0x50,0x01,0x00,0x00]
+; FMACALL32-NEXT:    vunpckhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xc5,0xf9,0x15,0x84,0x24,0x40,0x01,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[1],mem[1]
+; FMACALL32-NEXT:    vmovups %xmm0, (%esp) ## encoding: [0xc5,0xf8,0x11,0x04,0x24]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x60]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x20]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x58]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0x6c,0x24,0x30]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x50]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x48]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x9c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x88,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x78]
+; FMACALL32-NEXT:    fldt {{[0-9]+}}(%esp) ## 10-byte Folded Reload
+; FMACALL32-NEXT:    ## encoding: [0xdb,0xac,0x24,0x94,0x00,0x00,0x00]
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x70]
+; FMACALL32-NEXT:    calll _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL32-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: FK_PCRel_4
+; FMACALL32-NEXT:    fstpl {{[0-9]+}}(%esp) ## encoding: [0xdd,0x5c,0x24,0x68]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xc5,0xfb,0x10,0x44,0x24,0x60]
+; FMACALL32-NEXT:    ## xmm0 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x16,0x44,0x24,0x58]
+; FMACALL32-NEXT:    ## xmm0 = xmm0[0],mem[0]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x50]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x4c,0x24,0x48]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 ## encoding: [0xc4,0xe3,0x7d,0x18,0xc1,0x01]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm1 ## encoding: [0xc5,0xfb,0x10,0x8c,0x24,0x80,0x00,0x00,0x00]
+; FMACALL32-NEXT:    ## xmm1 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x4c,0x24,0x78]
+; FMACALL32-NEXT:    ## xmm1 = xmm1[0],mem[0]
+; FMACALL32-NEXT:    vmovsd {{[0-9]+}}(%esp), %xmm2 ## encoding: [0xc5,0xfb,0x10,0x54,0x24,0x70]
+; FMACALL32-NEXT:    ## xmm2 = mem[0],zero
+; FMACALL32-NEXT:    vmovhpd {{[0-9]+}}(%esp), %xmm2, %xmm2 ## encoding: [0xc5,0xe9,0x16,0x54,0x24,0x68]
+; FMACALL32-NEXT:    ## xmm2 = xmm2[0],mem[0]
+; FMACALL32-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1 ## encoding: [0xc4,0xe3,0x75,0x18,0xca,0x01]
+; FMACALL32-NEXT:    movl %ebp, %esp ## encoding: [0x89,0xec]
+; FMACALL32-NEXT:    popl %ebp ## encoding: [0x5d]
+; FMACALL32-NEXT:    retl ## encoding: [0xc3]
+;
 ; FMA64-LABEL: test_v8f64:
 ; FMA64:       ## %bb.0: ## %entry
 ; FMA64-NEXT:    vfmadd213pd %ymm4, %ymm2, %ymm0 ## encoding: [0xc4,0xe2,0xed,0xa8,0xc4]
+; FMA64-NEXT:    ## ymm0 = (ymm2 * ymm0) + ymm4
 ; FMA64-NEXT:    vfmadd213pd %ymm5, %ymm3, %ymm1 ## encoding: [0xc4,0xe2,0xe5,0xa8,0xcd]
+; FMA64-NEXT:    ## ymm1 = (ymm3 * ymm1) + ymm5
 ; FMA64-NEXT:    retq ## encoding: [0xc3]
 ;
+; FMACALL64-LABEL: test_v8f64:
+; FMACALL64:       ## %bb.0: ## %entry
+; FMACALL64-NEXT:    subq $152, %rsp ## encoding: [0x48,0x81,0xec,0x98,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x7c,0x24,0x70]
+; FMACALL64-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x74,0x24,0x20]
+; FMACALL64-NEXT:    movaps %xmm5, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x2c,0x24]
+; FMACALL64-NEXT:    movaps %xmm4, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x64,0x24,0x10]
+; FMACALL64-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x5c,0x24,0x60]
+; FMACALL64-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x54,0x24,0x50]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x40]
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movaps %xmm4, %xmm1 ## encoding: [0x0f,0x28,0xcc]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x84,0x24,0x80,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xa0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x40]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xb0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x04,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x50]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x20]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xc0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x0c,0x24]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm1 ## encoding: [0x0f,0x16,0xc8]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[0],xmm0[0]
+; FMACALL64-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x0c,0x24]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x60]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x70]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x29,0x44,0x24,0x20]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x60]
+; FMACALL64-NEXT:    movhlps %xmm0, %xmm0 ## encoding: [0x0f,0x12,0xc0]
+; FMACALL64-NEXT:    ## xmm0 = xmm0[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x70]
+; FMACALL64-NEXT:    movhlps %xmm1, %xmm1 ## encoding: [0x0f,0x12,0xc9]
+; FMACALL64-NEXT:    ## xmm1 = xmm1[1,1]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm2 ## encoding: [0x0f,0x28,0x94,0x24,0xd0,0x00,0x00,0x00]
+; FMACALL64-NEXT:    movhlps %xmm2, %xmm2 ## encoding: [0x0f,0x12,0xd2]
+; FMACALL64-NEXT:    ## xmm2 = xmm2[1,1]
+; FMACALL64-NEXT:    callq _fma ## encoding: [0xe8,A,A,A,A]
+; FMACALL64-NEXT:    ## fixup A - offset: 1, value: _fma-4, kind: reloc_branch_4byte_pcrel
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm3 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x5c,0x24,0x20]
+; FMACALL64-NEXT:    movlhps %xmm0, %xmm3 ## encoding: [0x0f,0x16,0xd8]
+; FMACALL64-NEXT:    ## xmm3 = xmm3[0],xmm0[0]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x44,0x24,0x30]
+; FMACALL64-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm1 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x4c,0x24,0x10]
+; FMACALL64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
+; FMACALL64-NEXT:    ## encoding: [0x0f,0x28,0x14,0x24]
+; FMACALL64-NEXT:    addq $152, %rsp ## encoding: [0x48,0x81,0xc4,0x98,0x00,0x00,0x00]
+; FMACALL64-NEXT:    retq ## encoding: [0xc3]
+;
 ; AVX512-LABEL: test_v8f64:
 ; AVX512:       ## %bb.0: ## %entry
 ; AVX512-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+; AVX512-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
 ; AVX512-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512VL-LABEL: test_v8f64:
 ; AVX512VL:       ## %bb.0: ## %entry
 ; AVX512VL-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 ## encoding: [0x62,0xf2,0xf5,0x48,0xa8,0xc2]
+; AVX512VL-NEXT:    ## zmm0 = (zmm1 * zmm0) + zmm2
 ; AVX512VL-NEXT:    retq ## encoding: [0xc3]
 entry:
   %call = call <8 x double> @llvm.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c)

Modified: llvm/trunk/test/CodeGen/X86/recip-fastmath.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/recip-fastmath.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/recip-fastmath.ll (original)
+++ llvm/trunk/test/CodeGen/X86/recip-fastmath.ll Sat Mar 10 13:30:46 2018
@@ -101,8 +101,8 @@ define float @f32_one_step(float %x) #1
 ; FMA-RECIP-LABEL: f32_one_step:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: f32_one_step:
@@ -128,8 +128,8 @@ define float @f32_one_step(float %x) #1
 ; HASWELL-LABEL: f32_one_step:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: f32_one_step:
@@ -145,15 +145,15 @@ define float @f32_one_step(float %x) #1
 ; KNL-LABEL: f32_one_step:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: f32_one_step:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast float 1.0, %x
   ret float %div
@@ -196,10 +196,10 @@ define float @f32_two_step(float %x) #2
 ; FMA-RECIP-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; FMA-RECIP-NEXT:    vmovaps %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: f32_two_step:
@@ -235,10 +235,10 @@ define float @f32_two_step(float %x) #2
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; HASWELL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: f32_two_step:
@@ -260,10 +260,10 @@ define float @f32_two_step(float %x) #2
 ; KNL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; KNL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: f32_two_step:
@@ -271,10 +271,10 @@ define float @f32_two_step(float %x) #2
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
 ; SKX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast float 1.0, %x
   ret float %div
@@ -364,8 +364,8 @@ define <4 x float> @v4f32_one_step(<4 x
 ; FMA-RECIP-LABEL: v4f32_one_step:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: v4f32_one_step:
@@ -392,8 +392,8 @@ define <4 x float> @v4f32_one_step(<4 x
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: v4f32_one_step:
@@ -410,15 +410,15 @@ define <4 x float> @v4f32_one_step(<4 x
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v4f32_one_step:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
@@ -461,10 +461,10 @@ define <4 x float> @v4f32_two_step(<4 x
 ; FMA-RECIP-NEXT:    vrcpps %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-RECIP-NEXT:    vmovaps %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: v4f32_two_step:
@@ -500,10 +500,10 @@ define <4 x float> @v4f32_two_step(<4 x
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; HASWELL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: v4f32_two_step:
@@ -525,10 +525,10 @@ define <4 x float> @v4f32_two_step(<4 x
 ; KNL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; KNL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v4f32_two_step:
@@ -536,10 +536,10 @@ define <4 x float> @v4f32_two_step(<4 x
 ; SKX-NEXT:    vrcpps %xmm0, %xmm1 # sched: [4:1.00]
 ; SKX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; SKX-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
@@ -639,8 +639,8 @@ define <8 x float> @v8f32_one_step(<8 x
 ; FMA-RECIP-LABEL: v8f32_one_step:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %ymm0, %ymm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: v8f32_one_step:
@@ -667,8 +667,8 @@ define <8 x float> @v8f32_one_step(<8 x
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: v8f32_one_step:
@@ -685,15 +685,15 @@ define <8 x float> @v8f32_one_step(<8 x
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v8f32_one_step:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %ymm0, %ymm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
@@ -749,10 +749,10 @@ define <8 x float> @v8f32_two_step(<8 x
 ; FMA-RECIP-NEXT:    vrcpps %ymm0, %ymm1
 ; FMA-RECIP-NEXT:    vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-RECIP-NEXT:    vmovaps %ymm1, %ymm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3
 ; FMA-RECIP-NEXT:    retq
 ;
 ; BTVER2-LABEL: v8f32_two_step:
@@ -788,10 +788,10 @@ define <8 x float> @v8f32_two_step(<8 x
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; HASWELL-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
 ; HASWELL-NO-FMA-LABEL: v8f32_two_step:
@@ -813,10 +813,10 @@ define <8 x float> @v8f32_two_step(<8 x
 ; KNL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; KNL-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v8f32_two_step:
@@ -824,10 +824,10 @@ define <8 x float> @v8f32_two_step(<8 x
 ; SKX-NEXT:    vrcpps %ymm0, %ymm1 # sched: [4:1.00]
 ; SKX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; SKX-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div

Modified: llvm/trunk/test/CodeGen/X86/recip-fastmath2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/recip-fastmath2.ll?rev=327225&r1=327224&r2=327225&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/recip-fastmath2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/recip-fastmath2.ll Sat Mar 10 13:30:46 2018
@@ -96,8 +96,8 @@ define float @f32_one_step_2(float %x) #
 ; FMA-RECIP-LABEL: f32_one_step_2:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -126,8 +126,8 @@ define float @f32_one_step_2(float %x) #
 ; HASWELL-LABEL: f32_one_step_2:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -145,16 +145,16 @@ define float @f32_one_step_2(float %x) #
 ; KNL-LABEL: f32_one_step_2:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: f32_one_step_2:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast float 3456.0, %x
@@ -190,8 +190,8 @@ define float @f32_one_step_2_divs(float
 ; FMA-RECIP-LABEL: f32_one_step_2_divs:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; FMA-RECIP-NEXT:    retq
@@ -223,8 +223,8 @@ define float @f32_one_step_2_divs(float
 ; HASWELL-LABEL: f32_one_step_2_divs:
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; HASWELL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [10:0.50]
 ; HASWELL-NEXT:    vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -244,8 +244,8 @@ define float @f32_one_step_2_divs(float
 ; KNL-LABEL: f32_one_step_2_divs:
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
-; KNL-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [10:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [10:0.50]
 ; KNL-NEXT:    vmulss %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
@@ -253,8 +253,8 @@ define float @f32_one_step_2_divs(float
 ; SKX-LABEL: f32_one_step_2_divs:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ss {{.*}}(%rip), %xmm1, %xmm0 # sched: [9:0.50]
-; SKX-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [9:0.50]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm1 # sched: [9:0.50]
 ; SKX-NEXT:    vmulss %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -302,10 +302,10 @@ define float @f32_two_step_2(float %x) #
 ; FMA-RECIP-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; FMA-RECIP-NEXT:    vmovaps %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
+; FMA-RECIP-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
 ; FMA-RECIP-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -344,10 +344,10 @@ define float @f32_two_step_2(float %x) #
 ; HASWELL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; HASWELL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -371,10 +371,10 @@ define float @f32_two_step_2(float %x) #
 ; KNL-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; KNL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; KNL-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -383,10 +383,10 @@ define float @f32_two_step_2(float %x) #
 ; SKX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1 # sched: [4:1.00]
 ; SKX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [5:0.50]
 ; SKX-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm0, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ss %xmm1, %xmm1, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ss %xmm2, %xmm3, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ss %xmm3, %xmm3, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [4:0.33]
 ; SKX-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0 # sched: [9:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast float 6789.0, %x
@@ -420,8 +420,8 @@ define <4 x float> @v4f32_one_step2(<4 x
 ; FMA-RECIP-LABEL: v4f32_one_step2:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -451,8 +451,8 @@ define <4 x float> @v4f32_one_step2(<4 x
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -471,16 +471,16 @@ define <4 x float> @v4f32_one_step2(<4 x
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v4f32_one_step2:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
@@ -516,8 +516,8 @@ define <4 x float> @v4f32_one_step_2_div
 ; FMA-RECIP-LABEL: v4f32_one_step_2_divs:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %xmm0, %xmm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %xmm1, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; FMA-RECIP-NEXT:    retq
@@ -550,8 +550,8 @@ define <4 x float> @v4f32_one_step_2_div
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [11:0.50]
 ; HASWELL-NEXT:    vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -572,8 +572,8 @@ define <4 x float> @v4f32_one_step_2_div
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [11:0.50]
 ; KNL-NEXT:    vmulps %xmm0, %xmm1, %xmm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
@@ -581,8 +581,8 @@ define <4 x float> @v4f32_one_step_2_div
 ; SKX-LABEL: v4f32_one_step_2_divs:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %xmm0, %xmm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to4}, %xmm1, %xmm0 # sched: [10:0.50]
-; SKX-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem sched: [10:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm1 # sched: [10:0.50]
 ; SKX-NEXT:    vmulps %xmm0, %xmm1, %xmm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -630,10 +630,10 @@ define <4 x float> @v4f32_two_step2(<4 x
 ; FMA-RECIP-NEXT:    vrcpps %xmm0, %xmm1
 ; FMA-RECIP-NEXT:    vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-RECIP-NEXT:    vmovaps %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0
-; FMA-RECIP-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -672,10 +672,10 @@ define <4 x float> @v4f32_two_step2(<4 x
 ; HASWELL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; HASWELL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -699,10 +699,10 @@ define <4 x float> @v4f32_two_step2(<4 x
 ; KNL-NEXT:    vrcpps %xmm0, %xmm1 # sched: [5:1.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; KNL-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [11:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -711,10 +711,10 @@ define <4 x float> @v4f32_two_step2(<4 x
 ; SKX-NEXT:    vrcpps %xmm0, %xmm1 # sched: [4:1.00]
 ; SKX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1] sched: [6:0.50]
 ; SKX-NEXT:    vmovaps %xmm1, %xmm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm0, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %xmm1, %xmm1, %xmm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %xmm2, %xmm3, %xmm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %xmm3, %xmm3, %xmm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %xmm0, %xmm0 # sched: [10:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
@@ -756,8 +756,8 @@ define <8 x float> @v8f32_one_step2(<8 x
 ; FMA-RECIP-LABEL: v8f32_one_step2:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %ymm0, %ymm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -787,8 +787,8 @@ define <8 x float> @v8f32_one_step2(<8 x
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -807,16 +807,16 @@ define <8 x float> @v8f32_one_step2(<8 x
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
 ; SKX-LABEL: v8f32_one_step2:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %ymm0, %ymm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
@@ -861,8 +861,8 @@ define <8 x float> @v8f32_one_step_2_div
 ; FMA-RECIP-LABEL: v8f32_one_step_2_divs:
 ; FMA-RECIP:       # %bb.0:
 ; FMA-RECIP-NEXT:    vrcpps %ymm0, %ymm1
-; FMA-RECIP-NEXT:    vfnmadd213ps {{.*}}(%rip), %ymm1, %ymm0
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm1
 ; FMA-RECIP-NEXT:    vmulps %ymm0, %ymm1, %ymm0
 ; FMA-RECIP-NEXT:    retq
@@ -895,8 +895,8 @@ define <8 x float> @v8f32_one_step_2_div
 ; HASWELL:       # %bb.0:
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [12:0.50]
 ; HASWELL-NEXT:    vmulps %ymm0, %ymm1, %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
@@ -917,8 +917,8 @@ define <8 x float> @v8f32_one_step_2_div
 ; KNL:       # %bb.0:
 ; KNL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm1, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [12:0.50]
 ; KNL-NEXT:    vmulps %ymm0, %ymm1, %ymm0 # sched: [5:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
@@ -926,8 +926,8 @@ define <8 x float> @v8f32_one_step_2_div
 ; SKX-LABEL: v8f32_one_step_2_divs:
 ; SKX:       # %bb.0:
 ; SKX-NEXT:    vrcpps %ymm0, %ymm1 # sched: [4:1.00]
-; SKX-NEXT:    vfnmadd213ps {{.*}}(%rip){1to8}, %ymm1, %ymm0 # sched: [11:0.50]
-; SKX-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem sched: [11:0.50]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm1 # sched: [11:0.50]
 ; SKX-NEXT:    vmulps %ymm0, %ymm1, %ymm0 # sched: [4:0.33]
 ; SKX-NEXT:    retq # sched: [7:1.00]
@@ -989,10 +989,10 @@ define <8 x float> @v8f32_two_step2(<8 x
 ; FMA-RECIP-NEXT:    vrcpps %ymm0, %ymm1
 ; FMA-RECIP-NEXT:    vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; FMA-RECIP-NEXT:    vmovaps %ymm1, %ymm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3
-; FMA-RECIP-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0
-; FMA-RECIP-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1
+; FMA-RECIP-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2
+; FMA-RECIP-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3
 ; FMA-RECIP-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0
 ; FMA-RECIP-NEXT:    retq
 ;
@@ -1031,10 +1031,10 @@ define <8 x float> @v8f32_two_step2(<8 x
 ; HASWELL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; HASWELL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; HASWELL-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:1.00]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [5:0.50]
-; HASWELL-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [5:0.50]
-; HASWELL-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [5:0.50]
+; HASWELL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [5:0.50]
+; HASWELL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [5:0.50]
 ; HASWELL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50]
 ; HASWELL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -1058,10 +1058,10 @@ define <8 x float> @v8f32_two_step2(<8 x
 ; KNL-NEXT:    vrcpps %ymm0, %ymm1 # sched: [11:2.00]
 ; KNL-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; KNL-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:1.00]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [5:0.50]
-; KNL-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [5:0.50]
-; KNL-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [5:0.50]
+; KNL-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [5:0.50]
+; KNL-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [5:0.50]
 ; KNL-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50]
 ; KNL-NEXT:    retq # sched: [7:1.00]
 ;
@@ -1070,10 +1070,10 @@ define <8 x float> @v8f32_two_step2(<8 x
 ; SKX-NEXT:    vrcpps %ymm0, %ymm1 # sched: [4:1.00]
 ; SKX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50]
 ; SKX-NEXT:    vmovaps %ymm1, %ymm3 # sched: [1:0.33]
-; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm0, %ymm3 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %ymm1, %ymm1, %ymm3 # sched: [4:0.33]
-; SKX-NEXT:    vfnmadd213ps %ymm2, %ymm3, %ymm0 # sched: [4:0.33]
-; SKX-NEXT:    vfmadd132ps %ymm3, %ymm3, %ymm0 # sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1 sched: [4:0.33]
+; SKX-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2 sched: [4:0.33]
+; SKX-NEXT:    vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3 sched: [4:0.33]
 ; SKX-NEXT:    vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [11:0.50]
 ; SKX-NEXT:    retq # sched: [7:1.00]
   %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
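
For readers decoding the checks above: the new end-of-line comments spell out how the 132/213/231 suffixes pick which operands feed the multiply and which is the addend. The sketch below is a minimal, standalone illustration of that mapping, not the code added to X86InstComments.cpp; the buildFMAComment helper and register names are purely illustrative.

// Hypothetical standalone sketch (assumed helper, not LLVM code) that
// reproduces comment strings like "xmm0 = -(xmm1 * xmm0) + mem" from the
// FMA3 form suffix and AT&T operand order "vf{n}maddNNNps %srcB, %srcA, %dst":
//   132: dst = (dst  * srcB) + srcA
//   213: dst = (srcA * dst ) + srcB
//   231: dst = (srcA * srcB) + dst
#include <iostream>
#include <string>

static std::string buildFMAComment(int Form, bool Negate, const std::string &Dst,
                                   const std::string &SrcA, const std::string &SrcB) {
  std::string Mul, Add;
  switch (Form) {
  case 132: Mul = Dst + " * " + SrcB;  Add = SrcA; break;
  case 213: Mul = SrcA + " * " + Dst;  Add = SrcB; break;
  case 231: Mul = SrcA + " * " + SrcB; Add = Dst;  break;
  default:  return Dst + " = <unknown FMA form>";
  }
  std::string Prod = Negate ? "-(" + Mul + ")" : "(" + Mul + ")";
  return Dst + " = " + Prod + " + " + Add;
}

int main() {
  // vfnmadd213ps %xmm2, %xmm1, %xmm0  ->  xmm0 = -(xmm1 * xmm0) + xmm2
  std::cout << buildFMAComment(213, /*Negate=*/true, "xmm0", "xmm1", "xmm2") << "\n";
  // vfmadd132ps %xmm1, %xmm1, %xmm0   ->  xmm0 = (xmm0 * xmm1) + xmm1
  std::cout << buildFMAComment(132, /*Negate=*/false, "xmm0", "xmm1", "xmm1") << "\n";
  return 0;
}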



