[llvm] r312237 - AMD family 17h (znver1) scheduler model update.

Ashutosh Nema via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 31 05:38:35 PDT 2017


Author: ashutosh
Date: Thu Aug 31 05:38:35 2017
New Revision: 312237

URL: http://llvm.org/viewvc/llvm-project?rev=312237&view=rev
Log:
AMD family 17h (znver1) scheduler model update.

Summary:
This patch enables the following:
1) Regex-based instruction itineraries for integer instructions (see the
   sketch after this list).
2) Instructions are grouped by kind (move, arithmetic, logic, misc,
   control transfer).
3) FP instructions and their itineraries are added, including values
   for SSE4A, BMI, BMI2 and SHA instructions.

Patch by Ganesh Gopalasubramanian

Reviewers: RKSimon, craig.topper

Subscribers: vprasad, shivaram, ddibyend, andreadb, javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D36617


Modified:
    llvm/trunk/lib/Target/X86/X86ScheduleZnver1.td
    llvm/trunk/test/CodeGen/X86/aes-schedule.ll
    llvm/trunk/test/CodeGen/X86/avx-schedule.ll
    llvm/trunk/test/CodeGen/X86/avx2-schedule.ll
    llvm/trunk/test/CodeGen/X86/bmi-schedule.ll
    llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll
    llvm/trunk/test/CodeGen/X86/f16c-schedule.ll
    llvm/trunk/test/CodeGen/X86/lea32-schedule.ll
    llvm/trunk/test/CodeGen/X86/lea64-schedule.ll
    llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll
    llvm/trunk/test/CodeGen/X86/movbe-schedule.ll
    llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll
    llvm/trunk/test/CodeGen/X86/sha-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse2-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse3-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse41-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse42-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll
    llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll

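A note on the scheduler comments in the tests below: the asm printer
emits them as "# sched: [Latency:RThroughput]". For example,

  retq # sched: [5:0.50]   -->   retq # sched: [1:0.50]

records RET's modeled latency dropping from 5 cycles to 1 under the
updated model, with the reciprocal throughput unchanged at 0.50 (two
per cycle).
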
Modified: llvm/trunk/lib/Target/X86/X86ScheduleZnver1.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ScheduleZnver1.td?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ScheduleZnver1.td (original)
+++ llvm/trunk/lib/Target/X86/X86ScheduleZnver1.td Thu Aug 31 05:38:35 2017
@@ -92,7 +92,7 @@ def ZnDivider : ProcResource<1>;
 def : ReadAdvance<ReadAfterLd, 4>;
 
 // (a folded load is an instruction that loads and does some operation)
-// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops 
+// Ex: ADDPD xmm,[mem]-> This instruction has two micro-ops
 // Instructions with folded loads are usually micro-fused, so they only appear
 // as two micro-ops.
 //      a. load and
@@ -104,9 +104,10 @@ multiclass ZnWriteResPair<X86FoldableSch
   // Register variant takes 1-cycle on Execution Port.
   def : WriteRes<SchedRW, [ExePort]> { let Latency = Lat; }
 
-  // Memory variant also uses a cycle on ZnAGU 
+  // Memory variant also uses a cycle on ZnAGU
   // adds 4 cycles to the latency.
   def : WriteRes<SchedRW.Folded, [ZnAGU, ExePort]> {
+     let NumMicroOps = 2;
      let Latency = !add(Lat, 4);
   }
 }
@@ -125,7 +126,7 @@ multiclass ZnWriteResFpuPair<X86Foldable
   }
 }
 
-// WriteRMW is set for instructions with Memory write 
+// WriteRMW is set for instructions with Memory write
 // operation in codegen
 def : WriteRes<WriteRMW, [ZnAGU]>;
 
@@ -220,4 +221,1550 @@ let Latency = 100 in {
   def : WriteRes<WritePCmpIStrI, []>;
   def : WriteRes<WritePCmpIStrILd, []>;
   }
+
+//=== Regex-based itineraries ===//
+// Notation:
+// - r: register.
+// - m: memory.
+// - i: immediate.
+// - mm: 64-bit mmx register.
+// - x: 128-bit xmm register.
+// - (x)mm: mmx or xmm register.
+// - y: 256-bit ymm register.
+// - v: any vector register.
+
+//=== Integer Instructions ===//
+//-- Move instructions --//
+// MOV.
+// r16,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "MOV16rm")>;
+
+// MOVSX, MOVZX.
+// r,m.
+def : InstRW<[WriteLoad], (instregex "MOV(S|Z)X32rm(8|16)")>;
+
+// CMOVcc.
+// r,r.
+def : InstRW<[WriteALU],
+      (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+      (instregex "CMOV(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)(16|32|64)rm")>;
+
+// XCHG.
+// r,r.
+def ZnWriteXCHG : SchedWriteRes<[ZnALU]> {
+  let NumMicroOps = 2;
+  let ResourceCycles = [2];
+}
+
+def : InstRW<[ZnWriteXCHG], (instregex "XCHG(8|16|32|64)rr", "XCHG(16|32|64)ar")>;
+
+// r,m.
+def ZnWriteXCHGrm : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteXCHGrm, ReadAfterLd], (instregex "XCHG(8|16|32|64)rm")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "XLAT")>;
+
+// POP16.
+// m.
+def ZnWritePop16r : SchedWriteRes<[ZnAGU]>{
+  let Latency = 5;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWritePop16r], (instregex "POP16rmm")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPF(16|32)")>;
+def : InstRW<[WriteMicrocoded], (instregex "POPA(16|32)")>;
+
+
+// PUSH.
+// r. Has default values.
+// m.
+def ZnWritePUSH : SchedWriteRes<[ZnAGU]>{
+  let Latency = 4;
+}
+def : InstRW<[ZnWritePUSH], (instregex "PUSH(16|32)rmm")>;
+
+// PUSHF.
+def : InstRW<[WriteMicrocoded], (instregex "PUSHF(16|32)")>;
+
+// PUSHA.
+def ZnWritePushA : SchedWriteRes<[ZnAGU]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWritePushA], (instregex "PUSHA(16|32)")>;
+
+// LAHF.
+def : InstRW<[WriteMicrocoded], (instregex "LAHF")>;
+
+// SAHF.
+def ZnWriteSAHF : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteSAHF], (instregex "SAHF")>;
+
+// BSWAP.
+def ZnWriteBSwap : SchedWriteRes<[ZnALU]> {
+  let ResourceCycles = [4];
+}
+def : InstRW<[ZnWriteBSwap], (instregex "BSWAP")>;
+
+// MOVBE.
+// r,m.
+def ZnWriteMOVBE : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteMOVBE, ReadAfterLd], (instregex "MOVBE(16|32|64)rm")>;
+
+// m,r.
+def : InstRW<[ZnWriteMOVBE], (instregex "MOVBE(16|32|64)mr")>;
+
+//-- Arithmetic instructions --//
+
+// ADD SUB.
+// m,r/i.
+def : InstRW<[WriteALULd], (instregex "(ADD|SUB)(8|16|32|64)m(r|i)",
+                          "(ADD|SUB)(8|16|32|64)mi8",
+                          "(ADD|SUB)64mi32")>;
+
+// ADC SBB.
+// r,r/i.
+def : InstRW<[WriteALU], (instregex "(ADC|SBB)(8|16|32|64)r(r|i)",
+                          "(ADC|SBB)(16|32|64)ri8",
+                          "(ADC|SBB)64ri32",
+                          "(ADC|SBB)(8|16|32|64)rr_REV")>;
+
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd],
+            (instregex "(ADC|SBB)(8|16|32|64)rm")>;
+
+// m,r/i.
+def : InstRW<[WriteALULd],
+             (instregex "(ADC|SBB)(8|16|32|64)m(r|i)",
+              "(ADC|SBB)(16|32|64)mi8",
+              "(ADC|SBB)64mi32")>;
+
+// INC DEC NOT NEG.
+// m.
+def : InstRW<[WriteALULd],
+             (instregex "(INC|DEC|NOT|NEG)(8|16|32|64)m",
+              "(INC|DEC)64(16|32)m")>;
+
+// MUL IMUL.
+// r16.
+def ZnWriteMul16 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16], (instregex "IMUL16r", "MUL16r")>;
+
+// m16.
+def ZnWriteMul16Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16Ld, ReadAfterLd], (instregex "IMUL16m", "MUL16m")>;
+
+// r32.
+def ZnWriteMul32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul32], (instregex "IMUL32r", "MUL32r")>;
+
+// m32.
+def ZnWriteMul32Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul32Ld, ReadAfterLd], (instregex "IMUL32m", "MUL32m")>;
+
+// r64.
+def ZnWriteMul64 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 4;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64], (instregex "IMUL64r", "MUL64r")>;
+
+// m64.
+def ZnWriteMul64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 9;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMul64Ld, ReadAfterLd], (instregex "IMUL64m", "MUL64m")>;
+
+// r16,r16.
+def ZnWriteMul16rri : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMul16rri], (instregex "IMUL16rri", "IMUL16rri8")>;
+
+// r16,m16.
+def ZnWriteMul16rmi : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMul16rmi, ReadAfterLd], (instregex "IMUL16rmi", "IMUL16rmi8")>;
+
+// MULX.
+// r32,r32,r32.
+def ZnWriteMulX32 : SchedWriteRes<[ZnALU1, ZnMultiplier]> {
+  let Latency = 3;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteMulX32], (instregex "MULX32rr")>;
+
+// r32,r32,m32.
+def ZnWriteMulX32Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+  let ResourceCycles = [1, 2, 2];
+}
+def : InstRW<[ZnWriteMulX32Ld, ReadAfterLd], (instregex "MULX32rm")>;
+
+// r64,r64,r64.
+def ZnWriteMulX64 : SchedWriteRes<[ZnALU1]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteMulX64], (instregex "MULX64rr")>;
+
+// r64,r64,m64.
+def ZnWriteMulX64Ld : SchedWriteRes<[ZnAGU, ZnALU1, ZnMultiplier]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMulX64Ld, ReadAfterLd], (instregex "MULX64rm")>;
+
+// DIV, IDIV.
+// r8.
+def ZnWriteDiv8 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 15;
+}
+def : InstRW<[ZnWriteDiv8], (instregex "DIV8r", "IDIV8r")>;
+
+// r16.
+def ZnWriteDiv16 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 17;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv16], (instregex "DIV16r", "IDIV16r")>;
+
+// r32.
+def ZnWriteDiv32 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 25;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv32], (instregex "DIV32r", "IDIV32r")>;
+
+// r64.
+def ZnWriteDiv64 : SchedWriteRes<[ZnALU2, ZnDivider]> {
+  let Latency = 41;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteDiv64], (instregex "DIV64r", "IDIV64r")>;
+
+//-- Control transfer instructions --//
+
+// J(E|R)CXZ.
+def ZnWriteJCXZ : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteJCXZ], (instregex "JCXZ", "JECXZ_(32|64)", "JRCXZ")>;
+
+// INTO
+def : InstRW<[WriteMicrocoded], (instregex "INTO")>;
+
+// LOOP.
+def ZnWriteLOOP : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOP], (instregex "LOOP")>;
+
+// LOOP(N)E, LOOP(N)Z
+def ZnWriteLOOPE : SchedWriteRes<[ZnALU03]>;
+def : InstRW<[ZnWriteLOOPE], (instregex "LOOPE", "LOOPNE",
+                              "LOOPZ", "LOOPNZ")>;
+
+// CALL.
+// r.
+def ZnWriteCALLr : SchedWriteRes<[ZnAGU, ZnALU03]>;
+def : InstRW<[ZnWriteCALLr], (instregex "CALL(16|32)r")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CALL(16|32)m")>;
+
+// RET.
+def ZnWriteRET : SchedWriteRes<[ZnALU03]> {
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRET], (instregex "RET(L|Q|W)", "LRET(L|Q|W)",
+                            "IRET(D|Q)", "RETF")>;
+
+//-- Logic instructions --//
+
+// AND OR XOR.
+// m,r/i.
+def : InstRW<[WriteALULd],
+             (instregex "(AND|OR|XOR)(8|16|32|64)m(r|i)",
+              "(AND|OR|XOR)(8|16|32|64)mi8", "(AND|OR|XOR)64mi32")>;
+
+// ANDN.
+// r,r.
+def : InstRW<[WriteALU], (instregex "ANDN(32|64)rr")>;
+// r,m.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "ANDN(32|64)rm")>;
+
+// Define ALU latency variants
+def ZnWriteALULat2 : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+}
+def ZnWriteALULat2Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 6;
+}
+
+def ZnWriteALULat3 : SchedWriteRes<[ZnALU]> {
+  let Latency = 3;
+}
+def ZnWriteALULat3Ld : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 7;
+}
+
+// BSF BSR.
+// r,r.
+def : InstRW<[ZnWriteALULat3], (instregex "BS(R|F)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat3Ld, ReadAfterLd], (instregex "BS(R|F)(16|32|64)rm")>;
+
+// BT.
+// r,r/i.
+def : InstRW<[WriteShift], (instregex "BT(16|32|64)r(r|i8)")>;
+
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mr")>;
+def : InstRW<[WriteShiftLd], (instregex "BT(16|32|64)mi8")>;
+
+// BTR BTS BTC.
+// r,r,i.
+def ZnWriteBTRSC : SchedWriteRes<[ZnALU]> {
+  let Latency = 2;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteBTRSC], (instregex "BT(R|S|C)(16|32|64)r(r|i8)")>;
+
+def ZnWriteBTRSCm : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 6;
+  let NumMicroOps = 2;
+}
+// m,r,i.
+def : InstRW<[ZnWriteBTRSCm], (instregex "BT(R|S|C)(16|32|64)m(r|i8)")>;
+
+// BLSI BLSMSK BLSR.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "BLS(I|MSK|R)(32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "BLS(I|MSK|R)(32|64)rm")>;
+
+// BEXTR.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BEXTR(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BEXTR(32|64)rm")>;
+
+// BZHI.
+// r,r,r.
+def : InstRW<[WriteALU], (instregex "BZHI(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteALULd, ReadAfterLd], (instregex "BZHI(32|64)rm")>;
+
+// CLD STD.
+def : InstRW<[WriteALU], (instregex "STD", "CLD")>;
+
+// PDEP PEXT.
+// r,r,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rr", "PEXT(32|64)rr")>;
+// r,m,r.
+def : InstRW<[WriteMicrocoded], (instregex "PDEP(32|64)rm", "PEXT(32|64)rm")>;
+
+// ROR ROL.
+def : InstRW<[WriteShift], (instregex "RO(R|L)(8|16|32|64)r1")>;
+
+// RCR RCL.
+// r,1.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r1")>;
+
+// m,1.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m1")>;
+
+// r,i / r,CL.
+def : InstRW<[WriteShift], (instregex "RC(R|L)(8|16|32|64)r(i|CL)")>;
+
+// m,i / m,CL.
+def : InstRW<[WriteMicrocoded], (instregex "RC(R|L)(8|16|32|64)m(i|CL)")>;
+
+// SHR SHL SAR.
+// m,i / m,1.
+def : InstRW<[WriteShiftLd], (instregex "S(A|H)(R|L)(8|16|32|64)m(i|1)")>;
+
+// SHRD SHLD.
+// r,r,i.
+def : InstRW<[WriteShift], (instregex "SH(R|L)D(16|32|64)rri8")>;
+
+// m,r,i.
+def : InstRW<[WriteShiftLd], (instregex "SH(R|L)D(16|32|64)mri8")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHLD(16|32|64)rrCL")>;
+
+// r,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SHRD(16|32|64)rrCL")>;
+
+// m,r,cl.
+def : InstRW<[WriteMicrocoded], (instregex "SH(R|L)D(16|32|64)mrCL")>;
+
+// SETcc.
+// r.
+def : InstRW<[WriteShift],
+             (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)r")>;
+// m.
+def : InstRW<[WriteShift],
+             (instregex "SET(O|NO|B|AE|E|NE|BE|A|S|NS|P|NP|L|GE|LE|G)m")>;
+
+// LZCNT TZCNT.
+// r,r.
+def : InstRW<[ZnWriteALULat2], (instregex "(LZCNT|TZCNT)(16|32|64)rr")>;
+// r,m.
+def : InstRW<[ZnWriteALULat2Ld, ReadAfterLd], (instregex "(LZCNT|TZCNT)(16|32|64)rm")>;
+
+//-- Misc instructions --//
+// CMPXCHG.
+def ZnWriteCMPXCHG : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let Latency = 8;
+  let NumMicroOps = 5;
+}
+def : InstRW<[ZnWriteCMPXCHG], (instregex "CMPXCHG(8|16|32|64)rm")>;
+
+// CMPXCHG8B.
+def ZnWriteCMPXCHG8B : SchedWriteRes<[ZnAGU, ZnALU]> {
+  let NumMicroOps = 18;
+}
+def : InstRW<[ZnWriteCMPXCHG8B], (instregex "CMPXCHG8B")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CMPXCHG16B")>;
+
+// LEAVE
+def ZnWriteLEAVE : SchedWriteRes<[ZnALU, ZnAGU]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteLEAVE], (instregex "LEAVE")>;
+
+// PAUSE.
+def : InstRW<[WriteMicrocoded], (instregex "PAUSE")>;
+
+// RDTSC.
+def : InstRW<[WriteMicrocoded], (instregex "RDTSC")>;
+
+// RDPMC.
+def : InstRW<[WriteMicrocoded], (instregex "RDPMC")>;
+
+// RDRAND.
+def : InstRW<[WriteMicrocoded], (instregex "RDRAND(16|32|64)r")>;
+
+// XGETBV.
+def : InstRW<[WriteMicrocoded], (instregex "XGETBV")>;
+
+//-- String instructions --//
+// CMPS.
+def : InstRW<[WriteMicrocoded], (instregex "CMPS(B|L|Q|W)")>;
+
+// LODSB/W.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(B|W)")>;
+
+// LODSD/Q.
+def : InstRW<[WriteMicrocoded], (instregex "LODS(L|Q)")>;
+
+// MOVS.
+def : InstRW<[WriteMicrocoded], (instregex "MOVS(B|L|Q|W)")>;
+
+// SCAS.
+def : InstRW<[WriteMicrocoded], (instregex "SCAS(B|W|L|Q)")>;
+
+// STOS
+def : InstRW<[WriteMicrocoded], (instregex "STOS(B|L|Q|W)")>;
+
+// XADD.
+def : InstRW<[WriteMicrocoded], (instregex "XADD(8|16|32|64)rm")>;
+
+//=== Floating Point x87 Instructions ===//
+//-- Move instructions --//
+
+def ZnWriteFLDr : SchedWriteRes<[ZnFPU13]> ;
+
+def ZnWriteSTr: SchedWriteRes<[ZnFPU23]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+}
+
+// LD_F.
+// r.
+def : InstRW<[ZnWriteFLDr], (instregex "LD_Frr")>;
+
+// m.
+def ZnWriteLD_F80m : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteLD_F80m], (instregex "LD_F80m")>;
+
+// FBLD.
+def : InstRW<[WriteMicrocoded], (instregex "FBLDm")>;
+
+// FST(P).
+// r.
+def : InstRW<[ZnWriteSTr], (instregex "ST_(F|FP)rr")>;
+
+// m80.
+def ZnWriteST_FP80m : SchedWriteRes<[ZnAGU, ZnFPU23]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteST_FP80m], (instregex "ST_FP80m")>;
+
+// FBSTP.
+// m80.
+def : InstRW<[WriteMicrocoded], (instregex "FBSTPm")>;
+
+def ZnWriteFXCH : SchedWriteRes<[ZnFPU]>;
+
+// FXCH.
+def : InstRW<[ZnWriteFXCH], (instregex "XCH_F")>;
+
+// FILD.
+def ZnWriteFILD : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteFILD], (instregex "ILD_F(16|32|64)m")>;
+
+// FIST(P) FISTTP.
+def ZnWriteFIST : SchedWriteRes<[ZnAGU, ZnFPU23]> {
+  let Latency = 12;
+}
+def : InstRW<[ZnWriteFIST], (instregex "IST_(F|FP)(16|32)m")>;
+
+def ZnWriteFPU13 : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+  let Latency = 8;
+}
+
+def ZnWriteFPU3 : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 11;
+}
+
+// FLDZ.
+def : InstRW<[ZnWriteFPU13], (instregex "LD_F0")>;
+
+// FLD1.
+def : InstRW<[ZnWriteFPU3], (instregex "LD_F1")>;
+
+// FLDPI FLDL2E etc.
+def : InstRW<[ZnWriteFPU3], (instregex "FLDPI", "FLDL2(T|E)" "FLDL(G|N)2")>;
+
+def : InstRW<[WriteMicrocoded], (instregex "CMOV(B|BE|P|NB|NBE|NE|NP)_F")>;
+
+// FNSTSW.
+// AX.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTSW16r")>;
+
+// m16.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTSWm")>;
+
+// FLDCW.
+def : InstRW<[WriteMicrocoded], (instregex "FLDCW16m")>;
+
+// FNSTCW.
+def : InstRW<[WriteMicrocoded], (instregex "FNSTCW16m")>;
+
+// FINCSTP FDECSTP.
+def : InstRW<[ZnWriteFPU3], (instregex "FINCSTP", "FDECSTP")>;
+
+// FFREE.
+def : InstRW<[ZnWriteFPU3], (instregex "FFREE")>;
+
+// FNSAVE.
+def : InstRW<[WriteMicrocoded], (instregex "FSAVEm")>;
+
+// FRSTOR.
+def : InstRW<[WriteMicrocoded], (instregex "FRSTORm")>;
+
+//-- Arithmetic instructions --//
+
+def ZnWriteFPU3Lat2 : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 2;
+}
+
+def ZnWriteFPU3Lat2Ld : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 9;
+}
+
+def ZnWriteFPU3Lat1 : SchedWriteRes<[ZnFPU3]> ;
+
+def ZnWriteFPU0Lat1 : SchedWriteRes<[ZnFPU0]> ;
+
+def ZnWriteFPU0Lat1Ld : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+  let Latency = 8;
+}
+
+// FABS.
+def : InstRW<[ZnWriteFPU3Lat2], (instregex "ABS_F")>;
+
+// FCHS.
+def : InstRW<[ZnWriteFPU3Lat1], (instregex "CHS_F")>;
+
+// FCOM(P) FUCOM(P).
+// r.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "COM_FST0r", "COMP_FST0r", "UCOM_Fr",
+                         "UCOM_FPr")>;
+// m.
+def : InstRW<[ZnWriteFPU0Lat1Ld], (instregex "FCOM(32|64)m", "FCOMP(32|64)m")>;
+
+// FCOMPP FUCOMPP.
+// r.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "FCOMPP", "UCOM_FPPr")>;
+
+def ZnWriteFPU02 : SchedWriteRes<[ZnAGU, ZnFPU02]> {
+  let Latency = 9;
+}
+
+// FCOMI(P) FUCOMI(P).
+// r.
+def : InstRW<[ZnWriteFPU02], (instregex "COM_FIr", "COM_FIPr", "UCOM_FIr",
+                           "UCOM_FIPr")>;
+
+def ZnWriteFPU03 : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,3];
+}
+
+// FICOM(P).
+def : InstRW<[ZnWriteFPU03], (instregex "FICOM(16|32)m", "FICOMP(16|32)m")>;
+
+// FTST.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "TST_F")>;
+
+// FXAM.
+def : InstRW<[ZnWriteFPU3Lat1], (instregex "FXAM")>;
+
+// FPREM.
+def : InstRW<[WriteMicrocoded], (instregex "FPREM")>;
+
+// FPREM1.
+def : InstRW<[WriteMicrocoded], (instregex "FPREM1")>;
+
+// FRNDINT.
+def : InstRW<[WriteMicrocoded], (instregex "FRNDINT")>;
+
+// FSCALE.
+def : InstRW<[WriteMicrocoded], (instregex "FSCALE")>;
+
+// FXTRACT.
+def : InstRW<[WriteMicrocoded], (instregex "FXTRACT")>;
+
+// FNOP.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "FNOP")>;
+
+// WAIT.
+def : InstRW<[ZnWriteFPU0Lat1], (instregex "WAIT")>;
+
+// FNCLEX.
+def : InstRW<[WriteMicrocoded], (instregex "FNCLEX")>;
+
+// FNINIT.
+def : InstRW<[WriteMicrocoded], (instregex "FNINIT")>;
+
+//=== Integer MMX and XMM Instructions ===//
+//-- Move instructions --//
+
+// Moves from GPR to FPR incur a penalty.
+def ZnWriteFPU2 : SchedWriteRes<[ZnFPU2]> {
+  let Latency = 3;
+}
+
+// Moves to the ALU don't incur a penalty.
+def ZnWriteToALU2 : SchedWriteRes<[ZnFPU2]> {
+  let Latency = 2;
+}
+
+def ZnWriteFPU : SchedWriteRes<[ZnFPU]>;
+def ZnWriteFPUY : SchedWriteRes<[ZnFPU]> {
+  let NumMicroOps = 2;
+  let Latency = 2;
+}
+
+// MOVD.
+// r32/64 <- (x)mm.
+def : InstRW<[ZnWriteToALU2], (instregex "MMX_MOVD64grr", "MMX_MOVD64from64rr",
+                         "VMOVPDI2DIrr", "MOVPDI2DIrr")>;
+
+// (x)mm <- r32/64.
+def : InstRW<[ZnWriteFPU2], (instregex "MMX_MOVD64rr", "MMX_MOVD64to64rr",
+                         "VMOVDI2PDIrr", "MOVDI2PDIrr")>;
+
+// MOVQ.
+// r64 <- (x)mm.
+def : InstRW<[ZnWriteToALU2], (instregex "VMOVPQIto64rr")>;
+
+// (x)mm <- r64.
+def : InstRW<[ZnWriteFPU2], (instregex "VMOV64toPQIrr", "VMOVZQI2PQIrr")>;
+
+// (x)mm <- (x)mm.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVQ64rr")>;
+
+// (V)MOVDQA/U.
+// x <- x.
+def : InstRW<[ZnWriteFPU], (instregex "MOVDQ(A|U)rr", "VMOVDQ(A|U)rr",
+                           "MOVDQ(A|U)rr_REV", "VMOVDQ(A|U)rr_REV")>;
+
+// y <- y.
+def : InstRW<[ZnWriteFPUY], (instregex "VMOVDQ(A|U)Yrr", "VMOVDQ(A|U)Yrr_REV")>;
+
+// MOVDQ2Q.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVDQ2Qrr")>;
+
+// MOVQ2DQ.
+def : InstRW<[ZnWriteFPU], (instregex "MMX_MOVQ2DQrr")>;
+
+// PACKSSWB/DW.
+// mm <- mm.
+def ZnWriteFPU12 : SchedWriteRes<[ZnFPU12]> ;
+def ZnWriteFPU12Y : SchedWriteRes<[ZnFPU12]> {
+  let NumMicroOps = 2;
+}
+def ZnWriteFPU12m : SchedWriteRes<[ZnAGU, ZnFPU12]> ;
+
+def : InstRW<[ZnWriteFPU12], (instregex "MMX_PACKSSDWirr",
+                                  "MMX_PACKSSWBirr", "MMX_PACKUSWBirr")>;
+def : InstRW<[ZnWriteFPU12m], (instregex "MMX_PACKSSDWirm",
+                                  "MMX_PACKSSWBirm", "MMX_PACKUSWBirm")>;
+
+// VPMOVSX/ZX BW BD BQ DW DQ.
+// y <- x.
+def : InstRW<[ZnWriteFPU12Y], (instregex "VPMOV(SX|ZX)(BW|BQ|DW|DQ)Yrr")>;
+
+def ZnWriteFPU013 : SchedWriteRes<[ZnFPU013]> ;
+def ZnWriteFPU013Y : SchedWriteRes<[ZnFPU013]> {
+  let Latency = 2;
+}
+def ZnWriteFPU013m : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+}
+def ZnWriteFPU013Ld : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+}
+def ZnWriteFPU013LdY : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+  let Latency = 9;
+  let NumMicroOps = 2;
+}
+
+// PBLENDW.
+// x,x,i / v,v,v,i
+def : InstRW<[ZnWriteFPU013], (instregex "(V?)PBLENDWrri")>;
+// ymm
+def : InstRW<[ZnWriteFPU013Y], (instregex "(V?)PBLENDWYrri")>;
+
+// x,m,i / v,v,m,i
+def : InstRW<[ZnWriteFPU013Ld], (instregex "(V?)PBLENDWrmi")>;
+// y,m,i
+def : InstRW<[ZnWriteFPU013LdY], (instregex "(V?)PBLENDWYrmi")>;
+
+def ZnWriteFPU01 : SchedWriteRes<[ZnFPU01]> ;
+def ZnWriteFPU01Y : SchedWriteRes<[ZnFPU01]> {
+  let NumMicroOps = 2;
+}
+
+// VPBLENDD.
+// v,v,v,i.
+def : InstRW<[ZnWriteFPU01], (instregex "VPBLENDDrri")>;
+// ymm
+def : InstRW<[ZnWriteFPU01Y], (instregex "VPBLENDDYrri")>;
+
+// v,v,m,i
+def ZnWriteFPU01Op2 : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let NumMicroOps = 2;
+  let Latency = 8;
+  let ResourceCycles = [1, 2];
+}
+def ZnWriteFPU01Op2Y : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let NumMicroOps = 2;
+  let Latency = 9;
+  let ResourceCycles = [1, 3];
+}
+def : InstRW<[ZnWriteFPU01Op2], (instregex "VPBLENDDrmi")>;
+def : InstRW<[ZnWriteFPU01Op2Y], (instregex "VPBLENDDYrmi")>;
+
+// MASKMOVQ.
+def : InstRW<[WriteMicrocoded], (instregex "MMX_MASKMOVQ(64)?")>;
+
+// MASKMOVDQU.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)MASKMOVDQU(64)?")>;
+
+// VPMASKMOVQ.
+// x,x,m / y,y,m.
+def : InstRW<[ZnWriteFPU01Op2], (instregex "VPMASKMOVQrm")>;
+def : InstRW<[ZnWriteFPU01Op2Y], (instregex "VPMASKMOVQYrm")>;
+
+// VPMASKMOVD.
+def : InstRW<[WriteMicrocoded], (instregex "VPMASKMOVD(Y?)rm")>;
+// m, v,v.
+def : InstRW<[WriteMicrocoded], (instregex "VPMASKMOV(D|Q)(Y?)mr")>;
+
+// PMOVMSKB.
+def ZnWritePMOVMSKB : SchedWriteRes<[ZnFPU2]> {
+  let NumMicroOps = 2;
+}
+def ZnWritePMOVMSKBY : SchedWriteRes<[ZnFPU2]> {
+  let Latency = 2;
+}
+def : InstRW<[ZnWritePMOVMSKB], (instregex "(V|MMX_)?PMOVMSKBrr")>;
+def : InstRW<[ZnWritePMOVMSKBY], (instregex "(V|MMX_)?PMOVMSKBYrr")>;
+
+// PEXTR B/W/D/Q.
+// r32,x,i.
+def ZnWritePEXTRr : SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+  let Latency = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePEXTRr], (instregex "PEXTR(B|W|D|Q)rr", "MMX_PEXTRWirri")>;
+
+def ZnWritePEXTRm : SchedWriteRes<[ZnAGU, ZnFPU12, ZnFPU2]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2, 3];
+}
+// m8,x,i.
+def : InstRW<[ZnWritePEXTRm], (instregex "PEXTR(B|W|D|Q)mr")>;
+
+// VPBROADCAST B/W.
+// x, m8/16.
+def ZnWriteVPBROADCAST128Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVPBROADCAST128Ld], (instregex "VPBROADCAST(B|W)rm")>;
+
+// y, m8/16
+def ZnWriteVPBROADCAST256Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVPBROADCAST256Ld], (instregex "VPBROADCAST(B|W)Yrm")>;
+
+// VPGATHER.
+def : InstRW<[WriteMicrocoded], (instregex "VPGATHER(Q|D)(Q|D)(Y?)rm")>;
+
+//-- Arithmetic instructions --//
+
+// PHADD|PHSUB (S) W/D.
+def : InstRW<[WriteMicrocoded], (instregex "MMX_PHADD(W?)r(r|m)64",
+                               "MMX_PHADDSWr(r|m)64",
+                               "MMX_PHSUB(W|D)r(r|m)64",
+                               "MMX_PHSUBSWrr64",
+                               "(V?)PH(ADD|SUB)(W|D)(Y?)r(r|m)",
+                               "(V?)PH(ADD|SUB)SWr(r|m)(256)?")>;
+
+// PCMPGTQ.
+def ZnWritePCMPGTQr : SchedWriteRes<[ZnFPU03]>;
+def : InstRW<[ZnWritePCMPGTQr], (instregex "(V?)PCMPGTQ(Y?)rr")>;
+
+// x <- x,m.
+def ZnWritePCMPGTQm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+  let Latency = 8;
+}
+// ymm.
+def ZnWritePCMPGTQYm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWritePCMPGTQm], (instregex "(V?)PCMPGTQrm")>;
+def : InstRW<[ZnWritePCMPGTQYm], (instregex "(V?)PCMPGTQYrm")>;
+
+// PMULLD.
+// x,x.
+def ZnWritePMULLDr : SchedWriteRes<[ZnFPU0]> {
+  let Latency = 4;
+}
+// ymm.
+def ZnWritePMULLDYr : SchedWriteRes<[ZnFPU0]> {
+  let Latency = 5;
+  let ResourceCycles = [2];
+}
+def : InstRW<[ZnWritePMULLDr], (instregex "(V?)PMULLDrr")>;
+def : InstRW<[ZnWritePMULLDYr], (instregex "(V?)PMULLDYrr")>;
+
+// x,m.
+def ZnWritePMULLDm : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
 }
+// y,m.
+def ZnWritePMULLDYm : SchedWriteRes<[ZnAGU, ZnFPU0]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePMULLDm], (instregex "(V?)PMULLDrm")>;
+def : InstRW<[ZnWritePMULLDYm], (instregex "(V?)PMULLDYrm")>;
+
+//-- Logic instructions --//
+
+// PTEST.
+// v,v.
+def ZnWritePTESTr : SchedWriteRes<[ZnFPU12]> {
+  let ResourceCycles = [2];
+}
+def : InstRW<[ZnWritePTESTr], (instregex "(V?)PTEST(Y?)rr")>;
+
+// v,m.
+def ZnWritePTESTm : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWritePTESTm], (instregex "(V?)PTEST(Y?)rm")>;
+
+// PSLL,PSRL,PSRA W/D/Q.
+// x,x / v,v,x.
+def ZnWritePShift  : SchedWriteRes<[ZnFPU2]> ;
+def ZnWritePShiftY : SchedWriteRes<[ZnFPU2]> {
+  let Latency = 2;
+}
+def ZnWritePShiftLd  : SchedWriteRes<[ZnAGU,ZnFPU2]> {
+  let Latency = 8;
+}
+def ZnWritePShiftYLd : SchedWriteRes<[ZnAGU, ZnFPU2]> {
+  let Latency = 9;
+}
+def : InstRW<[ZnWritePShift], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)rr")>;
+def : InstRW<[ZnWritePShiftY], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)Yrr")>;
+
+def : InstRW<[ZnWritePShiftLd], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)rm")>;
+def : InstRW<[ZnWritePShiftYLd], (instregex "(V?)PS(LL|RL|RA)(W|D|Q)Yrm")>;
+
+// PSLL,PSRL DQ.
+def : InstRW<[ZnWritePShift], (instregex "(V?)PS(R|L)LDQri")>;
+def : InstRW<[ZnWritePShiftY], (instregex "(V?)PS(R|L)LDQYri")>;
+
+//=== Floating Point XMM and YMM Instructions ===//
+//-- Move instructions --//
+
+// MOVMSKP S/D.
+// r32 <- x,y.
+def ZnWriteMOVMSKPr : SchedWriteRes<[ZnFPU2]> ;
+def : InstRW<[ZnWriteMOVMSKPr], (instregex "(V?)MOVMSKP(S|D)(Y?)rr")>;
+
+// VPERM2F128.
+def : InstRW<[WriteMicrocoded], (instregex "VPERM2F128rr")>;
+def : InstRW<[WriteMicrocoded], (instregex "VPERM2F128rm")>;
+
+// BLENDVP S/D.
+def ZnWriteFPU01Lat3 : SchedWriteRes<[ZnFPU013]> {
+  let Latency = 3;
+}
+def ZnWriteFPU01Lat3Ld : SchedWriteRes<[ZnAGU, ZnFPU013]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteFPU01Lat3], (instregex "BLENDVP(S|D)rr0")>;
+def : InstRW<[ZnWriteFPU01Lat3Ld, ReadAfterLd], (instregex "BLENDVP(S|D)rm0")>;
+
+def ZnWriteBROADCAST : SchedWriteRes<[ZnAGU, ZnFPU13]> {
+  let NumMicroOps = 2;
+  let Latency = 8;
+}
+// VBROADCASTF128.
+def : InstRW<[ZnWriteBROADCAST], (instregex "VBROADCASTF128")>;
+
+// EXTRACTPS.
+// r32,x,i.
+def ZnWriteEXTRACTPSr : SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+  let Latency = 2;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteEXTRACTPSr], (instregex "(V?)EXTRACTPSrr")>;
+
+def ZnWriteEXTRACTPSm : SchedWriteRes<[ZnAGU,ZnFPU12, ZnFPU2]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+  let ResourceCycles = [5, 1, 2];
+}
+// m32,x,i.
+def : InstRW<[ZnWriteEXTRACTPSm], (instregex "(V?)EXTRACTPSmr")>;
+
+// VEXTRACTF128.
+// x,y,i.
+def : InstRW<[ZnWriteFPU013], (instregex "VEXTRACTF128rr")>;
+
+// m128,y,i.
+def : InstRW<[ZnWriteFPU013m], (instregex "VEXTRACTF128mr")>;
+
+def ZnWriteVINSERT128r: SchedWriteRes<[ZnFPU013]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def ZnWriteVINSERT128Ld: SchedWriteRes<[ZnAGU,ZnFPU013]> {
+  let Latency = 9;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+// VINSERTF128.
+// y,y,x,i.
+def : InstRW<[ZnWriteVINSERT128r], (instregex "VINSERTF128rr")>;
+def : InstRW<[ZnWriteVINSERT128Ld], (instregex "VINSERTF128rm")>;
+
+// VMASKMOVP S/D.
+// x,x,m.
+def ZnWriteVMASKMOVPLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 8;
+}
+// y,y,m.
+def ZnWriteVMASKMOVPLdY : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 8;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def ZnWriteVMASKMOVPm : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteVMASKMOVPLd], (instregex "VMASKMOVP(S|D)rm")>;
+def : InstRW<[ZnWriteVMASKMOVPLdY], (instregex "VMASKMOVP(S|D)Yrm")>;
+def : InstRW<[ZnWriteVMASKMOVPm], (instregex "VMASKMOVP(S|D)mr")>;
+
+// m256,y,y.
+def ZnWriteVMASKMOVPYmr : SchedWriteRes<[ZnAGU,ZnFPU01]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteVMASKMOVPYmr], (instregex "VMASKMOVP(S|D)Ymr")>;
+
+// VGATHERDPS.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPSrm")>;
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPSYrm")>;
+
+// VGATHERQPS.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPSrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPSYrm")>;
+
+// VGATHERDPD.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPDrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERDPDYrm")>;
+
+// VGATHERQPD.
+// x.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPDrm")>;
+
+// y.
+def : InstRW<[WriteMicrocoded], (instregex "VGATHERQPDYrm")>;
+
+//-- Conversion instructions --//
+def ZnWriteCVTPD2PSr: SchedWriteRes<[ZnFPU3]> {
+  let Latency = 4;
+}
+// CVTPD2PS.
+// x,x.
+def : InstRW<[ZnWriteCVTPD2PSr], (instregex "(V?)CVTPD2PSrr")>;
+
+def ZnWriteCVTPD2PSLd: SchedWriteRes<[ZnAGU,ZnFPU03]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,2];
+}
+// x,m128.
+def : InstRW<[ZnWriteCVTPD2PSLd], (instregex "(V?)CVTPD2PS(X?)rm")>;
+
+// x,y.
+def ZnWriteCVTPD2PSYr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteCVTPD2PSYr], (instregex "(V?)CVTPD2PSYrr")>;
+
+// x,m256.
+def ZnWriteCVTPD2PSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 11;
+}
+def : InstRW<[ZnWriteCVTPD2PSYLd], (instregex "(V?)CVTPD2PSYrm")>;
+
+// CVTSD2SS.
+// x,x.
+// Same as ZnWriteCVTPD2PSr.
+def : InstRW<[ZnWriteCVTPD2PSr], (instregex "(Int_)?(V)?CVTSD2SSrr")>;
+
+// x,m64.
+def : InstRW<[ZnWriteCVTPD2PSLd], (instregex "(Int_)?(V)?CVTSD2SSrm")>;
+
+// CVTPS2PD.
+// x,x.
+def ZnWriteCVTPS2PDr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteCVTPS2PDr], (instregex "(V?)CVTPS2PDrr")>;
+
+// x,m64.
+// y,m128.
+def ZnWriteCVTPS2PDLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 10;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteCVTPS2PDLd], (instregex "(V?)CVTPS2PD(Y?)rm")>;
+
+// y,x.
+def ZnWriteVCVTPS2PDY : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 3;
+}
+def : InstRW<[ZnWriteVCVTPS2PDY], (instregex "VCVTPS2PDYrr")>;
+
+// CVTSS2SD.
+// x,x.
+def ZnWriteCVTSS2SDr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteCVTSS2SDr], (instregex "(Int_)?(V?)CVTSS2SDrr")>;
+
+// x,m32.
+def ZnWriteCVTSS2SDLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 2];
+}
+def : InstRW<[ZnWriteCVTSS2SDLd], (instregex "(Int_)?(V?)CVTSS2SDrm")>;
+
+def ZnWriteCVTDQ2PDr: SchedWriteRes<[ZnFPU12,ZnFPU3]> {
+  let Latency = 5;
+}
+// CVTDQ2PD.
+// x,x.
+def : InstRW<[ZnWriteCVTDQ2PDr], (instregex "(V)?CVTDQ2PDrr")>;
+
+// Same as xmm
+// y,x.
+def : InstRW<[ZnWriteCVTDQ2PDr], (instregex "VCVTDQ2PDYrr")>;
+
+def ZnWriteCVTPD2DQr: SchedWriteRes<[ZnFPU12, ZnFPU3]> {
+  let Latency = 5;
+}
+// CVT(T)PD2DQ.
+// x,x.
+def : InstRW<[ZnWriteCVTDQ2PDr], (instregex "(V?)CVT(T?)PD2DQrr")>;
+
+def ZnWriteCVTPD2DQLd: SchedWriteRes<[ZnAGU,ZnFPU12,ZnFPU3]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+}
+// x,m128.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "(V?)CVT(T?)PD2DQrm")>;
+// same as xmm handling
+// x,y.
+def : InstRW<[ZnWriteCVTPD2DQr], (instregex "VCVT(T?)PD2DQYrr")>;
+// x,m256.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "VCVT(T?)PD2DQYrm")>;
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "VCVT(T?)PD2DQ(64)?rm")>;
+
+def ZnWriteCVTPS2PIr: SchedWriteRes<[ZnFPU3]> {
+  let Latency = 4;
+}
+// CVT(T)PS2PI.
+// mm,x.
+def : InstRW<[ZnWriteCVTPS2PIr], (instregex "MMX_CVT(T?)PS2PIirr")>;
+
+// CVTPI2PD.
+// x,mm.
+def : InstRW<[ZnWriteCVTPS2PDr], (instregex "MMX_CVT(T?)PI2PDirr")>;
+
+// CVT(T)PD2PI.
+// mm,x.
+def : InstRW<[ZnWriteCVTPS2PIr], (instregex "MMX_CVT(T?)PD2PIirr")>;
+
+def ZnWriteCVSTSI2SSr: SchedWriteRes<[ZnFPU3]> {
+  let Latency = 5;
+}
+// CVTSI2SS.
+// x,r32.
+def : InstRW<[ZnWriteCVSTSI2SSr], (instregex "(Int_)?(V?)CVT(T?)SI2SS(64)?rr")>;
+
+// same as CVTPD2DQr
+// CVT(T)SS2SI.
+// r32,x.
+def : InstRW<[ZnWriteCVTPD2DQr], (instregex "(Int_)?(V?)CVT(T?)SS2SI(64)?rr")>;
+// same as CVTPD2DQm
+// r32,m32.
+def : InstRW<[ZnWriteCVTPD2DQLd], (instregex "(Int_)?(V?)CVT(T?)SS2SI(64)?rm")>;
+
+def ZnWriteCVSTSI2SDr: SchedWriteRes<[ZnFPU013, ZnFPU3]> {
+  let Latency = 5;
+}
+// CVTSI2SD.
+// x,r32/64.
+def : InstRW<[ZnWriteCVSTSI2SDr], (instregex "(Int_)?(V?)CVTSI2SD(64)?rr")>;
+
+def ZnWriteCVSTSI2SIr: SchedWriteRes<[ZnFPU3, ZnFPU2]> {
+  let Latency = 5;
+}
+def ZnWriteCVSTSI2SILd: SchedWriteRes<[ZnAGU, ZnFPU3, ZnFPU2]> {
+  let Latency = 12;
+}
+// CVTSD2SI.
+// r32/64
+def : InstRW<[ZnWriteCVSTSI2SIr], (instregex "(Int_)?CVT(T?)SD2SI(64)?rr")>;
+// r32,m32.
+def : InstRW<[ZnWriteCVSTSI2SILd], (instregex "(Int_)?CVT(T?)SD2SI(64)?rm")>;
+
+def ZnWriteVCVSTSI2SIr: SchedWriteRes<[ZnFPU3]> {
+  let Latency = 5;
+}
+def ZnWriteVCVSTSI2SILd: SchedWriteRes<[ZnFPU3, ZnAGU]> {
+  let Latency = 12;
+}
+// VCVTSD2SI.
+// r32/64
+def : InstRW<[ZnWriteCVSTSI2SIr], (instregex "(Int_)?VCVT(T?)SD2SI(64)?rr")>;
+// r32,m32.
+def : InstRW<[ZnWriteCVSTSI2SILd], (instregex "(Int_)?VCVT(T?)SD2SI(64)?rm")>;
+
+// VCVTPS2PH.
+// x,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPS2PH(Y?)rr")>;
+// m,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPS2PH(Y?)mr")>;
+
+// VCVTPH2PS.
+// v,x.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPH2PS(Y?)rr")>;
+// v,m.
+def : InstRW<[WriteMicrocoded], (instregex "VCVTPH2PS(Y?)rm")>;
+
+//-- SSE4A instructions --//
+// EXTRQ
+def ZnWriteEXTRQ: SchedWriteRes<[ZnFPU12, ZnFPU2]> {
+  let Latency = 2;
+}
+def : InstRW<[ZnWriteEXTRQ], (instregex "EXTRQ")>;
+
+// INSERTQ
+def ZnWriteINSERTQ: SchedWriteRes<[ZnFPU03,ZnFPU1]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteINSERTQ], (instregex "INSERTQ")>;
+
+// MOVNTSS/MOVNTSD
+def ZnWriteMOVNT: SchedWriteRes<[ZnAGU,ZnFPU2]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteMOVNT], (instregex "MOVNTS(S|D)")>;
+
+//-- SHA instructions --//
+// SHA256MSG2
+def : InstRW<[WriteMicrocoded], (instregex "SHA256MSG2(Y?)r(r|m)")>;
+
+// SHA1MSG1, SHA256MSG1
+// x,x.
+def ZnWriteSHA1MSG1r : SchedWriteRes<[ZnFPU12]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[ZnWriteSHA1MSG1r], (instregex "SHA(1|256)MSG1rr")>;
+// x,m.
+def ZnWriteSHA1MSG1Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+  let Latency = 9;
+  let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWriteSHA1MSG1Ld], (instregex "SHA(1|256)MSG1rm")>;
+
+// SHA1MSG2
+// x,x.
+def ZnWriteSHA1MSG2r : SchedWriteRes<[ZnFPU12]> ;
+def : InstRW<[ZnWriteSHA1MSG2r], (instregex "SHA1MSG2rr")>;
+// x,m.
+def ZnWriteSHA1MSG2Ld : SchedWriteRes<[ZnAGU, ZnFPU12]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteSHA1MSG2Ld], (instregex "SHA1MSG2rm")>;
+
+// SHA1NEXTE
+// x,x.
+def ZnWriteSHA1NEXTEr : SchedWriteRes<[ZnFPU1]> ;
+def : InstRW<[ZnWriteSHA1NEXTEr], (instregex "SHA1NEXTErr")>;
+// x,m.
+def ZnWriteSHA1NEXTELd : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+  let Latency = 8;
+}
+def : InstRW<[ZnWriteSHA1NEXTELd], (instregex "SHA1NEXTErm")>;
+
+// SHA1RNDS4
+// x,x.
+def ZnWriteSHA1RNDS4r : SchedWriteRes<[ZnFPU1]> {
+  let Latency = 6;
+}
+def : InstRW<[ZnWriteSHA1RNDS4r], (instregex "SHA1RNDS4rr")>;
+// x,m.
+def ZnWriteSHA1RNDS4Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+  let Latency = 13;
+}
+def : InstRW<[ZnWriteSHA1RNDS4Ld], (instregex "SHA1RNDS4rm")>;
+
+// SHA256RNDS2
+// x,x.
+def ZnWriteSHA256RNDS2r : SchedWriteRes<[ZnFPU1]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteSHA256RNDS2r], (instregex "SHA256RNDS2rr")>;
+// x,m.
+def ZnWriteSHA256RNDS2Ld : SchedWriteRes<[ZnAGU, ZnFPU1]> {
+  let Latency = 11;
+}
+def : InstRW<[ZnWriteSHA256RNDS2Ld], (instregex "SHA256RNDS2rm")>;
+
+//-- Arithmetic instructions --//
+
+// HADD, HSUB PS/PD
+def : InstRW<[WriteMicrocoded], (instregex "(V?)H(ADD|SUB)P(S|D)(Y?)r(r|m)")>;
+
+// MUL SS/SD PS/PD.
+// x,x / v,v,v.
+def ZnWriteMULr : SchedWriteRes<[ZnFPU01]> {
+  let Latency = 3;
+}
+// ymm.
+def ZnWriteMULYr : SchedWriteRes<[ZnFPU01]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteMULr], (instregex "(V?)MUL(P|S)(S|D)rr")>;
+def : InstRW<[ZnWriteMULYr], (instregex "(V?)MUL(P|S)(S|D)Yrr")>;
+
+// x,m / v,v,m.
+def ZnWriteMULLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 10;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMULLd], (instregex "(V?)MUL(P|S)(S|D)rm")>;
+
+// ymm
+def ZnWriteMULYLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteMULYLd], (instregex "(V?)MUL(P|S)(S|D)Yrm")>;
+
+// VDIVPS.
+// y,y,y.
+def ZnWriteVDIVPSYr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 12;
+  let ResourceCycles = [12];
+}
+def : InstRW<[ZnWriteVDIVPSYr], (instregex "VDIVPSYrr")>;
+
+// y,y,m256.
+def ZnWriteVDIVPSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 19;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1, 19];
+}
+def : InstRW<[ZnWriteVDIVPSYLd], (instregex "VDIVPSYrm")>;
+
+// VDIVPD.
+// y,y,y.
+def ZnWriteVDIVPDY : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 15;
+  let ResourceCycles = [15];
+}
+def : InstRW<[ZnWriteVDIVPDY], (instregex "VDIVPDYrr")>;
+
+// y,y,m256.
+def ZnWriteVDIVPDYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 22;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,22];
+}
+def : InstRW<[ZnWriteVDIVPDYLd], (instregex "VDIVPDYrm")>;
+
+// VRCPPS.
+// y,y.
+def ZnWriteVRCPPSr : SchedWriteRes<[ZnFPU01]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteVRCPPSr], (instregex "VRCPPSYr(_Int)?")>;
+
+// y,m256.
+def ZnWriteVRCPPSLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 12;
+  let NumMicroOps = 3;
+}
+def : InstRW<[ZnWriteVRCPPSLd], (instregex "VRCPPSYm(_Int)?")>;
+
+// ROUND SS/SD PS/PD.
+// v,v,i.
+def ZnWriteROUNDr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 4;
+}
+def : InstRW<[ZnWriteROUNDr], (instregex "(V?)ROUND(Y?)(S|P)(S|D)r(_Int)?")>;
+
+// VFMADD.
+// v,v,v.
+def ZnWriteFMADDr : SchedWriteRes<[ZnFPU03]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteFMADDr],
+    (instregex
+    "VF(N?)M(ADD|SUB|ADDSUB|SUBADD)P(S|D)(r213|r132|r231)r(Y)?",
+    "VF(N?)M(ADD|SUB)S(S|D)(r132|r231|r213)r",
+    "VF(N?)M(ADD|SUB)S(S|D)4rr(_REV|_Int)?",
+    "VF(N?)M(ADD|SUB)P(S|D)4rr(Y)?(_REV)?")>;
+
+// v,v,m.
+def ZnWriteFMADDm : SchedWriteRes<[ZnAGU, ZnFPU03]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteFMADDm],
+    (instregex
+    "VF(N?)M(ADD|SUB|ADDSUB|SUBADD)P(S|D)(r213|r132|r231)m(Y)?",
+    "VF(N?)M(ADD|SUB)S(S|D)(r132|r231|r213)m",
+    "VF(N?)M(ADD|SUB)S(S|D)4(rm|mr)(_Int)?",
+    "VF(N?)M(ADD|SUB)P(S|D)4(rm|mr)(Y)?")>;
+
+// v,m,i.
+def ZnWriteROUNDm : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 11;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteROUNDm], (instregex "(V?)ROUND(Y?)(S|P)(S|D)m(_Int)?")>;
+
+// DPPS.
+// x,x,i / v,v,v,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPS(Y?)rri")>;
+
+// x,m,i / v,v,m,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPS(Y?)rmi")>;
+
+// DPPD.
+// x,x,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPDrri")>;
+
+// x,m,i.
+def : InstRW<[WriteMicrocoded], (instregex "(V?)DPPDrmi")>;
+
+// VSQRTPS.
+// y,y.
+def ZnWriteVSQRTPSYr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 28;
+  let ResourceCycles = [28];
+}
+def : InstRW<[ZnWriteVSQRTPSYr], (instregex "VSQRTPSYr")>;
+
+// y,m256.
+def ZnWriteVSQRTPSYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 35;
+  let ResourceCycles = [1,35];
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteVSQRTPSYLd], (instregex "VSQRTPSYm")>;
+
+// VSQRTPD.
+// y,y.
+def ZnWriteVSQRTPDYr : SchedWriteRes<[ZnFPU3]> {
+  let Latency = 40;
+  let ResourceCycles = [40];
+}
+def : InstRW<[ZnWriteVSQRTPDYr], (instregex "VSQRTPDYr")>;
+
+// y,m256.
+def ZnWriteVSQRTPDYLd : SchedWriteRes<[ZnAGU, ZnFPU3]> {
+  let Latency = 47;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,47];
+}
+def : InstRW<[ZnWriteVSQRTPDYLd], (instregex "VSQRTPDYm")>;
+
+// RSQRTSS
+// x,x.
+def ZnWriteRSQRTSSr : SchedWriteRes<[ZnFPU02]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteRSQRTSSr], (instregex "(V?)RSQRTSS(Y?)r(_Int)?")>;
+
+// RSQRTPS
+// x,x.
+def ZnWriteRSQRTPSr : SchedWriteRes<[ZnFPU01]> {
+  let Latency = 5;
+}
+def : InstRW<[ZnWriteRSQRTPSr], (instregex "(V?)RSQRTPS(Y?)r(_Int)?")>;
+
+// RSQRTSSm
+// x,m32.
+def ZnWriteRSQRTSSLd: SchedWriteRes<[ZnAGU, ZnFPU02]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+  let ResourceCycles = [1,2];
+}
+def : InstRW<[ZnWriteRSQRTSSLd], (instregex "(V?)RSQRTSSm(_Int)?")>;
+
+// RSQRTPSm
+def ZnWriteRSQRTPSLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRSQRTPSLd], (instregex "(V?)RSQRTPSm(_Int)?")>;
+
+// RSQRTPS 256.
+// y,y.
+def ZnWriteRSQRTPSYr : SchedWriteRes<[ZnFPU01]> {
+  let Latency = 5;
+  let NumMicroOps = 2;
+  let ResourceCycles = [2];
+}
+def : InstRW<[ZnWriteRSQRTPSYr], (instregex "VRSQRTPSYr(_Int)?")>;
+
+// y,m256.
+def ZnWriteRSQRTPSYLd : SchedWriteRes<[ZnAGU, ZnFPU01]> {
+  let Latency = 12;
+  let NumMicroOps = 2;
+}
+def : InstRW<[ZnWriteRSQRTPSYLd], (instregex "VRSQRTPSYm(_Int)?")>;
+
+//-- Logic instructions --//
+
+// AND, ANDN, OR, XOR PS/PD.
+// x,x / v,v,v.
+def : InstRW<[WriteVecLogic], (instregex "(V?)(AND|ANDN|OR|XOR)P(S|D)(Y?)rr")>;
+// x,m / v,v,m.
+def : InstRW<[WriteVecLogicLd],
+                         (instregex "(V?)(AND|ANDN|OR|XOR)P(S|D)(Y?)rm")>;
+
+//-- Other instructions --//
+
+// VZEROUPPER.
+def : InstRW<[WriteMicrocoded], (instregex "VZEROUPPER")>;
+
+// VZEROALL.
+def : InstRW<[WriteMicrocoded], (instregex "VZEROALL")>;
+
+// LDMXCSR.
+def : InstRW<[WriteMicrocoded], (instregex "(V)?LDMXCSR")>;
+
+// STMXCSR.
+def : InstRW<[WriteMicrocoded], (instregex "(V)?STMXCSR")>;
+
+} // SchedModel

Modified: llvm/trunk/test/CodeGen/X86/aes-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/aes-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/aes-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/aes-schedule.ll Thu Aug 31 05:38:35 2017
@@ -50,7 +50,7 @@ define <2 x i64> @test_aesdec(<2 x i64>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaesdec %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesdec (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a2, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
   %3 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %2, <2 x i64> %1)
@@ -99,7 +99,7 @@ define <2 x i64> @test_aesdeclast(<2 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaesdeclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesdeclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a2, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
   %3 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %2, <2 x i64> %1)
@@ -148,7 +148,7 @@ define <2 x i64> @test_aesenc(<2 x i64>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaesenc %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesenc (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a2, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
   %3 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %2, <2 x i64> %1)
@@ -197,7 +197,7 @@ define <2 x i64> @test_aesenclast(<2 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaesenclast %xmm1, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vaesenclast (%rdi), %xmm0, %xmm0 # sched: [11:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a2, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
   %3 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %2, <2 x i64> %1)
@@ -253,7 +253,7 @@ define <2 x i64> @test_aesimc(<2 x i64>
 ; ZNVER1-NEXT:    vaesimc (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT:    vaesimc %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a1, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
   %3 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %1)
@@ -310,7 +310,7 @@ define <2 x i64> @test_aeskeygenassist(<
 ; ZNVER1-NEXT:    vaeskeygenassist $7, (%rdi), %xmm1 # sched: [11:0.50]
 ; ZNVER1-NEXT:    vaeskeygenassist $7, %xmm0, %xmm0 # sched: [4:0.50]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a1, align 16
   %2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
   %3 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %1, i8 7)

Modified: llvm/trunk/test/CodeGen/X86/avx-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-schedule.ll Thu Aug 31 05:38:35 2017
@@ -42,7 +42,7 @@ define <4 x double> @test_addpd(<4 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fadd <4 x double> %1, %2
@@ -84,7 +84,7 @@ define <8 x float> @test_addps(<8 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = fadd <8 x float> %1, %2
@@ -126,7 +126,7 @@ define <4 x double> @test_addsubpd(<4 x
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -169,7 +169,7 @@ define <8 x float> @test_addsubps(<8 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -218,7 +218,7 @@ define <4 x double> @test_andnotpd(<4 x
 ; ZNVER1-NEXT:    vandnpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
   %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -273,7 +273,7 @@ define <8 x float> @test_andnotps(<8 x f
 ; ZNVER1-NEXT:    vandnps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
   %3 = xor <4 x i64> %1, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -328,7 +328,7 @@ define <4 x double> @test_andpd(<4 x dou
 ; ZNVER1-NEXT:    vandpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
   %3 = and <4 x i64> %1, %2
@@ -381,7 +381,7 @@ define <8 x float> @test_andps(<8 x floa
 ; ZNVER1-NEXT:    vandps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
   %3 = and <4 x i64> %1, %2
@@ -434,7 +434,7 @@ define <4 x double> @test_blendpd(<4 x d
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],mem[1,2],ymm0[3] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fadd <4 x double> %a1, %1
@@ -477,7 +477,7 @@ define <8 x float> @test_blendps(<8 x fl
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3,4,5,6,7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3],mem[4,5,6],ymm0[7] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 14, i32 7>
@@ -519,7 +519,7 @@ define <4 x double> @test_blendvpd(<4 x
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvpd %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2)
   %2 = load <4 x double>, <4 x double> *%a3, align 32
   %3 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %1, <4 x double> %2, <4 x double> %a2)
@@ -562,7 +562,7 @@ define <8 x float> @test_blendvps(<8 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendvps %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvps %ymm2, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
   %2 = load <8 x float>, <8 x float> *%a3, align 32
   %3 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %1, <8 x float> %2, <8 x float> %a2)
@@ -599,7 +599,7 @@ define <8 x float> @test_broadcastf128(<
 ; ZNVER1-LABEL: test_broadcastf128:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x float>, <4 x float> *%a0, align 32
   %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   ret <8 x float> %2
@@ -634,7 +634,7 @@ define <4 x double> @test_broadcastsd_ym
 ; ZNVER1-LABEL: test_broadcastsd_ymm:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vbroadcastsd (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load double, double *%a0, align 8
   %2 = insertelement <4 x double> undef, double %1, i32 0
   %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> zeroinitializer
@@ -670,7 +670,7 @@ define <4 x float> @test_broadcastss(flo
 ; ZNVER1-LABEL: test_broadcastss:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vbroadcastss (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load float, float *%a0, align 4
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
@@ -706,7 +706,7 @@ define <8 x float> @test_broadcastss_ymm
 ; ZNVER1-LABEL: test_broadcastss_ymm:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vbroadcastss (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load float, float *%a0, align 4
   %2 = insertelement <8 x float> undef, float %1, i32 0
   %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> zeroinitializer
@@ -754,7 +754,7 @@ define <4 x double> @test_cmppd(<4 x dou
 ; ZNVER1-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorpd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fcmp oeq <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fcmp oeq <4 x double> %a0, %2
@@ -806,7 +806,7 @@ define <8 x float> @test_cmpps(<8 x floa
 ; ZNVER1-NEXT:    vcmpeqps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorps %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fcmp oeq <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = fcmp oeq <8 x float> %a0, %2
@@ -858,7 +858,7 @@ define <4 x double> @test_cvtdq2pd(<4 x
 ; ZNVER1-NEXT:    vcvtdq2pd (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2pd %xmm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp <4 x i32> %a0 to <4 x double>
   %2 = load <4 x i32>, <4 x i32> *%a1, align 16
   %3 = sitofp <4 x i32> %2 to <4 x double>
@@ -909,7 +909,7 @@ define <8 x float> @test_cvtdq2ps(<8 x i
 ; ZNVER1-NEXT:    vcvtdq2ps (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2ps %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp <8 x i32> %a0 to <8 x float>
   %2 = load <8 x i32>, <8 x i32> *%a1, align 16
   %3 = sitofp <8 x i32> %2 to <8 x float>
@@ -957,8 +957,8 @@ define <8 x i32> @test_cvtpd2dq(<4 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vcvttpd2dqy (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttpd2dq %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi <4 x double> %a0 to <4 x i32>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = fptosi <4 x double> %2 to <4 x i32>
@@ -1004,10 +1004,10 @@ define <8 x float> @test_cvtpd2ps(<4 x d
 ;
 ; ZNVER1-LABEL: test_cvtpd2ps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT:    vcvtpd2psy (%rdi), %xmm1 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vcvtpd2ps %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0 # sched: [2:0.67]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptrunc <4 x double> %a0 to <4 x float>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = fptrunc <4 x double> %2 to <4 x float>
@@ -1056,7 +1056,7 @@ define <8 x i32> @test_cvtps2dq(<8 x flo
 ; ZNVER1-NEXT:    vcvttps2dq (%rdi), %ymm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttps2dq %ymm0, %ymm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi <8 x float> %a0 to <8 x i32>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = fptosi <8 x float> %2 to <8 x i32>
@@ -1097,9 +1097,9 @@ define <4 x double> @test_divpd(<4 x dou
 ;
 ; ZNVER1-LABEL: test_divpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vdivpd %ymm1, %ymm0, %ymm0 # sched: [15:15.00]
+; ZNVER1-NEXT:    vdivpd (%rdi), %ymm0, %ymm0 # sched: [22:22.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fdiv <4 x double> %1, %2
@@ -1139,9 +1139,9 @@ define <8 x float> @test_divps(<8 x floa
 ;
 ; ZNVER1-LABEL: test_divps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [15:1.00]
-; ZNVER1-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vdivps %ymm1, %ymm0, %ymm0 # sched: [12:12.00]
+; ZNVER1-NEXT:    vdivps (%rdi), %ymm0, %ymm0 # sched: [19:19.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = fdiv <8 x float> %1, %2
@@ -1181,9 +1181,9 @@ define <8 x float> @test_dpps(<8 x float
 ;
 ; ZNVER1-LABEL: test_dpps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vdpps $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %1, <8 x float> %2, i8 7)
@@ -1228,10 +1228,10 @@ define <4 x float> @test_extractf128(<8
 ;
 ; ZNVER1-LABEL: test_extractf128:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.33]
+; ZNVER1-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [8:0.50]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   store <4 x float> %2, <4 x float> *%a2
@@ -1271,9 +1271,9 @@ define <4 x double> @test_haddpd(<4 x do
 ;
 ; ZNVER1-LABEL: test_haddpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhaddpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1314,9 +1314,9 @@ define <8 x float> @test_haddps(<8 x flo
 ;
 ; ZNVER1-LABEL: test_haddps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhaddps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhaddps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1357,9 +1357,9 @@ define <4 x double> @test_hsubpd(<4 x do
 ;
 ; ZNVER1-LABEL: test_hsubpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhsubpd (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1400,9 +1400,9 @@ define <8 x float> @test_hsubps(<8 x flo
 ;
 ; ZNVER1-LABEL: test_hsubps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhsubps %ymm1, %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhsubps (%rdi), %ymm0, %ymm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1448,10 +1448,10 @@ define <8 x float> @test_insertf128(<8 x
 ;
 ; ZNVER1-LABEL: test_insertf128:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:0.50]
-; ZNVER1-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.67]
+; ZNVER1-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.67]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %2 = shufflevector <8 x float> %a0, <8 x float> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
   %3 = load <4 x float>, <4 x float> *%a2, align 16
@@ -1490,7 +1490,7 @@ define <32 x i8> @test_lddqu(i8* %a0) {
 ; ZNVER1-LABEL: test_lddqu:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vlddqu (%rdi), %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0)
   ret <32 x i8> %1
 }
@@ -1534,10 +1534,10 @@ define <2 x double> @test_maskmovpd(i8*
 ;
 ; ZNVER1-LABEL: test_maskmovpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
   call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %a1, <2 x double> %a2)
   ret <2 x double> %1
@@ -1583,10 +1583,10 @@ define <4 x double> @test_maskmovpd_ymm(
 ;
 ; ZNVER1-LABEL: test_maskmovpd_ymm:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
   call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %a1, <4 x double> %a2)
   ret <4 x double> %1
@@ -1632,10 +1632,10 @@ define <4 x float> @test_maskmovps(i8* %
 ;
 ; ZNVER1-LABEL: test_maskmovps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
-; ZNVER1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
+; ZNVER1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [8:0.50]
+; ZNVER1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [4:0.50]
 ; ZNVER1-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
   call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %a1, <4 x float> %a2)
   ret <4 x float> %1
@@ -1681,10 +1681,10 @@ define <8 x float> @test_maskmovps_ymm(i
 ;
 ; ZNVER1-LABEL: test_maskmovps_ymm:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2
-; ZNVER1-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi)
+; ZNVER1-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [8:1.00]
+; ZNVER1-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [5:1.00]
 ; ZNVER1-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
   call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %a1, <8 x float> %a2)
   ret <8 x float> %1
@@ -1727,7 +1727,7 @@ define <4 x double> @test_maxpd(<4 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1770,7 +1770,7 @@ define <8 x float> @test_maxps(<8 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1813,7 +1813,7 @@ define <4 x double> @test_minpd(<4 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %1, <4 x double> %2)
@@ -1856,7 +1856,7 @@ define <8 x float> @test_minps(<8 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %1, <8 x float> %2)
@@ -1905,7 +1905,7 @@ define <4 x double> @test_movapd(<4 x do
 ; ZNVER1-NEXT:    vmovapd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovapd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x double>, <4 x double> *%a0, align 32
   %2 = fadd <4 x double> %1, %1
   store <4 x double> %2, <4 x double> *%a1, align 32
@@ -1953,7 +1953,7 @@ define <8 x float> @test_movaps(<8 x flo
 ; ZNVER1-NEXT:    vmovaps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovaps %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <8 x float>, <8 x float> *%a0, align 32
   %2 = fadd <8 x float> %1, %1
   store <8 x float> %2, <8 x float> *%a1, align 32
@@ -2001,7 +2001,7 @@ define <4 x double> @test_movddup(<4 x d
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} ymm1 = mem[0,0,2,2] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -2041,9 +2041,9 @@ define i32 @test_movmskpd(<4 x double> %
 ;
 ; ZNVER1-LABEL: test_movmskpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmovmskpd %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovmskpd %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
   ret i32 %1
 }
@@ -2081,9 +2081,9 @@ define i32 @test_movmskps(<8 x float> %a
 ;
 ; ZNVER1-LABEL: test_movmskps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmovmskps %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovmskps %ymm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
   ret i32 %1
 }
@@ -2124,7 +2124,7 @@ define <4 x double> @test_movntpd(<4 x d
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovntpd %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <4 x double> %a0, %a0
   store <4 x double> %1, <4 x double> *%a1, align 32, !nontemporal !0
   ret <4 x double> %1
@@ -2165,7 +2165,7 @@ define <8 x float> @test_movntps(<8 x fl
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovntps %ymm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <8 x float> %a0, %a0
   store <8 x float> %1, <8 x float> *%a1, align 32, !nontemporal !0
   ret <8 x float> %1
@@ -2212,7 +2212,7 @@ define <8 x float> @test_movshdup(<8 x f
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} ymm1 = mem[1,1,3,3,5,5,7,7] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -2261,7 +2261,7 @@ define <8 x float> @test_movsldup(<8 x f
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} ymm1 = mem[0,0,2,2,4,4,6,6] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
@@ -2312,7 +2312,7 @@ define <4 x double> @test_movupd(<4 x do
 ; ZNVER1-NEXT:    vmovupd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovupd %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x double>, <4 x double> *%a0, align 1
   %2 = fadd <4 x double> %1, %1
   store <4 x double> %2, <4 x double> *%a1, align 1
@@ -2362,7 +2362,7 @@ define <8 x float> @test_movups(<8 x flo
 ; ZNVER1-NEXT:    vmovups (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovups %ymm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <8 x float>, <8 x float> *%a0, align 1
   %2 = fadd <8 x float> %1, %1
   store <8 x float> %2, <8 x float> *%a1, align 1
@@ -2402,9 +2402,9 @@ define <4 x double> @test_mulpd(<4 x dou
 ;
 ; ZNVER1-LABEL: test_mulpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulpd %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT:    vmulpd (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fmul <4 x double> %1, %2
@@ -2444,9 +2444,9 @@ define <8 x float> @test_mulps(<8 x floa
 ;
 ; ZNVER1-LABEL: test_mulps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulps %ymm1, %ymm0, %ymm0 # sched: [4:0.50]
+; ZNVER1-NEXT:    vmulps (%rdi), %ymm0, %ymm0 # sched: [11:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = fmul <8 x float> %1, %2
@@ -2494,7 +2494,7 @@ define <4 x double> @orpd(<4 x double> %
 ; ZNVER1-NEXT:    vorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
   %3 = or <4 x i64> %1, %2
@@ -2547,7 +2547,7 @@ define <8 x float> @test_orps(<8 x float
 ; ZNVER1-NEXT:    vorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
   %3 = or <4 x i64> %1, %2
@@ -2600,7 +2600,7 @@ define <2 x double> @test_permilpd(<2 x
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} xmm1 = mem[1,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -2649,7 +2649,7 @@ define <4 x double> @test_permilpd_ymm(<
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} ymm1 = mem[1,0,2,3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
@@ -2698,7 +2698,7 @@ define <4 x float> @test_permilps(<4 x f
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -2747,7 +2747,7 @@ define <8 x float> @test_permilps_ymm(<8
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} ymm1 = mem[3,2,1,0,7,6,5,4] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -2790,7 +2790,7 @@ define <2 x double> @test_permilvarpd(<2
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpermilpd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %1, <2 x i64> %2)
@@ -2833,7 +2833,7 @@ define <4 x double> @test_permilvarpd_ym
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpermilpd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> %2)
@@ -2876,7 +2876,7 @@ define <4 x float> @test_permilvarps(<4
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpermilps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> %2)
@@ -2919,7 +2919,7 @@ define <8 x float> @test_permilvarps_ymm
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpermilps %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpermilps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
   %2 = load <8 x i32>, <8 x i32> *%a2, align 32
   %3 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> %2)
@@ -2968,7 +2968,7 @@ define <8 x float> @test_rcpps(<8 x floa
 ; ZNVER1-NEXT:    vrcpps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %2)
@@ -3015,10 +3015,10 @@ define <4 x double> @test_roundpd(<4 x d
 ;
 ; ZNVER1-LABEL: test_roundpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT:    vroundpd $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT:    vroundpd $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %2, i32 7)
@@ -3065,10 +3065,10 @@ define <8 x float> @test_roundps(<8 x fl
 ;
 ; ZNVER1-LABEL: test_roundps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [10:1.00]
-; ZNVER1-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [3:1.00]
+; ZNVER1-NEXT:    vroundps $7, (%rdi), %ymm1 # sched: [11:1.00]
+; ZNVER1-NEXT:    vroundps $7, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %2, i32 7)
@@ -3118,7 +3118,7 @@ define <8 x float> @test_rsqrtps(<8 x fl
 ; ZNVER1-NEXT:    vrsqrtps (%rdi), %ymm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrsqrtps %ymm0, %ymm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %2)
@@ -3168,7 +3168,7 @@ define <4 x double> @test_shufpd(<4 x do
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],mem[0],ymm1[2],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 4, i32 2, i32 7>
@@ -3211,7 +3211,7 @@ define <8 x float> @test_shufps(<8 x flo
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0],ymm1[0,0],ymm0[4,4],ymm1[4,4] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,3],mem[0,0],ymm0[4,7],mem[4,4] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 0, i32 8, i32 8, i32 4, i32 4, i32 12, i32 12>
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 3, i32 8, i32 8, i32 4, i32 7, i32 12, i32 12>
@@ -3256,10 +3256,10 @@ define <4 x double> @test_sqrtpd(<4 x do
 ;
 ; ZNVER1-LABEL: test_sqrtpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT:    vsqrtpd (%rdi), %ymm1 # sched: [47:47.00]
+; ZNVER1-NEXT:    vsqrtpd %ymm0, %ymm0 # sched: [40:40.00]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %a0)
   %2 = load <4 x double>, <4 x double> *%a1, align 32
   %3 = call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %2)
@@ -3306,10 +3306,10 @@ define <8 x float> @test_sqrtps(<8 x flo
 ;
 ; ZNVER1-LABEL: test_sqrtps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [27:1.00]
-; ZNVER1-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [20:1.00]
+; ZNVER1-NEXT:    vsqrtps (%rdi), %ymm1 # sched: [35:35.00]
+; ZNVER1-NEXT:    vsqrtps %ymm0, %ymm0 # sched: [28:28.00]
 ; ZNVER1-NEXT:    vaddps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %a0)
   %2 = load <8 x float>, <8 x float> *%a1, align 32
   %3 = call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %2)
@@ -3353,7 +3353,7 @@ define <4 x double> @test_subpd(<4 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubpd (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub <4 x double> %a0, %a1
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = fsub <4 x double> %1, %2
@@ -3395,7 +3395,7 @@ define <8 x float> @test_subps(<8 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubps %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubps (%rdi), %ymm0, %ymm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub <8 x float> %a0, %a1
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = fsub <8 x float> %1, %2
@@ -3455,7 +3455,7 @@ define i32 @test_testpd(<2 x double> %a0
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestpd (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %2)
@@ -3521,8 +3521,8 @@ define i32 @test_testpd_ymm(<4 x double>
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestpd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %2)
@@ -3584,7 +3584,7 @@ define i32 @test_testps(<4 x float> %a0,
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestps (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %2)
@@ -3650,8 +3650,8 @@ define i32 @test_testps_ymm(<8 x float>
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %2)
@@ -3701,7 +3701,7 @@ define <4 x double> @test_unpckhpd(<4 x
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
@@ -3744,7 +3744,7 @@ define <8 x float> @test_unpckhps(<8 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -3792,7 +3792,7 @@ define <4 x double> @test_unpcklpd(<4 x
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   %2 = load <4 x double>, <4 x double> *%a2, align 32
   %3 = shufflevector <4 x double> %a1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -3835,7 +3835,7 @@ define <8 x float> @test_unpcklps(<8 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
   %2 = load <8 x float>, <8 x float> *%a2, align 32
   %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
@@ -3883,7 +3883,7 @@ define <4 x double> @test_xorpd(<4 x dou
 ; ZNVER1-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorpd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x double> %a0 to <4 x i64>
   %2 = bitcast <4 x double> %a1 to <4 x i64>
   %3 = xor <4 x i64> %1, %2
@@ -3936,7 +3936,7 @@ define <8 x float> @test_xorps(<8 x floa
 ; ZNVER1-NEXT:    vxorps %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorps (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <8 x float> %a0 to <4 x i64>
   %2 = bitcast <8 x float> %a1 to <4 x i64>
   %3 = xor <4 x i64> %1, %2
@@ -3976,8 +3976,8 @@ define void @test_zeroall() {
 ;
 ; ZNVER1-LABEL: test_zeroall:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vzeroall
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vzeroall # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.avx.vzeroall()
   ret void
 }
@@ -4011,8 +4011,8 @@ define void @test_zeroupper() {
 ;
 ; ZNVER1-LABEL: test_zeroupper:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.avx.vzeroupper()
   ret void
 }
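
[A note on reading the annotations above: each "sched: [N:M]" comment pairs an
instruction's latency (N cycles) with its reciprocal throughput (M cycles per
instruction). A minimal TableGen sketch of where such a pair comes from, using
the vdivpd change above as the example — the resource name here is
illustrative and not necessarily the one this patch uses:

    // Latency sets the first number in "sched: [15:15.00]".
    // ResourceCycles sets how long the unit stays busy, which
    // determines the second (reciprocal throughput) number; an
    // unpipelined divider is busy for the full 15 cycles.
    def : WriteRes<WriteFDiv, [ZnFPU3]> {
      let Latency = 15;          // result ready after 15 cycles
      let ResourceCycles = [15]; // unit busy 15 cycles -> 15.00 rthroughput
    }

This is why both numbers move to 15 for the register form of vdivpd once the
divider is modeled as unpipelined, while cheap pipelined ops such as vxorps
keep fractional throughputs like 0.25.]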

Modified: llvm/trunk/test/CodeGen/X86/avx2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-schedule.ll Thu Aug 31 05:38:35 2017
@@ -31,7 +31,7 @@ define <32 x i8> @test_pabsb(<32 x i8> %
 ; ZNVER1-NEXT:    vpabsb (%rdi), %ymm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsb %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
   %2 = load <32 x i8>, <32 x i8> *%a1, align 32
   %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
@@ -67,7 +67,7 @@ define <8 x i32> @test_pabsd(<8 x i32> %
 ; ZNVER1-NEXT:    vpabsd (%rdi), %ymm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsd %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
   %2 = load <8 x i32>, <8 x i32> *%a1, align 32
   %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
@@ -103,7 +103,7 @@ define <16 x i16> @test_pabsw(<16 x i16>
 ; ZNVER1-NEXT:    vpabsw (%rdi), %ymm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsw %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
   %2 = load <16 x i16>, <16 x i16> *%a1, align 32
   %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
@@ -135,7 +135,7 @@ define <32 x i8> @test_paddb(<32 x i8> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <32 x i8> %a0, %a1
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
   %3 = add <32 x i8> %1, %2
@@ -165,7 +165,7 @@ define <8 x i32> @test_paddd(<8 x i32> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <8 x i32> %a0, %a1
   %2 = load <8 x i32>, <8 x i32> *%a2, align 32
   %3 = add <8 x i32> %1, %2
@@ -195,7 +195,7 @@ define <4 x i64> @test_paddq(<4 x i64> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <4 x i64> %a0, %a1
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = add <4 x i64> %1, %2
@@ -225,7 +225,7 @@ define <16 x i16> @test_paddw(<16 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <16 x i16> %a0, %a1
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
   %3 = add <16 x i16> %1, %2
@@ -259,7 +259,7 @@ define <4 x i64> @test_pand(<4 x i64> %a
 ; ZNVER1-NEXT:    vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = and <4 x i64> %a0, %a1
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = and <4 x i64> %1, %2
@@ -294,7 +294,7 @@ define <4 x i64> @test_pandn(<4 x i64> %
 ; ZNVER1-NEXT:    vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
   %2 = and <4 x i64> %a1, %1
   %3 = load <4 x i64>, <4 x i64> *%a2, align 32
@@ -325,9 +325,9 @@ define <8 x i32> @test_pmulld(<8 x i32>
 ;
 ; ZNVER1-LABEL: test_pmulld:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpmulld %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
-; ZNVER1-NEXT:    vpmulld (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
+; ZNVER1-NEXT:    vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = mul <8 x i32> %a0, %a1
   %2 = load <8 x i32>, <8 x i32> *%a2, align 32
   %3 = mul <8 x i32> %1, %2
@@ -357,7 +357,7 @@ define <16 x i16> @test_pmullw(<16 x i16
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = mul <16 x i16> %a0, %a1
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
   %3 = mul <16 x i16> %1, %2
@@ -391,7 +391,7 @@ define <4 x i64> @test_por(<4 x i64> %a0
 ; ZNVER1-NEXT:    vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = or <4 x i64> %a0, %a1
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = or <4 x i64> %1, %2
@@ -422,7 +422,7 @@ define <32 x i8> @test_psubb(<32 x i8> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <32 x i8> %a0, %a1
   %2 = load <32 x i8>, <32 x i8> *%a2, align 32
   %3 = sub <32 x i8> %1, %2
@@ -452,7 +452,7 @@ define <8 x i32> @test_psubd(<8 x i32> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <8 x i32> %a0, %a1
   %2 = load <8 x i32>, <8 x i32> *%a2, align 32
   %3 = sub <8 x i32> %1, %2
@@ -482,7 +482,7 @@ define <4 x i64> @test_psubq(<4 x i64> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <4 x i64> %a0, %a1
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = sub <4 x i64> %1, %2
@@ -512,7 +512,7 @@ define <16 x i16> @test_psubw(<16 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <16 x i16> %a0, %a1
   %2 = load <16 x i16>, <16 x i16> *%a2, align 32
   %3 = sub <16 x i16> %1, %2
@@ -546,7 +546,7 @@ define <4 x i64> @test_pxor(<4 x i64> %a
 ; ZNVER1-NEXT:    vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = xor <4 x i64> %a0, %a1
   %2 = load <4 x i64>, <4 x i64> *%a2, align 32
   %3 = xor <4 x i64> %1, %2
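
[For reference, the ZNVER1 check lines in these files are auto-generated
rather than hand-written; regenerating them looks roughly like the following
(the exact flag spelling is reconstructed from memory, so treat this as a
sketch):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver1 -print-schedule \
    ; RUN:   | FileCheck %s --check-prefix=ZNVER1

with utils/update_llc_test_checks.py used to rewrite the CHECK lines from that
output, which is why every retq and folded-load latency updates mechanically
across all of these tests.]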

Modified: llvm/trunk/test/CodeGen/X86/bmi-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi-schedule.ll Thu Aug 31 05:38:35 2017
@@ -41,7 +41,7 @@ define i16 @test_andn_i16(i16 zeroext %a
 ; ZNVER1-NEXT:    andw (%rdx), %di # sched: [5:0.50]
 ; ZNVER1-NEXT:    addl %edi, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i16, i16 *%a2
   %2 = xor i16 %a0, -1
   %3 = and i16 %2, %a1
@@ -77,7 +77,7 @@ define i32 @test_andn_i32(i32 %a0, i32 %
 ; ZNVER1-NEXT:    andnl (%rdx), %edi, %eax # sched: [5:0.50]
 ; ZNVER1-NEXT:    andnl %esi, %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = xor i32 %a0, -1
   %3 = and i32 %2, %a1
@@ -113,7 +113,7 @@ define i64 @test_andn_i64(i64 %a0, i64 %
 ; ZNVER1-NEXT:    andnq (%rdx), %rdi, %rax # sched: [5:0.50]
 ; ZNVER1-NEXT:    andnq %rsi, %rdi, %rcx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = xor i64 %a0, -1
   %3 = and i64 %2, %a1
@@ -146,10 +146,10 @@ define i32 @test_bextr_i32(i32 %a0, i32
 ;
 ; ZNVER1-LABEL: test_bextr_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bextrl %edi, (%rdx), %ecx
-; ZNVER1-NEXT:    bextrl %edi, %esi, %eax
+; ZNVER1-NEXT:    bextrl %edi, (%rdx), %ecx # sched: [5:0.50]
+; ZNVER1-NEXT:    bextrl %edi, %esi, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %1, i32 %a0)
   %3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a1, i32 %a0)
@@ -182,10 +182,10 @@ define i64 @test_bextr_i64(i64 %a0, i64
 ;
 ; ZNVER1-LABEL: test_bextr_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bextrq %rdi, (%rdx), %rcx
-; ZNVER1-NEXT:    bextrq %rdi, %rsi, %rax
+; ZNVER1-NEXT:    bextrq %rdi, (%rdx), %rcx # sched: [5:0.50]
+; ZNVER1-NEXT:    bextrq %rdi, %rsi, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %1, i64 %a0)
   %3 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a1, i64 %a0)
@@ -218,10 +218,10 @@ define i32 @test_blsi_i32(i32 %a0, i32 *
 ;
 ; ZNVER1-LABEL: test_blsi_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsil (%rsi), %ecx
-; ZNVER1-NEXT:    blsil %edi, %eax
+; ZNVER1-NEXT:    blsil (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsil %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = sub i32 0, %1
   %3 = sub i32 0, %a0
@@ -255,10 +255,10 @@ define i64 @test_blsi_i64(i64 %a0, i64 *
 ;
 ; ZNVER1-LABEL: test_blsi_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsiq (%rsi), %rcx
-; ZNVER1-NEXT:    blsiq %rdi, %rax
+; ZNVER1-NEXT:    blsiq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsiq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = sub i64 0, %1
   %3 = sub i64 0, %a0
@@ -292,10 +292,10 @@ define i32 @test_blsmsk_i32(i32 %a0, i32
 ;
 ; ZNVER1-LABEL: test_blsmsk_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsmskl (%rsi), %ecx
-; ZNVER1-NEXT:    blsmskl %edi, %eax
+; ZNVER1-NEXT:    blsmskl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsmskl %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = sub i32 %1, 1
   %3 = sub i32 %a0, 1
@@ -329,10 +329,10 @@ define i64 @test_blsmsk_i64(i64 %a0, i64
 ;
 ; ZNVER1-LABEL: test_blsmsk_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsmskq (%rsi), %rcx
-; ZNVER1-NEXT:    blsmskq %rdi, %rax
+; ZNVER1-NEXT:    blsmskq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsmskq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = sub i64 %1, 1
   %3 = sub i64 %a0, 1
@@ -366,10 +366,10 @@ define i32 @test_blsr_i32(i32 %a0, i32 *
 ;
 ; ZNVER1-LABEL: test_blsr_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsrl (%rsi), %ecx
-; ZNVER1-NEXT:    blsrl %edi, %eax
+; ZNVER1-NEXT:    blsrl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsrl %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = sub i32 %1, 1
   %3 = sub i32 %a0, 1
@@ -403,10 +403,10 @@ define i64 @test_blsr_i64(i64 %a0, i64 *
 ;
 ; ZNVER1-LABEL: test_blsr_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsrq (%rsi), %rcx
-; ZNVER1-NEXT:    blsrq %rdi, %rax
+; ZNVER1-NEXT:    blsrq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT:    blsrq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = sub i64 %1, 1
   %3 = sub i64 %a0, 1
@@ -443,11 +443,11 @@ define i16 @test_cttz_i16(i16 zeroext %a
 ;
 ; ZNVER1-LABEL: test_cttz_i16:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntw (%rsi), %cx
-; ZNVER1-NEXT:    tzcntw %di, %ax
+; ZNVER1-NEXT:    tzcntw (%rsi), %cx # sched: [6:0.50]
+; ZNVER1-NEXT:    tzcntw %di, %ax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i16, i16 *%a1
   %2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
   %3 = tail call i16 @llvm.cttz.i16( i16 %a0, i1 false )
@@ -480,10 +480,10 @@ define i32 @test_cttz_i32(i32 %a0, i32 *
 ;
 ; ZNVER1-LABEL: test_cttz_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntl (%rsi), %ecx
-; ZNVER1-NEXT:    tzcntl %edi, %eax
+; ZNVER1-NEXT:    tzcntl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT:    tzcntl %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = tail call i32 @llvm.cttz.i32( i32 %1, i1 false )
   %3 = tail call i32 @llvm.cttz.i32( i32 %a0, i1 false )
@@ -516,10 +516,10 @@ define i64 @test_cttz_i64(i64 %a0, i64 *
 ;
 ; ZNVER1-LABEL: test_cttz_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntq (%rsi), %rcx
-; ZNVER1-NEXT:    tzcntq %rdi, %rax
+; ZNVER1-NEXT:    tzcntq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT:    tzcntq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = tail call i64 @llvm.cttz.i64( i64 %1, i1 false )
   %3 = tail call i64 @llvm.cttz.i64( i64 %a0, i1 false )
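
[The [100:?] entries seen earlier (vzeroupper, the horizontal add/sub family)
and in the bmi2 diffs below (pdep/pext) fall out of a conservative catch-all:
a scheduling class with a large fixed latency and an empty resource list, used
for instructions the model does not yet describe individually. In generic
TableGen terms — the write-class name here is a placeholder, not a name from
this patch:

    // With an empty resource list no reciprocal throughput can be
    // computed, so the printer emits "?" -> sched: [100:?].
    def SomeUnmodeledOp : SchedWrite;
    let Latency = 100 in
    def : WriteRes<SomeUnmodeledOp, []>;
]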

Modified: llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll Thu Aug 31 05:38:35 2017
@@ -22,10 +22,10 @@ define i32 @test_bzhi_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_bzhi_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bzhil %edi, (%rdx), %ecx
-; ZNVER1-NEXT:    bzhil %edi, %esi, %eax
+; ZNVER1-NEXT:    bzhil %edi, (%rdx), %ecx # sched: [5:0.50]
+; ZNVER1-NEXT:    bzhil %edi, %esi, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %1, i32 %a0)
   %3 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a1, i32 %a0)
@@ -51,10 +51,10 @@ define i64 @test_bzhi_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_bzhi_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bzhiq %rdi, (%rdx), %rcx
-; ZNVER1-NEXT:    bzhiq %rdi, %rsi, %rax
+; ZNVER1-NEXT:    bzhiq %rdi, (%rdx), %rcx # sched: [5:0.50]
+; ZNVER1-NEXT:    bzhiq %rdi, %rsi, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %1, i64 %a0)
   %3 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a1, i64 %a0)
@@ -88,10 +88,10 @@ define i64 @test_mulx_i64(i64 %a0, i64 %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movq %rdx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movq %rdi, %rdx # sched: [1:0.25]
-; ZNVER1-NEXT:    mulxq %rsi, %rsi, %rcx # sched: [4:2.00]
-; ZNVER1-NEXT:    mulxq (%rax), %rdx, %rax # sched: [8:2.00]
+; ZNVER1-NEXT:    mulxq %rsi, %rsi, %rcx # sched: [3:1.00]
+; ZNVER1-NEXT:    mulxq (%rax), %rdx, %rax # sched: [8:1.00]
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1  = load i64, i64 *%a2
   %2  = zext i64 %a0 to i128
   %3  = zext i64 %a1 to i128
@@ -123,10 +123,10 @@ define i32 @test_pdep_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_pdep_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pdepl (%rdx), %edi, %ecx
-; ZNVER1-NEXT:    pdepl %esi, %edi, %eax
+; ZNVER1-NEXT:    pdepl (%rdx), %edi, %ecx # sched: [100:?]
+; ZNVER1-NEXT:    pdepl %esi, %edi, %eax # sched: [100:?]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %1)
   %3 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
@@ -152,10 +152,10 @@ define i64 @test_pdep_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_pdep_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pdepq (%rdx), %rdi, %rcx
-; ZNVER1-NEXT:    pdepq %rsi, %rdi, %rax
+; ZNVER1-NEXT:    pdepq (%rdx), %rdi, %rcx # sched: [100:?]
+; ZNVER1-NEXT:    pdepq %rsi, %rdi, %rax # sched: [100:?]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %1)
   %3 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
@@ -181,10 +181,10 @@ define i32 @test_pext_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_pext_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pextl (%rdx), %edi, %ecx
-; ZNVER1-NEXT:    pextl %esi, %edi, %eax
+; ZNVER1-NEXT:    pextl (%rdx), %edi, %ecx # sched: [100:?]
+; ZNVER1-NEXT:    pextl %esi, %edi, %eax # sched: [100:?]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %1)
   %3 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
@@ -210,10 +210,10 @@ define i64 @test_pext_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_pext_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pextq (%rdx), %rdi, %rcx
-; ZNVER1-NEXT:    pextq %rsi, %rdi, %rax
+; ZNVER1-NEXT:    pextq (%rdx), %rdi, %rcx # sched: [100:?]
+; ZNVER1-NEXT:    pextq %rsi, %rdi, %rax # sched: [100:?]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %1)
   %3 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
@@ -242,7 +242,7 @@ define i32 @test_rorx_i32(i32 %a0, i32 %
 ; ZNVER1-NEXT:    rorxl $5, (%rdx), %eax # sched: [5:0.50]
 ; ZNVER1-NEXT:    rorxl $5, %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = lshr i32 %a0, 5
   %3 = shl i32 %a0, 27
@@ -274,7 +274,7 @@ define i64 @test_rorx_i64(i64 %a0, i64 %
 ; ZNVER1-NEXT:    rorxq $5, (%rdx), %rax # sched: [5:0.50]
 ; ZNVER1-NEXT:    rorxq $5, %rdi, %rcx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = lshr i64 %a0, 5
   %3 = shl i64 %a0, 59
@@ -306,7 +306,7 @@ define i32 @test_sarx_i32(i32 %a0, i32 %
 ; ZNVER1-NEXT:    sarxl %esi, (%rdx), %eax # sched: [5:0.50]
 ; ZNVER1-NEXT:    sarxl %esi, %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = ashr i32 %a0, %a1
   %3 = ashr i32 %1, %a1
@@ -334,7 +334,7 @@ define i64 @test_sarx_i64(i64 %a0, i64 %
 ; ZNVER1-NEXT:    sarxq %rsi, (%rdx), %rax # sched: [5:0.50]
 ; ZNVER1-NEXT:    sarxq %rsi, %rdi, %rcx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = ashr i64 %a0, %a1
   %3 = ashr i64 %1, %a1
@@ -362,7 +362,7 @@ define i32 @test_shlx_i32(i32 %a0, i32 %
 ; ZNVER1-NEXT:    shlxl %esi, (%rdx), %eax # sched: [5:0.50]
 ; ZNVER1-NEXT:    shlxl %esi, %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = shl i32 %a0, %a1
   %3 = shl i32 %1, %a1
@@ -390,7 +390,7 @@ define i64 @test_shlx_i64(i64 %a0, i64 %
 ; ZNVER1-NEXT:    shlxq %rsi, (%rdx), %rax # sched: [5:0.50]
 ; ZNVER1-NEXT:    shlxq %rsi, %rdi, %rcx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = shl i64 %a0, %a1
   %3 = shl i64 %1, %a1
@@ -418,7 +418,7 @@ define i32 @test_shrx_i32(i32 %a0, i32 %
 ; ZNVER1-NEXT:    shrxl %esi, (%rdx), %eax # sched: [5:0.50]
 ; ZNVER1-NEXT:    shrxl %esi, %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a2
   %2 = lshr i32 %a0, %a1
   %3 = lshr i32 %1, %a1
@@ -446,7 +446,7 @@ define i64 @test_shrx_i64(i64 %a0, i64 %
 ; ZNVER1-NEXT:    shrxq %rsi, (%rdx), %rax # sched: [5:0.50]
 ; ZNVER1-NEXT:    shrxq %rsi, %rdi, %rcx # sched: [1:0.25]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a2
   %2 = lshr i64 %a0, %a1
   %3 = lshr i64 %1, %a1

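The pdep/pext lines above print [100:?], and the same placeholder shows up below for the F16C conversions and sha256msg2. A latency of 100 with an empty resource list is the conventional "not yet modeled" entry: with no processor resources, a reciprocal throughput cannot be computed, so the scheduling comment prints "?". A hedged sketch of such a placeholder (the def name is an assumption):

  // Sketch only: the empty resource list is what makes the printed
  // throughput appear as "?" in the checks above.
  def ZnWritePDEPPEXT : SchedWriteRes<[]> {
    let Latency = 100;
  }
  def : InstRW<[ZnWritePDEPPEXT],
        (instregex "PDEP(32|64)r(r|m)", "PEXT(32|64)r(r|m)")>;
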
Modified: llvm/trunk/test/CodeGen/X86/f16c-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-schedule.ll Thu Aug 31 05:38:35 2017
@@ -44,10 +44,10 @@ define <4 x float> @test_vcvtph2ps_128(<
 ;
 ; ZNVER1-LABEL: test_vcvtph2ps_128:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtph2ps (%rdi), %xmm1 # sched: [100:?]
+; ZNVER1-NEXT:    vcvtph2ps %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <8 x i16>, <8 x i16> *%a1
   %2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %1)
   %3 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
@@ -94,10 +94,10 @@ define <8 x float> @test_vcvtph2ps_256(<
 ;
 ; ZNVER1-LABEL: test_vcvtph2ps_256:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [12:1.00]
-; ZNVER1-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtph2ps (%rdi), %ymm1 # sched: [100:?]
+; ZNVER1-NEXT:    vcvtph2ps %xmm0, %ymm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <8 x i16>, <8 x i16> *%a1
   %2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %1)
   %3 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
@@ -139,9 +139,9 @@ define <8 x i16> @test_vcvtps2ph_128(<4
 ;
 ; ZNVER1-LABEL: test_vcvtps2ph_128:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vcvtps2ph $0, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vcvtps2ph $0, %xmm1, (%rdi) # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0)
   %2 = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a1, i32 0)
   %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -187,10 +187,10 @@ define <8 x i16> @test_vcvtps2ph_256(<8
 ;
 ; ZNVER1-LABEL: test_vcvtps2ph_256:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT:    vzeroupper
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [100:?]
+; ZNVER1-NEXT:    vzeroupper # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
   %2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)
   store <8 x i16> %2, <8 x i16> *%a2

Modified: llvm/trunk/test/CodeGen/X86/lea32-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea32-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea32-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea32-schedule.ll Thu Aug 31 05:38:35 2017
@@ -57,7 +57,7 @@ define i32 @test_lea_offset(i32) {
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal -24(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i32 %0, -24
   ret i32 %2
 }
@@ -109,7 +109,7 @@ define i32 @test_lea_offset_big(i32) {
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal 1024(%rdi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i32 %0, 1024
   ret i32 %2
 }
@@ -169,7 +169,7 @@ define i32 @test_lea_add(i32, i32) {
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add nsw i32 %1, %0
   ret i32 %3
 }
@@ -231,7 +231,7 @@ define i32 @test_lea_add_offset(i32, i32
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i32 %0, 16
   %4 = add i32 %3, %1
   ret i32 %4
@@ -297,7 +297,7 @@ define i32 @test_lea_add_offset_big(i32,
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i32 %0, -4096
   %4 = add i32 %3, %1
   ret i32 %4
@@ -350,7 +350,7 @@ define i32 @test_lea_mul(i32) {
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i32 %0, 3
   ret i32 %2
 }
@@ -405,7 +405,7 @@ define i32 @test_lea_mul_offset(i32) {
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i32 %0, 3
   %3 = add nsw i32 %2, -32
   ret i32 %3
@@ -464,7 +464,7 @@ define i32 @test_lea_mul_offset_big(i32)
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i32 %0, 9
   %3 = add nsw i32 %2, 10000
   ret i32 %3
@@ -524,7 +524,7 @@ define i32 @test_lea_add_scale(i32, i32)
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i32 %1, 1
   %4 = add nsw i32 %3, %0
   ret i32 %4
@@ -587,7 +587,7 @@ define i32 @test_lea_add_scale_offset(i3
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i32 %1, 2
   %4 = add i32 %0, 96
   %5 = add i32 %4, %3
@@ -654,7 +654,7 @@ define i32 @test_lea_add_scale_offset_bi
 ; ZNVER1-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; ZNVER1-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; ZNVER1-NEXT:    leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i32 %1, 3
   %4 = add i32 %0, -1200
   %5 = add i32 %4, %3

Modified: llvm/trunk/test/CodeGen/X86/lea64-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea64-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea64-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lea64-schedule.ll Thu Aug 31 05:38:35 2017
@@ -50,7 +50,7 @@ define i64 @test_lea_offset(i64) {
 ; ZNVER1-LABEL: test_lea_offset:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq -24(%rdi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i64 %0, -24
   ret i64 %2
 }
@@ -95,7 +95,7 @@ define i64 @test_lea_offset_big(i64) {
 ; ZNVER1-LABEL: test_lea_offset_big:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq 1024(%rdi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = add nsw i64 %0, 1024
   ret i64 %2
 }
@@ -141,7 +141,7 @@ define i64 @test_lea_add(i64, i64) {
 ; ZNVER1-LABEL: test_lea_add:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add nsw i64 %1, %0
   ret i64 %3
 }
@@ -189,7 +189,7 @@ define i64 @test_lea_add_offset(i64, i64
 ; ZNVER1-LABEL: test_lea_add_offset:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq 16(%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i64 %0, 16
   %4 = add i64 %3, %1
   ret i64 %4
@@ -241,7 +241,7 @@ define i64 @test_lea_add_offset_big(i64,
 ; ZNVER1-LABEL: test_lea_add_offset_big:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq -4096(%rdi,%rsi), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = add i64 %0, -4096
   %4 = add i64 %3, %1
   ret i64 %4
@@ -287,7 +287,7 @@ define i64 @test_lea_mul(i64) {
 ; ZNVER1-LABEL: test_lea_mul:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rdi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 3
   ret i64 %2
 }
@@ -335,7 +335,7 @@ define i64 @test_lea_mul_offset(i64) {
 ; ZNVER1-LABEL: test_lea_mul_offset:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq -32(%rdi,%rdi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 3
   %3 = add nsw i64 %2, -32
   ret i64 %3
@@ -387,7 +387,7 @@ define i64 @test_lea_mul_offset_big(i64)
 ; ZNVER1-LABEL: test_lea_mul_offset_big:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq 10000(%rdi,%rdi,8), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %2 = mul nsw i64 %0, 9
   %3 = add nsw i64 %2, 10000
   ret i64 %3
@@ -433,7 +433,7 @@ define i64 @test_lea_add_scale(i64, i64)
 ; ZNVER1-LABEL: test_lea_add_scale:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq (%rdi,%rsi,2), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 1
   %4 = add nsw i64 %3, %0
   ret i64 %4
@@ -482,7 +482,7 @@ define i64 @test_lea_add_scale_offset(i6
 ; ZNVER1-LABEL: test_lea_add_scale_offset:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq 96(%rdi,%rsi,4), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 2
   %4 = add i64 %0, 96
   %5 = add i64 %4, %3
@@ -535,7 +535,7 @@ define i64 @test_lea_add_scale_offset_bi
 ; ZNVER1-LABEL: test_lea_add_scale_offset_big:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    leaq -1200(%rdi,%rsi,8), %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %3 = shl i64 %1, 3
   %4 = add i64 %0, -1200
   %5 = add i64 %4, %3

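Both LEA files above show every addressing form -- plain offset, base plus index, scaled index, large displacement -- at a uniform [1:0.25]: single-cycle, issuable on any of four integer ALUs. A sketch of that mapping, with the individual unit names assumed rather than taken from the patch:

  // Sketch only: four single-cycle units yield the 0.25 reciprocal
  // throughput seen in the LEA checks above.
  def ZnALUPipes : ProcResGroup<[ZnALU0, ZnALU1, ZnALU2, ZnALU3]>;
  def ZnWriteLEA : SchedWriteRes<[ZnALUPipes]> {
    let Latency = 1;
  }
  def : InstRW<[ZnWriteLEA], (instregex "LEA(16|32|64)(_32)?r")>;
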
Modified: llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll Thu Aug 31 05:38:35 2017
@@ -33,11 +33,11 @@ define i16 @test_ctlz_i16(i16 zeroext %a
 ;
 ; ZNVER1-LABEL: test_ctlz_i16:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    lzcntw (%rsi), %cx
-; ZNVER1-NEXT:    lzcntw %di, %ax
+; ZNVER1-NEXT:    lzcntw (%rsi), %cx # sched: [6:0.50]
+; ZNVER1-NEXT:    lzcntw %di, %ax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i16, i16 *%a1
   %2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
   %3 = tail call i16 @llvm.ctlz.i16( i16 %a0, i1 false )
@@ -70,10 +70,10 @@ define i32 @test_ctlz_i32(i32 %a0, i32 *
 ;
 ; ZNVER1-LABEL: test_ctlz_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    lzcntl (%rsi), %ecx
-; ZNVER1-NEXT:    lzcntl %edi, %eax
+; ZNVER1-NEXT:    lzcntl (%rsi), %ecx # sched: [6:0.50]
+; ZNVER1-NEXT:    lzcntl %edi, %eax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = tail call i32 @llvm.ctlz.i32( i32 %1, i1 false )
   %3 = tail call i32 @llvm.ctlz.i32( i32 %a0, i1 false )
@@ -106,10 +106,10 @@ define i64 @test_ctlz_i64(i64 %a0, i64 *
 ;
 ; ZNVER1-LABEL: test_ctlz_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    lzcntq (%rsi), %rcx
-; ZNVER1-NEXT:    lzcntq %rdi, %rax
+; ZNVER1-NEXT:    lzcntq (%rsi), %rcx # sched: [6:0.50]
+; ZNVER1-NEXT:    lzcntq %rdi, %rax # sched: [2:0.25]
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = tail call i64 @llvm.ctlz.i64( i64 %1, i1 false )
   %3 = tail call i64 @llvm.ctlz.i64( i64 %a0, i1 false )

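One change threads through every ZNVER1 test in this patch: the ret line moves from [5:0.50] to [1:0.50], so a near return is now modeled as a single-cycle operation rather than a five-cycle one. A one-line regex binding of the kind that could produce this, with the write and resource names assumed:

  // Sketch only: near-return forms mapped to a 1-cycle write.
  def ZnWriteRet : SchedWriteRes<[ZnALU]> {
    let Latency = 1;
  }
  def : InstRW<[ZnWriteRet], (instregex "RET(L|Q|W)")>;
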
Modified: llvm/trunk/test/CodeGen/X86/movbe-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movbe-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movbe-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/movbe-schedule.ll Thu Aug 31 05:38:35 2017
@@ -46,8 +46,8 @@ define i16 @test_ctlz_i16(i16 *%a0, i16
 ; ZNVER1-LABEL: test_ctlz_i16:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movbew (%rdi), %ax # sched: [5:0.50]
-; ZNVER1-NEXT:    movbew %si, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    movbew %si, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i16, i16 *%a0
   %2 = tail call i16 @llvm.bswap.i16( i16 %1 )
   %3 = tail call i16 @llvm.bswap.i16( i16 %a1 )
@@ -94,8 +94,8 @@ define i32 @test_ctlz_i32(i32 *%a0, i32
 ; ZNVER1-LABEL: test_ctlz_i32:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movbel (%rdi), %eax # sched: [5:0.50]
-; ZNVER1-NEXT:    movbel %esi, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    movbel %esi, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a0
   %2 = tail call i32 @llvm.bswap.i32( i32 %1 )
   %3 = tail call i32 @llvm.bswap.i32( i32 %a1 )
@@ -142,8 +142,8 @@ define i64 @test_ctlz_i64(i64 *%a0, i64
 ; ZNVER1-LABEL: test_ctlz_i64:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movbeq (%rdi), %rax # sched: [5:0.50]
-; ZNVER1-NEXT:    movbeq %rsi, (%rdx) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    movbeq %rsi, (%rdx) # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a0
   %2 = tail call i64 @llvm.bswap.i64( i64 %1 )
   %3 = tail call i64 @llvm.bswap.i64( i64 %a1 )

Modified: llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll Thu Aug 31 05:38:35 2017
@@ -57,7 +57,7 @@ define i16 @test_ctpop_i16(i16 zeroext %
 ; ZNVER1-NEXT:    popcntw %di, %ax # sched: [3:1.00]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i16, i16 *%a1
   %2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
   %3 = tail call i16 @llvm.ctpop.i16( i16 %a0 )
@@ -107,7 +107,7 @@ define i32 @test_ctpop_i32(i32 %a0, i32
 ; ZNVER1-NEXT:    popcntl (%rsi), %ecx # sched: [10:1.00]
 ; ZNVER1-NEXT:    popcntl %edi, %eax # sched: [3:1.00]
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i32, i32 *%a1
   %2 = tail call i32 @llvm.ctpop.i32( i32 %1 )
   %3 = tail call i32 @llvm.ctpop.i32( i32 %a0 )
@@ -157,7 +157,7 @@ define i64 @test_ctpop_i64(i64 %a0, i64
 ; ZNVER1-NEXT:    popcntq (%rsi), %rcx # sched: [10:1.00]
 ; ZNVER1-NEXT:    popcntq %rdi, %rax # sched: [3:1.00]
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64 *%a1
   %2 = tail call i64 @llvm.ctpop.i64( i64 %1 )
   %3 = tail call i64 @llvm.ctpop.i64( i64 %a0 )

Modified: llvm/trunk/test/CodeGen/X86/sha-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sha-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sha-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sha-schedule.ll Thu Aug 31 05:38:35 2017
@@ -29,9 +29,9 @@ define <4 x i32> @test_sha1msg1(<4 x i32
 ;
 ; ZNVER1-LABEL: test_sha1msg1:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha1msg1 %xmm1, %xmm0
-; ZNVER1-NEXT:    sha1msg1 (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha1msg1 %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT:    sha1msg1 (%rdi), %xmm0 # sched: [9:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a0, <4 x i32> %a1)
   %3 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %2, <4 x i32> %1)
@@ -60,9 +60,9 @@ define <4 x i32> @test_sha1msg2(<4 x i32
 ;
 ; ZNVER1-LABEL: test_sha1msg2:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha1msg2 %xmm1, %xmm0
-; ZNVER1-NEXT:    sha1msg2 (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha1msg2 %xmm1, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT:    sha1msg2 (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a0, <4 x i32> %a1)
   %3 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %2, <4 x i32> %1)
@@ -91,9 +91,9 @@ define <4 x i32> @test_sha1nexte(<4 x i3
 ;
 ; ZNVER1-LABEL: test_sha1nexte:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha1nexte %xmm1, %xmm0
-; ZNVER1-NEXT:    sha1nexte (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha1nexte %xmm1, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    sha1nexte (%rdi), %xmm0 # sched: [8:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a0, <4 x i32> %a1)
   %3 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %2, <4 x i32> %1)
@@ -122,9 +122,9 @@ define <4 x i32> @test_sha1rnds4(<4 x i3
 ;
 ; ZNVER1-LABEL: test_sha1rnds4:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha1rnds4 $3, %xmm1, %xmm0
-; ZNVER1-NEXT:    sha1rnds4 $3, (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha1rnds4 $3, %xmm1, %xmm0 # sched: [6:1.00]
+; ZNVER1-NEXT:    sha1rnds4 $3, (%rdi), %xmm0 # sched: [13:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a0, <4 x i32> %a1, i8 3)
   %3 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %2, <4 x i32> %1, i8 3)
@@ -157,9 +157,9 @@ define <4 x i32> @test_sha256msg1(<4 x i
 ;
 ; ZNVER1-LABEL: test_sha256msg1:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha256msg1 %xmm1, %xmm0
-; ZNVER1-NEXT:    sha256msg1 (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha256msg1 %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT:    sha256msg1 (%rdi), %xmm0 # sched: [9:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a0, <4 x i32> %a1)
   %3 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %2, <4 x i32> %1)
@@ -188,9 +188,9 @@ define <4 x i32> @test_sha256msg2(<4 x i
 ;
 ; ZNVER1-LABEL: test_sha256msg2:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    sha256msg2 %xmm1, %xmm0
-; ZNVER1-NEXT:    sha256msg2 (%rdi), %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    sha256msg2 %xmm1, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    sha256msg2 (%rdi), %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a2
   %2 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a0, <4 x i32> %a1)
   %3 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %2, <4 x i32> %1)
@@ -230,10 +230,10 @@ define <4 x i32> @test_sha256rnds2(<4 x
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovaps %xmm0, %xmm3 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3
-; ZNVER1-NEXT:    sha256rnds2 %xmm0, (%rdi), %xmm3
+; ZNVER1-NEXT:    sha256rnds2 %xmm0, %xmm1, %xmm3 # sched: [4:1.00]
+; ZNVER1-NEXT:    sha256rnds2 %xmm0, (%rdi), %xmm3 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vmovaps %xmm3, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x i32>, <4 x i32>* %a3
   %2 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
   %3 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %2, <4 x i32> %1, <4 x i32> %a2)

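In the SHA hunks above, most instructions pick up concrete numbers (e.g. sha1rnds4 at [6:1.00] for the register form and [13:1.00] with a folded load, a uniform seven-cycle load cost on the FP side), while sha256msg2 stays on the [100:?] placeholder. A sketch of the register-form mapping, with an assumed FP-pipe name:

  // Sketch only: 6-cycle SHA round instruction on an assumed FP pipe.
  def ZnWriteSHA1RNDS4 : SchedWriteRes<[ZnFPU]> {
    let Latency = 6;
  }
  def : InstRW<[ZnWriteSHA1RNDS4], (instregex "SHA1RNDS4rri")>;
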
Modified: llvm/trunk/test/CodeGen/X86/sse-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-schedule.ll Thu Aug 31 05:38:35 2017
@@ -56,7 +56,7 @@ define <4 x float> @test_addps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <4 x float> %a0, %a1
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = fadd <4 x float> %1, %2
@@ -110,7 +110,7 @@ define float @test_addss(float %a0, floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd float %a0, %a1
   %2 = load float, float *%a2, align 4
   %3 = fadd float %1, %2
@@ -168,7 +168,7 @@ define <4 x float> @test_andps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x float> %a0 to <4 x i32>
   %2 = bitcast <4 x float> %a1 to <4 x i32>
   %3 = and <4 x i32> %1, %2
@@ -230,7 +230,7 @@ define <4 x float> @test_andnotps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x float> %a0 to <4 x i32>
   %2 = bitcast <4 x float> %a1 to <4 x i32>
   %3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -298,7 +298,7 @@ define <4 x float> @test_cmpps(<4 x floa
 ; ZNVER1-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fcmp oeq <4 x float> %a0, %a1
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = fcmp oeq <4 x float> %a0, %2
@@ -355,7 +355,7 @@ define float @test_cmpss(float %a0, floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x float> undef, float %a0, i32 0
   %2 = insertelement <4 x float> undef, float %a1, i32 0
   %3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0)
@@ -478,7 +478,7 @@ define i32 @test_comiss(<4 x float> %a0,
 ; ZNVER1-NEXT:    andb %al, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    orb %cl, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 4
   %3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -542,7 +542,7 @@ define float @test_cvtsi2ss(i32 %a0, i32
 ; ZNVER1-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp i32 %a0 to float
   %2 = load i32, i32 *%a1, align 4
   %3 = sitofp i32 %2 to float
@@ -605,7 +605,7 @@ define float @test_cvtsi2ssq(i64 %a0, i6
 ; ZNVER1-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp i64 %a0 to float
   %2 = load i64, i64 *%a1, align 8
   %3 = sitofp i64 %2 to float
@@ -668,7 +668,7 @@ define i32 @test_cvtss2si(float %a0, flo
 ; ZNVER1-NEXT:    vcvtss2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtss2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x float> undef, float %a0, i32 0
   %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1)
   %3 = load float, float *%a1, align 4
@@ -734,7 +734,7 @@ define i64 @test_cvtss2siq(float %a0, fl
 ; ZNVER1-NEXT:    vcvtss2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtss2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x float> undef, float %a0, i32 0
   %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1)
   %3 = load float, float *%a1, align 4
@@ -800,7 +800,7 @@ define i32 @test_cvttss2si(float %a0, fl
 ; ZNVER1-NEXT:    vcvttss2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttss2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi float %a0 to i32
   %2 = load float, float *%a1, align 4
   %3 = fptosi float %2 to i32
@@ -863,7 +863,7 @@ define i64 @test_cvttss2siq(float %a0, f
 ; ZNVER1-NEXT:    vcvttss2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttss2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi float %a0 to i64
   %2 = load float, float *%a1, align 4
   %3 = fptosi float %2 to i64
@@ -918,7 +918,7 @@ define <4 x float> @test_divps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv <4 x float> %a0, %a1
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = fdiv <4 x float> %1, %2
@@ -972,7 +972,7 @@ define float @test_divss(float %a0, floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv float %a0, %a1
   %2 = load float, float *%a2, align 4
   %3 = fdiv float %1, %2
@@ -1025,8 +1025,8 @@ define void @test_ldmxcsr(i32 %a0) {
 ; ZNVER1-LABEL: test_ldmxcsr:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
-; ZNVER1-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vldmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = alloca i32, align 4
   %2 = bitcast i32* %1 to i8*
   store i32 %a0, i32* %1
@@ -1082,7 +1082,7 @@ define <4 x float> @test_maxps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2)
@@ -1137,7 +1137,7 @@ define <4 x float> @test_maxss(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2)
@@ -1192,7 +1192,7 @@ define <4 x float> @test_minps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2)
@@ -1247,7 +1247,7 @@ define <4 x float> @test_minss(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2)
@@ -1310,7 +1310,7 @@ define void @test_movaps(<4 x float> *%a
 ; ZNVER1-NEXT:    vmovaps (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovaps %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x float>, <4 x float> *%a0, align 16
   %2 = fadd <4 x float> %1, %1
   store <4 x float> %2, <4 x float> *%a1, align 16
@@ -1364,7 +1364,7 @@ define <4 x float> @test_movhlps(<4 x fl
 ; ZNVER1-LABEL: test_movhlps:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
   ret <4 x float> %1
 }
@@ -1428,7 +1428,7 @@ define void @test_movhps(<4 x float> %a0
 ; ZNVER1-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast x86_mmx* %a2 to <2 x float>*
   %2 = load <2 x float>, <2 x float> *%1, align 8
   %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1488,7 +1488,7 @@ define <4 x float> @test_movlhps(<4 x fl
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %2 = fadd <4 x float> %a1, %1
   ret <4 x float> %2
@@ -1549,7 +1549,7 @@ define void @test_movlps(<4 x float> %a0
 ; ZNVER1-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovlps %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast x86_mmx* %a2 to <2 x float>*
   %2 = load <2 x float>, <2 x float> *%1, align 8
   %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1600,8 +1600,8 @@ define i32 @test_movmskps(<4 x float> %a
 ;
 ; ZNVER1-LABEL: test_movmskps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmovmskps %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovmskps %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
   ret i32 %1
 }
@@ -1652,7 +1652,7 @@ define void @test_movntps(<4 x float> %a
 ; ZNVER1-LABEL: test_movntps:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovntps %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
   ret void
 }
@@ -1712,7 +1712,7 @@ define void @test_movss_mem(float* %a0,
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovss %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load float, float* %a0, align 1
   %2 = fadd float %1, %1
   store float %2, float *%a1, align 1
@@ -1764,7 +1764,7 @@ define <4 x float> @test_movss_reg(<4 x
 ; ZNVER1-LABEL: test_movss_reg:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
   ret <4 x float> %1
 }
@@ -1824,7 +1824,7 @@ define void @test_movups(<4 x float> *%a
 ; ZNVER1-NEXT:    vmovups (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovups %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <4 x float>, <4 x float> *%a0, align 1
   %2 = fadd <4 x float> %1, %1
   store <4 x float> %2, <4 x float> *%a1, align 1
@@ -1876,9 +1876,9 @@ define <4 x float> @test_mulps(<4 x floa
 ;
 ; ZNVER1-LABEL: test_mulps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulps %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT:    vmulps (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul <4 x float> %a0, %a1
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = fmul <4 x float> %1, %2
@@ -1930,9 +1930,9 @@ define float @test_mulss(float %a0, floa
 ;
 ; ZNVER1-LABEL: test_mulss:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulss %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT:    vmulss (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul float %a0, %a1
   %2 = load float, float *%a2, align 4
   %3 = fmul float %1, %2
@@ -1990,7 +1990,7 @@ define <4 x float> @test_orps(<4 x float
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x float> %a0 to <4 x i32>
   %2 = bitcast <4 x float> %a1 to <4 x i32>
   %3 = or <4 x i32> %1, %2
@@ -2046,7 +2046,7 @@ define void @test_prefetchnta(i8* %a0) {
 ; ZNVER1-LABEL: test_prefetchnta:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    prefetchnta (%rdi) # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
   ret void
 }
@@ -2109,7 +2109,7 @@ define <4 x float> @test_rcpps(<4 x floa
 ; ZNVER1-NEXT:    vrcpps (%rdi), %xmm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpps %xmm0, %xmm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2)
@@ -2183,7 +2183,7 @@ define <4 x float> @test_rcpss(float %a0
 ; ZNVER1-NEXT:    vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x float> undef, float %a0, i32 0
   %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1)
   %3 = load float, float *%a1, align 4
@@ -2251,7 +2251,7 @@ define <4 x float> @test_rsqrtps(<4 x fl
 ; ZNVER1-NEXT:    vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
 ; ZNVER1-NEXT:    vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2)
@@ -2322,10 +2322,10 @@ define <4 x float> @test_rsqrtss(float %
 ; ZNVER1-LABEL: test_rsqrtss:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
-; ZNVER1-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT:    vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [5:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x float> undef, float %a0, i32 0
   %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1)
   %3 = load float, float *%a1, align 4
@@ -2381,7 +2381,7 @@ define void @test_sfence() {
 ; ZNVER1-LABEL: test_sfence:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    sfence # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.sse.sfence()
   ret void
 }
@@ -2438,7 +2438,7 @@ define <4 x float> @test_shufps(<4 x flo
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4>
@@ -2501,7 +2501,7 @@ define <4 x float> @test_sqrtps(<4 x flo
 ; ZNVER1-NEXT:    vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2)
@@ -2575,7 +2575,7 @@ define <4 x float> @test_sqrtss(<4 x flo
 ; ZNVER1-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2)
@@ -2629,9 +2629,9 @@ define i32 @test_stmxcsr() {
 ;
 ; ZNVER1-LABEL: test_stmxcsr:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT:    vstmxcsr -{{[0-9]+}}(%rsp) # sched: [100:?]
 ; ZNVER1-NEXT:    movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = alloca i32, align 4
   %2 = bitcast i32* %1 to i8*
   call void @llvm.x86.sse.stmxcsr(i8* %2)
@@ -2687,7 +2687,7 @@ define <4 x float> @test_subps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub <4 x float> %a0, %a1
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = fsub <4 x float> %1, %2
@@ -2741,7 +2741,7 @@ define float @test_subss(float %a0, floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub float %a0, %a1
   %2 = load float, float *%a2, align 4
   %3 = fsub float %1, %2
@@ -2859,7 +2859,7 @@ define i32 @test_ucomiss(<4 x float> %a0
 ; ZNVER1-NEXT:    andb %al, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    orb %cl, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 4
   %3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -2919,7 +2919,7 @@ define <4 x float> @test_unpckhps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2977,7 +2977,7 @@ define <4 x float> @test_unpcklps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -3035,7 +3035,7 @@ define <4 x float> @test_xorps(<4 x floa
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <4 x float> %a0 to <4 x i32>
   %2 = bitcast <4 x float> %a1 to <4 x i32>
   %3 = xor <4 x i32> %1, %2

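Two notable shifts in the SSE hunks above: FP multiply drops from [5:1.00] to [3:0.50] for vmulps/vmulss, and the MXCSR accessors vldmxcsr/vstmxcsr fall back to the [100:?] placeholder. The multiply numbers read as a 3-cycle operation that can start on either of two multiply-capable pipes; a sketch with assumed pipe names:

  // Sketch only: a two-unit group halves the reciprocal throughput,
  // matching the 0.50 printed for vmulps/vmulss above.
  def ZnFPMulPipes : ProcResGroup<[ZnFPU0, ZnFPU1]>;
  def ZnWriteFPMul : SchedWriteRes<[ZnFPMulPipes]> {
    let Latency = 3;
  }
  def : InstRW<[ZnWriteFPMul],
        (instregex "(V?)MULPSrr", "(V?)MULSSrr")>;
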
Modified: llvm/trunk/test/CodeGen/X86/sse2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-schedule.ll Thu Aug 31 05:38:35 2017
@@ -56,7 +56,7 @@ define <2 x double> @test_addpd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <2 x double> %a0, %a1
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fadd <2 x double> %1, %2
@@ -110,7 +110,7 @@ define double @test_addsd(double %a0, do
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd double %a0, %a1
   %2 = load double, double *%a2, align 8
   %3 = fadd double %1, %2
@@ -172,7 +172,7 @@ define <2 x double> @test_andpd(<2 x dou
 ; ZNVER1-NEXT:    vandpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <2 x double> %a0 to <4 x i32>
   %2 = bitcast <2 x double> %a1 to <4 x i32>
   %3 = and <4 x i32> %1, %2
@@ -239,7 +239,7 @@ define <2 x double> @test_andnotpd(<2 x
 ; ZNVER1-NEXT:    vandnpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vandnpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <2 x double> %a0 to <4 x i32>
   %2 = bitcast <2 x double> %a1 to <4 x i32>
   %3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -308,7 +308,7 @@ define <2 x double> @test_cmppd(<2 x dou
 ; ZNVER1-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
 ; ZNVER1-NEXT:    vorpd %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fcmp oeq <2 x double> %a0, %a1
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fcmp oeq <2 x double> %a0, %2
@@ -365,7 +365,7 @@ define double @test_cmpsd(double %a0, do
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vcmpeqsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <2 x double> undef, double %a0, i32 0
   %2 = insertelement <2 x double> undef, double %a1, i32 0
   %3 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %1, <2 x double> %2, i8 0)
@@ -488,7 +488,7 @@ define i32 @test_comisd(<2 x double> %a0
 ; ZNVER1-NEXT:    andb %al, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    orb %cl, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 8
   %3 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %2)
@@ -552,7 +552,7 @@ define <2 x double> @test_cvtdq2pd(<4 x
 ; ZNVER1-NEXT:    vcvtdq2pd (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2pd %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %2 = sitofp <2 x i32> %1 to <2 x double>
   %3 = load <4 x i32>, <4 x i32>*%a1, align 16
@@ -618,7 +618,7 @@ define <4 x float> @test_cvtdq2ps(<4 x i
 ; ZNVER1-NEXT:    vcvtdq2ps (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtdq2ps %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp <4 x i32> %a0 to <4 x float>
   %2 = load <4 x i32>, <4 x i32>*%a1, align 16
   %3 = sitofp <4 x i32> %2 to <4 x float>
@@ -682,7 +682,7 @@ define <4 x i32> @test_cvtpd2dq(<2 x dou
 ; ZNVER1-NEXT:    vcvtpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtpd2dq %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %2)
@@ -744,10 +744,10 @@ define <4 x float> @test_cvtpd2ps(<2 x d
 ;
 ; ZNVER1-LABEL: test_cvtpd2ps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtpd2psx (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT:    vcvtpd2ps %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtpd2psx (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT:    vcvtpd2ps %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %2)
@@ -812,7 +812,7 @@ define <4 x i32> @test_cvtps2dq(<4 x flo
 ; ZNVER1-NEXT:    vcvtps2dq (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtps2dq %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %2)
@@ -874,10 +874,10 @@ define <2 x double> @test_cvtps2pd(<4 x
 ;
 ; ZNVER1-LABEL: test_cvtps2pd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vcvtps2pd (%rdi), %xmm1 # sched: [12:1.00]
-; ZNVER1-NEXT:    vcvtps2pd %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtps2pd (%rdi), %xmm1 # sched: [10:1.00]
+; ZNVER1-NEXT:    vcvtps2pd %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
   %2 = fpext <2 x float> %1 to <2 x double>
   %3 = load <4 x float>, <4 x float> *%a1, align 16
@@ -942,7 +942,7 @@ define i32 @test_cvtsd2si(double %a0, do
 ; ZNVER1-NEXT:    vcvtsd2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtsd2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <2 x double> undef, double %a0, i32 0
   %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %1)
   %3 = load double, double *%a1, align 8
@@ -1008,7 +1008,7 @@ define i64 @test_cvtsd2siq(double %a0, d
 ; ZNVER1-NEXT:    vcvtsd2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvtsd2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <2 x double> undef, double %a0, i32 0
   %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %1)
   %3 = load double, double *%a1, align 8
@@ -1080,10 +1080,10 @@ define float @test_cvtsd2ss(double %a0,
 ; ZNVER1-LABEL: test_cvtsd2ss:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
-; ZNVER1-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT:    vcvtsd2ss %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptrunc double %a0 to float
   %2 = load double, double *%a1, align 8
   %3 = fptrunc double %2 to float
@@ -1146,7 +1146,7 @@ define double @test_cvtsi2sd(i32 %a0, i3
 ; ZNVER1-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp i32 %a0 to double
   %2 = load i32, i32 *%a1, align 8
   %3 = sitofp i32 %2 to double
@@ -1209,7 +1209,7 @@ define double @test_cvtsi2sdq(i64 %a0, i
 ; ZNVER1-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sitofp i64 %a0 to double
   %2 = load i64, i64 *%a1, align 8
   %3 = sitofp i64 %2 to double
@@ -1280,10 +1280,10 @@ define double @test_cvtss2sd(float %a0,
 ; ZNVER1-LABEL: test_cvtss2sd:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [5:1.00]
+; ZNVER1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fpext float %a0 to double
   %2 = load float, float *%a1, align 4
   %3 = fpext float %2 to double
@@ -1347,7 +1347,7 @@ define <4 x i32> @test_cvttpd2dq(<2 x do
 ; ZNVER1-NEXT:    vcvttpd2dqx (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttpd2dq %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi <2 x double> %a0 to <2 x i32>
   %2 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %3 = load <2 x double>, <2 x double> *%a1, align 16
@@ -1413,7 +1413,7 @@ define <4 x i32> @test_cvttps2dq(<4 x fl
 ; ZNVER1-NEXT:    vcvttps2dq (%rdi), %xmm1 # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttps2dq %xmm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi <4 x float> %a0 to <4 x i32>
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = fptosi <4 x float> %2 to <4 x i32>
@@ -1476,7 +1476,7 @@ define i32 @test_cvttsd2si(double %a0, d
 ; ZNVER1-NEXT:    vcvttsd2si (%rdi), %eax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttsd2si %xmm0, %ecx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi double %a0 to i32
   %2 = load double, double *%a1, align 8
   %3 = fptosi double %2 to i32
@@ -1539,7 +1539,7 @@ define i64 @test_cvttsd2siq(double %a0,
 ; ZNVER1-NEXT:    vcvttsd2si (%rdi), %rax # sched: [12:1.00]
 ; ZNVER1-NEXT:    vcvttsd2si %xmm0, %rcx # sched: [5:1.00]
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fptosi double %a0 to i64
   %2 = load double, double *%a1, align 8
   %3 = fptosi double %2 to i64
@@ -1594,7 +1594,7 @@ define <2 x double> @test_divpd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vdivpd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivpd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv <2 x double> %a0, %a1
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fdiv <2 x double> %1, %2
@@ -1648,7 +1648,7 @@ define double @test_divsd(double %a0, do
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
 ; ZNVER1-NEXT:    vdivsd (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fdiv double %a0, %a1
   %2 = load double, double *%a2, align 8
   %3 = fdiv double %1, %2
@@ -1700,7 +1700,7 @@ define void @test_lfence() {
 ; ZNVER1-LABEL: test_lfence:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    lfence # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.sse2.lfence()
   ret void
 }
@@ -1751,7 +1751,7 @@ define void @test_mfence() {
 ; ZNVER1-LABEL: test_mfence:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    mfence # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.sse2.mfence()
   ret void
 }
@@ -1799,8 +1799,8 @@ define void @test_maskmovdqu(<16 x i8> %
 ;
 ; ZNVER1-LABEL: test_maskmovdqu:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovdqu %xmm1, %xmm0 # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmaskmovdqu %xmm1, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %a0, <16 x i8> %a1, i8* %a2)
   ret void
 }
@@ -1853,7 +1853,7 @@ define <2 x double> @test_maxpd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %1, <2 x double> %2)
@@ -1908,7 +1908,7 @@ define <2 x double> @test_maxsd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmaxsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %1, <2 x double> %2)
@@ -1963,7 +1963,7 @@ define <2 x double> @test_minpd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %1, <2 x double> %2)
@@ -2018,7 +2018,7 @@ define <2 x double> @test_minsd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vminsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vminsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %1, <2 x double> %2)
@@ -2081,7 +2081,7 @@ define void @test_movapd(<2 x double> *%
 ; ZNVER1-NEXT:    vmovapd (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovapd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x double>, <2 x double> *%a0, align 16
   %2 = fadd <2 x double> %1, %1
   store <2 x double> %2, <2 x double> *%a1, align 16
@@ -2143,7 +2143,7 @@ define void @test_movdqa(<2 x i64> *%a0,
 ; ZNVER1-NEXT:    vmovdqa (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovdqa %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a0, align 16
   %2 = add <2 x i64> %1, %1
   store <2 x i64> %2, <2 x i64> *%a1, align 16
@@ -2205,7 +2205,7 @@ define void @test_movdqu(<2 x i64> *%a0,
 ; ZNVER1-NEXT:    vmovdqu (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovdqu %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a0, align 1
   %2 = add <2 x i64> %1, %1
   store <2 x i64> %2, <2 x i64> *%a1, align 1
@@ -2286,12 +2286,12 @@ define i32 @test_movd(<4 x i32> %a0, i32
 ; ZNVER1-LABEL: test_movd:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero sched: [8:0.50]
-; ZNVER1-NEXT:    vmovd %edi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT:    vmovd %edi, %xmm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovd %xmm1, (%rsi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vmovd %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovd %xmm0, %eax # sched: [2:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x i32> undef, i32 %a1, i32 0
   %2 = load i32, i32 *%a2
   %3 = insertelement <4 x i32> undef, i32 %2, i32 0
@@ -2377,12 +2377,12 @@ define i64 @test_movd_64(<2 x i64> %a0,
 ; ZNVER1-LABEL: test_movd_64:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero sched: [8:0.50]
-; ZNVER1-NEXT:    vmovq %rdi, %xmm1 # sched: [1:0.25]
+; ZNVER1-NEXT:    vmovq %rdi, %xmm1 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovq %xmm1, (%rsi) # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vmovq %xmm0, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovq %xmm0, %rax # sched: [2:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <2 x i64> undef, i64 %a1, i64 0
   %2 = load i64, i64 *%a2
   %3 = insertelement <2 x i64> undef, i64 %2, i64 0
@@ -2449,7 +2449,7 @@ define void @test_movhpd(<2 x double> %a
 ; ZNVER1-NEXT:    vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovhpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast x86_mmx* %a2 to double*
   %2 = load double, double *%1, align 8
   %3 = insertelement <2 x double> %a1, double %2, i32 1
@@ -2514,7 +2514,7 @@ define void @test_movlpd(<2 x double> %a
 ; ZNVER1-NEXT:    vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovlpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast x86_mmx* %a2 to double*
   %2 = load double, double *%1, align 8
   %3 = insertelement <2 x double> %a1, double %2, i32 0
@@ -2564,8 +2564,8 @@ define i32 @test_movmskpd(<2 x double> %
 ;
 ; ZNVER1-LABEL: test_movmskpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmovmskpd %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmovmskpd %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
   ret i32 %1
 }
@@ -2620,7 +2620,7 @@ define void @test_movntdqa(<2 x i64> %a0
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddq %xmm0, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovntdq %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <2 x i64> %a0, %a0
   store <2 x i64> %1, <2 x i64> *%a1, align 16, !nontemporal !0
   ret void
@@ -2673,7 +2673,7 @@ define void @test_movntpd(<2 x double> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovntpd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fadd <2 x double> %a0, %a0
   store <2 x double> %1, <2 x double> *%a1, align 16, !nontemporal !0
   ret void
@@ -2734,7 +2734,7 @@ define <2 x i64> @test_movq_mem(<2 x i64
 ; ZNVER1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vmovq %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load i64, i64* %a1, align 1
   %2 = insertelement <2 x i64> zeroinitializer, i64 %1, i32 0
   %3 = add <2 x i64> %a0, %2
@@ -2792,7 +2792,7 @@ define <2 x i64> @test_movq_reg(<2 x i64
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
   %2 = add <2 x i64> %a1, %1
   ret <2 x i64> %2
@@ -2853,7 +2853,7 @@ define void @test_movsd_mem(double* %a0,
 ; ZNVER1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddsd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovsd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load double, double* %a0, align 1
   %2 = fadd double %1, %1
   store double %2, double *%a1, align 1
@@ -2906,7 +2906,7 @@ define <2 x double> @test_movsd_reg(<2 x
 ; ZNVER1-LABEL: test_movsd_reg:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 0>
   ret <2 x double> %1
 }
@@ -2966,7 +2966,7 @@ define void @test_movupd(<2 x double> *%
 ; ZNVER1-NEXT:    vmovupd (%rdi), %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vmovupd %xmm0, (%rsi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x double>, <2 x double> *%a0, align 1
   %2 = fadd <2 x double> %1, %1
   store <2 x double> %2, <2 x double> *%a1, align 1
@@ -3018,9 +3018,9 @@ define <2 x double> @test_mulpd(<2 x dou
 ;
 ; ZNVER1-LABEL: test_mulpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulpd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulpd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulpd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT:    vmulpd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul <2 x double> %a0, %a1
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fmul <2 x double> %1, %2
@@ -3072,9 +3072,9 @@ define double @test_mulsd(double %a0, do
 ;
 ; ZNVER1-LABEL: test_mulsd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
-; ZNVER1-NEXT:    vmulsd (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # sched: [3:0.50]
+; ZNVER1-NEXT:    vmulsd (%rdi), %xmm0, %xmm0 # sched: [10:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fmul double %a0, %a1
   %2 = load double, double *%a2, align 8
   %3 = fmul double %1, %2
@@ -3136,7 +3136,7 @@ define <2 x double> @test_orpd(<2 x doub
 ; ZNVER1-NEXT:    vorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <2 x double> %a0 to <4 x i32>
   %2 = bitcast <2 x double> %a1 to <4 x i32>
   %3 = or <4 x i32> %1, %2
@@ -3199,7 +3199,7 @@ define <8 x i16> @test_packssdw(<4 x i32
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpackssdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
   %2 = bitcast <8 x i16> %1 to <4 x i32>
   %3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -3259,7 +3259,7 @@ define <16 x i8> @test_packsswb(<8 x i16
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpacksswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = bitcast <16 x i8> %1 to <8 x i16>
   %3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -3319,7 +3319,7 @@ define <16 x i8> @test_packuswb(<8 x i16
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpackuswb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = bitcast <16 x i8> %1 to <8 x i16>
   %3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -3379,7 +3379,7 @@ define <16 x i8> @test_paddb(<16 x i8> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <16 x i8> %a0, %a1
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = add <16 x i8> %1, %2
@@ -3437,7 +3437,7 @@ define <4 x i32> @test_paddd(<4 x i32> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <4 x i32> %a0, %a1
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = add <4 x i32> %1, %2
@@ -3491,7 +3491,7 @@ define <2 x i64> @test_paddq(<2 x i64> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <2 x i64> %a0, %a1
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = add <2 x i64> %1, %2
@@ -3549,7 +3549,7 @@ define <16 x i8> @test_paddsb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %1, <16 x i8> %2)
@@ -3608,7 +3608,7 @@ define <8 x i16> @test_paddsw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %1, <8 x i16> %2)
@@ -3667,7 +3667,7 @@ define <16 x i8> @test_paddusb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %1, <16 x i8> %2)
@@ -3726,7 +3726,7 @@ define <8 x i16> @test_paddusw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %1, <8 x i16> %2)
@@ -3785,7 +3785,7 @@ define <8 x i16> @test_paddw(<8 x i16> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = add <8 x i16> %a0, %a1
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = add <8 x i16> %1, %2
@@ -3847,7 +3847,7 @@ define <2 x i64> @test_pand(<2 x i64> %a
 ; ZNVER1-NEXT:    vpand %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpand (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = and <2 x i64> %a0, %a1
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = and <2 x i64> %1, %2
@@ -3916,7 +3916,7 @@ define <2 x i64> @test_pandn(<2 x i64> %
 ; ZNVER1-NEXT:    vpandn %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpandn (%rdi), %xmm0, %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = xor <2 x i64> %a0, <i64 -1, i64 -1>
   %2 = and <2 x i64> %a1, %1
   %3 = load <2 x i64>, <2 x i64> *%a2, align 16
@@ -3977,7 +3977,7 @@ define <16 x i8> @test_pavgb(<16 x i8> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpavgb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %1, <16 x i8> %2)
@@ -4036,7 +4036,7 @@ define <8 x i16> @test_pavgw(<8 x i16> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpavgw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %1, <8 x i16> %2)
@@ -4101,7 +4101,7 @@ define <16 x i8> @test_pcmpeqb(<16 x i8>
 ; ZNVER1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpeqb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp eq <16 x i8> %a0, %a1
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = icmp eq <16 x i8> %a0, %2
@@ -4167,7 +4167,7 @@ define <4 x i32> @test_pcmpeqd(<4 x i32>
 ; ZNVER1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp eq <4 x i32> %a0, %a1
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = icmp eq <4 x i32> %a0, %2
@@ -4233,7 +4233,7 @@ define <8 x i16> @test_pcmpeqw(<8 x i16>
 ; ZNVER1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpeqw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp eq <8 x i16> %a0, %a1
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = icmp eq <8 x i16> %a0, %2
@@ -4300,7 +4300,7 @@ define <16 x i8> @test_pcmpgtb(<16 x i8>
 ; ZNVER1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpgtb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp sgt <16 x i8> %a0, %a1
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = icmp sgt <16 x i8> %a0, %2
@@ -4367,7 +4367,7 @@ define <4 x i32> @test_pcmpgtd(<4 x i32>
 ; ZNVER1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpeqd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp sgt <4 x i32> %a0, %a1
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = icmp eq <4 x i32> %a0, %2
@@ -4434,7 +4434,7 @@ define <8 x i16> @test_pcmpgtw(<8 x i16>
 ; ZNVER1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm1 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpgtw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpor %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp sgt <8 x i16> %a0, %a1
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = icmp sgt <8 x i16> %a0, %2
@@ -4490,7 +4490,7 @@ define i16 @test_pextrw(<8 x i16> %a0) {
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpextrw $6, %xmm0, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = extractelement <8 x i16> %a0, i32 6
   ret i16 %1
 }
@@ -4546,7 +4546,7 @@ define <8 x i16> @test_pinsrw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpinsrw $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <8 x i16> %a0, i16 %a1, i32 1
   %2 = load i16, i16 *%a2
   %3 = insertelement <8 x i16> %1, i16 %2, i32 3
@@ -4608,7 +4608,7 @@ define <4 x i32> @test_pmaddwd(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmaddwd (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
   %2 = bitcast <4 x i32> %1 to <8 x i16>
   %3 = load <8 x i16>, <8 x i16> *%a2, align 16
@@ -4668,7 +4668,7 @@ define <8 x i16> @test_pmaxsw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %1, <8 x i16> %2)
@@ -4727,7 +4727,7 @@ define <16 x i8> @test_pmaxub(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %1, <16 x i8> %2)
@@ -4786,7 +4786,7 @@ define <8 x i16> @test_pminsw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %1, <8 x i16> %2)
@@ -4845,7 +4845,7 @@ define <16 x i8> @test_pminub(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminub %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminub (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %1, <16 x i8> %2)
@@ -4893,8 +4893,8 @@ define i32 @test_pmovmskb(<16 x i8> %a0)
 ;
 ; ZNVER1-LABEL: test_pmovmskb:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpmovmskb %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vpmovmskb %xmm0, %eax # sched: [1:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0)
   ret i32 %1
 }
@@ -4947,7 +4947,7 @@ define <8 x i16> @test_pmulhuw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmulhuw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %1, <8 x i16> %2)
@@ -5002,7 +5002,7 @@ define <8 x i16> @test_pmulhw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmulhw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %1, <8 x i16> %2)
@@ -5057,7 +5057,7 @@ define <8 x i16> @test_pmullw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmullw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = mul <8 x i16> %a0, %a1
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = mul <8 x i16> %1, %2
@@ -5119,7 +5119,7 @@ define <2 x i64> @test_pmuludq(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmuludq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1)
   %2 = bitcast <2 x i64> %1 to <4 x i32>
   %3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -5183,7 +5183,7 @@ define <2 x i64> @test_por(<2 x i64> %a0
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = or <2 x i64> %a0, %a1
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = or <2 x i64> %1, %2
@@ -5246,7 +5246,7 @@ define <2 x i64> @test_psadbw(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpsadbw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
   %2 = bitcast <2 x i64> %1 to <16 x i8>
   %3 = load <16 x i8>, <16 x i8> *%a2, align 16
@@ -5312,7 +5312,7 @@ define <4 x i32> @test_pshufd(<4 x i32>
 ; ZNVER1-NEXT:    vpshufd {{.*#+}} xmm1 = mem[3,2,1,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   %2 = load <4 x i32>, <4 x i32> *%a1, align 16
   %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -5377,7 +5377,7 @@ define <8 x i16> @test_pshufhw(<8 x i16>
 ; ZNVER1-NEXT:    vpshufhw {{.*#+}} xmm1 = mem[0,1,2,3,7,6,5,4] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6>
   %2 = load <8 x i16>, <8 x i16> *%a1, align 16
   %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4>
@@ -5442,7 +5442,7 @@ define <8 x i16> @test_pshuflw(<8 x i16>
 ; ZNVER1-NEXT:    vpshuflw {{.*#+}} xmm1 = mem[3,2,1,0,4,5,6,7] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7>
   %2 = load <8 x i16>, <8 x i16> *%a1, align 16
   %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
@@ -5502,10 +5502,10 @@ define <4 x i32> @test_pslld(<4 x i32> %
 ;
 ; ZNVER1-LABEL: test_pslld:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpslld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpslld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpslld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpslld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpslld $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %1, <4 x i32> %2)
@@ -5559,8 +5559,8 @@ define <4 x i32> @test_pslldq(<4 x i32>
 ;
 ; ZNVER1-LABEL: test_pslldq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11] sched: [1:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
   ret <4 x i32> %1
 }
@@ -5617,10 +5617,10 @@ define <2 x i64> @test_psllq(<2 x i64> %
 ;
 ; ZNVER1-LABEL: test_psllq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsllq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsllq $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %1, <2 x i64> %2)
@@ -5682,10 +5682,10 @@ define <8 x i16> @test_psllw(<8 x i16> %
 ;
 ; ZNVER1-LABEL: test_psllw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsllw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsllw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %1, <8 x i16> %2)
@@ -5747,10 +5747,10 @@ define <4 x i32> @test_psrad(<4 x i32> %
 ;
 ; ZNVER1-LABEL: test_psrad:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsrad (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsrad $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> %2)
@@ -5812,10 +5812,10 @@ define <8 x i16> @test_psraw(<8 x i16> %
 ;
 ; ZNVER1-LABEL: test_psraw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsraw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsraw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> %2)
@@ -5877,10 +5877,10 @@ define <4 x i32> @test_psrld(<4 x i32> %
 ;
 ; ZNVER1-LABEL: test_psrld:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsrld (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsrld $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %1, <4 x i32> %2)
@@ -5934,8 +5934,8 @@ define <4 x i32> @test_psrldq(<4 x i32>
 ;
 ; ZNVER1-LABEL: test_psrldq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero sched: [1:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
   ret <4 x i32> %1
 }
@@ -5992,10 +5992,10 @@ define <2 x i64> @test_psrlq(<2 x i64> %
 ;
 ; ZNVER1-LABEL: test_psrlq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsrlq (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsrlq $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %1, <2 x i64> %2)
@@ -6057,10 +6057,10 @@ define <8 x i16> @test_psrlw(<8 x i16> %
 ;
 ; ZNVER1-LABEL: test_psrlw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; ZNVER1-NEXT:    vpsrlw (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    vpsrlw $2, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %1, <8 x i16> %2)
@@ -6121,7 +6121,7 @@ define <16 x i8> @test_psubb(<16 x i8> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <16 x i8> %a0, %a1
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = sub <16 x i8> %1, %2
@@ -6179,7 +6179,7 @@ define <4 x i32> @test_psubd(<4 x i32> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <4 x i32> %a0, %a1
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = sub <4 x i32> %1, %2
@@ -6233,7 +6233,7 @@ define <2 x i64> @test_psubq(<2 x i64> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <2 x i64> %a0, %a1
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = sub <2 x i64> %1, %2
@@ -6291,7 +6291,7 @@ define <16 x i8> @test_psubsb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %1, <16 x i8> %2)
@@ -6350,7 +6350,7 @@ define <8 x i16> @test_psubsw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %1, <8 x i16> %2)
@@ -6409,7 +6409,7 @@ define <16 x i8> @test_psubusb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %1, <16 x i8> %2)
@@ -6468,7 +6468,7 @@ define <8 x i16> @test_psubusw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubusw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %1, <8 x i16> %2)
@@ -6527,7 +6527,7 @@ define <8 x i16> @test_psubw(<8 x i16> %
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = sub <8 x i16> %a0, %a1
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = sub <8 x i16> %1, %2
@@ -6585,7 +6585,7 @@ define <16 x i8> @test_punpckhbw(<16 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -6649,7 +6649,7 @@ define <4 x i32> @test_punpckhdq(<4 x i3
 ; ZNVER1-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm1[2],mem[2],xmm1[3],mem[3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -6712,7 +6712,7 @@ define <2 x i64> @test_punpckhqdq(<2 x i
 ; ZNVER1-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
@@ -6771,7 +6771,7 @@ define <8 x i16> @test_punpckhwd(<8 x i1
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -6829,7 +6829,7 @@ define <16 x i8> @test_punpcklbw(<16 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
@@ -6893,7 +6893,7 @@ define <4 x i32> @test_punpckldq(<4 x i3
 ; ZNVER1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = shufflevector <4 x i32> %a1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -6956,7 +6956,7 @@ define <2 x i64> @test_punpcklqdq(<2 x i
 ; ZNVER1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = shufflevector <2 x i64> %a1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
@@ -7015,7 +7015,7 @@ define <8 x i16> @test_punpcklwd(<8 x i1
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -7077,7 +7077,7 @@ define <2 x i64> @test_pxor(<2 x i64> %a
 ; ZNVER1-NEXT:    vpxor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpxor (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = xor <2 x i64> %a0, %a1
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = xor <2 x i64> %1, %2
@@ -7140,7 +7140,7 @@ define <2 x double> @test_shufpd(<2 x do
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vshufpd {{.*#+}} xmm1 = xmm1[1],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 2>
@@ -7204,7 +7204,7 @@ define <2 x double> @test_sqrtpd(<2 x do
 ; ZNVER1-NEXT:    vsqrtpd (%rdi), %xmm1 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtpd %xmm0, %xmm0 # sched: [20:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0)
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %2)
@@ -7278,7 +7278,7 @@ define <2 x double> @test_sqrtsd(<2 x do
 ; ZNVER1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vsqrtsd %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0)
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %2)
@@ -7334,7 +7334,7 @@ define <2 x double> @test_subpd(<2 x dou
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub <2 x double> %a0, %a1
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fsub <2 x double> %1, %2
@@ -7388,7 +7388,7 @@ define double @test_subsd(double %a0, do
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vsubsd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = fsub double %a0, %a1
   %2 = load double, double *%a2, align 8
   %3 = fsub double %1, %2
@@ -7506,7 +7506,7 @@ define i32 @test_ucomisd(<2 x double> %a
 ; ZNVER1-NEXT:    andb %al, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    orb %cl, %dl # sched: [1:0.25]
 ; ZNVER1-NEXT:    movzbl %dl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 8
   %3 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %2)
@@ -7570,7 +7570,7 @@ define <2 x double> @test_unpckhpd(<2 x
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = shufflevector <2 x double> %a1, <2 x double> %2, <2 x i32> <i32 1, i32 3>
@@ -7639,7 +7639,7 @@ define <2 x double> @test_unpcklpd(<2 x
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 0, i32 2>
@@ -7702,7 +7702,7 @@ define <2 x double> @test_xorpd(<2 x dou
 ; ZNVER1-NEXT:    vxorpd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vxorpd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = bitcast <2 x double> %a0 to <4 x i32>
   %2 = bitcast <2 x double> %a1 to <4 x i32>
   %3 = xor <4 x i32> %1, %2

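The recurring change in the hunks above — retq dropping from sched: [5:0.50] to [1:0.50] — comes from modelling the near return explicitly rather than leaving it on the old 5-cycle figure. (The sched: comments themselves are llc's -print-schedule output, which is how these CHECK lines are regenerated.) A minimal sketch of what such an override looks like in the patch's regex-based style; ZnWriteRet is a made-up name, the regex may differ from the model's literal text, and ZnAGU/ZnALU are assumed to be the model's two-unit address-generation group and four-unit ALU group:

    // Illustrative sketch only, in the style of X86ScheduleZnver1.td.
    // One cycle on an AGU (stack pop) plus an ALU gives latency 1 and a
    // printed reciprocal throughput of 0.50, bounded by the two AGUs.
    def ZnWriteRet : SchedWriteRes<[ZnAGU, ZnALU]> {
      let Latency = 1;
    }
    def : InstRW<[ZnWriteRet], (instregex "RET(L|Q|W)")>;
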
Modified: llvm/trunk/test/CodeGen/X86/sse3-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse3-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse3-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse3-schedule.ll Thu Aug 31 05:38:35 2017
@@ -56,7 +56,7 @@ define <2 x double> @test_addsubpd(<2 x
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %1, <2 x double> %2)
@@ -111,7 +111,7 @@ define <4 x float> @test_addsubps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vaddsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %1, <4 x float> %2)
@@ -164,9 +164,9 @@ define <2 x double> @test_haddpd(<2 x do
 ;
 ; ZNVER1-LABEL: test_haddpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhaddpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhaddpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %1, <2 x double> %2)
@@ -219,9 +219,9 @@ define <4 x float> @test_haddps(<4 x flo
 ;
 ; ZNVER1-LABEL: test_haddps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhaddps %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhaddps (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %1, <4 x float> %2)
@@ -274,9 +274,9 @@ define <2 x double> @test_hsubpd(<2 x do
 ;
 ; ZNVER1-LABEL: test_hsubpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhsubpd (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhsubpd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %1, <2 x double> %2)
@@ -329,9 +329,9 @@ define <4 x float> @test_hsubps(<4 x flo
 ;
 ; ZNVER1-LABEL: test_hsubps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vhsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vhsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vhsubps %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vhsubps (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %1, <4 x float> %2)
@@ -380,7 +380,7 @@ define <16 x i8> @test_lddqu(i8* %a0) {
 ; ZNVER1-LABEL: test_lddqu:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vlddqu (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %a0)
   ret <16 x i8> %1
 }
@@ -441,7 +441,7 @@ define void @test_monitor(i8* %a0, i32 %
 ; ZNVER1-NEXT:    leaq (%rdi), %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl %esi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    monitor # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void @llvm.x86.sse3.monitor(i8* %a0, i32 %a1, i32 %a2)
   ret void
 }
@@ -503,7 +503,7 @@ define <2 x double> @test_movddup(<2 x d
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> zeroinitializer
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
@@ -567,7 +567,7 @@ define <4 x float> @test_movshdup(<4 x f
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} xmm1 = mem[1,1,3,3] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -631,7 +631,7 @@ define <4 x float> @test_movsldup(<4 x f
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} xmm1 = mem[0,0,2,2] sched: [8:0.50]
 ; ZNVER1-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -694,7 +694,7 @@ define void @test_mwait(i32 %a0, i32 %a1
 ; ZNVER1-NEXT:    movl %edi, %ecx # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl %esi, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    mwait # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void @llvm.x86.sse3.mwait(i32 %a0, i32 %a1)
   ret void
 }

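The horizontal adds and subtracts above move from concrete numbers to [100:?]. That annotation is what llc prints for a write with latency 100 and an empty resource list: with no resources claimed, no reciprocal throughput can be computed, so the second field becomes "?". A hedged sketch of that catch-all pattern, with the write name and regex illustrative rather than the model's literal text:

    // Unmodeled/microcoded placeholder: long latency, no resources,
    // which is exactly what the [100:?] comments above reflect.
    def ZnWriteMicrocoded : SchedWriteRes<[]> {
      let Latency = 100;
    }
    def : InstRW<[ZnWriteMicrocoded],
                 (instregex "(V?)H(ADD|SUB)P(S|D)(Y?)r(r|m)")>;
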
Modified: llvm/trunk/test/CodeGen/X86/sse41-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-schedule.ll Thu Aug 31 05:38:35 2017
@@ -56,7 +56,7 @@ define <2 x double> @test_blendpd(<2 x d
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1] sched: [1:0.50]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; ZNVER1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = fadd <2 x double> %a1, %1
@@ -105,7 +105,7 @@ define <4 x float> @test_blendps(<4 x fl
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2,3] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
@@ -159,7 +159,7 @@ define <2 x double> @test_blendvpd(<2 x
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvpd %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2)
   %2 = load <2 x double>, <2 x double> *%a3, align 16
   %3 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %1, <2 x double> %2, <2 x double> %a2)
@@ -214,7 +214,7 @@ define <4 x float> @test_blendvps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vblendvps %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
   %2 = load <4 x float>, <4 x float> *%a3
   %3 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %1, <4 x float> %2, <4 x float> %a2)
@@ -261,9 +261,9 @@ define <2 x double> @test_dppd(<2 x doub
 ;
 ; ZNVER1-LABEL: test_dppd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vdppd $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vdppd $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
   %2 = load <2 x double>, <2 x double> *%a2, align 16
   %3 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %1, <2 x double> %2, i8 7)
@@ -310,9 +310,9 @@ define <4 x float> @test_dpps(<4 x float
 ;
 ; ZNVER1-LABEL: test_dpps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vdpps $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vdpps $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %1, <4 x float> %2, i8 7)
@@ -361,7 +361,7 @@ define <4 x float> @test_insertps(<4 x f
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm1[0],xmm0[2,3] sched: [1:0.50]
 ; ZNVER1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17)
   %2 = load float, float *%a2
   %3 = insertelement <4 x float> %1, float %2, i32 3
@@ -403,7 +403,7 @@ define <2 x i64> @test_movntdqa(i8* %a0)
 ; ZNVER1-LABEL: test_movntdqa:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovntdqa (%rdi), %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %a0)
   ret <2 x i64> %1
 }
@@ -450,7 +450,7 @@ define <8 x i16> @test_mpsadbw(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
   %2 = bitcast <8 x i16> %1 to <16 x i8>
   %3 = load <16 x i8>, <16 x i8> *%a2, align 16
@@ -500,7 +500,7 @@ define <8 x i16> @test_packusdw(<4 x i32
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpackusdw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
   %2 = bitcast <8 x i16> %1 to <4 x i32>
   %3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -556,7 +556,7 @@ define <16 x i8> @test_pblendvb(<16 x i8
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
 ; ZNVER1-NEXT:    vpblendvb %xmm2, (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2)
   %2 = load <16 x i8>, <16 x i8> *%a3, align 16
   %3 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %1, <16 x i8> %2, <16 x i8> %a2)
@@ -603,9 +603,9 @@ define <8 x i16> @test_pblendw(<8 x i16>
 ;
 ; ZNVER1-LABEL: test_pblendw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.50]
+; ZNVER1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] sched: [1:0.33]
 ; ZNVER1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6],mem[7] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
@@ -653,7 +653,7 @@ define <2 x i64> @test_pcmpeqq(<2 x i64>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpeqq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp eq <2 x i64> %a0, %a1
   %2 = sext <2 x i1> %1 to <2 x i64>
   %3 = load <2 x i64>, <2 x i64>*%a2, align 16
@@ -703,7 +703,7 @@ define i32 @test_pextrb(<16 x i8> %a0, i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpextrb $3, %xmm0, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpextrb $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = extractelement <16 x i8> %a0, i32 3
   %2 = extractelement <16 x i8> %a0, i32 1
   store i8 %2, i8 *%a1
@@ -752,7 +752,7 @@ define i32 @test_pextrd(<4 x i32> %a0, i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpextrd $3, %xmm0, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpextrd $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = extractelement <4 x i32> %a0, i32 3
   %2 = extractelement <4 x i32> %a0, i32 1
   store i32 %2, i32 *%a1
@@ -800,7 +800,7 @@ define i64 @test_pextrq(<2 x i64> %a0, <
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpextrq $1, %xmm0, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = extractelement <2 x i64> %a0, i32 1
   %2 = extractelement <2 x i64> %a0, i32 1
   store i64 %2, i64 *%a2
@@ -848,7 +848,7 @@ define i32 @test_pextrw(<8 x i16> %a0, i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpextrw $3, %xmm0, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpextrw $1, %xmm0, (%rdi) # sched: [8:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = extractelement <8 x i16> %a0, i32 3
   %2 = extractelement <8 x i16> %a0, i32 1
   store i16 %2, i16 *%a1
@@ -897,7 +897,7 @@ define <8 x i16> @test_phminposuw(<8 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vphminposuw (%rdi), %xmm0 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vphminposuw %xmm0, %xmm0 # sched: [4:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <8 x i16>, <8 x i16> *%a0, align 16
   %2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %1)
   %3 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %2)
@@ -946,7 +946,7 @@ define <16 x i8> @test_pinsrb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpinsrb $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpinsrb $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <16 x i8> %a0, i8 %a1, i32 1
   %2 = load i8, i8 *%a2
   %3 = insertelement <16 x i8> %1, i8 %2, i32 3
@@ -994,7 +994,7 @@ define <4 x i32> @test_pinsrd(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpinsrd $3, (%rsi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <4 x i32> %a0, i32 %a1, i32 1
   %2 = load i32, i32 *%a2
   %3 = insertelement <4 x i32> %1, i32 %2, i32 3
@@ -1049,7 +1049,7 @@ define <2 x i64> @test_pinsrq(<2 x i64>
 ; ZNVER1-NEXT:    vpinsrq $1, (%rsi), %xmm1, %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpinsrq $1, %rdi, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = insertelement <2 x i64> %a0, i64 %a2, i32 1
   %2 = load i64, i64 *%a3
   %3 = insertelement <2 x i64> %a1, i64 %2, i32 1
@@ -1098,7 +1098,7 @@ define <16 x i8> @test_pmaxsb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse41.pmaxsb(<16 x i8> %1, <16 x i8> %2)
@@ -1147,7 +1147,7 @@ define <4 x i32> @test_pmaxsd(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %1, <4 x i32> %2)
@@ -1196,7 +1196,7 @@ define <4 x i32> @test_pmaxud(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse41.pmaxud(<4 x i32> %1, <4 x i32> %2)
@@ -1245,7 +1245,7 @@ define <8 x i16> @test_pmaxuw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpmaxuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse41.pmaxuw(<8 x i16> %1, <8 x i16> %2)
@@ -1294,7 +1294,7 @@ define <16 x i8> @test_pminsb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminsb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminsb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse41.pminsb(<16 x i8> %1, <16 x i8> %2)
@@ -1343,7 +1343,7 @@ define <4 x i32> @test_pminsd(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminsd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminsd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %1, <4 x i32> %2)
@@ -1392,7 +1392,7 @@ define <4 x i32> @test_pminud(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminud %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminud (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %1, <4 x i32> %2)
@@ -1441,7 +1441,7 @@ define <8 x i16> @test_pminuw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpminuw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.sse41.pminuw(<8 x i16> %1, <8 x i16> %2)
@@ -1498,7 +1498,7 @@ define <8 x i16> @test_pmovsxbw(<16 x i8
 ; ZNVER1-NEXT:    vpmovsxbw (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxbw %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = sext <8 x i8> %1 to <8 x i16>
   %3 = load <8 x i8>, <8 x i8>* %a1, align 1
@@ -1556,7 +1556,7 @@ define <4 x i32> @test_pmovsxbd(<16 x i8
 ; ZNVER1-NEXT:    vpmovsxbd (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxbd %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = sext <4 x i8> %1 to <4 x i32>
   %3 = load <4 x i8>, <4 x i8>* %a1, align 1
@@ -1614,7 +1614,7 @@ define <2 x i64> @test_pmovsxbq(<16 x i8
 ; ZNVER1-NEXT:    vpmovsxbq (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxbq %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
   %2 = sext <2 x i8> %1 to <2 x i64>
   %3 = load <2 x i8>, <2 x i8>* %a1, align 1
@@ -1672,7 +1672,7 @@ define <2 x i64> @test_pmovsxdq(<4 x i32
 ; ZNVER1-NEXT:    vpmovsxdq (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxdq %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %2 = sext <2 x i32> %1 to <2 x i64>
   %3 = load <2 x i32>, <2 x i32>* %a1, align 1
@@ -1730,7 +1730,7 @@ define <4 x i32> @test_pmovsxwd(<8 x i16
 ; ZNVER1-NEXT:    vpmovsxwd (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxwd %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = sext <4 x i16> %1 to <4 x i32>
   %3 = load <4 x i16>, <4 x i16>* %a1, align 1
@@ -1788,7 +1788,7 @@ define <2 x i64> @test_pmovsxwq(<8 x i16
 ; ZNVER1-NEXT:    vpmovsxwq (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovsxwq %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
   %2 = sext <2 x i16> %1 to <2 x i64>
   %3 = load <2 x i16>, <2 x i16>* %a1, align 1
@@ -1846,7 +1846,7 @@ define <8 x i16> @test_pmovzxbw(<16 x i8
 ; ZNVER1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = zext <8 x i8> %1 to <8 x i16>
   %3 = load <8 x i8>, <8 x i8>* %a1, align 1
@@ -1904,7 +1904,7 @@ define <4 x i32> @test_pmovzxbd(<16 x i8
 ; ZNVER1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = zext <4 x i8> %1 to <4 x i32>
   %3 = load <4 x i8>, <4 x i8>* %a1, align 1
@@ -1962,7 +1962,7 @@ define <2 x i64> @test_pmovzxbq(<16 x i8
 ; ZNVER1-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
   %2 = zext <2 x i8> %1 to <2 x i64>
   %3 = load <2 x i8>, <2 x i8>* %a1, align 1
@@ -2020,7 +2020,7 @@ define <2 x i64> @test_pmovzxdq(<4 x i32
 ; ZNVER1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %2 = zext <2 x i32> %1 to <2 x i64>
   %3 = load <2 x i32>, <2 x i32>* %a1, align 1
@@ -2078,7 +2078,7 @@ define <4 x i32> @test_pmovzxwd(<8 x i16
 ; ZNVER1-NEXT:    vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %2 = zext <4 x i16> %1 to <4 x i32>
   %3 = load <4 x i16>, <4 x i16>* %a1, align 1
@@ -2136,7 +2136,7 @@ define <2 x i64> @test_pmovzxwq(<8 x i16
 ; ZNVER1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero sched: [8:0.50]
 ; ZNVER1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero sched: [1:0.25]
 ; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
   %2 = zext <2 x i16> %1 to <2 x i64>
   %3 = load <2 x i16>, <2 x i16>* %a1, align 1
@@ -2186,7 +2186,7 @@ define <2 x i64> @test_pmuldq(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmuldq (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %a0, <4 x i32> %a1)
   %2 = bitcast <2 x i64> %1 to <4 x i32>
   %3 = load <4 x i32>, <4 x i32> *%a2, align 16
@@ -2236,7 +2236,7 @@ define <4 x i32> @test_pmulld(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmulld (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = mul <4 x i32> %a0, %a1
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = mul <4 x i32> %1, %2
@@ -2306,13 +2306,13 @@ define i32 @test_ptest(<2 x i64> %a0, <2
 ;
 ; ZNVER1-LABEL: test_ptest:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vptest %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT:    vptest %xmm1, %xmm0 # sched: [1:1.00]
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
-; ZNVER1-NEXT:    vptest (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT:    vptest (%rdi), %xmm0 # sched: [8:1.00]
 ; ZNVER1-NEXT:    setb %cl # sched: [1:0.25]
 ; ZNVER1-NEXT:    andb %al, %cl # sched: [1:0.25]
 ; ZNVER1-NEXT:    movzbl %cl, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
   %2 = load <2 x i64>, <2 x i64> *%a2, align 16
   %3 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %2)
@@ -2367,10 +2367,10 @@ define <2 x double> @test_roundpd(<2 x d
 ;
 ; ZNVER1-LABEL: test_roundpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundpd $7, (%rdi), %xmm1 # sched: [10:1.00]
-; ZNVER1-NEXT:    vroundpd $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT:    vroundpd $7, (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT:    vroundpd $7, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
   %2 = load <2 x double>, <2 x double> *%a1, align 16
   %3 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %2, i32 7)
@@ -2425,10 +2425,10 @@ define <4 x float> @test_roundps(<4 x fl
 ;
 ; ZNVER1-LABEL: test_roundps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundps $7, (%rdi), %xmm1 # sched: [10:1.00]
-; ZNVER1-NEXT:    vroundps $7, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT:    vroundps $7, (%rdi), %xmm1 # sched: [11:1.00]
+; ZNVER1-NEXT:    vroundps $7, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
   %2 = load <4 x float>, <4 x float> *%a1, align 16
   %3 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %2, i32 7)
@@ -2484,10 +2484,10 @@ define <2 x double> @test_roundsd(<2 x d
 ;
 ; ZNVER1-LABEL: test_roundsd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; ZNVER1-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT:    vroundsd $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
+; ZNVER1-NEXT:    vroundsd $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
   %2 = load <2 x double>, <2 x double>* %a2, align 16
   %3 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %2, i32 7)
@@ -2543,10 +2543,10 @@ define <4 x float> @test_roundss(<4 x fl
 ;
 ; ZNVER1-LABEL: test_roundss:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
-; ZNVER1-NEXT:    vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT:    vroundss $7, %xmm1, %xmm0, %xmm1 # sched: [4:1.00]
+; ZNVER1-NEXT:    vroundss $7, (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
 ; ZNVER1-NEXT:    vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
   %2 = load <4 x float>, <4 x float> *%a2, align 16
   %3 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %2, i32 7)

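Two patterns show up in the SSE4.1 changes: vdppd/vdpps join the [100:?] microcoded bucket, while the vround* family gains one cycle — 4 in registers, 11 with a folded load, i.e. the register latency plus the same 7-cycle FP load visible on the other folded loads in these tests. A sketch of the rounding entry under the same caveats as before; ZnFPU3 is assumed to be one of the model's single-unit FP pipes, and all names and regexes are illustrative:

    // Register form: 4 cycles on one FP pipe -> prints [4:1.00].
    def ZnWriteRndr : SchedWriteRes<[ZnFPU3]> { let Latency = 4; }
    // Folded-load form: AGU + FP pipe, 4 + 7 cycles -> prints [11:1.00].
    def ZnWriteRndm : SchedWriteRes<[ZnAGU, ZnFPU3]> { let Latency = 11; }
    def : InstRW<[ZnWriteRndr], (instregex "(V?)ROUND(PS|PD|SS|SD)r(i)?")>;
    def : InstRW<[ZnWriteRndm], (instregex "(V?)ROUND(PS|PD|SS|SD)m(i)?")>;
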
Modified: llvm/trunk/test/CodeGen/X86/sse42-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-schedule.ll Thu Aug 31 05:38:35 2017
@@ -56,7 +56,7 @@ define i32 @crc32_32_8(i32 %a0, i8 %a1,
 ; ZNVER1-NEXT:    crc32b %sil, %edi # sched: [3:1.00]
 ; ZNVER1-NEXT:    crc32b (%rdx), %edi # sched: [10:1.00]
 ; ZNVER1-NEXT:    movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %a1)
   %2 = load i8, i8 *%a2
   %3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %1, i8 %2)
@@ -112,7 +112,7 @@ define i32 @crc32_32_16(i32 %a0, i16 %a1
 ; ZNVER1-NEXT:    crc32w %si, %edi # sched: [3:1.00]
 ; ZNVER1-NEXT:    crc32w (%rdx), %edi # sched: [10:1.00]
 ; ZNVER1-NEXT:    movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %a1)
   %2 = load i16, i16 *%a2
   %3 = call i32 @llvm.x86.sse42.crc32.32.16(i32 %1, i16 %2)
@@ -168,7 +168,7 @@ define i32 @crc32_32_32(i32 %a0, i32 %a1
 ; ZNVER1-NEXT:    crc32l %esi, %edi # sched: [3:1.00]
 ; ZNVER1-NEXT:    crc32l (%rdx), %edi # sched: [10:1.00]
 ; ZNVER1-NEXT:    movl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1)
   %2 = load i32, i32 *%a2
   %3 = call i32 @llvm.x86.sse42.crc32.32.32(i32 %1, i32 %2)
@@ -224,7 +224,7 @@ define i64 @crc32_64_8(i64 %a0, i8 %a1,
 ; ZNVER1-NEXT:    crc32b %sil, %edi # sched: [3:1.00]
 ; ZNVER1-NEXT:    crc32b (%rdx), %edi # sched: [10:1.00]
 ; ZNVER1-NEXT:    movq %rdi, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a0, i8 %a1)
   %2 = load i8, i8 *%a2
   %3 = call i64 @llvm.x86.sse42.crc32.64.8(i64 %1, i8 %2)
@@ -280,7 +280,7 @@ define i64 @crc32_64_64(i64 %a0, i64 %a1
 ; ZNVER1-NEXT:    crc32q %rsi, %rdi # sched: [3:1.00]
 ; ZNVER1-NEXT:    crc32q (%rdx), %rdi # sched: [10:1.00]
 ; ZNVER1-NEXT:    movq %rdi, %rax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
   %2 = load i64, i64 *%a2
   %3 = call i64 @llvm.x86.sse42.crc32.64.64(i64 %1, i64 %2)
@@ -378,7 +378,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0
 ; ZNVER1-NEXT:    vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    # kill: %ECX<def> %ECX<kill> %RCX<def>
 ; ZNVER1-NEXT:    leal (%rcx,%rsi), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %2, i32 7, i8 7)
@@ -456,7 +456,7 @@ define <16 x i8> @test_pcmpestrm(<16 x i
 ; ZNVER1-NEXT:    movl $7, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl $7, %edx # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7)
@@ -526,7 +526,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0
 ; ZNVER1-NEXT:    vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    # kill: %ECX<def> %ECX<kill> %RCX<def>
 ; ZNVER1-NEXT:    leal (%rcx,%rax), %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %2, i8 7)
@@ -576,7 +576,7 @@ define <16 x i8> @test_pcmpistrm(<16 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %1, <16 x i8> %2, i8 7)
@@ -623,9 +623,9 @@ define <2 x i64> @test_pcmpgtq(<2 x i64>
 ;
 ; ZNVER1-LABEL: test_pcmpgtq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vpcmpgtq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = icmp sgt <2 x i64> %a0, %a1
   %2 = sext <2 x i1> %1 to <2 x i64>
   %3 = load <2 x i64>, <2 x i64>*%a2, align 16
@@ -675,7 +675,7 @@ define <2 x i64> @test_pclmulqdq(<2 x i6
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    vpclmulqdq $0, (%rdi), %xmm0, %xmm0 # sched: [100:?]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = load <2 x i64>, <2 x i64> *%a2, align 16
   %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
   %3 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %2, i8 0)

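The crc32 forms above settle at 3 cycles in registers and 10 through memory, again the register latency plus a 7-cycle integer load. A sketch of how a fixed-pipe integer entry like that can be written; ZnALU1 stands in for whichever ALU pipe hosts CRC32 (an assumption), and the regexes are only illustrative:

    def ZnWriteCrc32r : SchedWriteRes<[ZnALU1]> { let Latency = 3; }
    def ZnWriteCrc32m : SchedWriteRes<[ZnAGU, ZnALU1]> { let Latency = 10; }
    def : InstRW<[ZnWriteCrc32r], (instregex "CRC32r(32|64)r(8|16|32|64)")>;
    def : InstRW<[ZnWriteCrc32m], (instregex "CRC32r(32|64)m(8|16|32|64)")>;
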
Modified: llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll Thu Aug 31 05:38:35 2017
@@ -16,8 +16,8 @@ define <2 x i64> @test_extrq(<2 x i64> %
 ;
 ; ZNVER1-LABEL: test_extrq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    extrq %xmm1, %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    extrq %xmm1, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
   ret <2 x i64> %1
 }
@@ -36,8 +36,8 @@ define <2 x i64> @test_extrqi(<2 x i64>
 ;
 ; ZNVER1-LABEL: test_extrqi:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    extrq $2, $3, %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    extrq $2, $3, %xmm0 # sched: [2:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
   ret <2 x i64> %1
 }
@@ -56,8 +56,8 @@ define <2 x i64> @test_insertq(<2 x i64>
 ;
 ; ZNVER1-LABEL: test_insertq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    insertq %xmm1, %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    insertq %xmm1, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %1
 }
@@ -76,8 +76,8 @@ define <2 x i64> @test_insertqi(<2 x i64
 ;
 ; ZNVER1-LABEL: test_insertqi:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    insertq $6, $5, %xmm1, %xmm0
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    insertq $6, $5, %xmm1, %xmm0 # sched: [4:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
   ret <2 x i64> %1
 }
@@ -96,8 +96,8 @@ define void @test_movntsd(i8* %p, <2 x d
 ;
 ; ZNVER1-LABEL: test_movntsd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    movntsd %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    movntsd %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void @llvm.x86.sse4a.movnt.sd(i8* %p, <2 x double> %a)
   ret void
 }
@@ -116,8 +116,8 @@ define void @test_movntss(i8* %p, <4 x f
 ;
 ; ZNVER1-LABEL: test_movntss:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    movntss %xmm0, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    movntss %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   tail call void @llvm.x86.sse4a.movnt.ss(i8* %p, <4 x float> %a)
   ret void
 }

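The SSE4A hunks are the clearest sign of new coverage: extrq/insertq previously carried no sched comment at all (the bare lines on the "-" side) and now print [2:1.00] and [4:1.00], while the movntsd/movntss non-temporal stores are costed at [8:1.00]. A sketch of filling such a gap; ZnFPU1 as a single-unit FP pipe is an assumption, and the names are illustrative:

    def ZnWriteEXTRQ : SchedWriteRes<[ZnFPU1]> { let Latency = 2; }
    def : InstRW<[ZnWriteEXTRQ], (instregex "EXTRQ(I)?")>;
    def ZnWriteINSERTQ : SchedWriteRes<[ZnFPU1]> { let Latency = 4; }
    def : InstRW<[ZnWriteINSERTQ], (instregex "INSERTQ(I)?")>;
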
Modified: llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll?rev=312237&r1=312236&r2=312237&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ssse3-schedule.ll Thu Aug 31 05:38:35 2017
@@ -65,7 +65,7 @@ define <16 x i8> @test_pabsb(<16 x i8> %
 ; ZNVER1-NEXT:    vpabsb (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsb %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %a0)
   %2 = load <16 x i8>, <16 x i8> *%a1, align 16
   %3 = call <16 x i8> @llvm.x86.ssse3.pabs.b.128(<16 x i8> %2)
@@ -130,7 +130,7 @@ define <4 x i32> @test_pabsd(<4 x i32> %
 ; ZNVER1-NEXT:    vpabsd (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsd %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %a0)
   %2 = load <4 x i32>, <4 x i32> *%a1, align 16
   %3 = call <4 x i32> @llvm.x86.ssse3.pabs.d.128(<4 x i32> %2)
@@ -195,7 +195,7 @@ define <8 x i16> @test_pabsw(<8 x i16> %
 ; ZNVER1-NEXT:    vpabsw (%rdi), %xmm1 # sched: [8:0.50]
 ; ZNVER1-NEXT:    vpabsw %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpor %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a0)
   %2 = load <8 x i16>, <8 x i16> *%a1, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %2)
@@ -256,7 +256,7 @@ define <8 x i16> @test_palignr(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5] sched: [1:0.25]
 ; ZNVER1-NEXT:    vpalignr {{.*#+}} xmm0 = mem[14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13] sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = shufflevector <8 x i16> %2, <8 x i16> %1, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -308,9 +308,9 @@ define <4 x i32> @test_phaddd(<4 x i32>
 ;
 ; ZNVER1-LABEL: test_phaddd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphaddd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphaddd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -363,9 +363,9 @@ define <8 x i16> @test_phaddsw(<8 x i16>
 ;
 ; ZNVER1-LABEL: test_phaddsw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphaddsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphaddsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphaddsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphaddsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -418,9 +418,9 @@ define <8 x i16> @test_phaddw(<8 x i16>
 ;
 ; ZNVER1-LABEL: test_phaddw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphaddw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphaddw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphaddw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %1, <8 x i16> %2)
@@ -473,9 +473,9 @@ define <4 x i32> @test_phsubd(<4 x i32>
 ;
 ; ZNVER1-LABEL: test_phsubd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphsubd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphsubd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphsubd %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphsubd (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -528,9 +528,9 @@ define <8 x i16> @test_phsubsw(<8 x i16>
 ;
 ; ZNVER1-LABEL: test_phsubsw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphsubsw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphsubsw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphsubsw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphsubsw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -583,9 +583,9 @@ define <8 x i16> @test_phsubw(<8 x i16>
 ;
 ; ZNVER1-LABEL: test_phsubw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vphsubw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
-; ZNVER1-NEXT:    vphsubw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    vphsubw %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vphsubw (%rdi), %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %1, <8 x i16> %2)
@@ -640,7 +640,7 @@ define <8 x i16> @test_pmaddubsw(<16 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmaddubsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmaddubsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = bitcast <8 x i16> %1 to <16 x i8>
@@ -696,7 +696,7 @@ define <8 x i16> @test_pmulhrsw(<8 x i16
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpmulhrsw %xmm1, %xmm0, %xmm0 # sched: [4:1.00]
 ; ZNVER1-NEXT:    vpmulhrsw (%rdi), %xmm0, %xmm0 # sched: [11:1.00]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %1, <8 x i16> %2)
@@ -751,7 +751,7 @@ define <16 x i8> @test_pshufb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpshufb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> %2)
@@ -810,7 +810,7 @@ define <16 x i8> @test_psignb(<16 x i8>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsignb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsignb (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
   %3 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %1, <16 x i8> %2)
@@ -869,7 +869,7 @@ define <4 x i32> @test_psignd(<4 x i32>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsignd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsignd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
   %2 = load <4 x i32>, <4 x i32> *%a2, align 16
   %3 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %1, <4 x i32> %2)
@@ -928,7 +928,7 @@ define <8 x i16> @test_psignw(<8 x i16>
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vpsignw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
 ; ZNVER1-NEXT:    vpsignw (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
-; ZNVER1-NEXT:    retq # sched: [5:0.50]
+; ZNVER1-NEXT:    retq # sched: [1:0.50]
   %1 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
   %2 = load <8 x i16>, <8 x i16> *%a2, align 16
   %3 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %1, <8 x i16> %2)

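On the SSSE3 side the same split appears once more: the phadd/phsub family joins the [100:?] microcoded bucket, while pmaddubsw and pmulhrsw get concrete multiplier-pipe numbers ([4:1.00] register, [11:1.00] folded-load). One last sketch in the same spirit; ZnFPU0 as the pipe carrying the vector multiplier is an assumption, and since instruction-name suffixes vary, the regexes are only illustrative:

    def ZnWriteVecMulr : SchedWriteRes<[ZnFPU0]> { let Latency = 4; }
    def ZnWriteVecMulm : SchedWriteRes<[ZnAGU, ZnFPU0]> { let Latency = 11; }
    def : InstRW<[ZnWriteVecMulr], (instregex "(V?)PM(ULHRSW|ADDUBSW)rr(128)?")>;
    def : InstRW<[ZnWriteVecMulm], (instregex "(V?)PM(ULHRSW|ADDUBSW)rm(128)?")>;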