[llvm] r309156 - This patch returns a proper value to indicate the case when instruction throughput can't be calculated.

Andrew V. Tischenko via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 26 11:55:14 PDT 2017


Author: avt77
Date: Wed Jul 26 11:55:14 2017
New Revision: 309156

URL: http://llvm.org/viewvc/llvm-project?rev=309156&view=rev
Log:
This patch returns a proper value to indicate the case when instruction throughput can't be calculated.
Differential Revision: https://reviews.llvm.org/D35831
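
With this change, getRThroughputFromItineraries and getRThroughputFromInstrSchedModel return an empty Optional<double> when no itinerary stage or write-resource entry reports a non-zero cycle count, instead of dividing by infinity and handing back a bogus number. A minimal sketch of how a caller might render the result is below; formatRThroughput is a hypothetical helper shown only to illustrate handling of the empty state (the sched comments in the updated tests print "?" for an unknown reciprocal throughput):

  // Hypothetical helper: render a possibly-unknown reciprocal throughput.
  #include "llvm/ADT/Optional.h"
  #include <string>

  static std::string formatRThroughput(llvm::Optional<double> RThroughput) {
    if (!RThroughput.hasValue())
      return "?"; // throughput could not be computed from the scheduling model
    return std::to_string(RThroughput.getValue());
  }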

Modified:
    llvm/trunk/lib/CodeGen/TargetSchedule.cpp
    llvm/trunk/test/CodeGen/X86/avx-schedule.ll
    llvm/trunk/test/CodeGen/X86/bmi-schedule.ll
    llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll
    llvm/trunk/test/CodeGen/X86/f16c-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse41-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse42-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll

Modified: llvm/trunk/lib/CodeGen/TargetSchedule.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetSchedule.cpp?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetSchedule.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetSchedule.cpp Wed Jul 26 11:55:14 2017
@@ -339,42 +339,46 @@ computeOutputLatency(const MachineInstr
 static Optional<double>
 getRThroughputFromItineraries(unsigned schedClass,
                               const InstrItineraryData *IID){
-  double Unknown = std::numeric_limits<double>::infinity();
-  double Throughput = Unknown;
+  Optional<double> Throughput;
 
   for (const InstrStage *IS = IID->beginStage(schedClass),
                         *E = IID->endStage(schedClass);
        IS != E; ++IS) {
-    unsigned Cycles = IS->getCycles();
-    if (!Cycles)
-      continue;
-    Throughput =
-        std::min(Throughput, countPopulation(IS->getUnits()) * 1.0 / Cycles);
+    if (IS->getCycles()) {
+      double Temp = countPopulation(IS->getUnits()) * 1.0 / IS->getCycles();
+      Throughput = Throughput.hasValue()
+                        ? std::min(Throughput.getValue(), Temp)
+                        : Temp;
+    }
   }
-  // We need reciprocal throughput that's why we return such value.
-  return 1 / Throughput;
+  if (Throughput.hasValue())
+    // We need reciprocal throughput that's why we return such value.
+    return 1 / Throughput.getValue();
+  return Throughput;
 }
 
 static Optional<double>
 getRThroughputFromInstrSchedModel(const MCSchedClassDesc *SCDesc,
                                   const TargetSubtargetInfo *STI,
                                   const MCSchedModel &SchedModel) {
-  double Unknown = std::numeric_limits<double>::infinity();
-  double Throughput = Unknown;
+  Optional<double> Throughput;
 
   for (const MCWriteProcResEntry *WPR = STI->getWriteProcResBegin(SCDesc),
                                  *WEnd = STI->getWriteProcResEnd(SCDesc);
        WPR != WEnd; ++WPR) {
-    unsigned Cycles = WPR->Cycles;
-    if (!Cycles)
-      return Optional<double>();
-
-    unsigned NumUnits =
-        SchedModel.getProcResource(WPR->ProcResourceIdx)->NumUnits;
-    Throughput = std::min(Throughput, NumUnits * 1.0 / Cycles);
+    if (WPR->Cycles) {
+      unsigned NumUnits =
+          SchedModel.getProcResource(WPR->ProcResourceIdx)->NumUnits;
+      double Temp = NumUnits * 1.0 / WPR->Cycles;
+      Throughput = Throughput.hasValue()
+                       ? std::min(Throughput.getValue(), Temp)
+                       : Temp;
+    }
   }
-  // We need reciprocal throughput that's why we return such value.
-  return 1 / Throughput;
+  if (Throughput.hasValue())
+    // We need reciprocal throughput that's why we return such value.
+    return 1 / Throughput.getValue();
+  return Throughput;
 }
 
 Optional<double>

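For reference, the reciprocal throughput that ends up in the "sched: [latency:rthroughput]" comments is the inverse of the best per-resource throughput found by the loops above. A small worked example, assuming a write-resource entry that occupies a group of two units for one cycle:

  Throughput            = NumUnits * 1.0 / Cycles = 2 / 1 = 2 instructions per cycle
  Reciprocal throughput = 1 / Throughput          = 0.50

If every stage or resource entry reports zero cycles, Throughput is never assigned, the function now returns an empty Optional<double>, and the sched comment shows "?" (or is dropped altogether), as the test diffs below illustrate.
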
Modified: llvm/trunk/test/CodeGen/X86/avx-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-schedule.ll Wed Jul 26 11:55:14 2017
@@ -867,14 +867,14 @@ define <4 x float> @test_extractf128(<8
 ; SANDY:       # BB#0:
 ; SANDY-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:1.00]
 ; SANDY-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [5:1.00]
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_extractf128:
 ; HASWELL:       # BB#0:
 ; HASWELL-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [3:1.00]
 ; HASWELL-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [4:1.00]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_extractf128:
@@ -887,7 +887,7 @@ define <4 x float> @test_extractf128(<8
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm0, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    vextractf128 $1, %ymm1, (%rdi) # sched: [1:0.50]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = shufflevector <8 x float> %a0, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %2 = shufflevector <8 x float> %a1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -1098,15 +1098,15 @@ define <2 x double> @test_maskmovpd(i8*
 ;
 ; BTVER2-LABEL: test_maskmovpd:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2
+; BTVER2-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi)
 ; BTVER2-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vmaskmovpd (%rdi), %xmm0, %xmm2
+; ZNVER1-NEXT:    vmaskmovpd %xmm1, %xmm0, (%rdi)
 ; ZNVER1-NEXT:    vmovapd %xmm2, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %a1)
@@ -1120,7 +1120,7 @@ define <4 x double> @test_maskmovpd_ymm(
 ; SANDY-LABEL: test_maskmovpd_ymm:
 ; SANDY:       # BB#0:
 ; SANDY-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [5:1.00]
-; SANDY-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi)
 ; SANDY-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
@@ -1133,15 +1133,15 @@ define <4 x double> @test_maskmovpd_ymm(
 ;
 ; BTVER2-LABEL: test_maskmovpd_ymm:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2
+; BTVER2-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi)
 ; BTVER2-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovpd_ymm:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm2
+; ZNVER1-NEXT:    vmaskmovpd %ymm1, %ymm0, (%rdi)
 ; ZNVER1-NEXT:    vmovapd %ymm2, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %a1)
@@ -1168,15 +1168,15 @@ define <4 x float> @test_maskmovps(i8* %
 ;
 ; BTVER2-LABEL: test_maskmovps:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
+; BTVER2-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
 ; BTVER2-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vmaskmovps (%rdi), %xmm0, %xmm2
+; ZNVER1-NEXT:    vmaskmovps %xmm1, %xmm0, (%rdi)
 ; ZNVER1-NEXT:    vmovaps %xmm2, %xmm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %a1)
@@ -1190,7 +1190,7 @@ define <8 x float> @test_maskmovps_ymm(i
 ; SANDY-LABEL: test_maskmovps_ymm:
 ; SANDY:       # BB#0:
 ; SANDY-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [1:0.50]
-; SANDY-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi)
 ; SANDY-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:1.00]
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
@@ -1203,15 +1203,15 @@ define <8 x float> @test_maskmovps_ymm(i
 ;
 ; BTVER2-LABEL: test_maskmovps_ymm:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2
+; BTVER2-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi)
 ; BTVER2-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_maskmovps_ymm:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2 # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi) # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vmaskmovps (%rdi), %ymm0, %ymm2
+; ZNVER1-NEXT:    vmaskmovps %ymm1, %ymm0, (%rdi)
 ; ZNVER1-NEXT:    vmovaps %ymm2, %ymm0 # sched: [1:0.50]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %a1)
@@ -1452,13 +1452,13 @@ define i32 @test_movmskpd(<4 x double> %
 ; SANDY-LABEL: test_movmskpd:
 ; SANDY:       # BB#0:
 ; SANDY-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movmskpd:
 ; HASWELL:       # BB#0:
 ; HASWELL-NEXT:    vmovmskpd %ymm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_movmskpd:
@@ -1469,7 +1469,7 @@ define i32 @test_movmskpd(<4 x double> %
 ; ZNVER1-LABEL: test_movmskpd:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovmskpd %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0)
   ret i32 %1
@@ -1480,13 +1480,13 @@ define i32 @test_movmskps(<8 x float> %a
 ; SANDY-LABEL: test_movmskps:
 ; SANDY:       # BB#0:
 ; SANDY-NEXT:    vmovmskps %ymm0, %eax # sched: [3:1.00]
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_movmskps:
 ; HASWELL:       # BB#0:
 ; HASWELL-NEXT:    vmovmskps %ymm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_movmskps:
@@ -1497,7 +1497,7 @@ define i32 @test_movmskps(<8 x float> %a
 ; ZNVER1-LABEL: test_movmskps:
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vmovmskps %ymm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0)
   ret i32 %1
@@ -2499,7 +2499,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; SANDY-NEXT:    setb %al # sched: [1:1.00]
 ; SANDY-NEXT:    vtestpd (%rdi), %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    adcl $0, %eax # sched: [1:0.33]
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testpd_ymm:
@@ -2509,7 +2509,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
 ; HASWELL-NEXT:    vtestpd (%rdi), %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    adcl $0, %eax # sched: [2:0.50]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_testpd_ymm:
@@ -2528,7 +2528,7 @@ define i32 @test_testpd_ymm(<4 x double>
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestpd (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
   %2 = load <4 x double>, <4 x double> *%a2, align 32
@@ -2590,7 +2590,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; SANDY-NEXT:    setb %al # sched: [1:1.00]
 ; SANDY-NEXT:    vtestps (%rdi), %ymm0 # sched: [8:1.00]
 ; SANDY-NEXT:    adcl $0, %eax # sched: [1:0.33]
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_testps_ymm:
@@ -2600,7 +2600,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; HASWELL-NEXT:    setb %al # sched: [1:0.50]
 ; HASWELL-NEXT:    vtestps (%rdi), %ymm0 # sched: [5:0.50]
 ; HASWELL-NEXT:    adcl $0, %eax # sched: [2:0.50]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_testps_ymm:
@@ -2619,7 +2619,7 @@ define i32 @test_testps_ymm(<8 x float>
 ; ZNVER1-NEXT:    setb %al # sched: [1:0.25]
 ; ZNVER1-NEXT:    vtestps (%rdi), %ymm0 # sched: [8:0.50]
 ; ZNVER1-NEXT:    adcl $0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
   %2 = load <8 x float>, <8 x float> *%a2, align 32
@@ -2840,22 +2840,22 @@ define <8 x float> @test_xorps(<8 x floa
 define void @test_zeroall() {
 ; SANDY-LABEL: test_zeroall:
 ; SANDY:       # BB#0:
-; SANDY-NEXT:    vzeroall # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroall
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_zeroall:
 ; HASWELL:       # BB#0:
-; HASWELL-NEXT:    vzeroall # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroall # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_zeroall:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vzeroall # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vzeroall
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_zeroall:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vzeroall # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroall
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   call void @llvm.x86.avx.vzeroall()
   ret void
@@ -2865,22 +2865,22 @@ declare void @llvm.x86.avx.vzeroall() no
 define void @test_zeroupper() {
 ; SANDY-LABEL: test_zeroupper:
 ; SANDY:       # BB#0:
-; SANDY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; SANDY-NEXT:    vzeroupper
 ; SANDY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_zeroupper:
 ; HASWELL:       # BB#0:
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_zeroupper:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    vzeroupper
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_zeroupper:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   call void @llvm.x86.avx.vzeroupper()
   ret void

Modified: llvm/trunk/test/CodeGen/X86/bmi-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi-schedule.ll Wed Jul 26 11:55:14 2017
@@ -139,15 +139,15 @@ define i32 @test_bextr_i32(i32 %a0, i32
 ;
 ; BTVER2-LABEL: test_bextr_i32:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    bextrl %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    bextrl %edi, %esi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    bextrl %edi, (%rdx), %ecx
+; BTVER2-NEXT:    bextrl %edi, %esi, %eax
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bextr_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bextrl %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    bextrl %edi, %esi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bextrl %edi, (%rdx), %ecx
+; ZNVER1-NEXT:    bextrl %edi, %esi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a2
@@ -175,15 +175,15 @@ define i64 @test_bextr_i64(i64 %a0, i64
 ;
 ; BTVER2-LABEL: test_bextr_i64:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    bextrq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    bextrq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    bextrq %rdi, (%rdx), %rcx
+; BTVER2-NEXT:    bextrq %rdi, %rsi, %rax
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_bextr_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bextrq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    bextrq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bextrq %rdi, (%rdx), %rcx
+; ZNVER1-NEXT:    bextrq %rdi, %rsi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a2
@@ -211,15 +211,15 @@ define i32 @test_blsi_i32(i32 %a0, i32 *
 ;
 ; BTVER2-LABEL: test_blsi_i32:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsil (%rsi), %ecx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsil %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsil (%rsi), %ecx
+; BTVER2-NEXT:    blsil %edi, %eax
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsi_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsil (%rsi), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsil %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsil (%rsi), %ecx
+; ZNVER1-NEXT:    blsil %edi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a1
@@ -248,15 +248,15 @@ define i64 @test_blsi_i64(i64 %a0, i64 *
 ;
 ; BTVER2-LABEL: test_blsi_i64:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsiq (%rsi), %rcx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsiq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsiq (%rsi), %rcx
+; BTVER2-NEXT:    blsiq %rdi, %rax
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsi_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsiq (%rsi), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsiq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsiq (%rsi), %rcx
+; ZNVER1-NEXT:    blsiq %rdi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a1
@@ -285,15 +285,15 @@ define i32 @test_blsmsk_i32(i32 %a0, i32
 ;
 ; BTVER2-LABEL: test_blsmsk_i32:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsmskl (%rsi), %ecx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsmskl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsmskl (%rsi), %ecx
+; BTVER2-NEXT:    blsmskl %edi, %eax
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsmsk_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsmskl (%rsi), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsmskl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsmskl (%rsi), %ecx
+; ZNVER1-NEXT:    blsmskl %edi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a1
@@ -322,15 +322,15 @@ define i64 @test_blsmsk_i64(i64 %a0, i64
 ;
 ; BTVER2-LABEL: test_blsmsk_i64:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsmskq (%rsi), %rcx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsmskq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsmskq (%rsi), %rcx
+; BTVER2-NEXT:    blsmskq %rdi, %rax
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsmsk_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsmskq (%rsi), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsmskq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsmskq (%rsi), %rcx
+; ZNVER1-NEXT:    blsmskq %rdi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a1
@@ -359,15 +359,15 @@ define i32 @test_blsr_i32(i32 %a0, i32 *
 ;
 ; BTVER2-LABEL: test_blsr_i32:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsrl (%rsi), %ecx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsrl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsrl (%rsi), %ecx
+; BTVER2-NEXT:    blsrl %edi, %eax
 ; BTVER2-NEXT:    addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsr_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsrl (%rsi), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsrl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsrl (%rsi), %ecx
+; ZNVER1-NEXT:    blsrl %edi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a1
@@ -396,15 +396,15 @@ define i64 @test_blsr_i64(i64 %a0, i64 *
 ;
 ; BTVER2-LABEL: test_blsr_i64:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    blsrq (%rsi), %rcx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    blsrq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    blsrq (%rsi), %rcx
+; BTVER2-NEXT:    blsrq %rdi, %rax
 ; BTVER2-NEXT:    addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_blsr_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    blsrq (%rsi), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    blsrq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    blsrq (%rsi), %rcx
+; ZNVER1-NEXT:    blsrq %rdi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a1
@@ -435,16 +435,16 @@ define i16 @test_cttz_i16(i16 zeroext %a
 ;
 ; BTVER2-LABEL: test_cttz_i16:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    tzcntw (%rsi), %cx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    tzcntw %di, %ax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    tzcntw (%rsi), %cx
+; BTVER2-NEXT:    tzcntw %di, %ax
 ; BTVER2-NEXT:    orl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cttz_i16:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntw (%rsi), %cx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    tzcntw %di, %ax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    tzcntw (%rsi), %cx
+; ZNVER1-NEXT:    tzcntw %di, %ax
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
@@ -473,15 +473,15 @@ define i32 @test_cttz_i32(i32 %a0, i32 *
 ;
 ; BTVER2-LABEL: test_cttz_i32:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    tzcntl (%rsi), %ecx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    tzcntl %edi, %eax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    tzcntl (%rsi), %ecx
+; BTVER2-NEXT:    tzcntl %edi, %eax
 ; BTVER2-NEXT:    orl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cttz_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntl (%rsi), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    tzcntl %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    tzcntl (%rsi), %ecx
+; ZNVER1-NEXT:    tzcntl %edi, %eax
 ; ZNVER1-NEXT:    orl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a1
@@ -509,15 +509,15 @@ define i64 @test_cttz_i64(i64 %a0, i64 *
 ;
 ; BTVER2-LABEL: test_cttz_i64:
 ; BTVER2:       # BB#0:
-; BTVER2-NEXT:    tzcntq (%rsi), %rcx # sched: [?:0.000000e+00]
-; BTVER2-NEXT:    tzcntq %rdi, %rax # sched: [?:0.000000e+00]
+; BTVER2-NEXT:    tzcntq (%rsi), %rcx
+; BTVER2-NEXT:    tzcntq %rdi, %rax
 ; BTVER2-NEXT:    orq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT:    retq # sched: [4:1.00]
 ;
 ; ZNVER1-LABEL: test_cttz_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    tzcntq (%rsi), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    tzcntq %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    tzcntq (%rsi), %rcx
+; ZNVER1-NEXT:    tzcntq %rdi, %rax
 ; ZNVER1-NEXT:    orq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a1

Modified: llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bmi2-schedule.ll Wed Jul 26 11:55:14 2017
@@ -22,8 +22,8 @@ define i32 @test_bzhi_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_bzhi_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bzhil %edi, (%rdx), %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    bzhil %edi, %esi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bzhil %edi, (%rdx), %ecx
+; ZNVER1-NEXT:    bzhil %edi, %esi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a2
@@ -51,8 +51,8 @@ define i64 @test_bzhi_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_bzhi_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    bzhiq %rdi, (%rdx), %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    bzhiq %rdi, %rsi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    bzhiq %rdi, (%rdx), %rcx
+; ZNVER1-NEXT:    bzhiq %rdi, %rsi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a2
@@ -80,8 +80,8 @@ define i32 @test_pdep_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_pdep_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pdepl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    pdepl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pdepl (%rdx), %edi, %ecx
+; ZNVER1-NEXT:    pdepl %esi, %edi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a2
@@ -109,8 +109,8 @@ define i64 @test_pdep_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_pdep_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pdepq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    pdepq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pdepq (%rdx), %rdi, %rcx
+; ZNVER1-NEXT:    pdepq %rsi, %rdi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a2
@@ -138,8 +138,8 @@ define i32 @test_pext_i32(i32 %a0, i32 %
 ;
 ; ZNVER1-LABEL: test_pext_i32:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pextl (%rdx), %edi, %ecx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    pextl %esi, %edi, %eax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pextl (%rdx), %edi, %ecx
+; ZNVER1-NEXT:    pextl %esi, %edi, %eax
 ; ZNVER1-NEXT:    addl %ecx, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i32, i32 *%a2
@@ -167,8 +167,8 @@ define i64 @test_pext_i64(i64 %a0, i64 %
 ;
 ; ZNVER1-LABEL: test_pext_i64:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    pextq (%rdx), %rdi, %rcx # sched: [?:0.000000e+00]
-; ZNVER1-NEXT:    pextq %rsi, %rdi, %rax # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    pextq (%rdx), %rdi, %rcx
+; ZNVER1-NEXT:    pextq %rsi, %rdi, %rax
 ; ZNVER1-NEXT:    addq %rcx, %rax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = load i64, i64 *%a2

Modified: llvm/trunk/test/CodeGen/X86/f16c-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/f16c-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/f16c-schedule.ll Wed Jul 26 11:55:14 2017
@@ -114,14 +114,14 @@ define <8 x i16> @test_vcvtps2ph_256(<8
 ; IVY:       # BB#0:
 ; IVY-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [3:1.00]
 ; IVY-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [7:1.00]
-; IVY-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; IVY-NEXT:    vzeroupper
 ; IVY-NEXT:    retq # sched: [1:1.00]
 ;
 ; HASWELL-LABEL: test_vcvtps2ph_256:
 ; HASWELL:       # BB#0:
 ; HASWELL-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [4:1.00]
 ; HASWELL-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [8:1.00]
-; HASWELL-NEXT:    vzeroupper # sched: [1:0.00]
+; HASWELL-NEXT:    vzeroupper # sched: [1:?]
 ; HASWELL-NEXT:    retq # sched: [1:1.00]
 ;
 ; BTVER2-LABEL: test_vcvtps2ph_256:
@@ -134,7 +134,7 @@ define <8 x i16> @test_vcvtps2ph_256(<8
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    vcvtps2ph $0, %ymm0, %xmm0 # sched: [5:1.00]
 ; ZNVER1-NEXT:    vcvtps2ph $0, %ymm1, (%rdi) # sched: [12:1.00]
-; ZNVER1-NEXT:    vzeroupper # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    vzeroupper
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
   %2 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a1, i32 0)

Modified: llvm/trunk/test/CodeGen/X86/sse41-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse41-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse41-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse41-schedule.ll Wed Jul 26 11:55:14 2017
@@ -394,8 +394,8 @@ define <8 x i16> @test_mpsadbw(<16 x i8>
 ;
 ; ZNVER1-LABEL: test_mpsadbw:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:0.00]
-; ZNVER1-NEXT:    vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vmpsadbw $7, %xmm1, %xmm0, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vmpsadbw $7, (%rdi), %xmm0, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
   %2 = bitcast <8 x i16> %1 to <16 x i8>

Modified: llvm/trunk/test/CodeGen/X86/sse42-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse42-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse42-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse42-schedule.ll Wed Jul 26 11:55:14 2017
@@ -323,11 +323,11 @@ define i32 @test_pcmpestri(<16 x i8> %a0
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movl $7, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl $7, %edx # sched: [1:0.25]
-; ZNVER1-NEXT:    vpcmpestri $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpestri $7, %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    movl $7, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl $7, %edx # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl %ecx, %esi # sched: [1:0.25]
-; ZNVER1-NEXT:    vpcmpestri $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    # kill: %ECX<def> %ECX<kill> %RCX<def>
 ; ZNVER1-NEXT:    leal (%rcx,%rsi), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
@@ -394,10 +394,10 @@ define <16 x i8> @test_pcmpestrm(<16 x i
 ; ZNVER1:       # BB#0:
 ; ZNVER1-NEXT:    movl $7, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl $7, %edx # sched: [1:0.25]
-; ZNVER1-NEXT:    vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpestrm $7, %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    movl $7, %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    movl $7, %edx # sched: [1:0.25]
-; ZNVER1-NEXT:    vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpestrm $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16
@@ -454,9 +454,9 @@ define i32 @test_pcmpistri(<16 x i8> %a0
 ;
 ; ZNVER1-LABEL: test_pcmpistri:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpcmpistri $7, %xmm1, %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    movl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT:    vpcmpistri $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    # kill: %ECX<def> %ECX<kill> %RCX<def>
 ; ZNVER1-NEXT:    leal (%rcx,%rax), %eax # sched: [1:0.25]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
@@ -501,8 +501,8 @@ define <16 x i8> @test_pcmpistrm(<16 x i
 ;
 ; ZNVER1-LABEL: test_pcmpistrm:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:0.00]
-; ZNVER1-NEXT:    vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:0.00]
+; ZNVER1-NEXT:    vpcmpistrm $7, %xmm1, %xmm0 # sched: [100:?]
+; ZNVER1-NEXT:    vpcmpistrm $7, (%rdi), %xmm0 # sched: [100:?]
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
   %2 = load <16 x i8>, <16 x i8> *%a2, align 16

Modified: llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll?rev=309156&r1=309155&r2=309156&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a-schedule.ll Wed Jul 26 11:55:14 2017
@@ -16,7 +16,7 @@ define <2 x i64> @test_extrq(<2 x i64> %
 ;
 ; ZNVER1-LABEL: test_extrq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    extrq %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    extrq %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %a0, <16 x i8> %a1)
   ret <2 x i64> %1
@@ -36,7 +36,7 @@ define <2 x i64> @test_extrqi(<2 x i64>
 ;
 ; ZNVER1-LABEL: test_extrqi:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    extrq $2, $3, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    extrq $2, $3, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %a0, i8 3, i8 2)
   ret <2 x i64> %1
@@ -56,7 +56,7 @@ define <2 x i64> @test_insertq(<2 x i64>
 ;
 ; ZNVER1-LABEL: test_insertq:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    insertq %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    insertq %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %a0, <2 x i64> %a1)
   ret <2 x i64> %1
@@ -76,7 +76,7 @@ define <2 x i64> @test_insertqi(<2 x i64
 ;
 ; ZNVER1-LABEL: test_insertqi:
 ; ZNVER1:       # BB#0:
-; ZNVER1-NEXT:    insertq $6, $5, %xmm1, %xmm0 # sched: [?:0.000000e+00]
+; ZNVER1-NEXT:    insertq $6, $5, %xmm1, %xmm0
 ; ZNVER1-NEXT:    retq # sched: [5:0.50]
   %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %a0, <2 x i64> %a1, i8 5, i8 6)
   ret <2 x i64> %1
