[llvm] r322086 - [CodeGen] Don't print register classes in -debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 9 07:39:44 PST 2018


Author: thegameg
Date: Tue Jan  9 07:39:44 2018
New Revision: 322086

URL: http://llvm.org/viewvc/llvm-project?rev=322086&view=rev
Log:
[CodeGen] Don't print register classes in -debug output

Since register classes and banks are already printed with the register
definition, stop printing them again at the end of every instruction.

This follows the MIR format and is another step toward unifying the two
formats.
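
For illustration, taken from the arm64-misched-memdep-bug.ll update below, a
scheduler -debug line goes from

  SU(2):   %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0

to

  SU(2):   %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1]

since the class is already visible in the %2:gpr32 definition.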

Modified:
    llvm/trunk/lib/CodeGen/MachineInstr.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll
    llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
    llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
    llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
    llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
    llvm/trunk/test/CodeGen/X86/misched-copy.ll

Modified: llvm/trunk/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineInstr.cpp?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp Tue Jan  9 07:39:44 2018
@@ -1237,8 +1237,6 @@ void MachineInstr::print(raw_ostream &OS
     }
   }
 
-  // Save a list of virtual registers.
-  SmallVector<unsigned, 8> VirtRegs;
 
   SmallBitVector PrintedTypes(8);
   bool ShouldPrintRegisterTies = hasComplexRegisterTies();
@@ -1262,9 +1260,6 @@ void MachineInstr::print(raw_ostream &OS
     getOperand(StartOp).print(OS, MST, TypeToPrint, /*PrintDef=*/false,
                               ShouldPrintRegisterTies, TiedOperandIdx, TRI,
                               IntrinsicInfo);
-    unsigned Reg = getOperand(StartOp).getReg();
-    if (TargetRegisterInfo::isVirtualRegister(Reg))
-      VirtRegs.push_back(Reg);
   }
 
   if (StartOp != 0)
@@ -1318,9 +1313,6 @@ void MachineInstr::print(raw_ostream &OS
   for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = getOperand(i);
 
-    if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
-      VirtRegs.push_back(MO.getReg());
-
     if (FirstOp) FirstOp = false; else OS << ",";
     OS << " ";
     if (i < getDesc().NumOperands) {
@@ -1444,35 +1436,6 @@ void MachineInstr::print(raw_ostream &OS
     }
   }
 
-  // Print the regclass of any virtual registers encountered.
-  if (MRI && !VirtRegs.empty()) {
-    if (!HaveSemi) {
-      OS << ";";
-      HaveSemi = true;
-    }
-    for (unsigned i = 0; i != VirtRegs.size(); ++i) {
-      const RegClassOrRegBank &RC = MRI->getRegClassOrRegBank(VirtRegs[i]);
-      if (!RC)
-        continue;
-      // Generic virtual registers do not have register classes.
-      if (RC.is<const RegisterBank *>())
-        OS << " " << RC.get<const RegisterBank *>()->getName();
-      else
-        OS << " "
-           << TRI->getRegClassName(RC.get<const TargetRegisterClass *>());
-      OS << ':' << printReg(VirtRegs[i]);
-      for (unsigned j = i+1; j != VirtRegs.size();) {
-        if (MRI->getRegClassOrRegBank(VirtRegs[j]) != RC) {
-          ++j;
-          continue;
-        }
-        if (VirtRegs[i] != VirtRegs[j])
-          OS << "," << printReg(VirtRegs[j]);
-        VirtRegs.erase(VirtRegs.begin()+j);
-      }
-    }
-  }
-
   // Print debug location information.
   if (isDebugValue() && getOperand(e - 2).isMetadata()) {
     if (!HaveSemi)

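The per-instruction trailer is gone, but the class or bank of a virtual
register is still available from MachineRegisterInfo. A minimal sketch,
reusing the same APIs as the block deleted above (the helper name and its
placement are hypothetical, not part of this commit):

  // Print the register class or bank of a virtual register, if assigned.
  // Uses only MachineRegisterInfo/TargetRegisterInfo calls that the removed
  // code already relied on.
  static void printVRegClassOrBank(raw_ostream &OS, unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo &TRI) {
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      return;
    const RegClassOrRegBank &RC = MRI.getRegClassOrRegBank(Reg);
    if (!RC)
      return; // Generic virtual registers may have neither a class nor a bank.
    if (RC.is<const RegisterBank *>())
      OS << RC.get<const RegisterBank *>()->getName();
    else
      OS << TRI.getRegClassName(RC.get<const TargetRegisterClass *>());
  }
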
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll Tue Jan  9 07:39:44 2018
@@ -46,7 +46,7 @@ define [1 x double] @constant() {
   ; The key problem here is that we may fail to create an MBB referenced by a
   ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
   ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6(s32), %2(p0); mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6(s32), %2(p0); mem:ST4[%addr] (in function: pending_phis)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
 ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
 define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -94,7 +94,7 @@ define i128 @sequence_sizes([8 x i8] %in
 }
 
 ; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0(p0); mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0(p0); mem:LD8[%addr] (in function: atomic_ops)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
 ; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
 define i64 @atomic_ops(i64* %addr) {
@@ -142,7 +142,7 @@ define fp128 @test_quad_dump() {
   ret fp128 0xL00000000000000004000000000000000
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(p0) = G_EXTRACT_VECTOR_ELT %1(<2 x p0>), %2(s32); (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(p0) = G_EXTRACT_VECTOR_ELT %1(<2 x p0>), %2(s32) (in function: vector_of_pointers_extractelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
 @var = global <2 x i16*> zeroinitializer
@@ -237,7 +237,7 @@ define void @nonpow2_constant_narrowing(
 ; Currently can't handle vector lengths that aren't an exact multiple of
 ; natively supported vector lengths. Test that the fall-back works for those.
 ; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1(<7 x s64>), %3(s64); (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1(<7 x s64>), %3(s64) (in function: nonpow2_vector_add_fewerelements)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
 define void @nonpow2_vector_add_fewerelements() {

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll Tue Jan  9 07:39:44 2018
@@ -5,15 +5,15 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2):   %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
+; CHECK: SU(2):   %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1]
 ; CHECK:   Successors:
 ; CHECK-NEXT:    SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT:    SU(4): Ord  Latency=0
-; CHECK: SU(3):   STRWui %wzr, %0, 0; mem:ST4[%ptr1] GPR64common:%0
+; CHECK: SU(3):   STRWui %wzr, %0, 0; mem:ST4[%ptr1]
 ; CHECK:   Successors:
 ; CHECK: SU(4): Ord  Latency=0
-; CHECK: SU(4):   STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1
-; CHECK: SU(5):   %w0 = COPY %2; GPR32:%2
+; CHECK: SU(4):   STRWui %wzr, %1, 0; mem:ST4[%ptr2]
+; CHECK: SU(5):   %w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
 entry:

Modified: llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extload-align.ll Tue Jan  9 07:39:44 2018
@@ -7,7 +7,7 @@ target datalayout = "A5"
 ; size and not 4 corresponding to the sign-extended size (i32).
 
 ; DEBUG: {{^}}# Machine code for function extload_align:
-; DEBUG: mem:LD2[<unknown>(addrspace=5)]{{[^(]}}
+; DEBUG: mem:LD2[<unknown>(addrspace=5)]
 ; DEBUG: {{^}}# End machine code for function extload_align.
 
 define amdgpu_kernel void @extload_align(i32 addrspace(5)* %out, i32 %index) #0 {

Modified: llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir Tue Jan  9 07:39:44 2018
@@ -37,62 +37,62 @@
   }
 #
 # CHECK:       ********** MI Scheduling **********
-# CHECK:       SU(2):   %2:rgpr = t2MOVi32imm @g1; rGPR:%2
+# CHECK:       SU(2):   %2:rgpr = t2MOVi32imm @g1
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 2
 # CHECK_R52:   Latency    : 2
 #
-# CHECK:       SU(3):   %3:rgpr = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
+# CHECK:       SU(3):   %3:rgpr = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable)
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 3
 # CHECK_R52:   Latency    : 4
 #
-# CHECK :      SU(6):   %6 = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
+# CHECK :      SU(6):   %6 = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 
-# CHECK:       SU(7):   %7:rgpr = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
+# CHECK:       SU(7):   %7:rgpr = t2SDIV %6, %5, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 0
 # CHECK_SWIFT: Latency    : 14
 # CHECK_R52:   Latency    : 8
 
-# CHECK:       SU(8):   t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%7,%2
+# CHECK:       SU(8):   t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1]
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 0
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(9):   %8:rgpr = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
+# CHECK:       SU(9):   %8:rgpr = t2SMULBB %1, %1, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(10):   %9:rgpr = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
+# CHECK:       SU(10):   %9:rgpr = t2SMLABB %0, %0, %8, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(11):   %10:rgpr = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
+# CHECK:       SU(11):   %10:rgpr = t2UXTH %9, 0, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 #
-# CHECK:       SU(12):   %11:rgpr = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
+# CHECK:       SU(12):   %11:rgpr = t2MUL %10, %7, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(13):   %12:rgpr = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
+# CHECK:       SU(13):   %12:rgpr = t2MLA %11, %11, %11, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(14):   %13:rgpr, %14:rgpr = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
+# CHECK:       SU(14):   %13:rgpr, %14:rgpr = t2UMULL %12, %12, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 5
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(18):   %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
+# CHECK:       SU(18):   %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 7
 # CHECK_R52:   Latency    : 4

Modified: llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir Tue Jan  9 07:39:44 2018
@@ -28,37 +28,37 @@
   }
 
 # CHECK:       ********** MI Scheduling **********
-# CHECK:       SU(2):   %2:gpr = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
+# CHECK:       SU(2):   %2:gpr = SMULBB %1, %1, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(3):   %3:gprnopc = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
+# CHECK:       SU(3):   %3:gprnopc = SMLABB %0, %0, %2, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(4):   %4:gprnopc = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
+# CHECK:       SU(4):   %4:gprnopc = UXTH %3, 0, pred:14, pred:%noreg
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 #
-# CHECK:       SU(5):   %5:gprnopc = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
+# CHECK:       SU(5):   %5:gprnopc = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(6):   %6:gprnopc = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
+# CHECK:       SU(6):   %6:gprnopc = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(7):   %7:gprnopc, %8:gprnopc = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
+# CHECK:       SU(7):   %7:gprnopc, %8:gprnopc = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 5
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(11):   %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
+# CHECK:       SU(11):   %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, pred:14, pred:%noreg, opt:%noreg
 # CHECK_SWIFT: Latency    : 7
 # CHECK_A9:    Latency    : 3
 # CHECK_R52:   Latency    : 4

Modified: llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir Tue Jan  9 07:39:44 2018
@@ -20,13 +20,13 @@
 
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1):   %1:qqpr = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
+# CHECK: SU(1):   %1:qqpr = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8)
 # CHECK: Latency            : 8
 # CHECK: Single Issue       : true;
-# CHECK: SU(2):   %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
+# CHECK: SU(2):   %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, pred:14, pred:%noreg
 # CHECK: Latency            : 5
 # CHECK: Single Issue       : false;
-# CHECK: SU(3):   %5:gpr, %6:gpr = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
+# CHECK: SU(3):   %5:gpr, %6:gpr = VMOVRRD %4, pred:14, pred:%noreg
 # CHECK: Latency            : 4
 # CHECK: Single Issue       : false;
 

Modified: llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll Tue Jan  9 07:39:44 2018
@@ -7,11 +7,11 @@ define signext i32 @fn1(i32 %baz) {
   %2 = zext i32 %1 to i64
   %3 = shl i64 %2, 48
   %4 = ashr exact i64 %3, 48
-; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead %cr0;
+; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead %cr0
 ; CHECK: CMPLDI
 ; CHECK: BCC
 
-; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def %cr0;
+; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def %cr0
 ; CHECK: COPY %cr0
 ; CHECK: BCC
   %5 = icmp eq i64 %4, 0
@@ -26,8 +26,8 @@ bar:
 
 ; CHECK-LABEL: fn2
 define signext i32 @fn2(i64 %a, i64 %b) {
-; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def %cr0;
-; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed %cr0
+; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def %cr0
+; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed %cr
 ; CHECK: BCC 12, killed [[CREG]]
   %1 = or i64 %b, %a
   %2 = icmp sgt i64 %1, -1
@@ -42,7 +42,7 @@ bar:
 
 ; CHECK-LABEL: fn3
 define signext i32 @fn3(i32 %a) {
-; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def %cr0;
+; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def %cr0
 ; CHECK: [[CREG:[^, ]+]]:crrc = COPY %cr0
 ; CHECK: BCC 76, killed [[CREG]]
   %1 = and i32 %a, 10

Modified: llvm/trunk/test/CodeGen/X86/misched-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-copy.ll?rev=322086&r1=322085&r2=322086&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-copy.ll Tue Jan  9 07:39:44 2018
@@ -10,7 +10,7 @@
 ;
 ; CHECK: *** Final schedule for %bb.1 ***
 ; CHECK:      %eax = COPY
-; CHECK-NEXT: MUL32r %{{[0-9]+}}, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax;
+; CHECK-NEXT: MUL32r %{{[0-9]+}}, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK:      DIVSSrm
