[llvm] r319427 - [CodeGen] Print "%vreg0" as "%0" in both MIR and debug output

Francis Visoiu Mistrih via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 30 04:12:19 PST 2017


Author: thegameg
Date: Thu Nov 30 04:12:19 2017
New Revision: 319427

URL: http://llvm.org/viewvc/llvm-project?rev=319427&view=rev
Log:
[CodeGen] Print "%vreg0" as "%0" in both MIR and debug output

As part of the unification of the debug format and the MIR format, avoid
printing "vreg" for virtual registers (which is one of the current MIR
possibilities).

Basically:

* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/%vreg([0-9]+)/%\1/g"
* grep -nr '%vreg' . and fix if needed
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/ vreg([0-9]+)/ %\1/g"
* grep -nr 'vreg[0-9]\+' . and fix if needed

Differential Revision: https://reviews.llvm.org/D40420

Modified:
    llvm/trunk/include/llvm/CodeGen/MachineOperand.h
    llvm/trunk/include/llvm/CodeGen/TargetInstrInfo.h
    llvm/trunk/include/llvm/CodeGen/TargetRegisterInfo.h
    llvm/trunk/lib/CodeGen/DetectDeadLanes.cpp
    llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/trunk/lib/CodeGen/MachineVerifier.cpp
    llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
    llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
    llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp
    llvm/trunk/lib/CodeGen/RenameIndependentSubregs.cpp
    llvm/trunk/lib/CodeGen/SplitKit.cpp
    llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
    llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
    llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
    llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
    llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
    llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
    llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.h
    llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp
    llvm/trunk/lib/Target/Hexagon/BitTracker.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonBlockRanges.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp
    llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp
    llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp
    llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp
    llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
    llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp
    llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
    llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
    llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll
    llvm/trunk/test/CodeGen/AMDGPU/liveness.mir
    llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
    llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir
    llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
    llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll
    llvm/trunk/test/CodeGen/ARM/crash-greedy.ll
    llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll
    llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
    llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
    llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
    llvm/trunk/test/CodeGen/ARM/subreg-remat.ll
    llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
    llvm/trunk/test/CodeGen/Hexagon/circ_ldd_bug.ll
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
    llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
    llvm/trunk/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
    llvm/trunk/test/CodeGen/MIR/AArch64/spill-fold.mir
    llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
    llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll
    llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
    llvm/trunk/test/CodeGen/X86/cmovcmov.ll
    llvm/trunk/test/CodeGen/X86/coalescer-dce.ll
    llvm/trunk/test/CodeGen/X86/crash.ll
    llvm/trunk/test/CodeGen/X86/handle-move.ll
    llvm/trunk/test/CodeGen/X86/invalid-liveness.mir
    llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll
    llvm/trunk/test/CodeGen/X86/misched-copy.ll
    llvm/trunk/test/CodeGen/X86/norex-subreg.ll
    llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir

Modified: llvm/trunk/include/llvm/CodeGen/MachineOperand.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineOperand.h?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineOperand.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineOperand.h Thu Nov 30 04:12:19 2017
@@ -116,9 +116,9 @@ private:
   /// the same register.  In that case, the instruction may depend on those
   /// operands reading the same dont-care value.  For example:
   ///
-  ///   %vreg1<def> = XOR %vreg2<undef>, %vreg2<undef>
+  ///   %1<def> = XOR %2<undef>, %2<undef>
   ///
-  /// Any register can be used for %vreg2, and its value doesn't matter, but
+  /// Any register can be used for %2, and its value doesn't matter, but
   /// the two operands must be the same register.
   ///
   bool IsUndef : 1;

Modified: llvm/trunk/include/llvm/CodeGen/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetInstrInfo.h?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetInstrInfo.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetInstrInfo.h Thu Nov 30 04:12:19 2017
@@ -422,10 +422,10 @@ public:
   /// and \p DefIdx.
   /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
   /// the list is modeled as <Reg:SubReg, SubIdx>.
-  /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
   /// two elements:
-  /// - vreg1:sub1, sub0
-  /// - vreg2<:0>, sub1
+  /// - %1:sub1, sub0
+  /// - %2<:0>, sub1
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.
@@ -442,8 +442,8 @@ public:
   /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
   /// and \p DefIdx.
   /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
-  /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
-  /// - vreg1:sub1, sub0
+  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+  /// - %1:sub1, sub0
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.
@@ -460,9 +460,9 @@ public:
   /// and \p DefIdx.
   /// \p [out] BaseReg and \p [out] InsertedReg contain
   /// the equivalent inputs of INSERT_SUBREG.
-  /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
-  /// - BaseReg: vreg0:sub0
-  /// - InsertedReg: vreg1:sub1, sub3
+  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+  /// - BaseReg: %0:sub0
+  /// - InsertedReg: %1:sub1, sub3
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.

Modified: llvm/trunk/include/llvm/CodeGen/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetRegisterInfo.h?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetRegisterInfo.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetRegisterInfo.h Thu Nov 30 04:12:19 2017
@@ -1138,8 +1138,8 @@ struct VirtReg2IndexFunctor {
 ///
 /// The format is:
 ///   %noreg          - NoRegister
-///   %vreg5          - a virtual register.
-///   %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
+///   %5              - a virtual register.
+///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
 ///   %eax            - a physical register
 ///   %physreg17      - a physical register when no TRI instance given.
 ///

Modified: llvm/trunk/lib/CodeGen/DetectDeadLanes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/DetectDeadLanes.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/DetectDeadLanes.cpp (original)
+++ llvm/trunk/lib/CodeGen/DetectDeadLanes.cpp Thu Nov 30 04:12:19 2017
@@ -17,12 +17,12 @@
 /// when subregisters are involved.
 ///
 /// Example:
-///    %vreg0 = some definition
-///    %vreg1 = IMPLICIT_DEF
-///    %vreg2 = REG_SEQUENCE %vreg0, sub0, %vreg1, sub1
-///    %vreg3 = EXTRACT_SUBREG %vreg2, sub1
-///           = use %vreg3
-/// The %vreg0 definition is dead and %vreg3 contains an undefined value.
+///    %0 = some definition
+///    %1 = IMPLICIT_DEF
+///    %2 = REG_SEQUENCE %0, sub0, %1, sub1
+///    %3 = EXTRACT_SUBREG %2, sub1
+///       = use %3
+/// The %0 definition is dead and %3 contains an undefined value.
 //
 //===----------------------------------------------------------------------===//
 

Modified: llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveIntervalAnalysis.cpp Thu Nov 30 04:12:19 2017
@@ -698,11 +698,11 @@ void LiveIntervals::addKillFlags(const V
       // Check if any of the regunits are live beyond the end of RI. That could
       // happen when a physreg is defined as a copy of a virtreg:
       //
-      //   %eax = COPY %vreg5
-      //   FOO %vreg5         <--- MI, cancel kill because %eax is live.
+      //   %eax = COPY %5
+      //   FOO %5             <--- MI, cancel kill because %eax is live.
       //   BAR %eax<kill>
       //
-      // There should be no kill flag on FOO when %vreg5 is rewritten as %eax.
+      // There should be no kill flag on FOO when %5 is rewritten as %eax.
       for (auto &RUP : RU) {
         const LiveRange &RURange = *RUP.first;
         LiveRange::const_iterator &I = RUP.second;
@@ -719,13 +719,13 @@ void LiveIntervals::addKillFlags(const V
         // When reading a partial undefined value we must not add a kill flag.
         // The regalloc might have used the undef lane for something else.
         // Example:
-        //     %vreg1 = ...              ; R32: %vreg1
-        //     %vreg2:high16 = ...       ; R64: %vreg2
-        //        = read %vreg2<kill>    ; R64: %vreg2
-        //        = read %vreg1          ; R32: %vreg1
-        // The <kill> flag is correct for %vreg2, but the register allocator may
-        // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
-        // are actually never written by %vreg2. After assignment the <kill>
+        //     %1 = ...                  ; R32: %1
+        //     %2:high16 = ...           ; R64: %2
+        //        = read %2<kill>        ; R64: %2
+        //        = read %1              ; R32: %1
+        // The <kill> flag is correct for %2, but the register allocator may
+        // assign R0L to %1, and R0 to %2 because the low 32bits of R0
+        // are actually never written by %2. After assignment the <kill>
         // flag at the read instruction is invalid.
         LaneBitmask DefinedLanesMask;
         if (!SRs.empty()) {

Modified: llvm/trunk/lib/CodeGen/MachineVerifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineVerifier.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineVerifier.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineVerifier.cpp Thu Nov 30 04:12:19 2017
@@ -1961,7 +1961,7 @@ void MachineVerifier::verifyLiveRangeSeg
       if (MOI->isDef()) {
         if (Sub != 0) {
           hasSubRegDef = true;
-          // An operand vreg0:sub0<def> reads vreg0:sub1..n. Invert the lane
+          // An operand %0:sub0<def> reads %0:sub1..n. Invert the lane
           // mask for subregister defs. Read-undef defs will be handled by
           // readsReg below.
           SLM = ~SLM;

Modified: llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp Thu Nov 30 04:12:19 2017
@@ -1453,10 +1453,10 @@ bool PeepholeOptimizer::foldImmediate(
 // only the first copy is considered.
 //
 // e.g.
-// %vreg1 = COPY %vreg0
-// %vreg2 = COPY %vreg0:sub1
+// %1 = COPY %0
+// %2 = COPY %0:sub1
 //
-// Should replace %vreg2 uses with %vreg1:sub1
+// Should replace %2 uses with %1:sub1
 bool PeepholeOptimizer::foldRedundantCopy(
     MachineInstr *MI, SmallSet<unsigned, 4> &CopySrcRegs,
     DenseMap<unsigned, MachineInstr *> &CopyMIs) {
@@ -1621,16 +1621,16 @@ bool PeepholeOptimizer::findTargetRecurr
 /// from the phi. For example, if there is a recurrence of
 ///
 /// LoopHeader:
-///   %vreg1 = phi(%vreg0, %vreg100)
+///   %1 = phi(%0, %100)
 /// LoopLatch:
-///   %vreg0<def, tied1> = ADD %vreg2<def, tied0>, %vreg1
+///   %0<def, tied1> = ADD %2<def, tied0>, %1
 ///
-/// , the fact that vreg0 and vreg2 are in the same tied operands set makes
+/// , the fact that %0 and %2 are in the same tied operands set makes
 /// the coalescing of copy instruction generated from the phi in
-/// LoopHeader(i.e. %vreg1 = COPY %vreg0) impossible, because %vreg1 and
-/// %vreg2 have overlapping live range. This introduces additional move
-/// instruction to the final assembly. However, if we commute %vreg2 and
-/// %vreg1 of ADD instruction, the redundant move instruction can be
+/// LoopHeader(i.e. %1 = COPY %0) impossible, because %1 and
+/// %2 have overlapping live range. This introduces additional move
+/// instruction to the final assembly. However, if we commute %2 and
+/// %1 of ADD instruction, the redundant move instruction can be
 /// avoided.
 bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) {
   SmallSet<unsigned, 2> TargetRegs;

Modified: llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp Thu Nov 30 04:12:19 2017
@@ -1396,30 +1396,30 @@ BlockFrequency RAGreedy::calcSpillCost()
 /// Such sequences are created in 2 scenarios:
 ///
 /// Scenario #1:
-/// vreg0 is evicted from physreg0 by vreg1.
-/// Evictee vreg0 is intended for region splitting with split candidate
-/// physreg0 (the reg vreg0 was evicted from).
+/// %0 is evicted from physreg0 by %1.
+/// Evictee %0 is intended for region splitting with split candidate
+/// physreg0 (the reg %0 was evicted from).
 /// Region splitting creates a local interval because of interference with the
-/// evictor vreg1 (normally region spliitting creates 2 interval, the "by reg"
+/// evictor %1 (normally region spliitting creates 2 interval, the "by reg"
 /// and "by stack" intervals and local interval created when interference
 /// occurs).
-/// One of the split intervals ends up evicting vreg2 from physreg1.
-/// Evictee vreg2 is intended for region splitting with split candidate
+/// One of the split intervals ends up evicting %2 from physreg1.
+/// Evictee %2 is intended for region splitting with split candidate
 /// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
 ///
 /// Scenario #2
-/// vreg0 is evicted from physreg0 by vreg1.
-/// vreg2 is evicted from physreg2 by vreg3 etc.
-/// Evictee vreg0 is intended for region splitting with split candidate
+/// %0 is evicted from physreg0 by %1.
+/// %2 is evicted from physreg2 by %3 etc.
+/// Evictee %0 is intended for region splitting with split candidate
 /// physreg1.
 /// Region splitting creates a local interval because of interference with the
-/// evictor vreg1.
-/// One of the split intervals ends up evicting back original evictor vreg1
-/// from physreg0 (the reg vreg0 was evicted from).
-/// Another evictee vreg2 is intended for region splitting with split candidate
+/// evictor %1.
+/// One of the split intervals ends up evicting back original evictor %1
+/// from physreg0 (the reg %0 was evicted from).
+/// Another evictee %2 is intended for region splitting with split candidate
 /// physreg1.
-/// One of the split intervals ends up evicting vreg3 from physreg2, etc.
+/// One of the split intervals ends up evicting %3 from physreg2, etc.
 ///
 /// \param Evictee  The register considered to be split.
 /// \param Cand     The split candidate that determines the physical register

Modified: llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegisterCoalescer.cpp Thu Nov 30 04:12:19 2017
@@ -228,9 +228,9 @@ namespace {
     /// flag.
     /// This can happen when undef uses were previously concealed by a copy
     /// which we coalesced. Example:
-    ///    %vreg0:sub0<def,read-undef> = ...
-    ///    %vreg1 = COPY %vreg0       <-- Coalescing COPY reveals undef
-    ///           = use %vreg1:sub1   <-- hidden undef use
+    ///    %0:sub0<def,read-undef> = ...
+    ///    %1 = COPY %0           <-- Coalescing COPY reveals undef
+    ///       = use %1:sub1       <-- hidden undef use
     void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
                       MachineOperand &MO, unsigned SubRegIdx);
 
@@ -1143,10 +1143,10 @@ bool RegisterCoalescer::reMaterializeTri
   NewMI.setDebugLoc(DL);
 
   // In a situation like the following:
-  //     %vreg0:subreg = instr              ; DefMI, subreg = DstIdx
-  //     %vreg1        = copy %vreg0:subreg ; CopyMI, SrcIdx = 0
-  // instead of widening %vreg1 to the register class of %vreg0 simply do:
-  //     %vreg1 = instr
+  //     %0:subreg = instr              ; DefMI, subreg = DstIdx
+  //     %1        = copy %0:subreg ; CopyMI, SrcIdx = 0
+  // instead of widening %1 to the register class of %0 simply do:
+  //     %1 = instr
   const TargetRegisterClass *NewRC = CP.getNewRC();
   if (DstIdx != 0) {
     MachineOperand &DefMO = NewMI.getOperand(0);
@@ -1226,12 +1226,12 @@ bool RegisterCoalescer::reMaterializeTri
     // This could happen if the rematerialization instruction is rematerializing
     // more than actually is used in the register.
     // An example would be:
-    // vreg1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
+    // %1 = LOAD CONSTANTS 5, 8 ; Loading both 5 and 8 in different subregs
     // ; Copying only part of the register here, but the rest is undef.
-    // vreg2:sub_16bit<def, read-undef> = COPY vreg1:sub_16bit
+    // %2:sub_16bit<def, read-undef> = COPY %1:sub_16bit
     // ==>
     // ; Materialize all the constants but only using one
-    // vreg2 = LOAD_CONSTANTS 5, 8
+    // %2 = LOAD_CONSTANTS 5, 8
     //
     // at this point for the part that wasn't defined before we could have
     // subranges missing the definition.
@@ -1254,11 +1254,11 @@ bool RegisterCoalescer::reMaterializeTri
 
     // Make sure that the subrange for resultant undef is removed
     // For example:
-    //   vreg1:sub1<def,read-undef> = LOAD CONSTANT 1
-    //   vreg2<def> = COPY vreg1
+    //   %1:sub1<def,read-undef> = LOAD CONSTANT 1
+    //   %2<def> = COPY %1
     // ==>
-    //   vreg2:sub1<def, read-undef> = LOAD CONSTANT 1
-    //     ; Correct but need to remove the subrange for vreg2:sub0
+    //   %2:sub1<def, read-undef> = LOAD CONSTANT 1
+    //     ; Correct but need to remove the subrange for %2:sub0
     //     ; as it is now undef
     if (NewIdx != 0 && DstInt.hasSubRanges()) {
       // The affected subregister segments can be removed.
@@ -1292,15 +1292,15 @@ bool RegisterCoalescer::reMaterializeTri
     // Otherwise, variables that live through may miss some
     // interferences, thus creating invalid allocation.
     // E.g., i386 code:
-    // vreg1 = somedef ; vreg1 GR8
-    // vreg2 = remat ; vreg2 GR32
-    // CL = COPY vreg2.sub_8bit
-    // = somedef vreg1 ; vreg1 GR8
+    // %1 = somedef ; %1 GR8
+    // %2 = remat ; %2 GR32
+    // CL = COPY %2.sub_8bit
+    // = somedef %1 ; %1 GR8
     // =>
-    // vreg1 = somedef ; vreg1 GR8
+    // %1 = somedef ; %1 GR8
     // ECX<def, dead> = remat ; CL<imp-def>
-    // = somedef vreg1 ; vreg1 GR8
-    // vreg1 will see the inteferences with CL but not with CH since
+    // = somedef %1 ; %1 GR8
+    // %1 will see the inteferences with CL but not with CH since
     // no live-ranges would have been created for ECX.
     // Fix that!
     SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
@@ -1353,9 +1353,9 @@ bool RegisterCoalescer::eliminateUndefCo
   // ProcessImpicitDefs may leave some copies of <undef> values, it only removes
   // local variables. When we have a copy like:
   //
-  //   %vreg1 = COPY %vreg2<undef>
+  //   %1 = COPY %2<undef>
   //
-  // We delete the copy and remove the corresponding value number from %vreg1.
+  // We delete the copy and remove the corresponding value number from %1.
   // Any uses of that value number are marked as <undef>.
 
   // Note that we do not query CoalescerPair here but redo isMoveInstr as the
@@ -1820,18 +1820,18 @@ bool RegisterCoalescer::joinReservedPhys
   MachineInstr *CopyMI;
   if (CP.isFlipped()) {
     // Physreg is copied into vreg
-    //   %vregY = COPY %x
+    //   %y = COPY %physreg_x
     //   ...  //< no other def of %x here
-    //   use %vregY
+    //   use %y
     // =>
     //   ...
     //   use %x
     CopyMI = MRI->getVRegDef(SrcReg);
   } else {
     // VReg is copied into physreg:
-    //   %vregX = def
+    //   %y = def
     //   ... //< no other def or use of %y here
-    //   %y = COPY %vregX
+    //   %y = COPY %physreg_x
     // =>
     //   %y = def
     //   ...

Modified: llvm/trunk/lib/CodeGen/RenameIndependentSubregs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RenameIndependentSubregs.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RenameIndependentSubregs.cpp (original)
+++ llvm/trunk/lib/CodeGen/RenameIndependentSubregs.cpp Thu Nov 30 04:12:19 2017
@@ -10,20 +10,20 @@
 /// Rename independent subregisters looks for virtual registers with
 /// independently used subregisters and renames them to new virtual registers.
 /// Example: In the following:
-///   %vreg0:sub0<read-undef> = ...
-///   %vreg0:sub1 = ...
-///   use %vreg0:sub0
-///   %vreg0:sub0 = ...
-///   use %vreg0:sub0
-///   use %vreg0:sub1
+///   %0:sub0<read-undef> = ...
+///   %0:sub1 = ...
+///   use %0:sub0
+///   %0:sub0 = ...
+///   use %0:sub0
+///   use %0:sub1
 /// sub0 and sub1 are never used together, and we have two independent sub0
 /// definitions. This pass will rename to:
-///   %vreg0:sub0<read-undef> = ...
-///   %vreg1:sub1<read-undef> = ...
-///   use %vreg1:sub1
-///   %vreg2:sub1<read-undef> = ...
-///   use %vreg2:sub1
-///   use %vreg0:sub0
+///   %0:sub0<read-undef> = ...
+///   %1:sub1<read-undef> = ...
+///   use %1:sub1
+///   %2:sub1<read-undef> = ...
+///   use %2:sub1
+///   use %0:sub0
 //
 //===----------------------------------------------------------------------===//
 

Modified: llvm/trunk/lib/CodeGen/SplitKit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SplitKit.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SplitKit.cpp (original)
+++ llvm/trunk/lib/CodeGen/SplitKit.cpp Thu Nov 30 04:12:19 2017
@@ -1375,9 +1375,9 @@ void SplitEditor::rewriteAssigned(bool E
         continue;
       // The problem here can be that the new register may have been created
       // for a partially defined original register. For example:
-      //   %vreg827:subreg_hireg<def,read-undef> = ...
+      //   %0:subreg_hireg<def,read-undef> = ...
       //   ...
-      //   %vreg828<def> = COPY %vreg827
+      //   %1<def> = COPY %0
       if (S.empty())
         continue;
       SubLRC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,

Modified: llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp Thu Nov 30 04:12:19 2017
@@ -93,7 +93,7 @@ Printable printReg(unsigned Reg, const T
     else if (TargetRegisterInfo::isStackSlot(Reg))
       OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
     else if (TargetRegisterInfo::isVirtualRegister(Reg))
-      OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
+      OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
     else if (TRI && Reg < TRI->getNumRegs()) {
       OS << '%';
       printLowerCase(TRI->getName(Reg), OS);
@@ -134,7 +134,7 @@ Printable printRegUnit(unsigned Unit, co
 Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
   return Printable([Unit, TRI](raw_ostream &OS) {
     if (TRI && TRI->isVirtualRegister(Unit)) {
-      OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Unit);
+      OS << '%' << TargetRegisterInfo::virtReg2Index(Unit);
     } else {
       OS << printRegUnit(Unit, TRI);
     }

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp Thu Nov 30 04:12:19 2017
@@ -2801,14 +2801,14 @@ MachineInstr *AArch64InstrInfo::foldMemo
     LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
-  //   %vreg0<def> = COPY %sp; GPR64all:%vreg0
+  //   %0<def> = COPY %sp; GPR64all:%0
   //
   // We explicitly chose GPR64all for the virtual register so such a copy might
   // be eliminated by RegisterCoalescer. However, that may not be possible, and
-  // %vreg0 may even spill. We can't spill %sp, and since it is in the GPR64all
+  // %0 may even spill. We can't spill %sp, and since it is in the GPR64all
   // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
   //
-  // To prevent that, we are going to constrain the %vreg0 register class here.
+  // To prevent that, we are going to constrain the %0 register class here.
   //
   // <rdar://problem/11522048>
   //
@@ -2830,7 +2830,7 @@ MachineInstr *AArch64InstrInfo::foldMemo
   // Handle the case where a copy is being spilled or filled but the source
   // and destination register class don't match.  For example:
   //
-  //   %vreg0<def> = COPY %xzr; GPR64common:%vreg0
+  //   %0<def> = COPY %xzr; GPR64common:%0
   //
   // In this case we can still safely fold away the COPY and generate the
   // following spill code:
@@ -2840,16 +2840,16 @@ MachineInstr *AArch64InstrInfo::foldMemo
   // This also eliminates spilled cross register class COPYs (e.g. between x and
   // d regs) of the same size.  For example:
   //
-  //   %vreg0<def> = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1
+  //   %0<def> = COPY %1; GPR64:%0, FPR64:%1
   //
   // will be filled as
   //
-  //   LDRDui %vreg0, fi<#0>
+  //   LDRDui %0, fi<#0>
   //
   // instead of
   //
-  //   LDRXui %vregTemp, fi<#0>
-  //   %vreg0 = FMOV %vregTemp
+  //   LDRXui %Temp, fi<#0>
+  //   %0 = FMOV %Temp
   //
   if (MI.isCopy() && Ops.size() == 1 &&
       // Make sure we're only folding the explicit COPY defs/uses.
@@ -2886,7 +2886,7 @@ MachineInstr *AArch64InstrInfo::foldMemo
 
     // Handle cases like spilling def of:
     //
-    //   %vreg0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%vreg0
+    //   %0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%0
     //
     // where the physical register source can be widened and stored to the full
     // virtual reg destination stack slot, in this case producing:
@@ -2934,12 +2934,12 @@ MachineInstr *AArch64InstrInfo::foldMemo
 
     // Handle cases like filling use of:
     //
-    //   %vreg0:sub_32<def,read-undef> = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1
+    //   %0:sub_32<def,read-undef> = COPY %1; GPR64:%0, GPR32:%1
     //
     // where we can load the full virtual reg source stack slot, into the subreg
     // destination, in this case producing:
     //
-    //   LDRWui %vreg0:sub_32<def,read-undef>, <fi#0>
+    //   LDRWui %0:sub_32<def,read-undef>, <fi#0>
     //
     if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
       const TargetRegisterClass *FillRC;

Modified: llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp Thu Nov 30 04:12:19 2017
@@ -12,16 +12,16 @@
 /// common data and/or have enough undef subreg using swizzle abilities.
 ///
 /// For instance let's consider the following pseudo code :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
 /// ...
-/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3
+/// %7<def> = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub1, sub2, sub3
 ///
 /// is turned into :
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
 /// ...
-/// vreg7<def> = INSERT_SUBREG vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3
+/// %7<def> = INSERT_SUBREG %4, sub3
+/// (swizzable Inst) %7, SwizzleMask : sub0, sub2, sub1, sub3
 ///
 /// This allow regalloc to reduce register pressure for vector registers and
 /// to reduce MOV count.

Modified: llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp Thu Nov 30 04:12:19 2017
@@ -14,46 +14,46 @@
 ///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
 ///
 /// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
-///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+///   %0 <sgpr> = SCALAR_INST
+///   %1 <vsrc> = COPY %0 <sgpr>
 ///    ...
 ///    BRANCH %cond BB1, BB2
 ///  BB1:
-///    %vreg2 <vgpr> = VECTOR_INST
-///    %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+///    %2 <vgpr> = VECTOR_INST
+///    %3 <vsrc> = COPY %2 <vgpr>
 ///  BB2:
-///    %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
-///    %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+///    %4 <vsrc> = PHI %1 <vsrc>, <BB#0>, %3 <vsrc>, <BB#1>
+///    %5 <vgpr> = VECTOR_INST %4 <vsrc>
 ///
 ///
 /// The coalescer will begin at BB0 and eliminate its copy, then the resulting
 /// code will look like this:
 ///
 /// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
+///   %0 <sgpr> = SCALAR_INST
 ///    ...
 ///    BRANCH %cond BB1, BB2
 /// BB1:
-///   %vreg2 <vgpr> = VECTOR_INST
-///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+///   %2 <vgpr> = VECTOR_INST
+///   %3 <vsrc> = COPY %2 <vgpr>
 /// BB2:
-///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///   %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <vsrc>, <BB#1>
+///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
 ///
 /// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
+/// allocator is now forced to constrain the register class of %3 to
 /// <sgpr> so we end up with final code like this:
 ///
 /// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
+///   %0 <sgpr> = SCALAR_INST
 ///    ...
 ///    BRANCH %cond BB1, BB2
 /// BB1:
-///   %vreg2 <vgpr> = VECTOR_INST
-///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+///   %2 <vgpr> = VECTOR_INST
+///   %3 <sgpr> = COPY %2 <vgpr>
 /// BB2:
-///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///   %4 <sgpr> = PHI %0 <sgpr>, <BB#0>, %3 <sgpr>, <BB#1>
+///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
 ///
 /// Now this code contains an illegal copy from a VGPR to an SGPR.
 ///

Modified: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp Thu Nov 30 04:12:19 2017
@@ -290,11 +290,11 @@ void SIFoldOperands::foldOperand(
     // copy since a subregister use tied to a full register def doesn't really
     // make sense. e.g. don't fold:
     //
-    // %vreg1 = COPY %vreg0:sub1
-    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
+    // %1 = COPY %0:sub1
+    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
     //
     //  into
-    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
+    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
     if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
       return;
   }
@@ -971,7 +971,7 @@ bool SIFoldOperands::runOnMachineFunctio
       // Prevent folding operands backwards in the function. For example,
       // the COPY opcode must not be replaced by 1 in this example:
       //
-      //    %vreg3<def> = COPY %vgpr0; VGPR_32:%vreg3
+      //    %3<def> = COPY %vgpr0; VGPR_32:%3
       //    ...
       //    %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
       MachineOperand &Dst = MI.getOperand(0);

Modified: llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp Thu Nov 30 04:12:19 2017
@@ -10,12 +10,12 @@
 /// \file This pass tries to apply several peephole SDWA patterns.
 ///
 /// E.g. original:
-///   V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
-///   V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
-///   V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
+///   V_LSHRREV_B32_e32 %0, 16, %1
+///   V_ADD_I32_e32 %2, %0, %3
+///   V_LSHLREV_B32_e32 %4, 16, %2
 ///
 /// Replace:
-///   V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
+///   V_ADD_I32_sdwa %4, %1, %3
 ///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
 ///
 //===----------------------------------------------------------------------===//
@@ -410,7 +410,7 @@ Optional<int64_t> SIPeepholeSDWA::foldTo
   }
 
   // If this is not immediate then it can be copy of immediate value, e.g.:
-  // %vreg1<def> = S_MOV_B32 255;
+  // %1<def> = S_MOV_B32 255;
   if (Op.isReg()) {
     for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
       if (!isSameReg(Op, Def))

Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp Thu Nov 30 04:12:19 2017
@@ -1347,13 +1347,13 @@ bool SIRegisterInfo::shouldRewriteCopySr
   // class.
   //
   // e.g. if we have something like
-  // vreg0 = ...
-  // vreg1 = ...
-  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
-  // vreg3 = COPY vreg2, sub0
+  // %0 = ...
+  // %1 = ...
+  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
+  // %3 = COPY %2, sub0
   //
   // We want to look through the COPY to find:
-  //  => vreg3 = COPY vreg0
+  //  => %3 = COPY %0
 
   // Plain copy.
   return getCommonSubClass(DefRC, SrcRC) != nullptr;

Modified: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp Thu Nov 30 04:12:19 2017
@@ -1650,7 +1650,7 @@ bool ARMBaseInstrInfo::produceSameValue(
     }
 
     for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
-      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
+      // %12<def> = PICLDR %11, 0, pred:14, pred:%noreg
       const MachineOperand &MO0 = MI0.getOperand(i);
       const MachineOperand &MO1 = MI1.getOperand(i);
       if (!MO0.isIdenticalTo(MO1))

Modified: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.h?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.h Thu Nov 30 04:12:19 2017
@@ -47,10 +47,10 @@ protected:
   /// and \p DefIdx.
   /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
   /// the list is modeled as <Reg:SubReg, SubIdx>.
-  /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
   /// two elements:
-  /// - vreg1:sub1, sub0
-  /// - vreg2<:0>, sub1
+  /// - %1:sub1, sub0
+  /// - %2<:0>, sub1
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.
@@ -63,8 +63,8 @@ protected:
   /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
   /// and \p DefIdx.
   /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
-  /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
-  /// - vreg1:sub1, sub0
+  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
+  /// - %1:sub1, sub0
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.
@@ -77,9 +77,9 @@ protected:
   /// and \p DefIdx.
   /// \p [out] BaseReg and \p [out] InsertedReg contain
   /// the equivalent inputs of INSERT_SUBREG.
-  /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
-  /// - BaseReg: vreg0:sub0
-  /// - InsertedReg: vreg1:sub1, sub3
+  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
+  /// - BaseReg: %0:sub0
+  /// - InsertedReg: %1:sub1, sub3
   ///
   /// \returns true if it is possible to build such an input sequence
   /// with the pair \p MI, \p DefIdx. False otherwise.

Modified: llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/BPF/BPFISelDAGToDAG.cpp Thu Nov 30 04:12:19 2017
@@ -546,7 +546,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SD
   if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg()))
     return;
   unsigned AndOpReg = RegN->getReg();
-  DEBUG(dbgs() << "Examine %vreg" << TargetRegisterInfo::virtReg2Index(AndOpReg)
+  DEBUG(dbgs() << "Examine %" << TargetRegisterInfo::virtReg2Index(AndOpReg)
                << '\n');
 
   // Examine the PHI insns in the MachineBasicBlock to found out the
@@ -574,9 +574,9 @@ void BPFDAGToDAGISel::PreprocessTrunc(SD
       return;
   } else {
     // The PHI node looks like:
-    //   %vreg2<def> = PHI %vreg0, <BB#1>, %vreg1, <BB#3>
-    // Trace each incoming definition, e.g., (%vreg0, BB#1) and (%vreg1, BB#3)
-    // The AND operation can be removed if both %vreg0 in BB#1 and %vreg1 in
+    //   %2<def> = PHI %0, <BB#1>, %1, <BB#3>
+    // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3)
+    // The AND operation can be removed if both %0 in BB#1 and %1 in
     // BB#3 are defined with with a load matching the MaskN.
     DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n');
     unsigned PrevReg = -1;

Modified: llvm/trunk/lib/Target/Hexagon/BitTracker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/BitTracker.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/BitTracker.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/BitTracker.cpp Thu Nov 30 04:12:19 2017
@@ -18,16 +18,16 @@
 // A "ref" value is associated with a BitRef structure, which indicates
 // which virtual register, and which bit in that register is the origin
 // of the value. For example, given an instruction
-//   vreg2 = ASL vreg1, 1
-// assuming that nothing is known about bits of vreg1, bit 1 of vreg2
-// will be a "ref" to (vreg1, 0). If there is a subsequent instruction
-//   vreg3 = ASL vreg2, 2
-// then bit 3 of vreg3 will be a "ref" to (vreg1, 0) as well.
+//   %2 = ASL %1, 1
+// assuming that nothing is known about bits of %1, bit 1 of %2
+// will be a "ref" to (%1, 0). If there is a subsequent instruction
+//   %3 = ASL %2, 2
+// then bit 3 of %3 will be a "ref" to (%1, 0) as well.
 // The "bottom" case means that the bit's value cannot be determined,
 // and that this virtual register actually defines it. The "bottom" case
 // is discussed in detail in BitTracker.h. In fact, "bottom" is a "ref
-// to self", so for the vreg1 above, the bit 0 of it will be a "ref" to
-// (vreg1, 0), bit 1 will be a "ref" to (vreg1, 1), etc.
+// to self", so for the %1 above, the bit 0 of it will be a "ref" to
+// (%1, 0), bit 1 will be a "ref" to (%1, 1), etc.
 //
 // The tracker implements the Wegman-Zadeck algorithm, originally developed
 // for SSA-based constant propagation. Each register is represented as
@@ -75,7 +75,7 @@ using BT = BitTracker;
 
 namespace {
 
-  // Local trickery to pretty print a register (without the whole "%vreg"
+  // Local trickery to pretty print a register (without the whole "%number"
   // business).
   struct printv {
     printv(unsigned r) : R(r) {}

Modified: llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp Thu Nov 30 04:12:19 2017
@@ -895,7 +895,7 @@ bool HexagonBitSimplify::getUsedBits(uns
 }
 
 // Calculate the register class that matches Reg:Sub. For example, if
-// vreg1 is a double register, then vreg1:isub_hi would match the "int"
+// %1 is a double register, then %1:isub_hi would match the "int"
 // register class.
 const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
       const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
@@ -1246,11 +1246,11 @@ bool RedundantInstrElimination::computeU
 // holds the bits for the entire register. To keep track of that, the
 // argument Begin indicates where in Bits is the lowest-significant bit
 // of the register used in operand OpN. For example, in instruction:
-//   vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
+//   %1 = S2_lsr_i_r %2:isub_hi, 10
 // the operand 1 is a 32-bit register, which happens to be a subregister
-// of the 64-bit register vreg2, and that subregister starts at position 32.
+// of the 64-bit register %2, and that subregister starts at position 32.
 // In this case Begin=32, since Bits[32] would be the lowest-significant bit
-// of vreg2:isub_hi.
+// of %2:isub_hi.
 bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
       unsigned OpN, BitVector &Bits, uint16_t Begin) {
   unsigned Opc = MI.getOpcode();
@@ -1356,11 +1356,11 @@ bool RedundantInstrElimination::processB
       // This pass can create copies between registers that don't have the
       // exact same values. Updating the tracker has to involve updating
       // all dependent cells. Example:
-      //   vreg1 = inst vreg2     ; vreg1 != vreg2, but used bits are equal
+      //   %1  = inst %2     ; %1 != %2, but used bits are equal
       //
-      //   vreg3 = copy vreg2     ; <- inserted
-      //     ... = vreg3          ; <- replaced from vreg2
-      // Indirectly, we can create a "copy" between vreg1 and vreg2 even
+      //   %3  = copy %2     ; <- inserted
+      //   ... = %3          ; <- replaced from %2
+      // Indirectly, we can create a "copy" between %1 and %2 even
       // though their exact values do not match.
       BT.visit(*CopyI);
       Changed = true;
@@ -2313,10 +2313,10 @@ bool BitSimplification::genBitSplit(Mach
 
 // Check for tstbit simplification opportunity, where the bit being checked
 // can be tracked back to another register. For example:
-//   vreg2 = S2_lsr_i_r  vreg1, 5
-//   vreg3 = S2_tstbit_i vreg2, 0
+//   %2 = S2_lsr_i_r  %1, 5
+//   %3 = S2_tstbit_i %2, 0
 // =>
-//   vreg3 = S2_tstbit_i vreg1, 5
+//   %3 = S2_tstbit_i %1, 5
 bool BitSimplification::simplifyTstbit(MachineInstr *MI,
       BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
   unsigned Opc = MI->getOpcode();

Modified: llvm/trunk/lib/Target/Hexagon/HexagonBlockRanges.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonBlockRanges.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonBlockRanges.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonBlockRanges.cpp Thu Nov 30 04:12:19 2017
@@ -368,7 +368,7 @@ void HexagonBlockRanges::computeInitialL
       }
     }
     // Defs and clobbers can overlap, e.g.
-    // %d0<def,dead> = COPY %vreg5, %r0<imp-def>, %r1<imp-def>
+    // %d0<def,dead> = COPY %5, %r0<imp-def>, %r1<imp-def>
     for (RegisterRef R : Defs)
       Clobbers.erase(R);
 

Modified: llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp Thu Nov 30 04:12:19 2017
@@ -1974,7 +1974,7 @@ bool HexagonConstEvaluator::evaluate(con
     {
       const MachineOperand &VO = MI.getOperand(1);
       // The operand of CONST32 can be a blockaddress, e.g.
-      //   %vreg0<def> = CONST32 <blockaddress(@eat, %l)>
+      //   %0<def> = CONST32 <blockaddress(@eat, %l)>
       // Do this check for all instructions for safety.
       if (!VO.isImm())
         return false;

Modified: llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp Thu Nov 30 04:12:19 2017
@@ -25,37 +25,37 @@
 //
 // Example:
 //
-//         %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-//         %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-//         J2_jumpt %vreg41<kill>, <BB#5>, %pc<imp-def,dead>
+//         %40<def> = L2_loadrub_io %39<kill>, 1
+//         %41<def> = S2_tstbit_i %40<kill>, 0
+//         J2_jumpt %41<kill>, <BB#5>, %pc<imp-def,dead>
 //         J2_jump <BB#4>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#4(62) BB#5(62)
 //
 // BB#4: derived from LLVM BB %if.then
 //     Predecessors according to CFG: BB#3
-//         %vreg11<def> = A2_addp %vreg6, %vreg10
-//         S2_storerd_io %vreg32, 16, %vreg11
+//         %11<def> = A2_addp %6, %10
+//         S2_storerd_io %32, 16, %11
 //     Successors according to CFG: BB#5
 //
 // BB#5: derived from LLVM BB %if.end
 //     Predecessors according to CFG: BB#3 BB#4
-//         %vreg12<def> = PHI %vreg6, <BB#3>, %vreg11, <BB#4>
-//         %vreg13<def> = A2_addp %vreg7, %vreg12
-//         %vreg42<def> = C2_cmpeqi %vreg9, 10
-//         J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+//         %12<def> = PHI %6, <BB#3>, %11, <BB#4>
+//         %13<def> = A2_addp %7, %12
+//         %42<def> = C2_cmpeqi %9, 10
+//         J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
 //         J2_jump <BB#6>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#6(4) BB#3(124)
 //
 // would become:
 //
-//         %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
-//         %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// spec->  %vreg11<def> = A2_addp %vreg6, %vreg10
-// pred->  S2_pstorerdf_io %vreg41, %vreg32, 16, %vreg11
-//         %vreg46<def> = PS_pselect %vreg41, %vreg6, %vreg11
-//         %vreg13<def> = A2_addp %vreg7, %vreg46
-//         %vreg42<def> = C2_cmpeqi %vreg9, 10
-//         J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+//         %40<def> = L2_loadrub_io %39<kill>, 1
+//         %41<def> = S2_tstbit_i %40<kill>, 0
+// spec->  %11<def> = A2_addp %6, %10
+// pred->  S2_pstorerdf_io %41, %32, 16, %11
+//         %46<def> = PS_pselect %41, %6, %11
+//         %13<def> = A2_addp %7, %46
+//         %42<def> = C2_cmpeqi %9, 10
+//         J2_jumpf %42<kill>, <BB#3>, %pc<imp-def,dead>
 //         J2_jump <BB#6>, %pc<imp-def,dead>
 //     Successors according to CFG: BB#6 BB#3
 

Modified: llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp Thu Nov 30 04:12:19 2017
@@ -17,33 +17,33 @@
 //
 // Liveness tracking aside, the main functionality of this pass is divided
 // into two steps. The first step is to replace an instruction
-//   vreg0 = C2_mux vreg1, vreg2, vreg3
+//   %0 = C2_mux %1, %2, %3
 // with a pair of conditional transfers
-//   vreg0 = A2_tfrt vreg1, vreg2
-//   vreg0 = A2_tfrf vreg1, vreg3
+//   %0 = A2_tfrt %1, %2
+//   %0 = A2_tfrf %1, %3
 // It is the intention that the execution of this pass could be terminated
 // after this step, and the code generated would be functionally correct.
 //
-// If the uses of the source values vreg1 and vreg2 are kills, and their
+// If the uses of the source values %1 and %2 are kills, and their
 // definitions are predicable, then in the second step, the conditional
 // transfers will then be rewritten as predicated instructions. E.g.
-//   vreg0 = A2_or vreg1, vreg2
-//   vreg3 = A2_tfrt vreg99, vreg0<kill>
+//   %0 = A2_or %1, %2
+//   %3 = A2_tfrt %99, %0<kill>
 // will be rewritten as
-//   vreg3 = A2_port vreg99, vreg1, vreg2
+//   %3 = A2_port %99, %1, %2
 //
 // This replacement has two variants: "up" and "down". Consider this case:
-//   vreg0 = A2_or vreg1, vreg2
+//   %0 = A2_or %1, %2
 //   ... [intervening instructions] ...
-//   vreg3 = A2_tfrt vreg99, vreg0<kill>
+//   %3 = A2_tfrt %99, %0<kill>
 // variant "up":
-//   vreg3 = A2_port vreg99, vreg1, vreg2
-//   ... [intervening instructions, vreg0->vreg3] ...
+//   %3 = A2_port %99, %1, %2
+//   ... [intervening instructions, %0->%3] ...
 //   [deleted]
 // variant "down":
 //   [deleted]
 //   ... [intervening instructions] ...
-//   vreg3 = A2_port vreg99, vreg1, vreg2
+//   %3 = A2_port %99, %1, %2
 //
 // Both, one or none of these variants may be valid, and checks are made
 // to rule out inapplicable variants.
@@ -51,13 +51,13 @@
 // As an additional optimization, before either of the two steps above is
 // executed, the pass attempts to coalesce the target register with one of
 // the source registers, e.g. given an instruction
-//   vreg3 = C2_mux vreg0, vreg1, vreg2
-// vreg3 will be coalesced with either vreg1 or vreg2. If this succeeds,
+//   %3 = C2_mux %0, %1, %2
+// %3 will be coalesced with either %1 or %2. If this succeeds,
 // the instruction would then be (for example)
-//   vreg3 = C2_mux vreg0, vreg3, vreg2
+//   %3 = C2_mux %0, %3, %2
 // and, under certain circumstances, this could result in only one predicated
 // instruction:
-//   vreg3 = A2_tfrf vreg0, vreg2
+//   %3 = A2_tfrf %0, %2
 //
 
 // Splitting a definition of a register into two predicated transfers
@@ -65,18 +65,18 @@
 // will see both instructions as actual definitions, and will mark the
 // first one as dead. The definition is not actually dead, and this
 // situation will need to be fixed. For example:
-//   vreg1<def,dead> = A2_tfrt ...  ; marked as dead
-//   vreg1<def> = A2_tfrf ...
+//   %1<def,dead> = A2_tfrt ...  ; marked as dead
+//   %1<def> = A2_tfrf ...
 //
 // Since any of the individual predicated transfers may end up getting
 // removed (in case it is an identity copy), some pre-existing def may
 // be marked as dead after live interval recomputation:
-//   vreg1<def,dead> = ...          ; marked as dead
+//   %1<def,dead> = ...          ; marked as dead
 //   ...
-//   vreg1<def> = A2_tfrf ...       ; if A2_tfrt is removed
-// This case happens if vreg1 was used as a source in A2_tfrt, which means
+//   %1<def> = A2_tfrf ...       ; if A2_tfrt is removed
+// This case happens if %1 was used as a source in A2_tfrt, which means
 // that is it actually live at the A2_tfrf, and so the now dead definition
-// of vreg1 will need to be updated to non-dead at some point.
+// of %1 will need to be updated to non-dead at some point.
 //
 // This issue could be remedied by adding implicit uses to the predicated
 // transfers, but this will create a problem with subsequent predication,
@@ -760,8 +760,8 @@ MachineInstr *HexagonExpandCondsets::get
       if (RR.Reg != RD.Reg)
         continue;
       // If the "Reg" part agrees, there is still the subregister to check.
-      // If we are looking for vreg1:loreg, we can skip vreg1:hireg, but
-      // not vreg1 (w/o subregisters).
+      // If we are looking for %1:loreg, we can skip %1:hireg, but
+      // not %1 (w/o subregisters).
       if (RR.Sub == RD.Sub)
         return MI;
       if (RR.Sub == 0 || RD.Sub == 0)
@@ -1071,7 +1071,7 @@ bool HexagonExpandCondsets::predicateInB
       bool Done = predicate(*I, (Opc == Hexagon::A2_tfrt), UpdRegs);
       if (!Done) {
         // If we didn't predicate I, we may need to remove it in case it is
-        // an "identity" copy, e.g.  vreg1 = A2_tfrt vreg2, vreg1.
+        // an "identity" copy, e.g.  %1 = A2_tfrt %2, %1.
         if (RegisterRef(I->getOperand(0)) == RegisterRef(I->getOperand(2))) {
           for (auto &Op : I->operands())
             if (Op.isReg())
@@ -1198,18 +1198,18 @@ bool HexagonExpandCondsets::coalesceSegm
     MachineOperand &S1 = CI->getOperand(2), &S2 = CI->getOperand(3);
     bool Done = false;
     // Consider this case:
-    //   vreg1 = instr1 ...
-    //   vreg2 = instr2 ...
-    //   vreg0 = C2_mux ..., vreg1, vreg2
-    // If vreg0 was coalesced with vreg1, we could end up with the following
+    //   %1 = instr1 ...
+    //   %2 = instr2 ...
+    //   %0 = C2_mux ..., %1, %2
+    // If %0 was coalesced with %1, we could end up with the following
     // code:
-    //   vreg0 = instr1 ...
-    //   vreg2 = instr2 ...
-    //   vreg0 = A2_tfrf ..., vreg2
+    //   %0 = instr1 ...
+    //   %2 = instr2 ...
+    //   %0 = A2_tfrf ..., %2
     // which will later become:
-    //   vreg0 = instr1 ...
-    //   vreg0 = instr2_cNotPt ...
-    // i.e. there will be an unconditional definition (instr1) of vreg0
+    //   %0 = instr1 ...
+    //   %0 = instr2_cNotPt ...
+    // i.e. there will be an unconditional definition (instr1) of %0
     // followed by a conditional one. The output dependency was there before
     // and it unavoidable, but if instr1 is predicable, we will no longer be
     // able to predicate it here.

Modified: llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp Thu Nov 30 04:12:19 2017
@@ -1106,10 +1106,10 @@ void HexagonGenInsert::pruneCoveredSets(
 
   // Now, remove those whose sets of potentially removable registers are
   // contained in another IF candidate for VR. For example, given these
-  // candidates for vreg45,
-  //   %vreg45:
-  //     (%vreg44,%vreg41,#9,#8), { %vreg42 }
-  //     (%vreg43,%vreg41,#9,#8), { %vreg42 %vreg44 }
+  // candidates for %45,
+  //   %45:
+  //     (%44,%41,#9,#8), { %42 }
+  //     (%43,%41,#9,#8), { %42 %44 }
   // remove the first one, since it is contained in the second one.
   for (unsigned i = 0, n = LL.size(); i < n; ) {
     const RegisterSet &RMi = LL[i].second;

Modified: llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp Thu Nov 30 04:12:19 2017
@@ -1622,8 +1622,8 @@ bool HexagonHardwareLoops::fixupInductio
   RegisterInductionSet IndRegs;
 
   // Look for induction patterns:
-  //   vreg1 = PHI ..., [ latch, vreg2 ]
-  //   vreg2 = ADD vreg1, imm
+  //   %1 = PHI ..., [ latch, %2 ]
+  //   %2 = ADD %1, imm
   using instr_iterator = MachineBasicBlock::instr_iterator;
 
   for (instr_iterator I = Header->instr_begin(), E = Header->instr_end();
@@ -1720,7 +1720,7 @@ bool HexagonHardwareLoops::fixupInductio
     MachineOperand &MO = PredDef->getOperand(i);
     if (MO.isReg()) {
       // Skip all implicit references.  In one case there was:
-      //   %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %usr<imp-use>
+      //   %140<def> = FCMPUGT32_rr %138, %139, %usr<imp-use>
       if (MO.isImplicit())
         continue;
       if (MO.isUse()) {

Modified: llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp Thu Nov 30 04:12:19 2017
@@ -8,27 +8,27 @@
 // This peephole pass optimizes in the following cases.
 // 1. Optimizes redundant sign extends for the following case
 //    Transform the following pattern
-//    %vreg170<def> = SXTW %vreg166
+//    %170<def> = SXTW %166
 //    ...
-//    %vreg176<def> = COPY %vreg170:isub_lo
+//    %176<def> = COPY %170:isub_lo
 //
 //    Into
-//    %vreg176<def> = COPY vreg166
+//    %176<def> = COPY %166
 //
 //  2. Optimizes redundant negation of predicates.
-//     %vreg15<def> = CMPGTrr %vreg6, %vreg2
+//     %15<def> = CMPGTrr %6, %2
 //     ...
-//     %vreg16<def> = NOT_p %vreg15<kill>
+//     %16<def> = NOT_p %15<kill>
 //     ...
-//     JMP_c %vreg16<kill>, <BB#1>, %pc<imp-def,dead>
+//     JMP_c %16<kill>, <BB#1>, %pc<imp-def,dead>
 //
 //     Into
-//     %vreg15<def> = CMPGTrr %vreg6, %vreg2;
+//     %15<def> = CMPGTrr %6, %2;
 //     ...
-//     JMP_cNot %vreg15<kill>, <BB#1>, %pc<imp-def,dead>;
+//     JMP_cNot %15<kill>, <BB#1>, %pc<imp-def,dead>;
 //
 // Note: The peephole pass makes the instrucstions like
-// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
+// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
 // redundant and relies on some form of dead removal instructions, like
 // DCE or DIE to actually eliminate them.
 
@@ -133,7 +133,7 @@ bool HexagonPeephole::runOnMachineFuncti
       NextI = std::next(I);
       MachineInstr &MI = *I;
       // Look for sign extends:
-      // %vreg170<def> = SXTW %vreg166
+      // %170<def> = SXTW %166
       if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
         assert(MI.getNumOperands() == 2);
         MachineOperand &Dst = MI.getOperand(0);
@@ -144,14 +144,14 @@ bool HexagonPeephole::runOnMachineFuncti
         if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
             TargetRegisterInfo::isVirtualRegister(SrcReg)) {
           // Map the following:
-          // %vreg170<def> = SXTW %vreg166
-          // PeepholeMap[170] = vreg166
+          // %170<def> = SXTW %166
+          // PeepholeMap[170] = %166
           PeepholeMap[DstReg] = SrcReg;
         }
       }
 
-      // Look for  %vreg170<def> = COMBINE_ir_V4 (0, %vreg169)
-      // %vreg170:DoublRegs, %vreg169:IntRegs
+      // Look for  %170<def> = COMBINE_ir_V4 (0, %169)
+      // %170:DoublRegs, %169:IntRegs
       if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
         assert(MI.getNumOperands() == 3);
         MachineOperand &Dst = MI.getOperand(0);
@@ -165,10 +165,10 @@ bool HexagonPeephole::runOnMachineFuncti
       }
 
       // Look for this sequence below
-      // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32
-      // %vregIntReg = COPY %vregDoubleReg1:isub_lo.
+      // %DoubleReg1 = LSRd_ri %DoubleReg0, 32
+      // %IntReg = COPY %DoubleReg1:isub_lo.
       // and convert into
-      // %vregIntReg = COPY %vregDoubleReg0:isub_hi.
+      // %IntReg = COPY %DoubleReg0:isub_hi.
       if (MI.getOpcode() == Hexagon::S2_lsr_i_p) {
         assert(MI.getNumOperands() == 3);
         MachineOperand &Dst = MI.getOperand(0);
@@ -193,14 +193,14 @@ bool HexagonPeephole::runOnMachineFuncti
         if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
             TargetRegisterInfo::isVirtualRegister(SrcReg)) {
           // Map the following:
-          // %vreg170<def> = NOT_xx %vreg166
-          // PeepholeMap[170] = vreg166
+          // %170<def> = NOT_xx %166
+          // PeepholeMap[170] = %166
           PeepholeMap[DstReg] = SrcReg;
         }
       }
 
       // Look for copy:
-      // %vreg176<def> = COPY %vreg170:isub_lo
+      // %176<def> = COPY %170:isub_lo
       if (!DisableOptSZExt && MI.isCopy()) {
         assert(MI.getNumOperands() == 2);
         MachineOperand &Dst = MI.getOperand(0);

Modified: llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp Thu Nov 30 04:12:19 2017
@@ -9,10 +9,10 @@
 // Replace sequences of "narrow" stores to adjacent memory locations with
 // a fewer "wide" stores that have the same effect.
 // For example, replace:
-//   S4_storeirb_io  %vreg100, 0, 0   ; store-immediate-byte
-//   S4_storeirb_io  %vreg100, 1, 0   ; store-immediate-byte
+//   S4_storeirb_io  %100, 0, 0   ; store-immediate-byte
+//   S4_storeirb_io  %100, 1, 0   ; store-immediate-byte
 // with
-//   S4_storeirh_io  %vreg100, 0, 0   ; store-immediate-halfword
+//   S4_storeirh_io  %100, 0, 0   ; store-immediate-halfword
 // The above is the general idea.  The actual cases handled by the code
 // may be a bit more complex.
 // The purpose of this pass is to reduce the number of outstanding stores,

Modified: llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonSubtarget.cpp Thu Nov 30 04:12:19 2017
@@ -223,8 +223,8 @@ void HexagonSubtarget::CallMutation::app
     // both the return value and the argument for the next call being in %r0.
     // Example:
     //   1: <call1>
-    //   2: %vregX = COPY %r0
-    //   3: <use of %vregX>
+    //   2: %X = COPY %r0
+    //   3: <use of %X>
     //   4: %r0 = ...
     //   5: <call2>
     // The scheduler would often swap 3 and 4, so an additional register is
@@ -234,12 +234,12 @@ void HexagonSubtarget::CallMutation::app
       const MachineInstr *MI = DAG->SUnits[su].getInstr();
       if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
                            MI->readsRegister(Hexagon::V0, &TRI)))  {
-        // %vregX = COPY %r0
+        // %X = COPY %r0
         VRegHoldingRet = MI->getOperand(0).getReg();
         RetRegister = MI->getOperand(1).getReg();
         LastUseOfRet = nullptr;
       } else if (VRegHoldingRet && MI->readsVirtualRegister(VRegHoldingRet))
-        // <use of %vregX>
+        // <use of %X>
         LastUseOfRet = &DAG->SUnits[su];
       else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
         // %r0 = ...

Modified: llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp Thu Nov 30 04:12:19 2017
@@ -22,11 +22,11 @@
 // This peephole pass optimizes these cases, for example
 //
 // It will transform the following pattern
-//    %vreg0<def> = LEA_ADDRi64 %VRFrame, 4
-//    %vreg1<def> = cvta_to_local_yes_64 %vreg0
+//    %0<def> = LEA_ADDRi64 %VRFrame, 4
+//    %1<def> = cvta_to_local_yes_64 %0
 //
 // into
-//    %vreg1<def> = LEA_ADDRi64 %VRFrameLocal, 4
+//    %1<def> = LEA_ADDRi64 %VRFrameLocal, 4
 //
 // %VRFrameLocal is the virtual register name of %SPL
 //

Modified: llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp Thu Nov 30 04:12:19 2017
@@ -62,11 +62,11 @@ namespace llvm {
 /// BB#0: derived from LLVM BB %entry
 ///    Live Ins: %f1 %f3 %x6
 ///        <SNIP1>
-///        %vreg0<def> = COPY %f1; F8RC:%vreg0
-///        %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-///        %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-///                    mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
-///        BCC 76, %vreg5, <BB#2>; CRRC:%vreg5
+///        %0<def> = COPY %f1; F8RC:%0
+///        %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+///        %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+///                    mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
+///        BCC 76, %5, <BB#2>; CRRC:%5
 ///    Successors according to CFG: BB#1(?%) BB#2(?%)
 ///
 /// BB#1: derived from LLVM BB %entry
@@ -75,10 +75,10 @@ namespace llvm {
 ///
 /// BB#2: derived from LLVM BB %entry
 ///    Predecessors according to CFG: BB#0 BB#1
-///        %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-///                    F8RC:%vreg9,%vreg8,%vreg0
+///        %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+///                    F8RC:%9,%8,%0
 ///        <SNIP2>
-///        BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+///        BCC 76, %5, <BB#4>; CRRC:%5
 ///    Successors according to CFG: BB#3(?%) BB#4(?%)
 ///
 /// BB#3: derived from LLVM BB %entry
@@ -87,8 +87,8 @@ namespace llvm {
 ///
 /// BB#4: derived from LLVM BB %entry
 ///    Predecessors according to CFG: BB#2 BB#3
-///        %vreg13<def> = PHI %vreg12, <BB#3>, %vreg2, <BB#2>;
-///                     F8RC:%vreg13,%vreg12,%vreg2
+///        %13<def> = PHI %12, <BB#3>, %2, <BB#2>;
+///                     F8RC:%13,%12,%2
 ///        <SNIP3>
 ///        BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
 ///
@@ -100,12 +100,12 @@ namespace llvm {
 /// BB#0: derived from LLVM BB %entry
 ///    Live Ins: %f1 %f3 %x6
 ///        <SNIP1>
-///        %vreg0<def> = COPY %f1; F8RC:%vreg0
-///        %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-///        %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
-///                     mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
+///        %0<def> = COPY %f1; F8RC:%0
+///        %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
+///        %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+///                     mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
 ///        <SNIP2>
-///        BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
+///        BCC 76, %5, <BB#4>; CRRC:%5
 ///    Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%)
 ///      BB#4(0x55555554 / 0x80000000 = 66.67%)
 ///
@@ -115,10 +115,10 @@ namespace llvm {
 ///
 /// BB#4: derived from LLVM BB %entry
 ///    Predecessors according to CFG: BB#0 BB#1
-///        %vreg9<def> = PHI %vreg8, <BB#1>, %vreg0, <BB#0>;
-///                    F8RC:%vreg9,%vreg8,%vreg0
-///        %vreg13<def> = PHI %vreg12, <BB#1>, %vreg2, <BB#0>;
-///                     F8RC:%vreg13,%vreg12,%vreg2
+///        %9<def> = PHI %8, <BB#1>, %0, <BB#0>;
+///                    F8RC:%9,%8,%0
+///        %13<def> = PHI %12, <BB#1>, %2, <BB#0>;
+///                     F8RC:%13,%12,%2
 ///        <SNIP3>
 ///        BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
 ///

Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp Thu Nov 30 04:12:19 2017
@@ -2318,7 +2318,7 @@ PPCInstrInfo::isSignOrZeroExtended(const
       //   ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
       //   BL8_NOP <ga:@func>,...
       //   ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
-      //   %vreg5<def> = COPY %x3; G8RC:%vreg5
+      //   %5<def> = COPY %x3; G8RC:%5
       if (SrcReg == PPC::X3) {
         const MachineBasicBlock *MBB = MI.getParent();
         MachineBasicBlock::const_instr_iterator II =

Modified: llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp Thu Nov 30 04:12:19 2017
@@ -585,9 +585,9 @@ bool PPCMIPeephole::simplifyCode(void) {
         // We can eliminate RLDICL (e.g. for zero-extension)
         // if all bits to clear are already zero in the input.
         // This code assume following code sequence for zero-extension.
-        //   %vreg6<def> = COPY %vreg5:sub_32; (optional)
-        //   %vreg8<def> = IMPLICIT_DEF;
-        //   %vreg7<def,tied1> = INSERT_SUBREG %vreg8<tied0>, %vreg6, sub_32;
+        //   %6<def> = COPY %5:sub_32; (optional)
+        //   %8<def> = IMPLICIT_DEF;
+        //   %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
         if (!EnableZExtElimination) break;
 
         if (MI.getOperand(2).getImm() != 0)
@@ -685,8 +685,8 @@ bool PPCMIPeephole::simplifyCode(void) {
           DEBUG(dbgs() << "Optimizing LI to ADDI: ");
           DEBUG(LiMI->dump());
 
-          // There could be repeated registers in the PHI, e.g: %vreg1<def> =
-          // PHI %vreg6, <BB#2>, %vreg8, <BB#3>, %vreg8, <BB#6>; So if we've
+          // There could be repeated registers in the PHI, e.g: %1<def> =
+          // PHI %6, <BB#2>, %8, <BB#3>, %8, <BB#6>; So if we've
           // already replaced the def instruction, skip.
           if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
             continue;

Modified: llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp Thu Nov 30 04:12:19 2017
@@ -90,21 +90,21 @@ protected:
         // This pass is run after register coalescing, and so we're looking for
         // a situation like this:
         //   ...
-        //   %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
-        //   %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
-        //                         %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+        //   %5<def> = COPY %9; VSLRC:%5,%9
+        //   %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+        //                         %rm<imp-use>; VSLRC:%5,%17,%16
         //   ...
-        //   %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
-        //                         %rm<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+        //   %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
+        //                         %rm<imp-use>; VSLRC:%9,%17,%19
         //   ...
         // Where we can eliminate the copy by changing from the A-type to the
         // M-type instruction. Specifically, for this example, this means:
-        //   %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
-        //                         %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+        //   %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
+        //                         %rm<imp-use>; VSLRC:%5,%17,%16
         // is replaced by:
-        //   %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
-        //                         %rm<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
-        // and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
+        //   %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
+        //                         %rm<imp-use>; VSLRC:%16,%18,%9
+        // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
 
         SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
 
@@ -150,13 +150,13 @@ protected:
         // walking the MIs we may as well test liveness here.
         //
         // FIXME: There is a case that occurs in practice, like this:
-        //   %vreg9<def> = COPY %f1; VSSRC:%vreg9
+        //   %9<def> = COPY %f1; VSSRC:%9
         //   ...
-        //   %vreg6<def> = COPY %vreg9; VSSRC:%vreg6,%vreg9
-        //   %vreg7<def> = COPY %vreg9; VSSRC:%vreg7,%vreg9
-        //   %vreg9<def,tied1> = XSMADDASP %vreg9<tied0>, %vreg1, %vreg4; VSSRC:
-        //   %vreg6<def,tied1> = XSMADDASP %vreg6<tied0>, %vreg1, %vreg2; VSSRC:
-        //   %vreg7<def,tied1> = XSMADDASP %vreg7<tied0>, %vreg1, %vreg3; VSSRC:
+        //   %6<def> = COPY %9; VSSRC:%6,%9
+        //   %7<def> = COPY %9; VSSRC:%7,%9
+        //   %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
+        //   %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
+        //   %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
         // which prevents an otherwise-profitable transformation.
         bool OtherUsers = false, KillsAddendSrc = false;
         for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI);
@@ -177,11 +177,11 @@ protected:
 
 
         // The transformation doesn't work well with things like:
-        //    %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
-        // unless vreg11 is also a kill, so skip when it is not,
+        //    %5 = A-form-op %5, %11, %5;
+        // unless %11 is also a kill, so skip when it is not,
         // and check operand 3 to see it is also a kill to handle the case:
-        //   %vreg5 = A-form-op %vreg5, %vreg5, %vreg11;
-        // where vreg5 and vreg11 are both kills. This case would be skipped
+        //   %5 = A-form-op %5, %5, %11;
+        // where %5 and %11 are both kills. This case would be skipped
         // otherwise.
         unsigned OldFMAReg = MI.getOperand(0).getReg();
 

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Nov 30 04:12:19 2017
@@ -6948,10 +6948,10 @@ static int getUnderlyingExtractedFromVec
 
   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
   // lowered this:
-  //   (extract_vector_elt (v8f32 %vreg1), Constant<6>)
+  //   (extract_vector_elt (v8f32 %1), Constant<6>)
   // to:
   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
-  //                           (extract_subvector (v8f32 %vreg0), Constant<4>),
+  //                           (extract_subvector (v8f32 %0), Constant<4>),
   //                           undef)
   //                       Constant<0>)
   // In this case the vector is the extract_subvector expression and the index

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll Thu Nov 30 04:12:19 2017
@@ -43,7 +43,7 @@ define [1 x double] @constant() {
   ; The key problem here is that we may fail to create an MBB referenced by a
   ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
   ; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %vreg6, %vreg2; mem:ST4[%addr] GPR:%vreg6,%vreg2 (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
 ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
 define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -63,7 +63,7 @@ false:
 }
 
   ; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST6[%addr](align=8) (in function: odd_type)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
 define void @odd_type(i42* %addr) {
@@ -72,7 +72,7 @@ define void @odd_type(i42* %addr) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST28[%addr](align=32) (in function: odd_vector)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
 ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
 define void @odd_vector(<7 x i32>* %addr) {
@@ -91,7 +91,7 @@ define i128 @sequence_sizes([8 x i8] %in
 }
 
 ; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %vreg2<def>(s64) = G_LOAD %vreg0; mem:LD8[%addr] GPR:%vreg2,%vreg0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
 ; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
 define i64 @atomic_ops(i64* %addr) {
@@ -132,14 +132,14 @@ continue:
 }
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s128) = G_FCONSTANT quad 2
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(s128) = G_FCONSTANT quad 2
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump:
 define fp128 @test_quad_dump() {
   ret fp128 0xL00000000000000004000000000000000
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(p0) = G_EXTRACT_VECTOR_ELT %1, %2; (in function: vector_of_pointers_extractelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
 @var = global <2 x i16*> zeroinitializer
@@ -156,7 +156,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
 ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
 define void @vector_of_pointers_insertelement() {
@@ -172,7 +172,7 @@ end:
   br label %block
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
 %struct96 = type { float, float, float }
@@ -182,7 +182,7 @@ define void @nonpow2_insertvalue_narrowi
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
 define void @nonpow2_add_narrowing() {
@@ -193,7 +193,7 @@ define void @nonpow2_add_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
 define void @nonpow2_or_narrowing() {
@@ -204,7 +204,7 @@ define void @nonpow2_or_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
 define void @nonpow2_load_narrowing() {
@@ -213,7 +213,7 @@ define void @nonpow2_load_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -223,7 +223,7 @@ define void @nonpow2_store_narrowing(i96
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
@@ -233,8 +233,8 @@ define void @nonpow2_constant_narrowing(
 
 ; Currently can't handle vector lengths that aren't an exact multiple of
 ; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(<7 x s64>) = G_ADD %vreg0, %vreg0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg2<def>(s64) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg3; (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1<def>(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2<def>(s64) = G_EXTRACT_VECTOR_ELT %1, %3; (in function: nonpow2_vector_add_fewerelements)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
 define void @nonpow2_vector_add_fewerelements() {

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir Thu Nov 30 04:12:19 2017
@@ -9,8 +9,8 @@
 ...
 ---
 # CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %vreg0<def>(s64) = COPY
-# CHECK: operand 0: %vreg0<def>
+# CHECK: instruction: %0<def>(s64) = COPY
+# CHECK: operand 0: %0<def>
 name:            test
 regBankSelected: true
 registers:

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir Thu Nov 30 04:12:19 2017
@@ -22,11 +22,11 @@ body: |
    %0 = COPY %x0
 
    ; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
-   ; CHECK: instruction: %vreg1<def> = G_ADD
+   ; CHECK: instruction: %1<def> = G_ADD
    %1 = G_ADD %0, %0
 
    ; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
-   ; CHECK: instruction: %vreg2<def>(s64) = COPY
-   ; CHECK: operand 0: %vreg2<def>
+   ; CHECK: instruction: %2<def>(s64) = COPY
+   ; CHECK: operand 0: %2<def>
    %2(s64) = COPY %x0
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-stp-cluster.ll Thu Nov 30 04:12:19 2017
@@ -5,10 +5,10 @@
 ; CHECK-LABEL: stp_i64_scale:BB#0
 ; CHECK:Cluster ld/st SU(4) - SU(3)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4):   STRXui %vreg1, %vreg0, 1
-; CHECK:SU(3):   STRXui %vreg1, %vreg0, 2
-; CHECK:SU(2):   STRXui %vreg1, %vreg0, 3
-; CHECK:SU(5):   STRXui %vreg1, %vreg0, 4
+; CHECK:SU(4):   STRXui %1, %0, 1
+; CHECK:SU(3):   STRXui %1, %0, 2
+; CHECK:SU(2):   STRXui %1, %0, 3
+; CHECK:SU(5):   STRXui %1, %0, 4
 define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
@@ -26,10 +26,10 @@ entry:
 ; CHECK-LABEL: stp_i32_scale:BB#0
 ; CHECK:Cluster ld/st SU(4) - SU(3)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4):   STRWui %vreg1, %vreg0, 1
-; CHECK:SU(3):   STRWui %vreg1, %vreg0, 2
-; CHECK:SU(2):   STRWui %vreg1, %vreg0, 3
-; CHECK:SU(5):   STRWui %vreg1, %vreg0, 4
+; CHECK:SU(4):   STRWui %1, %0, 1
+; CHECK:SU(3):   STRWui %1, %0, 2
+; CHECK:SU(2):   STRWui %1, %0, 3
+; CHECK:SU(5):   STRWui %1, %0, 4
 define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) {
 entry:
   %arrayidx = getelementptr inbounds i32, i32* %P, i32 3
@@ -47,10 +47,10 @@ entry:
 ; CHECK-LABEL:stp_i64_unscale:BB#0 entry
 ; CHECK:Cluster ld/st SU(5) - SU(2)
 ; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5):   STURXi %vreg1, %vreg0, -32
-; CHECK:SU(2):   STURXi %vreg1, %vreg0, -24
-; CHECK:SU(4):   STURXi %vreg1, %vreg0, -16
-; CHECK:SU(3):   STURXi %vreg1, %vreg0, -8
+; CHECK:SU(5):   STURXi %1, %0, -32
+; CHECK:SU(2):   STURXi %1, %0, -24
+; CHECK:SU(4):   STURXi %1, %0, -16
+; CHECK:SU(3):   STURXi %1, %0, -8
 define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
@@ -68,10 +68,10 @@ entry:
 ; CHECK-LABEL:stp_i32_unscale:BB#0 entry
 ; CHECK:Cluster ld/st SU(5) - SU(2)
 ; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5):   STURWi %vreg1, %vreg0, -16
-; CHECK:SU(2):   STURWi %vreg1, %vreg0, -12
-; CHECK:SU(4):   STURWi %vreg1, %vreg0, -8
-; CHECK:SU(3):   STURWi %vreg1, %vreg0, -4
+; CHECK:SU(5):   STURWi %1, %0, -16
+; CHECK:SU(2):   STURWi %1, %0, -12
+; CHECK:SU(4):   STURWi %1, %0, -8
+; CHECK:SU(3):   STURWi %1, %0, -4
 define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 {
 entry:
   %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
@@ -89,10 +89,10 @@ entry:
 ; CHECK-LABEL:stp_double:BB#0
 ; CHECK:Cluster ld/st SU(3) - SU(4)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3):   STRDui %vreg1, %vreg0, 1
-; CHECK:SU(4):   STRDui %vreg1, %vreg0, 2
-; CHECK:SU(2):   STRDui %vreg1, %vreg0, 3
-; CHECK:SU(5):   STRDui %vreg1, %vreg0, 4
+; CHECK:SU(3):   STRDui %1, %0, 1
+; CHECK:SU(4):   STRDui %1, %0, 2
+; CHECK:SU(2):   STRDui %1, %0, 3
+; CHECK:SU(5):   STRDui %1, %0, 4
 define void @stp_double(double* nocapture %P, double %v)  {
 entry:
   %arrayidx = getelementptr inbounds double, double* %P, i64 3
@@ -110,10 +110,10 @@ entry:
 ; CHECK-LABEL:stp_float:BB#0
 ; CHECK:Cluster ld/st SU(3) - SU(4)
 ; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3):   STRSui %vreg1, %vreg0, 1
-; CHECK:SU(4):   STRSui %vreg1, %vreg0, 2
-; CHECK:SU(2):   STRSui %vreg1, %vreg0, 3
-; CHECK:SU(5):   STRSui %vreg1, %vreg0, 4
+; CHECK:SU(3):   STRSui %1, %0, 1
+; CHECK:SU(4):   STRSui %1, %0, 2
+; CHECK:SU(2):   STRSui %1, %0, 3
+; CHECK:SU(5):   STRSui %1, %0, 4
 define void @stp_float(float* nocapture %P, float %v)  {
 entry:
   %arrayidx = getelementptr inbounds float, float* %P, i64 3
@@ -130,10 +130,10 @@ entry:
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: stp_volatile:BB#0
 ; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2):   STRXui %vreg1, %vreg0, 3; mem:Volatile
-; CHECK:SU(3):   STRXui %vreg1, %vreg0, 2; mem:Volatile
-; CHECK:SU(4):   STRXui %vreg1, %vreg0, 1; mem:Volatile
-; CHECK:SU(5):   STRXui %vreg1, %vreg0, 4; mem:Volatile
+; CHECK:SU(2):   STRXui %1, %0, 3; mem:Volatile
+; CHECK:SU(3):   STRXui %1, %0, 2; mem:Volatile
+; CHECK:SU(4):   STRXui %1, %0, 1; mem:Volatile
+; CHECK:SU(5):   STRXui %1, %0, 4; mem:Volatile
 define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
 entry:
   %arrayidx = getelementptr inbounds i64, i64* %P, i64 3

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-rem.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-rem.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-rem.ll Thu Nov 30 04:12:19 2017
@@ -4,9 +4,9 @@
 
 ; CHECK-SSA-LABEL: Machine code for function t1
 
-; CHECK-SSA: [[QUOTREG:%vreg[0-9]+]]<def> = SDIVWr
+; CHECK-SSA: [[QUOTREG:%[0-9]+]]<def> = SDIVWr
 ; CHECK-SSA-NOT: [[QUOTREG]]<def> =
-; CHECK-SSA: {{%vreg[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
+; CHECK-SSA: {{%[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
 
 ; CHECK-SSA-LABEL: Machine code for function t2
 

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp-cluster.ll Thu Nov 30 04:12:19 2017
@@ -6,13 +6,13 @@
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldr_int:BB#0
 ; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1):   %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2):   %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1):   %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2):   %{{[0-9]+}}<def> = LDRWui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldr_int:BB#0
 ; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1):   %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2):   %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1):   %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2):   %{{[0-9]+}}<def> = LDRWui
 define i32 @ldr_int(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 1
   %tmp1 = load i32, i32* %p1, align 2
@@ -26,13 +26,13 @@ define i32 @ldr_int(i32* %a) nounwind {
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldp_sext_int:BB#0
 ; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1):   %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(2):   %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(1):   %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(2):   %{{[0-9]+}}<def> = LDRSWui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldp_sext_int:BB#0
 ; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1):   %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(2):   %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(1):   %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(2):   %{{[0-9]+}}<def> = LDRSWui
 define i64 @ldp_sext_int(i32* %p) nounwind {
   %tmp = load i32, i32* %p, align 4
   %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ -47,13 +47,13 @@ define i64 @ldp_sext_int(i32* %p) nounwi
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldur_int:BB#0
 ; CHECK: Cluster ld/st SU(2) - SU(1)
-; CHECK: SU(1):   %vreg{{[0-9]+}}<def> = LDURWi
-; CHECK: SU(2):   %vreg{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(1):   %{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(2):   %{{[0-9]+}}<def> = LDURWi
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldur_int:BB#0
 ; EXYNOS: Cluster ld/st SU(2) - SU(1)
-; EXYNOS: SU(1):   %vreg{{[0-9]+}}<def> = LDURWi
-; EXYNOS: SU(2):   %vreg{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(1):   %{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(2):   %{{[0-9]+}}<def> = LDURWi
 define i32 @ldur_int(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 -1
   %tmp1 = load i32, i32* %p1, align 2
@@ -67,13 +67,13 @@ define i32 @ldur_int(i32* %a) nounwind {
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldp_half_sext_zext_int:BB#0
 ; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3):   %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(4):   %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(3):   %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(4):   %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
 ; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3):   %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(4):   %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(3):   %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(4):   %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
 define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
   %tmp0 = load i64, i64* %q, align 4
   %tmp = load i32, i32* %p, align 4
@@ -90,13 +90,13 @@ define i64 @ldp_half_sext_zext_int(i64*
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldp_half_zext_sext_int:BB#0
 ; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3):   %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; CHECK: SU(4):   %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(3):   %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(4):   %{{[0-9]+}}<def> = LDRSWui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
 ; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3):   %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; EXYNOS: SU(4):   %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(3):   %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(4):   %{{[0-9]+}}<def> = LDRSWui
 define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
   %tmp0 = load i64, i64* %q, align 4
   %tmp = load i32, i32* %p, align 4
@@ -113,13 +113,13 @@ define i64 @ldp_half_zext_sext_int(i64*
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldr_int_volatile:BB#0
 ; CHECK-NOT: Cluster ld/st
-; CHECK: SU(1):   %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2):   %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1):   %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2):   %{{[0-9]+}}<def> = LDRWui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldr_int_volatile:BB#0
 ; EXYNOS-NOT: Cluster ld/st
-; EXYNOS: SU(1):   %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2):   %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1):   %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2):   %{{[0-9]+}}<def> = LDRWui
 define i32 @ldr_int_volatile(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 1
   %tmp1 = load volatile i32, i32* %p1, align 2
@@ -133,8 +133,8 @@ define i32 @ldr_int_volatile(i32* %a) no
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldq_cluster:BB#0
 ; CHECK: Cluster ld/st SU(1) - SU(3)
-; CHECK: SU(1):   %vreg{{[0-9]+}}<def> = LDRQui
-; CHECK: SU(3):   %vreg{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(1):   %{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(3):   %{{[0-9]+}}<def> = LDRQui
 ; EXYNOS: ********** MI Scheduling **********
 ; EXYNOS-LABEL: ldq_cluster:BB#0
 ; EXYNOS-NOT: Cluster ld/st

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll Thu Nov 30 04:12:19 2017
@@ -6,10 +6,10 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: shiftable
-; CHECK: SU(2):   %vreg2<def> = SUBXri %vreg1, 20, 0
+; CHECK: SU(2):   %2<def> = SUBXri %1, 20, 0
 ; CHECK:   Successors:
-; CHECK-NEXT:    SU(4): Data Latency=1 Reg=%vreg2
-; CHECK-NEXT:    SU(3): Data Latency=2 Reg=%vreg2
+; CHECK-NEXT:    SU(4): Data Latency=1 Reg=%2
+; CHECK-NEXT:    SU(3): Data Latency=2 Reg=%2
 ; CHECK: ********** INTERVALS **********
 define i64 @shiftable(i64 %A, i64 %B) {
         %tmp0 = sub i64 %B, 20

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll Thu Nov 30 04:12:19 2017
@@ -5,15 +5,15 @@
 ;
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: misched_bug:BB#0 entry
-; CHECK: SU(2):   %vreg2<def> = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vreg0
+; CHECK: SU(2):   %2<def> = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
 ; CHECK:   Successors:
-; CHECK-NEXT:    SU(5): Data Latency=4 Reg=%vreg2
+; CHECK-NEXT:    SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT:    SU(4): Ord  Latency=0
-; CHECK: SU(3):   STRWui %wzr, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
+; CHECK: SU(3):   STRWui %wzr, %0, 0; mem:ST4[%ptr1] GPR64common:%0
 ; CHECK:   Successors:
 ; CHECK: SU(4): Ord  Latency=0
-; CHECK: SU(4):   STRWui %wzr, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
-; CHECK: SU(5):   %w0<def> = COPY %vreg2; GPR32:%vreg2
+; CHECK: SU(4):   STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1
+; CHECK: SU(5):   %w0<def> = COPY %2; GPR32:%2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
 entry:

Modified: llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll Thu Nov 30 04:12:19 2017
@@ -26,9 +26,9 @@ declare void @callee2(i8*, i8*, i8*, i8*
 ; CHECK:  fi#-2: {{.*}} fixed, at location [SP+8]
 ; CHECK:  fi#-1: {{.*}} fixed, at location [SP]
 
-; CHECK:  [[VRA:%vreg.*]]<def> = LDRXui <fi#-1>
-; CHECK:  [[VRB:%vreg.*]]<def> = LDRXui <fi#-2>
-; CHECK:  STRXui %vreg{{.*}}, <fi#-4>
+; CHECK:  [[VRA:%.*]]<def> = LDRXui <fi#-1>
+; CHECK:  [[VRB:%.*]]<def> = LDRXui <fi#-2>
+; CHECK:  STRXui %{{.*}}, <fi#-4>
 ; CHECK:  STRXui [[VRB]], <fi#-3>
 
 ; Make sure that there is an dependence edge between fi#-2 and fi#-4.
@@ -40,5 +40,5 @@ declare void @callee2(i8*, i8*, i8*, i8*
 ; CHECK:   SU([[DEPSTOREB:.*]]): Ord  Latency=0
 ; CHECK:   SU([[DEPSTOREA:.*]]): Ord  Latency=0
 
-; CHECK: SU([[DEPSTOREA]]):   STRXui %vreg{{.*}}, <fi#-4>
-; CHECK: SU([[DEPSTOREB]]):   STRXui %vreg{{.*}}, <fi#-3>
+; CHECK: SU([[DEPSTOREA]]):   STRXui %{{.*}}, <fi#-4>
+; CHECK: SU([[DEPSTOREB]]):   STRXui %{{.*}}, <fi#-3>

Modified: llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll Thu Nov 30 04:12:19 2017
@@ -46,20 +46,20 @@ declare void @llvm.r600.group.barrier()
 ;
 ; The instruction selection phase will generate ISA that looks like this:
 ; %oqap = LDS_READ_RET
-; %vreg0 = MOV %oqap
-; %vreg1 = VTX_READ_32
-; %vreg2 = ADD_INT %vreg1, %vreg0
+; %0 = MOV %oqap
+; %1 = VTX_READ_32
+; %2 = ADD_INT %1, %0
 ;
 ; The bottom scheduler will schedule the two ALU instructions first:
 ;
 ; UNSCHEDULED:
 ; %oqap = LDS_READ_RET
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
 ;
 ; SCHEDULED:
 ;
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %2
 ;
 ; The lack of proper aliasing results in the local memory read (LDS_READ_RET)
 ; to consider the global memory read (VTX_READ_32) has a chain dependency, so
@@ -69,10 +69,10 @@ declare void @llvm.r600.group.barrier()
 ; Alu clause:
 ; %oqap = LDS_READ_RET
 ; VTX clause:
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
 ; Alu clause:
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %2
 ;
 ; This is an illegal program because the oqap def and use know occur in
 ; different ALU clauses.

Modified: llvm/trunk/test/CodeGen/AMDGPU/liveness.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/liveness.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/liveness.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/liveness.mir Thu Nov 30 04:12:19 2017
@@ -6,7 +6,7 @@
 # liveranges needed it.
 #
 # Should see three distinct value numbers:
-# CHECK: %vreg0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
+# CHECK: %0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
 --- |
   define amdgpu_kernel void @test0() { ret void }
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir Thu Nov 30 04:12:19 2017
@@ -2,7 +2,7 @@
 # https://bugs.llvm.org/show_bug.cgi?id=33620
 
 ---
-# This would assert due to the empty live interval created for %vreg9
+# This would assert due to the empty live interval created for %9
 # on the last S_NOP with an undef subreg use.
 
 # CHECK-LABEL: name: expecting_non_empty_interval

Modified: llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir Thu Nov 30 04:12:19 2017
@@ -2,11 +2,11 @@
 # REQUIRES: asserts
 
 # CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
 # CHECK-LABEL: Machine code for function test0:
 
 # CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
 # CHECK-LABEL: Machine code for function test1:
 
 --- |

Modified: llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll Thu Nov 30 04:12:19 2017
@@ -5,11 +5,11 @@ target triple = "thumbv7-apple-ios"
 ; This test calls shrinkToUses with an early-clobber redefined live range during
 ; spilling.
 ;
-;   Shrink: %vreg47,1.158257e-02 = [384r,400e:0)[400e,420r:1)  0 at 384r 1 at 400e
+;   Shrink: %47,1.158257e-02 = [384r,400e:0)[400e,420r:1)  0 at 384r 1 at 400e
 ;
 ; The early-clobber instruction is an str:
 ;
-;   %vreg12<earlyclobber,def> = t2STR_PRE %vreg6, %vreg12, 32, pred:14, pred:%noreg
+;   %12<earlyclobber,def> = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
 ;
 ; This tests that shrinkToUses handles the EC redef correctly.
 

Modified: llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/Windows/dbzchk.ll Thu Nov 30 04:12:19 2017
@@ -119,7 +119,7 @@ attributes #0 = { optsize }
 ; CHECK-CFG-DAG: t2B <BB#3>
 
 ; CHECK-CFG-DAG: BB#2
-; CHECK-CFG-DAG: tCMPi8 %vreg{{[0-9]}}, 0
+; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0
 ; CHECK-CFG-DAG: t2Bcc <BB#5>
 
 ; CHECK-CFG-DAG: BB#4

Modified: llvm/trunk/test/CodeGen/ARM/crash-greedy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/crash-greedy.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/crash-greedy.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/crash-greedy.ll Thu Nov 30 04:12:19 2017
@@ -61,7 +61,7 @@ for.end:
 
 ; CHECK: insert_elem
 ; This test has a sub-register copy with a kill flag:
-;   %vreg6:ssub_3<def> = COPY %vreg6:ssub_2<kill>; QPR_VFP2:%vreg6
+;   %6:ssub_3<def> = COPY %6:ssub_2<kill>; QPR_VFP2:%6
 ; The rewriter must do something sensible with that, or the scavenger crashes.
 define void @insert_elem() nounwind {
 entry:

Modified: llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll Thu Nov 30 04:12:19 2017
@@ -33,9 +33,9 @@ for.end:
 ; This case was a crasher in constrainLocalCopy.
 ; The problem was the t2LDR_PRE defining both the global and local lrg.
 ; CHECK-LABEL: *** Final schedule for BB#5 ***
-; CHECK: %[[R4:vreg[0-9]+]]<def>, %[[R1:vreg[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R1]]
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R4]]
+; CHECK: %[[R4:[0-9]+]]<def>, %[[R1:[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R1]]
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R4]]
 ; CHECK-LABEL: MACHINEINSTRS
 %struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] }
 %union.rtunion_def = type { i64 }

Modified: llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir Thu Nov 30 04:12:19 2017
@@ -37,62 +37,62 @@
   }
 #
 # CHECK:       ********** MI Scheduling **********
-# CHECK:       SU(2):   %vreg2<def> = t2MOVi32imm <ga:@g1>; rGPR:%vreg2
+# CHECK:       SU(2):   %2<def> = t2MOVi32imm <ga:@g1>; rGPR:%2
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 2
 # CHECK_R52:   Latency    : 2
 #
-# CHECK:       SU(3):   %vreg3<def> = t2LDRi12 %vreg2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%vreg3,%vreg2
+# CHECK:       SU(3):   %3<def> = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 3
 # CHECK_R52:   Latency    : 4
 #
-# CHECK :      SU(6):   %vreg6<def> = t2ADDrr %vreg3, %vreg3, pred:14, pred:%noreg, opt:%noreg; rGPR:%vreg6,%vreg3,%vreg3
+# CHECK :      SU(6):   %6<def> = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 
-# CHECK:       SU(7):   %vreg7<def> = t2SDIV %vreg6, %vreg5, pred:14, pred:%noreg; rGPR:%vreg7,%vreg6,%vreg5
+# CHECK:       SU(7):   %7<def> = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
 # CHECK_A9:    Latency    : 0
 # CHECK_SWIFT: Latency    : 14
 # CHECK_R52:   Latency    : 8
 
-# CHECK:       SU(8):   t2STRi12 %vreg7, %vreg2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%vreg7,%vreg2
+# CHECK:       SU(8):   t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%7,%2
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 0
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(9):   %vreg8<def> = t2SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; rGPR:%vreg8,%vreg1,%vreg1
+# CHECK:       SU(9):   %8<def> = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(10):   %vreg9<def> = t2SMLABB %vreg0, %vreg0, %vreg8, pred:14, pred:%noreg; rGPR:%vreg9,%vreg0,%vreg0,%vreg8
+# CHECK:       SU(10):   %9<def> = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(11):   %vreg10<def> = t2UXTH %vreg9, 0, pred:14, pred:%noreg; rGPR:%vreg10,%vreg9
+# CHECK:       SU(11):   %10<def> = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 #
-# CHECK:       SU(12):   %vreg11<def> = t2MUL %vreg10, %vreg7, pred:14, pred:%noreg; rGPR:%vreg11,%vreg10,%vreg7
+# CHECK:       SU(12):   %11<def> = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(13):   %vreg12<def> = t2MLA %vreg11, %vreg11, %vreg11, pred:14, pred:%noreg; rGPR:%vreg12,%vreg11,%vreg11,%vreg11
+# CHECK:       SU(13):   %12<def> = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(14):   %vreg13<def>, %vreg14<def> = t2UMULL %vreg12, %vreg12, pred:14, pred:%noreg; rGPR:%vreg13,%vreg14,%vreg12,%vreg12
+# CHECK:       SU(14):   %13<def>, %14<def> = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 5
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(18):   %vreg19<def,tied4>, %vreg20<def,tied5> = t2UMLAL %vreg12, %vreg12, %vreg19<tied0>, %vreg20<tied1>, pred:14, pred:%noreg; rGPR:%vreg19,%vreg20,%vreg12,%vreg12,%vreg20
+# CHECK:       SU(18):   %19<def,tied4>, %20<def,tied5> = t2UMLAL %12, %12, %19<tied0>, %20<tied1>, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 7
 # CHECK_R52:   Latency    : 4

Modified: llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir Thu Nov 30 04:12:19 2017
@@ -28,37 +28,37 @@
   }
 
 # CHECK:       ********** MI Scheduling **********
-# CHECK:       SU(2):   %vreg2<def> = SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; GPR:%vreg2,%vreg1,%vreg1
+# CHECK:       SU(2):   %2<def> = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(3):   %vreg3<def> = SMLABB %vreg0, %vreg0, %vreg2, pred:14, pred:%noreg; GPRnopc:%vreg3,%vreg0,%vreg0 GPR:%vreg2
+# CHECK:       SU(3):   %3<def> = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(4):   %vreg4<def> = UXTH %vreg3, 0, pred:14, pred:%noreg; GPRnopc:%vreg4,%vreg3
+# CHECK:       SU(4):   %4<def> = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
 # CHECK_A9:    Latency    : 1
 # CHECK_SWIFT: Latency    : 1
 # CHECK_R52:   Latency    : 3
 #
-# CHECK:       SU(5):   %vreg5<def> = MUL %vreg4, %vreg4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg5,%vreg4,%vreg4
+# CHECK:       SU(5):   %5<def> = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(6):   %vreg6<def> = MLA %vreg5, %vreg5, %vreg5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg6,%vreg5,%vreg5,%vreg5
+# CHECK:       SU(6):   %6<def> = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
 # CHECK_A9:    Latency    : 2
 # CHECK_SWIFT: Latency    : 4
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(7):   %vreg7<def>, %vreg8<def> = UMULL %vreg6, %vreg6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg7,%vreg8,%vreg6,%vreg6
+# CHECK:       SU(7):   %7<def>, %8<def> = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
 # CHECK_A9:    Latency    : 3
 # CHECK_SWIFT: Latency    : 5
 # CHECK_R52:   Latency    : 4
 #
-# CHECK:       SU(11):   %vreg13<def,tied4>, %vreg14<def,tied5> = UMLAL %vreg6, %vreg6, %vreg13<tied0>, %vreg14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%vreg13 GPRnopc:%vreg14,%vreg6,%vreg6
+# CHECK:       SU(11):   %13<def,tied4>, %14<def,tied5> = UMLAL %6, %6, %13<tied0>, %14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
 # CHECK_SWIFT: Latency    : 7
 # CHECK_A9:    Latency    : 3
 # CHECK_R52:   Latency    : 4

Modified: llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir Thu Nov 30 04:12:19 2017
@@ -20,22 +20,22 @@
 
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1):   %vreg1<def> = VLD4d8Pseudo %vreg0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%vreg1 GPR:%vreg0
+# CHECK: SU(1):   %1<def> = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
 # CHECK: Latency            : 8
 # CHECK: Single Issue       : true;
-# CHECK: SU(2):   %vreg4<def> = VADDv8i8 %vreg1:dsub_0, %vreg1:dsub_1, pred:14, pred:%noreg; DPR:%vreg4 QQPR:%vreg1
+# CHECK: SU(2):   %4<def> = VADDv8i8 %1:dsub_0, %1:dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
 # CHECK: Latency            : 5
 # CHECK: Single Issue       : false;
-# CHECK: SU(3):   %vreg5<def>, %vreg6<def> = VMOVRRD %vreg4, pred:14, pred:%noreg; GPR:%vreg5,%vreg6 DPR:%vreg4
+# CHECK: SU(3):   %5<def>, %6<def> = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
 # CHECK: Latency            : 4
 # CHECK: Single Issue       : false;
 
-# TOPDOWN: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# TOPDOWN: Scheduling SU(1) %1<def> = VLD4d8Pseudo
 # TOPDOWN: Bump cycle to end group
-# TOPDOWN: Scheduling SU(2) %vreg4<def> = VADDv8i8
+# TOPDOWN: Scheduling SU(2) %4<def> = VADDv8i8
 
-# BOTTOMUP: Scheduling SU(2) %vreg4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# BOTTOMUP: Scheduling SU(2) %4<def> = VADDv8i8
+# BOTTOMUP: Scheduling SU(1) %1<def> = VLD4d8Pseudo
 # BOTTOMUP: Bump cycle to begin group
 
 ...

Modified: llvm/trunk/test/CodeGen/ARM/subreg-remat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/subreg-remat.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/subreg-remat.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/subreg-remat.ll Thu Nov 30 04:12:19 2017
@@ -4,10 +4,10 @@ target triple = "thumbv7-apple-ios"
 ;
 ; The vector %v2 is built like this:
 ;
-;   %vreg6:ssub_1<def> = ...
-;   %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
+;   %6:ssub_1<def> = ...
+;   %6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
 ;
-; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
+; When %6 spills, the VLDRS constant pool load cannot be rematerialized
 ; since it implicitly reads the ssub_1 sub-register.
 ;
 ; CHECK: f1
@@ -31,7 +31,7 @@ define void @f1(float %x, <2 x float>* %
 ; because the bits are undef, we should rematerialize.  The vector is now built
 ; like this:
 ;
-;   %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPool]
+;   %2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %2<imp-def>; mem:LD4[ConstantPool]
 ;
 ; The extra <imp-def> operand indicates that the instruction fully defines the
 ; virtual register.  It doesn't read the old value.

Modified: llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll (original)
+++ llvm/trunk/test/CodeGen/AVR/select-must-add-unconditional-jump.ll Thu Nov 30 04:12:19 2017
@@ -11,10 +11,10 @@
 ;
 ; BB#2: derived from LLVM BB %finish
 ;     Predecessors according to CFG: BB#0 BB#1
-;         %vreg0<def> = PHI %vreg3, <BB#0>, %vreg5, <BB#1>
-;         %vreg7<def> = LDIRdK 2
-;         %vreg8<def> = LDIRdK 1
-;         CPRdRr %vreg2, %vreg0, %SREG<imp-def>
+;         %0<def> = PHI %3, <BB#0>, %5, <BB#1>
+;         %7<def> = LDIRdK 2
+;         %8<def> = LDIRdK 1
+;         CPRdRr %2, %0, %SREG<imp-def>
 ;         BREQk <BB#6>, %SREG<imp-use>
 ;     Successors according to CFG: BB#5(?%) BB#6(?%)
 ;

Modified: llvm/trunk/test/CodeGen/Hexagon/circ_ldd_bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/circ_ldd_bug.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/circ_ldd_bug.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/circ_ldd_bug.ll Thu Nov 30 04:12:19 2017
@@ -7,10 +7,10 @@ target triple = "hexagon"
 ; UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615!
 ; This happened because after unrolling a loop with a ldd_circ instruction we
 ; would have several TFCR and ldd_circ instruction sequences.
-; %vreg0 (CRRegs) = TFCR %vreg0 (IntRegs)
-;                 = ldd_circ( , , vreg0)
-; %vreg1 (CRRegs) = TFCR %vreg1 (IntRegs)
-;                 = ldd_circ( , , vreg0)
+; %0 (CRRegs) = TFCR %0 (IntRegs)
+;                 = ldd_circ( , , %0)
+; %1 (CRRegs) = TFCR %1 (IntRegs)
+;                 = ldd_circ( , , %0)
 ; The scheduler would move the CRRegs to the top of the loop. The allocator
 ; would try to spill the CRRegs after running out of them. We don't have code to
 ; spill CRRegs and the above assertion would be triggered.

Modified: llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir (original)
+++ llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir Thu Nov 30 04:12:19 2017
@@ -3,12 +3,12 @@
 
 # Check that coalesced registers are removed from live intervals.
 #
-# Check that vreg3 is coalesced into vreg4, and that after coalescing
+# Check that %3 is coalesced into %4, and that after coalescing
 # it is no longer in live intervals.
 
 # CHECK-LABEL: After expand-condsets
 # CHECK: INTERVALS
-# CHECK-NOT: vreg3
+# CHECK-NOT: %3
 # CHECK: MACHINEINSTRS
 
 

Modified: llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll (original)
+++ llvm/trunk/test/CodeGen/Hexagon/post-inc-aa-metadata.ll Thu Nov 30 04:12:19 2017
@@ -3,7 +3,7 @@
 
 ; Check that the generated post-increment load has TBAA information.
 ; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %vreg{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
 
 target triple = "hexagon"
 

Modified: llvm/trunk/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll (original)
+++ llvm/trunk/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll Thu Nov 30 04:12:19 2017
@@ -36,7 +36,7 @@ entry:
 ; CHECK-LABEL: SU({{.*}}):   SW_RI{{.*}}, 4,
 ; CHECK:  # preds left       : 2
 ; CHECK:  # succs left       : 0
-; CHECK-LABEL: SU({{.*}}):   %vreg{{.*}}<def> = LDW_RI{{.*}}, 12,
+; CHECK-LABEL: SU({{.*}}):   %{{.*}}<def> = LDW_RI{{.*}}, 12,
 ; CHECK:  # preds left       : 1
 ; CHECK:  # succs left       : 4
 ; CHECK-LABEL: SU({{.*}}):   STH_RI{{.*}}, 10,

Modified: llvm/trunk/test/CodeGen/MIR/AArch64/spill-fold.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MIR/AArch64/spill-fold.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/MIR/AArch64/spill-fold.mir (original)
+++ llvm/trunk/test/CodeGen/MIR/AArch64/spill-fold.mir Thu Nov 30 04:12:19 2017
@@ -22,7 +22,7 @@ body:             |
 ...
 ---
 # CHECK-LABEL: name: test_subreg_spill_fold2
-# Similar to test_subreg_spill_fold, but with a vreg0 register class not containing %WZR.
+# Similar to test_subreg_spill_fold, but with a %0 register class not containing %WZR.
 name:            test_subreg_spill_fold2
 registers:
   - { id: 0, class: gpr64sp }

Modified: llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll Thu Nov 30 04:12:19 2017
@@ -14,6 +14,6 @@ entry:
 
 ; CHECK: ********** Function: foo
 ; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %x3<def> = COPY %vreg
-; CHECK-NEXT: %x4<def> = COPY %vreg
+; CHECK: %x3<def> = COPY %{{[0-9]+}}
+; CHECK-NEXT: %x4<def> = COPY %{{[0-9]+}}
 ; CHECK-NEXT: BLR

Modified: llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/dbgvalue.ll Thu Nov 30 04:12:19 2017
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
 
 ; CHECK: BB#0
-; CHECK: #DEBUG_VALUE: usage:self <- %vreg4
+; CHECK: #DEBUG_VALUE: usage:self <- %4
 ; CHECK: BB#1
 ; CHECK: DW_TAG_variable
 source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"

Modified: llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll Thu Nov 30 04:12:19 2017
@@ -2,17 +2,17 @@
 ;
 ; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg:
 ; while.body85.i:
-;   vreg1 = copy vreg2
-;   vreg2 = add
+;   %1 = copy %2
+;   %2 = add
 ; critical edge from land.lhs.true.i -> if.end117.i:
-;   vreg27 = vreg2
+;   %27 = %2
 ; critical edge from land.lhs.true103.i -> if.end117.i:
-;   vreg27 = vreg2
+;   %27 = %2
 ; if.then108.i:
-;   vreg27 = vreg1
+;   %27 = %1
 ;
 ; Prior to fixing PR10920 401.bzip miscompile, the coalescer would
-; consider vreg1 and vreg27 to be copies of the same value. It would
+; consider %1 and %27 to be copies of the same value. It would
 ; then remove one of the critical edge copes, which cannot safely be removed.
 
 ; There are two obvious ways the register-allocator could go here, either

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll Thu Nov 30 04:12:19 2017
@@ -8,7 +8,7 @@
 ; the fallback path.
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){

Modified: llvm/trunk/test/CodeGen/X86/cmovcmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cmovcmov.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cmovcmov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cmovcmov.ll Thu Nov 30 04:12:19 2017
@@ -227,8 +227,8 @@ attributes #0 = { nounwind }
 
 ; The following test failed because llvm had a bug where a structure like:
 ;
-; %vreg12<def> = CMOV_GR8 %vreg7, %vreg11 ... (lt)
-; %vreg13<def> = CMOV_GR8 %vreg12, %vreg11 ... (gt)
+; %12<def> = CMOV_GR8 %7, %11 ... (lt)
+; %13<def> = CMOV_GR8 %12, %11 ... (gt)
 ;
 ; was lowered to:
 ;
@@ -239,9 +239,9 @@ attributes #0 = { nounwind }
 ;   JG_1 BB#9
 ; BB#8:
 ; BB#9:
-;   vreg12 = phi(vreg7, BB#8, vreg11, BB#0, vreg12, BB#7)
-;   vreg13 = COPY vreg12
-; Which was invalid as %vreg12 is not the same value as %vreg13
+;   %12 = phi(%7, BB#8, %11, BB#0, %12, BB#7)
+;   %13 = COPY %12
+; Which was invalid as %12 is not the same value as %13
 
 ; CHECK-LABEL: no_cascade_opt:
 ; CMOV-DAG: cmpl %edx, %esi

Modified: llvm/trunk/test/CodeGen/X86/coalescer-dce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/coalescer-dce.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/coalescer-dce.ll (original)
+++ llvm/trunk/test/CodeGen/X86/coalescer-dce.ll Thu Nov 30 04:12:19 2017
@@ -4,28 +4,28 @@ target triple = "x86_64-apple-macosx10.7
 
 ; This test case has a sub-register join followed by a remat:
 ;
-; 256L    %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7
-;         Considering merging %vreg2 with %vreg7:sub_32bit
+; 256L    %2<def> = COPY %7:sub_32bit<kill>; GR32:%2 GR64:%7
+;         Considering merging %2 with %7:sub_32bit
 ;         Cross-class to GR64.
-;                 RHS = %vreg2 = [256d,272d:0)  0@256d
-;                 LHS = %vreg7 = [208d,256d:0)[304L,480L:0)  0@208d
-;                 updated: 272L   %vreg0<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg0 GR64:%vreg7
-;         Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0)  0@208d
+;                 RHS = %2 = [256d,272d:0)  0@256d
+;                 LHS = %7 = [208d,256d:0)[304L,480L:0)  0@208d
+;                 updated: 272L   %0<def> = COPY %7:sub_32bit<kill>; GR32:%0 GR64:%7
+;         Joined. Result = %7 = [208d,272d:0)[304L,480L:0)  0@208d
 ;
-; 272L    %vreg10:sub_32bit<def> = COPY %vreg7:sub_32bit<kill>, %vreg10<imp-def>; GR64:%vreg10,%vreg7
-;         Considering merging %vreg7 with %vreg10
-;                 RHS = %vreg7 = [208d,272d:0)[304L,480L:0)  0@208d
-;                 LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4)  0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
-; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %eflags<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
-; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0)  0@208d
+; 272L    %10:sub_32bit<def> = COPY %7:sub_32bit<kill>, %10<imp-def>; GR64:%10,%7
+;         Considering merging %7 with %10
+;                 RHS = %7 = [208d,272d:0)[304L,480L:0)  0@208d
+;                 LHS = %10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4)  0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
+; Remat: %10<def> = MOV64r0 %10<imp-def>, %eflags<imp-def,dead>, %10<imp-def>; GR64:%10
+; Shrink: %7 = [208d,272d:0)[304L,480L:0)  0@208d
 ;  live-in at 240L
 ;  live-in at 416L
 ;  live-in at 320L
 ;  live-in at 304L
-; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0)  0@208d
+; Shrunk: %7 = [208d,256d:0)[304L,480L:0)  0@208d
 ;
 ; The COPY at 256L is rewritten as a partial def, and that would artificially
-; extend the live range of %vreg7 to end at 256d.  When the joined copy is
+; extend the live range of %7 to end at 256d.  When the joined copy is
 ; removed, -verify-coalescing complains about the dangling kill.
 ;
 ; <rdar://problem/9967101>

Modified: llvm/trunk/test/CodeGen/X86/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/crash.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/crash.ll Thu Nov 30 04:12:19 2017
@@ -481,10 +481,10 @@ declare void @fn3(...)
 
 ; Check coalescing of IMPLICIT_DEF instructions:
 ;
-; %vreg1 = IMPLICIT_DEF
-; %vreg2 = MOV32r0
+; %1 = IMPLICIT_DEF
+; %2 = MOV32r0
 ;
-; When coalescing %vreg1 and %vreg2, the IMPLICIT_DEF instruction should be
+; When coalescing %1 and %2, the IMPLICIT_DEF instruction should be
 ; erased along with its value number.
 ;
 define void @rdar12474033() nounwind ssp {

Modified: llvm/trunk/test/CodeGen/X86/handle-move.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/handle-move.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/handle-move.ll (original)
+++ llvm/trunk/test/CodeGen/X86/handle-move.ll Thu Nov 30 04:12:19 2017
@@ -8,8 +8,8 @@
 ; %edx has a live range into the function and is used by the DIV32r.
 ;
 ; Here sinking a kill + dead def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-;       %vreg4: [48r,144r:0)  0@48r
+; 144B -> 180B: DIV32r %4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+;       %4: [48r,144r:0)  0@48r
 ;         -->   [48r,180r:0)  0@48r
 ;       DH:     [0B,16r:0)[128r,144r:2)[144r,144d:1)  0@0B-phi 1@144r 2@128r
 ;         -->   [0B,16r:0)[128r,180r:2)[180r,180d:1)  0@0B-phi 1@180r 2@128r
@@ -25,8 +25,8 @@ entry:
 }
 
 ; Same as above, but moving a kill + live def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-;       %vreg4: [48r,144r:0)  0@48r
+;       %4: [48r,144r:0)  0@48r
 ;         -->   [48r,180r:0)  0@48r
 ;       DH:     [0B,16r:0)[128r,144r:2)[144r,184r:1)  0@0B-phi 1@144r 2@128r
 ;         -->   [0B,16r:0)[128r,180r:2)[180r,184r:1)  0@0B-phi 1@180r 2@128r
@@ -41,13 +41,13 @@ entry:
   ret i32 %add
 }
 
-; Moving a use below the existing kill (%vreg5):
-; Moving a tied virtual register def (%vreg11):
+; Moving a use below the existing kill (%5):
+; Moving a tied virtual register def (%11):
 ;
-; 96B -> 120B: %vreg11<def,tied1> = SUB32rr %vreg11<tied0>, %vreg5
-;       %vreg11:        [80r,96r:1)[96r,144r:0)  0@96r 1@80r
+; 96B -> 120B: %11<def,tied1> = SUB32rr %11<tied0>, %5
+;       %11:        [80r,96r:1)[96r,144r:0)  0@96r 1@80r
 ;            -->        [80r,120r:1)[120r,144r:0)  0@120r 1@80r
-;       %vreg5:         [16r,112r:0)  0@16r
+;       %5:         [16r,112r:0)  0@16r
 ;            -->        [16r,120r:0)  0@16r
 ;
 define i32 @f3(i32 %a, i32 %b) nounwind uwtable readnone ssp {

Modified: llvm/trunk/test/CodeGen/X86/invalid-liveness.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/invalid-liveness.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/invalid-liveness.mir (original)
+++ llvm/trunk/test/CodeGen/X86/invalid-liveness.mir Thu Nov 30 04:12:19 2017
@@ -5,11 +5,11 @@
   define void @func() { ret void }
 ...
 ---
-# Liveness calculation should detect that we do not have a definition for vreg0
-# on all paths; In this example a def for vreg0 is missing when jumping from
+# Liveness calculation should detect that we do not have a definition for %0
+# on all paths; In this example a def for %0 is missing when jumping from
 # bb.0 to bb.3.
 #
-# CHECK: Use of %vreg0 does not have a corresponding definition on every path
+# CHECK: Use of %0 does not have a corresponding definition on every path
 # CHECK: ERROR: Use not jointly dominated by defs.
 name: func
 registers:

Modified: llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll Thu Nov 30 04:12:19 2017
@@ -62,7 +62,7 @@ infloop1:
 
 
 ; RAFast would forget to add a super-register <imp-def> when rewriting:
-;  %vreg10:sub_32bit<def,read-undef> = COPY %R9D<kill>
+;  %10:sub_32bit<def,read-undef> = COPY %R9D<kill>
 ; This trips up the machine code verifier.
 define void @autogen_SD24657(i8*, i32*, i64*, i32, i64, i8) {
 BB:

Modified: llvm/trunk/test/CodeGen/X86/misched-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/misched-copy.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/misched-copy.ll (original)
+++ llvm/trunk/test/CodeGen/X86/misched-copy.ll Thu Nov 30 04:12:19 2017
@@ -10,7 +10,7 @@
 ;
 ; CHECK: *** Final schedule for BB#1 ***
 ; CHECK:      %eax<def> = COPY
-; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
+; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK-NEXT: COPY %e{{[ad]}}x
 ; CHECK:      DIVSSrm

Modified: llvm/trunk/test/CodeGen/X86/norex-subreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/norex-subreg.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/norex-subreg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/norex-subreg.ll Thu Nov 30 04:12:19 2017
@@ -41,10 +41,10 @@ entry:
 
 ; This test case extracts a sub_8bit_hi sub-register:
 ;
-;       %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1
-;       TEST8ri %vreg2, 1, %eflags<imp-def>; GR8:%vreg2
+;       %2<def> = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1
+;       TEST8ri %2, 1, %eflags<imp-def>; GR8:%2
 ;
-; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible.
+; %2 must be constrained to GR8_NOREX, or the COPY could become impossible.
 ;
 ; PR11088
 

Modified: llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll Thu Nov 30 04:12:19 2017
@@ -1,10 +1,10 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s
 ; rdar://5571034
 
-; This requires physreg joining, %vreg13 is live everywhere:
-; 304L		%cl<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
-; 320L		%vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19
-; 336L		%vreg15<def> = SAR32rCL %vreg15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%vreg15
+; This requires physreg joining, %13 is live everywhere:
+; 304L		%cl<def> = COPY %13:sub_8bit; GR32_ABCD:%13
+; 320L		%15<def> = COPY %19; GR32:%15 GR32_NOSP:%19
+; 336L		%15<def> = SAR32rCL %15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%15
 
 define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
 ; CHECK-LABEL: foo:

Modified: llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir?rev=319427&r1=319426&r2=319427&view=diff
==============================================================================
--- llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir (original)
+++ llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir Thu Nov 30 04:12:19 2017
@@ -148,7 +148,7 @@ body:             |
 
 # Let's verify that the slot index ranges for the unused variables argc/argv,
 # connected to physical regs %edi and %rsi, does not overlap with the ranges
-# for %vreg2 and %vreg3. The register allocator is actually allocating the
+# for %2 and %3. The register allocator is actually allocating the
 # virtual registers # to %edi and %esi, so the ranges for argc/argv should
 # not cover the whole BB.
 #
@@ -157,7 +157,7 @@ body:             |
 # CHECKDBG-NEXT:         [0B;0e):0 BB#0-160B
 # CHECKDBG-NEXT: !"argv,5"        [0B;0e):0 Loc0=%rsi
 # CHECKDBG-NEXT:         [0B;0e):0 BB#0-160B
-# CHECKDBG-NEXT: !"a0,7"  [16r;64r):0 Loc0=%vreg2
+# CHECKDBG-NEXT: !"a0,7"  [16r;64r):0 Loc0=%2
 # CHECKDBG-NEXT:         [16r;64r):0 BB#0-160B
-# CHECKDBG-NEXT: !"a1,8"  [32r;80r):0 Loc0=%vreg3
+# CHECKDBG-NEXT: !"a1,8"  [32r;80r):0 Loc0=%3
 # CHECKDBG-NEXT:         [32r;80r):0 BB#0-160B




More information about the llvm-commits mailing list