[llvm] 3fe914c - [X86] Use Register and MCRegister. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 15 23:31:05 PDT 2025


Author: Craig Topper
Date: 2025-03-15T23:15:28-07:00
New Revision: 3fe914c9faa8367ec72f44e8953de1d1ea63e011

URL: https://github.com/llvm/llvm-project/commit/3fe914c9faa8367ec72f44e8953de1d1ea63e011
DIFF: https://github.com/llvm/llvm-project/commit/3fe914c9faa8367ec72f44e8953de1d1ea63e011.diff

LOG: [X86] Use Register and MCRegister. NFC
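
The change itself is mechanical: raw unsigned register numbers become llvm::Register (virtual or physical) or llvm::MCRegister (physical only). Below is a minimal sketch of the idioms the diff switches to, with illustrative names rather than code from the patch, assuming the APIs in llvm/CodeGen/Register.h and llvm/MC/MCRegister.h:

    #include "llvm/ADT/DenseSet.h"
    #include "llvm/CodeGen/Register.h"
    #include "llvm/MC/MCRegister.h"

    using namespace llvm;

    // Illustrative only; not a function from the patch.
    void sketch(Register R, DenseSet<MCRegister> &UsedRegs) {
      // A default-constructed Register is the "no register" value, so
      // R.isValid() (or simply `if (R)`) replaces comparisons against 0
      // or X86::NoRegister.
      if (!R.isValid())
        return;

      // Containers keyed on physical registers take MCRegister; a
      // physical Register is narrowed with asMCReg().
      if (R.isPhysical())
        UsedRegs.insert(R.asMCReg());

      // Switch statements over specific registers go through the raw
      // encoding, e.g. `switch (Reg.id()) { case X86::EAX: ... }`.
    }

Register is implicitly convertible to unsigned, so most call sites only change their declared types, and DenseMap/DenseSet keys work because DenseMapInfo specializations exist for both Register and MCRegister.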

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
    llvm/lib/Target/X86/X86CallFrameOptimization.cpp
    llvm/lib/Target/X86/X86CmovConversion.cpp
    llvm/lib/Target/X86/X86CompressEVEX.cpp
    llvm/lib/Target/X86/X86DomainReassignment.cpp
    llvm/lib/Target/X86/X86DynAllocaExpander.cpp
    llvm/lib/Target/X86/X86FastISel.cpp
    llvm/lib/Target/X86/X86FixupLEAs.cpp
    llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
    llvm/lib/Target/X86/X86FloatingPoint.cpp
    llvm/lib/Target/X86/X86FrameLowering.cpp
    llvm/lib/Target/X86/X86FrameLowering.h
    llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86InstrInfo.cpp
    llvm/lib/Target/X86/X86InstrInfo.h
    llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
    llvm/lib/Target/X86/X86OptimizeLEAs.cpp
    llvm/lib/Target/X86/X86RegisterInfo.cpp
    llvm/lib/Target/X86/X86RegisterInfo.h
    llvm/lib/Target/X86/X86VZeroUpper.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index fe9bacb6856a6..3ac7c8222b54b 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -533,7 +533,7 @@ void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
     for (auto &MI : MBB) {
       if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
         continue;
-      int DefVR = MI.getOperand(0).getReg();
+      Register DefVR = MI.getOperand(0).getReg();
       if (!MRI->hasOneNonDBGUse(DefVR))
         continue;
       for (MachineOperand &StoreMO :

diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index 792bcddde7076..0e4add27cce02 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -112,7 +112,7 @@ class X86CallFrameOptimization : public MachineFunctionPass {
   InstClassification classifyInstruction(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
                                          const X86RegisterInfo &RegInfo,
-                                         DenseSet<unsigned int> &UsedRegs);
+                                         const DenseSet<MCRegister> &UsedRegs);
 
   StringRef getPassName() const override { return "X86 Optimize Call Frame"; }
 
@@ -278,7 +278,7 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
 X86CallFrameOptimization::InstClassification
 X86CallFrameOptimization::classifyInstruction(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-    const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
+    const X86RegisterInfo &RegInfo, const DenseSet<MCRegister> &UsedRegs) {
   if (MI == MBB.end())
     return Exit;
 
@@ -341,7 +341,7 @@ X86CallFrameOptimization::classifyInstruction(
     if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
       return Exit;
     if (MO.isDef()) {
-      for (unsigned int U : UsedRegs)
+      for (MCRegister U : UsedRegs)
         if (RegInfo.regsOverlap(Reg, U))
           return Exit;
     }
@@ -406,7 +406,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
   if (MaxAdjust > 4)
     Context.ArgStoreVector.resize(MaxAdjust, nullptr);
 
-  DenseSet<unsigned int> UsedRegs;
+  DenseSet<MCRegister> UsedRegs;
 
   for (InstClassification Classification = Skip; Classification != Exit; ++I) {
     // If this is the COPY of the stack pointer, it's ok to ignore.
@@ -455,7 +455,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
         continue;
       Register Reg = MO.getReg();
       if (Reg.isPhysical())
-        UsedRegs.insert(Reg);
+        UsedRegs.insert(Reg.asMCReg());
     }
   }
 

diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index 86922fb7c1dfb..908e4b63ab0c9 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -406,7 +406,7 @@ bool X86CmovConverterPass::checkForProfitableCmovCandidates(
   DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
   enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
   /// For each register type maps the register to its last def instruction.
-  DenseMap<unsigned, MachineInstr *> RegDefMaps[RegTypeNum];
+  DenseMap<Register, MachineInstr *> RegDefMaps[RegTypeNum];
   /// Maps register operand to its def instruction, which can be nullptr if it
   /// is unknown (e.g., operand is defined outside the loop).
   DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;
@@ -723,7 +723,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
   // operand. We also need to potentially do register rewriting here, but it is
   // simpler as the memory operands are always on the false path so we can
   // simply take that input, whatever it is.
-  DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
+  DenseMap<Register, Register> FalseBBRegRewriteTable;
   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
     auto &MI = *MIIt++;
     // Skip any CMOVs in this group which don't load from memory.
@@ -830,7 +830,7 @@ void X86CmovConverterPass::convertCmovInstsToBranches(
   // That also means that PHI construction must work forward from earlier to
   // later, and that the code must maintain a mapping from earlier PHI's
   // destination registers, and the registers that went into the PHI.
-  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
+  DenseMap<Register, std::pair<Register, Register>> RegRewriteTable;
 
   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
     Register DestReg = MIIt->getOperand(0).getReg();

diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index 7213ed32fc46d..b80c21b008f4b 100644
--- a/llvm/lib/Target/X86/X86CompressEVEX.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -83,7 +83,7 @@ class CompressEVEXPass : public MachineFunctionPass {
 char CompressEVEXPass::ID = 0;
 
 static bool usesExtendedRegister(const MachineInstr &MI) {
-  auto isHiRegIdx = [](unsigned Reg) {
+  auto isHiRegIdx = [](MCRegister Reg) {
     // Check for XMM register with indexes between 16 - 31.
     if (Reg >= X86::XMM16 && Reg <= X86::XMM31)
       return true;
@@ -102,7 +102,7 @@ static bool usesExtendedRegister(const MachineInstr &MI) {
     if (!MO.isReg())
       continue;
 
-    Register Reg = MO.getReg();
+    MCRegister Reg = MO.getReg().asMCReg();
     assert(!X86II::isZMMReg(Reg) &&
            "ZMM instructions should not be in the EVEX->VEX tables");
     if (isHiRegIdx(Reg))

diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 25f81834bfa5e..93e55ca5fabf9 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -400,7 +400,7 @@ class X86DomainReassignment : public MachineFunctionPass {
   /// Enqueue \p Reg to be considered for addition to the closure.
   /// Return false if the closure becomes invalid.
   bool visitRegister(Closure &, Register Reg, RegDomain &Domain,
-                     SmallVectorImpl<unsigned> &Worklist);
+                     SmallVectorImpl<Register> &Worklist);
 
   /// Reassign the closure to \p Domain.
   void reassign(const Closure &C, RegDomain Domain) const;
@@ -422,7 +422,7 @@ char X86DomainReassignment::ID = 0;
 
 bool X86DomainReassignment::visitRegister(Closure &C, Register Reg,
                                           RegDomain &Domain,
-                                          SmallVectorImpl<unsigned> &Worklist) {
+                                          SmallVectorImpl<Register> &Worklist) {
   if (!Reg.isVirtual())
     return true;
 
@@ -544,11 +544,11 @@ static bool usedAsAddr(const MachineInstr &MI, Register Reg,
 }
 
 void X86DomainReassignment::buildClosure(Closure &C, Register Reg) {
-  SmallVector<unsigned, 4> Worklist;
+  SmallVector<Register, 4> Worklist;
   RegDomain Domain = NoDomain;
   visitRegister(C, Reg, Domain, Worklist);
   while (!Worklist.empty()) {
-    unsigned CurReg = Worklist.pop_back_val();
+    Register CurReg = Worklist.pop_back_val();
 
     // Register already in this closure.
     if (!C.insertEdge(CurReg))

diff --git a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
index 1e88824c68879..c2a06efd4d46e 100644
--- a/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
+++ b/llvm/lib/Target/X86/X86DynAllocaExpander.cpp
@@ -57,7 +57,7 @@ class X86DynAllocaExpander : public MachineFunctionPass {
   const X86Subtarget *STI = nullptr;
   const TargetInstrInfo *TII = nullptr;
   const X86RegisterInfo *TRI = nullptr;
-  unsigned StackPtr = 0;
+  Register StackPtr;
   unsigned SlotSize = 0;
   int64_t StackProbeSize = 0;
   bool NoStackArgProbe = false;

diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 9f3d9dca402cb..afe5d7f4bc7ed 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2684,7 +2684,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     MFI.setFrameAddressIsTaken(true);
 
     const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
-    unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
+    Register FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
     assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
             (FrameReg == X86::EBP && VT == MVT::i32)) &&
            "Invalid Frame Register!");
@@ -3478,7 +3478,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   // ELF / PIC requires GOT in the EBX register before function calls via PLT
   // GOT pointer.
   if (Subtarget->isPICStyleGOT()) {
-    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+    Register Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
   }
@@ -3510,11 +3510,11 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   if (!X86SelectCallAddress(Callee, CalleeAM))
     return false;
 
-  unsigned CalleeOp = 0;
+  Register CalleeOp;
   const GlobalValue *GV = nullptr;
   if (CalleeAM.GV != nullptr) {
     GV = CalleeAM.GV;
-  } else if (CalleeAM.Base.Reg != 0) {
+  } else if (CalleeAM.Base.Reg) {
     CalleeOp = CalleeAM.Base.Reg;
   } else
     return false;
@@ -3816,7 +3816,7 @@ Register X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
   Align Alignment = DL.getPrefTypeAlign(CFP->getType());
 
   // x86-32 PIC requires a PIC base register for constant pools.
-  unsigned PICBase = 0;
+  Register PICBase;
   unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
   if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
     PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);

diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index beeebf42dfe81..146591ae135fd 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -325,7 +325,7 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
   return MachineBasicBlock::iterator();
 }
 
-static inline bool isInefficientLEAReg(unsigned Reg) {
+static inline bool isInefficientLEAReg(Register Reg) {
   return Reg == X86::EBP || Reg == X86::RBP ||
          Reg == X86::R13D || Reg == X86::R13;
 }
@@ -337,7 +337,7 @@ static inline bool isInefficientLEAReg(unsigned Reg) {
 static inline bool hasInefficientLEABaseReg(const MachineOperand &Base,
                                             const MachineOperand &Index) {
   return Base.isReg() && isInefficientLEAReg(Base.getReg()) && Index.isReg() &&
-         Index.getReg() != X86::NoRegister;
+         Index.getReg().isValid();
 }
 
 static inline bool hasLEAOffset(const MachineOperand &Offset) {
@@ -557,7 +557,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
   const MachineOperand &Disp =    MI.getOperand(1 + X86::AddrDisp);
   const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
 
-  if (Segment.getReg() != 0 || !Disp.isImm() || Scale.getImm() > 1 ||
+  if (Segment.getReg().isValid() || !Disp.isImm() || Scale.getImm() > 1 ||
       MBB.computeRegisterLiveness(TRI, X86::EFLAGS, I) !=
           MachineBasicBlock::LQR_Dead)
     return false;
@@ -572,9 +572,9 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
 
   // LEA64_32 has 64-bit operands but 32-bit result.
   if (MI.getOpcode() == X86::LEA64_32r) {
-    if (BaseReg != 0)
+    if (BaseReg)
       BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
-    if (IndexReg != 0)
+    if (IndexReg)
       IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
   }
 
@@ -583,7 +583,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
   // Case 1.
   // Look for lea(%reg1, %reg2), %reg1 or lea(%reg2, %reg1), %reg1
   // which can be turned into add %reg2, %reg1
-  if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0 &&
+  if (BaseReg.isValid() && IndexReg.isValid() && Disp.getImm() == 0 &&
       (DestReg == BaseReg || DestReg == IndexReg)) {
     unsigned NewOpcode = getADDrrFromLEA(MI.getOpcode());
     if (DestReg != BaseReg)
@@ -599,7 +599,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
       NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
         .addReg(BaseReg).addReg(IndexReg);
     }
-  } else if (DestReg == BaseReg && IndexReg == 0) {
+  } else if (DestReg == BaseReg && !IndexReg) {
     // Case 2.
     // This is an LEA with only a base register and a displacement,
     // We can use ADDri or INC/DEC.
@@ -631,7 +631,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
           .addReg(BaseReg).addImm(Disp.getImm());
       }
     }
-  } else if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0) {
+  } else if (BaseReg.isValid() && IndexReg.isValid() && Disp.getImm() == 0) {
     // Case 3.
     // Look for and transform the sequence
     //     lea (reg1, reg2), reg3
@@ -697,14 +697,14 @@ void FixupLEAPass::processInstructionForSlowLEA(MachineBasicBlock::iterator &I,
   const MachineOperand &Offset =  MI.getOperand(1 + X86::AddrDisp);
   const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
 
-  if (Segment.getReg() != 0 || !Offset.isImm() ||
+  if (Segment.getReg().isValid() || !Offset.isImm() ||
       MBB.computeRegisterLiveness(TRI, X86::EFLAGS, I, 4) !=
           MachineBasicBlock::LQR_Dead)
     return;
   const Register DstR = Dst.getReg();
   const Register SrcR1 = Base.getReg();
   const Register SrcR2 = Index.getReg();
-  if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
+  if ((!SrcR1 || SrcR1 != DstR) && (!SrcR2 || SrcR2 != DstR))
     return;
   if (Scale.getImm() > 1)
     return;
@@ -712,7 +712,7 @@ void FixupLEAPass::processInstructionForSlowLEA(MachineBasicBlock::iterator &I,
   LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
   MachineInstr *NewMI = nullptr;
   // Make ADD instruction for two registers writing to LEA's destination
-  if (SrcR1 != 0 && SrcR2 != 0) {
+  if (SrcR1 && SrcR2) {
     const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(Opcode));
     const MachineOperand &Src = SrcR1 == DstR ? Index : Base;
     NewMI =
@@ -752,7 +752,7 @@ void FixupLEAPass::processInstrForSlow3OpLEA(MachineBasicBlock::iterator &I,
   if (!(TII->isThreeOperandsLEA(MI) || hasInefficientLEABaseReg(Base, Index)) ||
       MBB.computeRegisterLiveness(TRI, X86::EFLAGS, I, 4) !=
           MachineBasicBlock::LQR_Dead ||
-      Segment.getReg() != X86::NoRegister)
+      Segment.getReg().isValid())
     return;
 
   Register DestReg = Dest.getReg();
@@ -760,9 +760,9 @@ void FixupLEAPass::processInstrForSlow3OpLEA(MachineBasicBlock::iterator &I,
   Register IndexReg = Index.getReg();
 
   if (MI.getOpcode() == X86::LEA64_32r) {
-    if (BaseReg != 0)
+    if (BaseReg)
       BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
-    if (IndexReg != 0)
+    if (IndexReg)
       IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
   }
 

diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index 873dce4e922d1..ca953d6008b27 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -69,7 +69,7 @@ STATISTIC(NumNFsConvertedTo, "Number of NF instructions converted to");
 namespace {
 
 // Convenient array type for storing registers associated with each condition.
-using CondRegArray = std::array<unsigned, X86::LAST_VALID_COND + 1>;
+using CondRegArray = std::array<Register, X86::LAST_VALID_COND + 1>;
 
 class X86FlagsCopyLoweringPass : public MachineFunctionPass {
 public:
@@ -96,11 +96,11 @@ class X86FlagsCopyLoweringPass : public MachineFunctionPass {
   Register promoteCondToReg(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator TestPos,
                             const DebugLoc &TestLoc, X86::CondCode Cond);
-  std::pair<unsigned, bool> getCondOrInverseInReg(
+  std::pair<Register, bool> getCondOrInverseInReg(
       MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
       const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs);
   void insertTest(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
-                  const DebugLoc &Loc, unsigned Reg);
+                  const DebugLoc &Loc, Register Reg);
 
   void rewriteSetCC(MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos,
                     const DebugLoc &Loc, MachineInstr &MI,
@@ -744,11 +744,11 @@ Register X86FlagsCopyLoweringPass::promoteCondToReg(
   return Reg;
 }
 
-std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
+std::pair<Register, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
     MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
     const DebugLoc &TestLoc, X86::CondCode Cond, CondRegArray &CondRegs) {
-  unsigned &CondReg = CondRegs[Cond];
-  unsigned &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
+  Register &CondReg = CondRegs[Cond];
+  Register &InvCondReg = CondRegs[X86::GetOppositeBranchCondition(Cond)];
   if (!CondReg && !InvCondReg)
     CondReg = promoteCondToReg(TestMBB, TestPos, TestLoc, Cond);
 
@@ -760,7 +760,7 @@ std::pair<unsigned, bool> X86FlagsCopyLoweringPass::getCondOrInverseInReg(
 
 void X86FlagsCopyLoweringPass::insertTest(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator Pos,
-                                          const DebugLoc &Loc, unsigned Reg) {
+                                          const DebugLoc &Loc, Register Reg) {
   auto TestI =
       BuildMI(MBB, Pos, Loc, TII->get(X86::TEST8rr)).addReg(Reg).addReg(Reg);
   (void)TestI;
@@ -777,7 +777,7 @@ void X86FlagsCopyLoweringPass::rewriteSetCC(MachineBasicBlock &MBB,
   // Note that we can't usefully rewrite this to the inverse without complex
   // analysis of the users of the setCC. Largely we rely on duplicates which
   // could have been avoided already being avoided here.
-  unsigned &CondReg = CondRegs[Cond];
+  Register &CondReg = CondRegs[Cond];
   if (!CondReg)
     CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);
 
@@ -843,7 +843,7 @@ void X86FlagsCopyLoweringPass::rewriteArithmetic(
   // Now get a register that contains the value of the flag input to the
   // arithmetic. We require exactly this flag to simplify the arithmetic
   // required to materialize it back into the flag.
-  unsigned &CondReg = CondRegs[Cond];
+  Register &CondReg = CondRegs[Cond];
   if (!CondReg)
     CondReg = promoteCondToReg(MBB, Pos, Loc, Cond);
 
@@ -917,7 +917,7 @@ void X86FlagsCopyLoweringPass::rewriteMI(MachineBasicBlock &MBB,
     IsImplicitCC = true;
   }
   assert(CC != X86::COND_INVALID && "Unknown EFLAG user!");
-  unsigned CondReg;
+  Register CondReg;
   bool Inverted;
   std::tie(CondReg, Inverted) =
       getCondOrInverseInReg(MBB, Pos, Loc, CC, CondRegs);

diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 24129de9b7171..e36fd3ee60bae 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -462,7 +462,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
 
     // Get dead variables list now because the MI pointer may be deleted as part
     // of processing!
-    SmallVector<unsigned, 8> DeadRegs;
+    SmallVector<Register, 8> DeadRegs;
     for (const MachineOperand &MO : MI.operands())
       if (MO.isReg() && MO.isDead())
         DeadRegs.push_back(MO.getReg());
@@ -480,7 +480,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
 
     // Check to see if any of the values defined by this instruction are dead
     // after definition.  If so, pop them.
-    for (unsigned Reg : DeadRegs) {
+    for (Register Reg : DeadRegs) {
       // Check if Reg is live on the stack. An inline-asm register operand that
       // is in the clobber list and marked dead might not be live on the stack.
       static_assert(X86::FP7 - X86::FP0 == 7, "sequential FP regnumbers");

diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 68bf1d09d1093..b7374558604ec 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1304,7 +1304,7 @@ X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
 
 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
-                                          const DebugLoc &DL, unsigned Reg,
+                                          const DebugLoc &DL, Register Reg,
                                           uint64_t MaxAlign) const {
   uint64_t Val = -MaxAlign;
   unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -1768,7 +1768,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   int stackGrowth = -SlotSize;
 
   // Find the funclet establisher parameter
-  Register Establisher = X86::NoRegister;
+  MCRegister Establisher;
   if (IsClrFunclet)
     Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
   else if (IsFunclet)
@@ -2081,7 +2081,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   }
 
   int SEHFrameOffset = 0;
-  unsigned SPOrEstablisher;
+  Register SPOrEstablisher;
   if (IsFunclet) {
     if (IsClrFunclet) {
       // The establisher parameter passed to a CLR funclet is actually a pointer
@@ -4447,7 +4447,7 @@ bool X86FrameLowering::skipSpillFPBP(
     // And later LCMPXCHG16B_SAVE_RBX is expanded to restore RBX from SaveRbx.
     // We should skip this instruction sequence.
     int FI;
-    unsigned Reg;
+    Register Reg;
     while (!(MI->getOpcode() == TargetOpcode::COPY &&
              MI->getOperand(1).getReg() == X86::RBX) &&
            !((Reg = TII.isStoreToStackSlot(*MI, FI)) && Reg == X86::RBX))

diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h
index ef41b4653becc..f1e3796f5fddd 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/llvm/lib/Target/X86/X86FrameLowering.h
@@ -47,7 +47,7 @@ class X86FrameLowering : public TargetFrameLowering {
   /// instruction operands should be used to manipulate StackPtr and FramePtr.
   bool Uses64BitFramePtr;
 
-  unsigned StackPtr;
+  Register StackPtr;
 
   /// Emit target stack probe code. This is required for all
   /// large stack allocations on Windows. The caller is required to materialize
@@ -288,7 +288,7 @@ class X86FrameLowering : public TargetFrameLowering {
   /// Aligns the stack pointer by ANDing it with -MaxAlign.
   void BuildStackAlignAND(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
-                          unsigned Reg, uint64_t MaxAlign) const;
+                          Register Reg, uint64_t MaxAlign) const;
 
   /// Make small positive stack adjustments using POPs.
   bool adjustStackWithPops(MachineBasicBlock &MBB,

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c8ca5759578ca..878c054503473 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3289,7 +3289,7 @@ bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
 /// Output instructions required to initialize the global base register,
 /// if necessary.
 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
-  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
+  Register GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
   auto &DL = MF->getDataLayout();
   return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
 }

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 723f40685eb6b..da10a88d0111b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -27043,7 +27043,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     // which is used to reference local variables.
     MachineFunction &MF = DAG.getMachineFunction();
     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
-    unsigned Reg;
+    Register Reg;
     if (RegInfo->hasBasePointer(MF))
       Reg = RegInfo->getBaseRegister();
     else { // Handles the SP or FP case.
@@ -28059,7 +28059,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getFrameIndex(FrameAddrIndex, VT);
   }
 
-  unsigned FrameReg =
+  Register FrameReg =
       RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
   SDLoc dl(Op);  // FIXME probably not meaningful
   unsigned Depth = Op.getConstantOperandVal(0);
@@ -35607,14 +35607,14 @@ X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
   MachineBasicBlock *offsetMBB;
   MachineBasicBlock *endMBB;
 
-  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
-  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
-  unsigned OffsetReg = 0;
+  Register OffsetDestReg;   // Argument address computed by offsetMBB
+  Register OverflowDestReg; // Argument address computed by overflowMBB
+  Register OffsetReg;
 
   if (!UseGPOffset && !UseFPOffset) {
     // If we only pull from the overflow region, we don't create a branch.
     // We don't need to alter control flow.
-    OffsetDestReg = 0; // unused
+    OffsetDestReg = Register(); // unused
     OverflowDestReg = DestReg;
 
     offsetMBB = nullptr;
@@ -35894,7 +35894,7 @@ static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
   // That also means that PHI construction must work forward from earlier to
   // later, and that the code must maintain a mapping from earlier PHI's
   // destination registers, and the registers that went into the PHI.
-  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
+  DenseMap<Register, std::pair<Register, Register>> RegRewriteTable;
   MachineInstrBuilder MIB;
 
   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
@@ -36573,7 +36573,7 @@ static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
 }
 
 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
-                                          unsigned Reg) {
+                                          Register Reg) {
   if (Subtarget.useRetpolineExternalThunk()) {
     // When using an external thunk for retpolines, we pick names that match the
     // names GCC happens to use as well. This helps simplify the implementation
@@ -36588,7 +36588,7 @@ static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
     // LLVM will generate calls to specific thunks, we merely make a best-effort
     // attempt to help out kernels and other systems where duplicating the
     // thunks is costly.
-    switch (Reg) {
+    switch (Reg.id()) {
     case X86::EAX:
       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
       return "__x86_indirect_thunk_eax";
@@ -36611,7 +36611,7 @@ static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
   if (Subtarget.useRetpolineIndirectCalls() ||
       Subtarget.useRetpolineIndirectBranches()) {
     // When targeting an internal COMDAT thunk use an LLVM-specific name.
-    switch (Reg) {
+    switch (Reg.id()) {
     case X86::EAX:
       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
       return "__llvm_retpoline_eax";
@@ -36654,7 +36654,7 @@ X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
   // already a register use operand to the call to hold the callee. If none
   // are available, use EDI instead. EDI is chosen because EBX is the PIC base
   // register and ESI is the base pointer to realigned stack frames with VLAs.
-  SmallVector<unsigned, 3> AvailableRegs;
+  SmallVector<Register, 3> AvailableRegs;
   if (Subtarget.is64Bit())
     AvailableRegs.push_back(X86::R11);
   else
@@ -36663,12 +36663,12 @@ X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
   // Zero out any registers that are already used.
   for (const auto &MO : MI.operands()) {
     if (MO.isReg() && MO.isUse())
-      llvm::replace(AvailableRegs, static_cast<unsigned>(MO.getReg()), 0U);
+      llvm::replace(AvailableRegs, MO.getReg(), Register());
   }
 
   // Choose the first remaining non-zero available register.
-  unsigned AvailableReg = 0;
-  for (unsigned MaybeReg : AvailableRegs) {
+  Register AvailableReg;
+  for (Register MaybeReg : AvailableRegs) {
     if (MaybeReg) {
       AvailableReg = MaybeReg;
       break;
@@ -36757,12 +36757,11 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
   // Memory Reference
   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands());
 
-  unsigned DstReg;
   unsigned MemOpndSlot = 0;
 
   unsigned CurOp = 0;
 
-  DstReg = MI.getOperand(CurOp++).getReg();
+  Register DstReg = MI.getOperand(CurOp++).getReg();
   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
   (void)TRI;
@@ -36809,7 +36808,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
 
   // thisMBB:
   unsigned PtrStoreOpc = 0;
-  unsigned LabelReg = 0;
+  Register LabelReg;
   const int64_t LabelOffset = 1 * PVT.getStoreSize();
   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                      !isPositionIndependent();
@@ -37188,7 +37187,7 @@ void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
   assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
 
   unsigned Op = 0;
-  unsigned VR = 0;
+  Register VR;
 
   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                      !isPositionIndependent();
@@ -37430,14 +37429,14 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
       if (!II.isCall())
         continue;
 
-      DenseMap<unsigned, bool> DefRegs;
+      DenseMap<Register, bool> DefRegs;
       for (auto &MOp : II.operands())
         if (MOp.isReg())
           DefRegs[MOp.getReg()] = true;
 
       MachineInstrBuilder MIB(*MF, &II);
       for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
-        unsigned Reg = SavedRegs[RegIdx];
+        Register Reg = SavedRegs[RegIdx];
         if (!DefRegs[Reg])
           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
       }

diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index eaceca8524884..5c65171dd83b0 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -697,15 +697,14 @@ Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
   if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
     if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
       return MI.getOperand(0).getReg();
-  return 0;
+  return Register();
 }
 
 Register X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                  int &FrameIndex) const {
   unsigned Dummy;
   if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
-    unsigned Reg;
-    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
+    if (Register Reg = isLoadFromStackSlot(MI, FrameIndex))
       return Reg;
     // Check for post-frame index elimination operations
     SmallVector<const MachineMemOperand *, 1> Accesses;
@@ -716,7 +715,7 @@ Register X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
       return MI.getOperand(0).getReg();
     }
   }
-  return 0;
+  return Register();
 }
 
 Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
@@ -732,15 +731,14 @@ Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
     if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
         isFrameOperand(MI, 0, FrameIndex))
       return MI.getOperand(X86::AddrNumOperands).getReg();
-  return 0;
+  return Register();
 }
 
 Register X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {
   unsigned Dummy;
   if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
-    unsigned Reg;
-    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
+    if (Register Reg = isStoreToStackSlot(MI, FrameIndex))
       return Reg;
     // Check for post-frame index elimination operations
     SmallVector<const MachineMemOperand *, 1> Accesses;
@@ -751,7 +749,7 @@ Register X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
       return MI.getOperand(X86::AddrNumOperands).getReg();
     }
   }
-  return 0;
+  return Register();
 }
 
 /// Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
@@ -3624,7 +3622,7 @@ unsigned X86::getVectorRegisterWidth(const MCOperandInfo &Info) {
 }
 
 /// Return true if the Reg is X87 register.
-static bool isX87Reg(unsigned Reg) {
+static bool isX87Reg(Register Reg) {
   return (Reg == X86::FPCW || Reg == X86::FPSW ||
           (Reg >= X86::ST0 && Reg <= X86::ST7));
 }
@@ -4245,12 +4243,12 @@ void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
 }
 
 /// Test if the given register is a physical h register.
-static bool isHReg(unsigned Reg) {
+static bool isHReg(Register Reg) {
   return X86::GR8_ABCD_HRegClass.contains(Reg);
 }
 
 // Try and copy between VR128/VR64 and GR64 registers.
-static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
+static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg,
                                         const X86Subtarget &Subtarget) {
   bool HasAVX = Subtarget.hasAVX();
   bool HasAVX512 = Subtarget.hasAVX512();
@@ -9006,10 +9004,10 @@ bool X86InstrInfo::isSafeToMoveRegClassDefs(
 ///
 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
 ///
-unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
+Register X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
   X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
   Register GlobalBaseReg = X86FI->getGlobalBaseReg();
-  if (GlobalBaseReg != 0)
+  if (GlobalBaseReg)
     return GlobalBaseReg;
 
   // Create the register. The code to initialize it is inserted
@@ -10481,7 +10479,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
 
     MachineDominatorTree *DT =
         &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
-    return VisitNode(DT->getRootNode(), 0);
+    return VisitNode(DT->getRootNode(), Register());
   }
 
   // Visit the dominator subtree rooted at Node in pre-order.
@@ -10489,7 +10487,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
   // TLS_base_addr instructions. Otherwise, create the register
   // when the first such instruction is seen, and then use it
   // as we encounter more instructions.
-  bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
+  bool VisitNode(MachineDomTreeNode *Node, Register TLSBaseAddrReg) {
     MachineBasicBlock *BB = Node->getBlock();
     bool Changed = false;
 
@@ -10521,7 +10519,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
   // Replace the TLS_base_addr instruction I with a copy from
   // TLSBaseAddrReg, returning the new instruction.
   MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
-                                       unsigned TLSBaseAddrReg) {
+                                       Register TLSBaseAddrReg) {
     MachineFunction *MF = I.getParent()->getParent();
     const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
     const bool is64Bit = STI.is64Bit();
@@ -10541,7 +10539,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
 
   // Create a virtual register in *TLSBaseAddrReg, and populate it by
   // inserting a copy instruction after I. Returns the new instruction.
-  MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
+  MachineInstr *SetRegister(MachineInstr &I, Register *TLSBaseAddrReg) {
     MachineFunction *MF = I.getParent()->getParent();
     const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
     const bool is64Bit = STI.is64Bit();

diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index f21e39f56d2be..403964a305ba5 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -514,7 +514,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
   /// the global base register value. Output instructions required to
   /// initialize the register in the function entry block, if necessary.
   ///
-  unsigned getGlobalBaseReg(MachineFunction *MF) const;
+  Register getGlobalBaseReg(MachineFunction *MF) const;
 
   std::pair<uint16_t, uint16_t>
   getExecutionDomain(const MachineInstr &MI) const override;

diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
index c9e495c1eba1f..fecfc3fa68469 100644
--- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
@@ -171,8 +171,8 @@ class X86LoadValueInjectionLoadHardeningPass : public MachineFunctionPass {
   trimMitigatedEdges(std::unique_ptr<MachineGadgetGraph> Graph) const;
   int insertFences(MachineFunction &MF, MachineGadgetGraph &G,
                    EdgeSet &CutEdges /* in, out */) const;
-  bool instrUsesRegToAccessMemory(const MachineInstr &I, unsigned Reg) const;
-  bool instrUsesRegToBranch(const MachineInstr &I, unsigned Reg) const;
+  bool instrUsesRegToAccessMemory(const MachineInstr &I, Register Reg) const;
+  bool instrUsesRegToBranch(const MachineInstr &I, Register Reg) const;
   inline bool isFence(const MachineInstr *MI) const {
     return MI && (MI->getOpcode() == X86::LFENCE ||
                   (STI->useLVIControlFlowIntegrity() && MI->isCall()));
@@ -763,7 +763,7 @@ int X86LoadValueInjectionLoadHardeningPass::insertFences(
 }
 
 bool X86LoadValueInjectionLoadHardeningPass::instrUsesRegToAccessMemory(
-    const MachineInstr &MI, unsigned Reg) const {
+    const MachineInstr &MI, Register Reg) const {
   if (!MI.mayLoadOrStore() || MI.getOpcode() == X86::MFENCE ||
       MI.getOpcode() == X86::SFENCE || MI.getOpcode() == X86::LFENCE)
     return false;
@@ -780,14 +780,14 @@ bool X86LoadValueInjectionLoadHardeningPass::instrUsesRegToAccessMemory(
       MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
   const MachineOperand &IndexMO =
       MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
-  return (BaseMO.isReg() && BaseMO.getReg() != X86::NoRegister &&
+  return (BaseMO.isReg() && BaseMO.getReg().isValid() &&
           TRI->regsOverlap(BaseMO.getReg(), Reg)) ||
-         (IndexMO.isReg() && IndexMO.getReg() != X86::NoRegister &&
+         (IndexMO.isReg() && IndexMO.getReg().isValid() &&
           TRI->regsOverlap(IndexMO.getReg(), Reg));
 }
 
 bool X86LoadValueInjectionLoadHardeningPass::instrUsesRegToBranch(
-    const MachineInstr &MI, unsigned Reg) const {
+    const MachineInstr &MI, Register Reg) const {
   if (!MI.isConditionalBranch())
     return false;
   for (const MachineOperand &Use : MI.uses())

diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 280eaf04f23c5..8dcd4b8cf7551 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -294,8 +294,8 @@ class X86OptimizeLEAPass : public MachineFunctionPass {
   /// Replace debug value MI with a new debug value instruction using register
   /// VReg with an appropriate offset and DIExpression to incorporate the
   /// address displacement AddrDispShift. Return new debug value instruction.
-  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned OldReg,
-                                  unsigned NewReg, int64_t AddrDispShift);
+  MachineInstr *replaceDebugValue(MachineInstr &MI, Register OldReg,
+                                  Register NewReg, int64_t AddrDispShift);
 
   /// Removes LEAs which calculate similar addresses.
   bool removeRedundantLEAs(MemOpMap &LEAs);
@@ -572,8 +572,8 @@ bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
 }
 
 MachineInstr *X86OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
-                                                    unsigned OldReg,
-                                                    unsigned NewReg,
+                                                    Register OldReg,
+                                                    Register NewReg,
                                                     int64_t AddrDispShift) {
   const DIExpression *Expr = MI.getDebugExpression();
   if (AddrDispShift != 0) {

diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index af1060519ae5c..b59d980172458 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -1019,7 +1019,7 @@ Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   return TFI->hasFP(MF) ? FramePtr : StackPtr;
 }
 
-unsigned
+Register
 X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
   Register FrameReg = getFrameRegister(MF);
@@ -1028,7 +1028,7 @@ X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
   return FrameReg;
 }
 
-unsigned
+Register
 X86RegisterInfo::getPtrSizedStackRegister(const MachineFunction &MF) const {
   const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
   Register StackReg = getStackRegister();

diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 009d2a8c7ac3a..5b6ac3c5da019 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -155,8 +155,8 @@ class X86RegisterInfo final : public X86GenRegisterInfo {
 
   // Debug information queries.
   Register getFrameRegister(const MachineFunction &MF) const override;
-  unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
-  unsigned getPtrSizedStackRegister(const MachineFunction &MF) const;
+  Register getPtrSizedFrameRegister(const MachineFunction &MF) const;
+  Register getPtrSizedStackRegister(const MachineFunction &MF) const;
   Register getStackRegister() const { return StackPtr; }
   Register getBaseRegister() const { return BasePtr; }
   /// Returns physical register used as frame pointer.

diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index c92167f4c8b34..9d119b9455855 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -130,7 +130,7 @@ const char* VZeroUpperInserter::getBlockExitStateName(BlockExitState ST) {
 
 /// VZEROUPPER cleans state that is related to Y/ZMM0-15 only.
 /// Thus, there is no need to check for Y/ZMM16 and above.
-static bool isYmmOrZmmReg(unsigned Reg) {
+static bool isYmmOrZmmReg(MCRegister Reg) {
   return (Reg >= X86::YMM0 && Reg <= X86::YMM15) ||
          (Reg >= X86::ZMM0 && Reg <= X86::ZMM15);
 }
@@ -163,7 +163,7 @@ static bool hasYmmOrZmmReg(MachineInstr &MI) {
       continue;
     if (MO.isDebug())
       continue;
-    if (isYmmOrZmmReg(MO.getReg()))
+    if (isYmmOrZmmReg(MO.getReg().asMCReg()))
       return true;
   }
   return false;

