[llvm] 7a25c72 - [X86] Use Register in X86SpeculativeLoadHardening.cpp. NFC (#130905)

via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 12 08:23:25 PDT 2025


Author: Craig Topper
Date: 2025-03-12T08:23:22-07:00
New Revision: 7a25c725abc52284eeab5529ef6264dc0a57277c

URL: https://github.com/llvm/llvm-project/commit/7a25c725abc52284eeab5529ef6264dc0a57277c
DIFF: https://github.com/llvm/llvm-project/commit/7a25c725abc52284eeab5529ef6264dc0a57277c.diff

LOG: [X86] Use Register in X86SpeculativeLoadHardening.cpp. NFC (#130905)

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 0ad17cc877b9f..459cfdc984f16 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -146,8 +146,8 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
 
   /// Manages the predicate state traced through the program.
   struct PredState {
-    unsigned InitialReg = 0;
-    unsigned PoisonReg = 0;
+    Register InitialReg;
+    Register PoisonReg;
 
     const TargetRegisterClass *RC;
     MachineSSAUpdater SSA;
@@ -177,7 +177,7 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
 
   void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
 
-  unsigned saveEFLAGS(MachineBasicBlock &MBB,
+  Register saveEFLAGS(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator InsertPt,
                       const DebugLoc &Loc);
   void restoreEFLAGS(MachineBasicBlock &MBB,
@@ -186,28 +186,28 @@ class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
 
   void mergePredStateIntoSP(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator InsertPt,
-                            const DebugLoc &Loc, unsigned PredStateReg);
-  unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
+                            const DebugLoc &Loc, Register PredStateReg);
+  Register extractPredStateFromSP(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator InsertPt,
                                   const DebugLoc &Loc);
 
   void
   hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
                  MachineOperand &IndexMO,
-                 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
+                 SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
   MachineInstr *
   sinkPostLoadHardenedInst(MachineInstr &MI,
                            SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
   bool canHardenRegister(Register Reg);
-  unsigned hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
+  Register hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertPt,
                                  const DebugLoc &Loc);
-  unsigned hardenPostLoad(MachineInstr &MI);
+  Register hardenPostLoad(MachineInstr &MI);
   void hardenReturnInstr(MachineInstr &MI);
   void tracePredStateThroughCall(MachineInstr &MI);
   void hardenIndirectCallOrJumpInstr(
       MachineInstr &MI,
-      SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
+      SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg);
 };
 
 } // end anonymous namespace
@@ -743,7 +743,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
 
           // We will wire each cmov to each other, but need to start with the
           // incoming pred state.
-          unsigned CurStateReg = PS->InitialReg;
+          Register CurStateReg = PS->InitialReg;
 
           for (X86::CondCode Cond : Conds) {
             int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
@@ -986,7 +986,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
       // No terminator or non-branch terminator.
       continue;
 
-    unsigned TargetReg;
+    Register TargetReg;
 
     switch (TI.getOpcode()) {
     default:
@@ -1265,9 +1265,9 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
   SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
   SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
 
-  SmallSet<unsigned, 16> HardenedAddrRegs;
+  SmallSet<Register, 16> HardenedAddrRegs;
 
-  SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
+  SmallDenseMap<Register, Register, 32> AddrRegToHardenedReg;
 
   // Track the set of load-dependent registers through the basic block. Because
   // the values of these registers have an existing data dependency on a loaded
@@ -1297,11 +1297,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // FIXME: Do a more careful analysis of x86 to build a conservative
         // model here.
         if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
-              return Op.isReg() && LoadDepRegs.test(Op.getReg());
+              return Op.isReg() && LoadDepRegs.test(Op.getReg().id());
             }))
           for (MachineOperand &Def : MI.defs())
             if (Def.isReg())
-              LoadDepRegs.set(Def.getReg());
+              LoadDepRegs.set(Def.getReg().id());
 
         // Both Intel and AMD are guiding that they will change the semantics of
         // LFENCE to be a speculation barrier, so if we see an LFENCE, there is
@@ -1333,11 +1333,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 
         // If we have at least one (non-frame-index, non-RIP) register operand,
         // and neither operand is load-dependent, we need to check the load.
-        unsigned BaseReg = 0, IndexReg = 0;
+        Register BaseReg, IndexReg;
         if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
-            BaseMO.getReg() != X86::NoRegister)
+            BaseMO.getReg().isValid())
           BaseReg = BaseMO.getReg();
-        if (IndexMO.getReg() != X86::NoRegister)
+        if (IndexMO.getReg().isValid())
           IndexReg = IndexMO.getReg();
 
         if (!BaseReg && !IndexReg)
@@ -1348,8 +1348,8 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // needn't check it.
         // FIXME: Is this true in the case where we are hardening loads after
         // they complete? Unclear, need to investigate.
-        if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
-            (IndexReg && LoadDepRegs.test(IndexReg)))
+        if ((BaseReg && LoadDepRegs.test(BaseReg.id())) ||
+            (IndexReg && LoadDepRegs.test(IndexReg.id())))
           continue;
 
         // If post-load hardening is enabled, this load is compatible with
@@ -1378,7 +1378,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 
         for (MachineOperand &Def : MI.defs())
           if (Def.isReg())
-            LoadDepRegs.set(Def.getReg());
+            LoadDepRegs.set(Def.getReg().id());
       }
 
     // Now re-walk the instructions in the basic block, and apply whichever
@@ -1433,7 +1433,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
             }
           }
 
-          unsigned HardenedReg = hardenPostLoad(MI);
+          Register HardenedReg = hardenPostLoad(MI);
 
           // Mark the resulting hardened register as such so we don't re-harden.
           AddrRegToHardenedReg[HardenedReg] = HardenedReg;
@@ -1489,7 +1489,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 /// Note that LLVM can only lower very simple patterns of saved and restored
 /// EFLAGS registers. The restore should always be within the same basic block
 /// as the save so that no PHI nodes are inserted.
-unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
+Register X86SpeculativeLoadHardeningPass::saveEFLAGS(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     const DebugLoc &Loc) {
   // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
@@ -1520,7 +1520,7 @@ void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
 /// across normal stack adjustments.
 void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
-    const DebugLoc &Loc, unsigned PredStateReg) {
+    const DebugLoc &Loc, Register PredStateReg) {
   Register TmpReg = MRI->createVirtualRegister(PS->RC);
   // FIXME: This hard codes a shift distance based on the number of bits needed
   // to stay canonical on 64-bit. We should compute this somehow and support
@@ -1538,7 +1538,7 @@ void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
 }
 
 /// Extracts the predicate state stored in the high bits of the stack pointer.
-unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
+Register X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     const DebugLoc &Loc) {
   Register PredStateReg = MRI->createVirtualRegister(PS->RC);
@@ -1561,7 +1561,7 @@ unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
 
 void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
     MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
-    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
+    SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
   MachineBasicBlock &MBB = *MI.getParent();
   const DebugLoc &Loc = MI.getDebugLoc();
 
@@ -1640,7 +1640,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
   // If EFLAGS are live and we don't have access to instructions that avoid
   // clobbering EFLAGS we need to save and restore them. This in turn makes
   // the EFLAGS no longer live.
-  unsigned FlagsReg = 0;
+  Register FlagsReg;
   if (EFLAGSLive && !Subtarget->hasBMI2()) {
     EFLAGSLive = false;
     FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
@@ -1898,7 +1898,7 @@ bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
 ///
 /// The new, hardened virtual register is returned. It will have the same
 /// register class as `Reg`.
-unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
+Register X86SpeculativeLoadHardeningPass::hardenValueInRegister(
     Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     const DebugLoc &Loc) {
   assert(canHardenRegister(Reg) && "Cannot harden this register!");
@@ -1919,7 +1919,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
     StateReg = NarrowStateReg;
   }
 
-  unsigned FlagsReg = 0;
+  Register FlagsReg;
   if (isEFLAGSLive(MBB, InsertPt, *TRI))
     FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
 
@@ -1948,7 +1948,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
 /// execution and coercing them to one is sufficient.
 ///
 /// Returns the newly hardened register.
-unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
+Register X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
   MachineBasicBlock &MBB = *MI.getParent();
   const DebugLoc &Loc = MI.getDebugLoc();
 
@@ -1965,7 +1965,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
   // Now harden this register's value, getting a hardened reg that is safe to
   // use. Note that we insert the instructions to compute this *after* the
   // defining instruction, not before it.
-  unsigned HardenedReg = hardenValueInRegister(
+  Register HardenedReg = hardenValueInRegister(
       UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);
 
   // Finally, replace the old register (which now only has the uses of the
@@ -2089,7 +2089,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
   MI.setPostInstrSymbol(MF, RetSymbol);
 
   const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
-  unsigned ExpectedRetAddrReg = 0;
+  Register ExpectedRetAddrReg;
 
   // If we have no red zones or if the function returns twice (possibly without
   // using the `ret` instruction) like setjmp, we need to save the expected
@@ -2148,7 +2148,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
   }
 
   // Now we extract the callee's predicate state from the stack pointer.
-  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
+  Register NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
 
   // Test the expected return address against our actual address. If we can
   // form this basic block's address as an immediate, this is easy. Otherwise
@@ -2207,7 +2207,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
 /// not already flagged and add them.
 void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
     MachineInstr &MI,
-    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
+    SmallDenseMap<Register, Register, 32> &AddrRegToHardenedReg) {
   switch (MI.getOpcode()) {
   case X86::FARCALL16m:
   case X86::FARCALL32m:
@@ -2240,7 +2240,7 @@ void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
   // Try to lookup a hardened version of this register. We retain a reference
   // here as we want to update the map to track any newly computed hardened
   // register.
-  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
+  Register &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
 
   // If we don't have a hardened register yet, compute one. Otherwise, just use
   // the already hardened register.


        


More information about the llvm-commits mailing list