[llvm] [X86] Add MI-layer routine for getting the index of the first address operand (PR #78019)

Nicholas Mosier via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 13 12:06:32 PST 2024


https://github.com/nmosier updated https://github.com/llvm/llvm-project/pull/78019

>From f432e6aac43a4c1a83715fdc128e8657fe5a731b Mon Sep 17 00:00:00 2001
From: Nicholas Mosier <nmosier at stanford.edu>
Date: Sat, 13 Jan 2024 01:14:13 +0000
Subject: [PATCH 1/2] [X86] Add MI-layer routine for getting the index of the
 first address operand

Add the MI-layer routine X86::getFirstAddrOperandIdx(), which returns the index
of the first address operand of a MachineInstr (or -1 if there is none).

X86II::getMemoryOperandNo(), the existing MC-layer routine used to obtain the index of the first address
operand in a 5-operand X86 memory reference, is incomplete:
it does not handle pseudo-instructions like TCRETURNmi, resulting
in missed optimizations (e.g., x86-optimize-LEAs)
and security holes (e.g., x86-slh and x86-lvi-load)
for the passes that use it.

X86::getFirstAddrOperandIdx() handles both pseudo and real instructions
and is thus more suitable for most use cases than X86II::getMemoryOperandNo().
For this reason, this patch replaces most uses of the latter with the former.
---
 .../X86/X86AvoidStoreForwardingBlocks.cpp     |  4 +-
 llvm/lib/Target/X86/X86DiscriminateMemOps.cpp |  2 +-
 llvm/lib/Target/X86/X86DomainReassignment.cpp |  9 +---
 llvm/lib/Target/X86/X86FixupLEAs.cpp          |  4 +-
 llvm/lib/Target/X86/X86InsertPrefetch.cpp     |  6 +--
 llvm/lib/Target/X86/X86InstrInfo.cpp          | 52 +++++++++++++++----
 llvm/lib/Target/X86/X86InstrInfo.h            |  7 +++
 .../X86LoadValueInjectionLoadHardening.cpp    |  5 +-
 llvm/lib/Target/X86/X86OptimizeLEAs.cpp       | 18 ++-----
 .../X86/X86SpeculativeLoadHardening.cpp       | 20 ++-----
 llvm/test/CodeGen/X86/vaargs.ll               |  3 +-
 11 files changed, 66 insertions(+), 64 deletions(-)

diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index 04931afdec51c3..86614c79c2e69b 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -289,10 +289,8 @@ static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
 }
 
 static int getAddrOffset(const MachineInstr *MI) {
-  const MCInstrDesc &Descl = MI->getDesc();
-  int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
+  const int AddrOffset = X86::getFirstAddrOperandIdx(*MI);
   assert(AddrOffset != -1 && "Expected Memory Operand");
-  AddrOffset += X86II::getOperandBias(Descl);
   return AddrOffset;
 }
 
diff --git a/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
index becd221e1e86ac..19a6a6bf0713d7 100644
--- a/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
+++ b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
@@ -129,7 +129,7 @@ bool X86DiscriminateMemOps::runOnMachineFunction(MachineFunction &MF) {
   bool Changed = false;
   for (auto &MBB : MF) {
     for (auto &MI : MBB) {
-      if (X86II::getMemoryOperandNo(MI.getDesc().TSFlags) < 0)
+      if (X86::getFirstAddrOperandIdx(MI) < 0)
         continue;
       if (BypassPrefetchInstructions && IsPrefetchOpcode(MI.getDesc().Opcode))
         continue;
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 20dbaf797e3272..456bd23b5af197 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -524,12 +524,10 @@ static bool usedAsAddr(const MachineInstr &MI, Register Reg,
   if (!MI.mayLoadOrStore())
     return false;
 
-  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
-  int MemOpStart = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int MemOpStart = X86::getFirstAddrOperandIdx(MI);
   if (MemOpStart == -1)
     return false;
 
-  MemOpStart += X86II::getOperandBias(Desc);
   for (unsigned MemOpIdx = MemOpStart,
                 MemOpEnd = MemOpStart + X86::AddrNumOperands;
        MemOpIdx < MemOpEnd; ++MemOpIdx) {
@@ -559,10 +557,7 @@ void X86DomainReassignment::buildClosure(Closure &C, Register Reg) {
     // Do not add registers which are used in address calculation, they will be
     // added to a different closure.
     int OpEnd = DefMI->getNumOperands();
-    const MCInstrDesc &Desc = DefMI->getDesc();
-    int MemOp = X86II::getMemoryOperandNo(Desc.TSFlags);
-    if (MemOp != -1)
-      MemOp += X86II::getOperandBias(Desc);
+    const int MemOp = X86::getFirstAddrOperandIdx(*DefMI);
     for (int OpIdx = 0; OpIdx < OpEnd; ++OpIdx) {
       if (OpIdx == MemOp) {
         // skip address calculation.
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index beeebf42dfe81a..3c9d4326e8d9ce 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -650,10 +650,8 @@ void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
                                       MachineBasicBlock &MBB) {
   // Process a load, store, or LEA instruction.
   MachineInstr &MI = *I;
-  const MCInstrDesc &Desc = MI.getDesc();
-  int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int AddrOffset = X86::getFirstAddrOperandIdx(MI);
   if (AddrOffset >= 0) {
-    AddrOffset += X86II::getOperandBias(Desc);
     MachineOperand &p = MI.getOperand(AddrOffset + X86::AddrBaseReg);
     if (p.isReg() && p.getReg() != X86::ESP) {
       seekLEAFixup(p, I, MBB);
diff --git a/llvm/lib/Target/X86/X86InsertPrefetch.cpp b/llvm/lib/Target/X86/X86InsertPrefetch.cpp
index 9aa70dff5f9327..00ee7d465210fe 100644
--- a/llvm/lib/Target/X86/X86InsertPrefetch.cpp
+++ b/llvm/lib/Target/X86/X86InsertPrefetch.cpp
@@ -200,11 +200,9 @@ bool X86InsertPrefetch::runOnMachineFunction(MachineFunction &MF) {
       auto Current = MI;
       ++MI;
 
-      int Offset = X86II::getMemoryOperandNo(Current->getDesc().TSFlags);
-      if (Offset < 0)
+      const int MemOpOffset = X86::getFirstAddrOperandIdx(*Current);
+      if (MemOpOffset < 0)
         continue;
-      unsigned Bias = X86II::getOperandBias(Current->getDesc());
-      int MemOpOffset = Offset + Bias;
       // FIXME(mtrofin): ORE message when the recommendation cannot be taken.
       if (!IsMemOpCompatibleWithPrefetch(*Current, MemOpOffset))
         continue;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index bddda6891356e8..acfec1a3072742 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3463,6 +3463,44 @@ bool X86::isX87Instruction(MachineInstr &MI) {
   return false;
 }
 
+int X86::getFirstAddrOperandIdx(const MachineInstr &MI) {
+  const auto isMemOp = [](const MCOperandInfo &OpInfo) -> bool {
+    return OpInfo.OperandType == MCOI::OPERAND_MEMORY;
+  };
+
+  const MCInstrDesc &Desc = MI.getDesc();
+
+  // An instruction cannot have a memory reference if it has fewer than
+  // AddrNumOperands (= 5) explicit operands.
+  if (Desc.getNumOperands() < X86::AddrNumOperands) {
+    assert(none_of(Desc.operands(), isMemOp) &&
+           "Expected no operands to have OPERAND_MEMORY type!");
+    return -1;
+  }
+
+  // The first operand with type OPERAND_MEMORY indicates the start of a memory
+  // reference. We expect the following AddrNumOperands-1 operands to also have
+  // OPERAND_MEMORY type.
+  for (unsigned i = 0; i <= Desc.getNumOperands() - X86::AddrNumOperands; ++i) {
+    if (Desc.operands()[i].OperandType == MCOI::OPERAND_MEMORY) {
+      assert(std::all_of(Desc.operands().begin() + i,
+                         Desc.operands().begin() + i + X86::AddrNumOperands,
+                         isMemOp) &&
+             "Expected all five operands in the memory reference to have "
+             "OPERAND_MEMORY type!");
+      return i;
+    }
+  }
+
+  // Fall back to the MC-layer routine, which only handles real (not pseudo)
+  // instructions.
+  const int FallbackMemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
+  if (FallbackMemOpNo >= 0)
+    return FallbackMemOpNo + X86II::getOperandBias(Desc);
+
+  return -1;
+}
+
 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
   switch (MI.getOpcode()) {
   case X86::TCRETURNdi:
@@ -3723,10 +3761,8 @@ bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
 }
 
 static int getJumpTableIndexFromAddr(const MachineInstr &MI) {
-  const MCInstrDesc &Desc = MI.getDesc();
-  int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int MemRefBegin = X86::getFirstAddrOperandIdx(MI);
   assert(MemRefBegin >= 0 && "instr should have memory operand");
-  MemRefBegin += X86II::getOperandBias(Desc);
 
   const MachineOperand &MO = MI.getOperand(MemRefBegin + X86::AddrDisp);
   if (!MO.isJTI())
@@ -4321,13 +4357,10 @@ static unsigned getLoadStoreRegOpcode(Register Reg,
 std::optional<ExtAddrMode>
 X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                       const TargetRegisterInfo *TRI) const {
-  const MCInstrDesc &Desc = MemI.getDesc();
-  int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int MemRefBegin = X86::getFirstAddrOperandIdx(MemI);
   if (MemRefBegin < 0)
     return std::nullopt;
 
-  MemRefBegin += X86II::getOperandBias(Desc);
-
   auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
   if (!BaseOp.isReg()) // Can be an MO_FrameIndex
     return std::nullopt;
@@ -4448,13 +4481,10 @@ bool X86InstrInfo::getMemOperandsWithOffsetWidth(
     const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
     int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
     const TargetRegisterInfo *TRI) const {
-  const MCInstrDesc &Desc = MemOp.getDesc();
-  int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int MemRefBegin = X86::getFirstAddrOperandIdx(MemOp);
   if (MemRefBegin < 0)
     return false;
 
-  MemRefBegin += X86II::getOperandBias(Desc);
-
   const MachineOperand *BaseOp =
       &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
   if (!BaseOp->isReg()) // Can be an MO_FrameIndex
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index eb0734f9a61824..75d20b3ef9eb5c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -77,6 +77,13 @@ unsigned getSwappedVCMPImm(unsigned Imm);
 
 /// Check if the instruction is X87 instruction.
 bool isX87Instruction(MachineInstr &MI);
+
+/// Return the index of the instruction's first address operand, if it has a
+/// memory reference, or -1 if it has none. Unlike X86II::getMemoryOperandNo(),
+/// this works for both pseudo instructions (e.g., TCRETURNmi) and real
+/// instructions (e.g., JMP64m).
+int getFirstAddrOperandIdx(const MachineInstr &MI);
+
 } // namespace X86
 
 /// isGlobalStubReference - Return true if the specified TargetFlag operand is
diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
index 2e492fa9c5eead..4dfe7556df0030 100644
--- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
@@ -770,16 +770,13 @@ bool X86LoadValueInjectionLoadHardeningPass::instrUsesRegToAccessMemory(
       MI.getOpcode() == X86::SFENCE || MI.getOpcode() == X86::LFENCE)
     return false;
 
-  // FIXME: This does not handle pseudo loading instruction like TCRETURN*
-  const MCInstrDesc &Desc = MI.getDesc();
-  int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
+  const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(MI);
   if (MemRefBeginIdx < 0) {
     LLVM_DEBUG(dbgs() << "Warning: unable to obtain memory operand for loading "
                          "instruction:\n";
                MI.print(dbgs()); dbgs() << '\n';);
     return false;
   }
-  MemRefBeginIdx += X86II::getOperandBias(Desc);
 
   const MachineOperand &BaseMO =
       MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 3172896a8f6092..819339923ded63 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -341,8 +341,7 @@ bool X86OptimizeLEAPass::chooseBestLEA(
     MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
   const MachineFunction *MF = MI.getParent()->getParent();
   const MCInstrDesc &Desc = MI.getDesc();
-  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
-                X86II::getOperandBias(Desc);
+  const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
 
   BestLEA = nullptr;
 
@@ -443,16 +442,13 @@ bool X86OptimizeLEAPass::isReplaceable(const MachineInstr &First,
     MachineInstr &MI = *MO.getParent();
 
     // Get the number of the first memory operand.
-    const MCInstrDesc &Desc = MI.getDesc();
-    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
+    const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
 
     // If the use instruction has no memory operand - the LEA is not
     // replaceable.
     if (MemOpNo < 0)
       return false;
 
-    MemOpNo += X86II::getOperandBias(Desc);
-
     // If the address base of the use instruction is not the LEA def register -
     // the LEA is not replaceable.
     if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
@@ -507,15 +503,12 @@ bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
       continue;
 
     // Get the number of the first memory operand.
-    const MCInstrDesc &Desc = MI.getDesc();
-    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
+    const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
 
     // If instruction has no memory operand - skip it.
     if (MemOpNo < 0)
       continue;
 
-    MemOpNo += X86II::getOperandBias(Desc);
-
     // Do not call chooseBestLEA if there was no matching LEA
     auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
     if (Insns == LEAs.end())
@@ -668,10 +661,7 @@ bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
           }
 
           // Get the number of the first memory operand.
-          const MCInstrDesc &Desc = MI.getDesc();
-          int MemOpNo =
-              X86II::getMemoryOperandNo(Desc.TSFlags) +
-              X86II::getOperandBias(Desc);
+          const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
 
           // Update address base.
           MO.setReg(FirstVReg);
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 6301285fe95457..ce03432fd8cc23 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1317,12 +1317,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
           continue;
 
         // Extract the memory operand information about this instruction.
-        // FIXME: This doesn't handle loading pseudo instructions which we often
-        // could handle with similarly generic logic. We probably need to add an
-        // MI-layer routine similar to the MC-layer one we use here which maps
-        // pseudos much like this maps real instructions.
-        const MCInstrDesc &Desc = MI.getDesc();
-        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
+        const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(MI);
         if (MemRefBeginIdx < 0) {
           LLVM_DEBUG(dbgs()
                          << "WARNING: unable to harden loading instruction: ";
@@ -1330,8 +1325,6 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
           continue;
         }
 
-        MemRefBeginIdx += X86II::getOperandBias(Desc);
-
         MachineOperand &BaseMO =
             MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
         MachineOperand &IndexMO =
@@ -1365,7 +1358,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // could prune out subsequent loads.
         if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
             !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
-            MI.getOperand(0).isReg() &&
+            MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual() &&
             canHardenRegister(MI.getOperand(0).getReg()) &&
             !HardenedAddrRegs.count(BaseReg) &&
             !HardenedAddrRegs.count(IndexReg)) {
@@ -1400,12 +1393,9 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 
         // Check if this is a load whose address needs to be hardened.
         if (HardenLoadAddr.erase(&MI)) {
-          const MCInstrDesc &Desc = MI.getDesc();
-          int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
+          const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(MI);
           assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
 
-          MemRefBeginIdx += X86II::getOperandBias(Desc);
-
           MachineOperand &BaseMO =
               MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
           MachineOperand &IndexMO =
@@ -1802,11 +1792,9 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
 
         // Otherwise, this is a load and the load component can't be data
         // invariant so check how this register is being used.
-        const MCInstrDesc &Desc = UseMI.getDesc();
-        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
+        const int MemRefBeginIdx = X86::getFirstAddrOperandIdx(UseMI);
         assert(MemRefBeginIdx >= 0 &&
                "Should always have mem references here!");
-        MemRefBeginIdx += X86II::getOperandBias(Desc);
 
         MachineOperand &BaseMO =
             UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
diff --git a/llvm/test/CodeGen/X86/vaargs.ll b/llvm/test/CodeGen/X86/vaargs.ll
index dc1c5d97716ba9..60c1070984f3b1 100644
--- a/llvm/test/CodeGen/X86/vaargs.ll
+++ b/llvm/test/CodeGen/X86/vaargs.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs -mcpu=corei7-avx %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
+; RUN: llc -verify-machineinstrs -mcpu=corei7-avx -disable-x86-lea-opt %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
+; Disable the x86-optimize-LEAs pass, since it may introduce an intermediate LEA instruction that changes the base register of the vmovaps instructions from %rsp to something else.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
 

>From cf6b1d34faea3d6ebe6324b5656e24c6c8cec145 Mon Sep 17 00:00:00 2001
From: Nicholas Mosier <nmosier at stanford.edu>
Date: Sat, 13 Jan 2024 20:04:13 +0000
Subject: [PATCH 2/2] Address comments

Revert X86DiscriminateMemOps, X86OptimizeLEAs.
Remove extraneous change in X86SpeculativeLoadHardening.
Revert vaargs.ll test, since X86OptimizeLEAs was reverted.
---
 llvm/lib/Target/X86/X86DiscriminateMemOps.cpp  |  2 +-
 llvm/lib/Target/X86/X86OptimizeLEAs.cpp        | 18 ++++++++++++++----
 .../Target/X86/X86SpeculativeLoadHardening.cpp |  2 +-
 llvm/test/CodeGen/X86/vaargs.ll                |  3 +--
 4 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
index 19a6a6bf0713d7..becd221e1e86ac 100644
--- a/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
+++ b/llvm/lib/Target/X86/X86DiscriminateMemOps.cpp
@@ -129,7 +129,7 @@ bool X86DiscriminateMemOps::runOnMachineFunction(MachineFunction &MF) {
   bool Changed = false;
   for (auto &MBB : MF) {
     for (auto &MI : MBB) {
-      if (X86::getFirstAddrOperandIdx(MI) < 0)
+      if (X86II::getMemoryOperandNo(MI.getDesc().TSFlags) < 0)
         continue;
       if (BypassPrefetchInstructions && IsPrefetchOpcode(MI.getDesc().Opcode))
         continue;
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 819339923ded63..3172896a8f6092 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -341,7 +341,8 @@ bool X86OptimizeLEAPass::chooseBestLEA(
     MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
   const MachineFunction *MF = MI.getParent()->getParent();
   const MCInstrDesc &Desc = MI.getDesc();
-  const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
+  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
+                X86II::getOperandBias(Desc);
 
   BestLEA = nullptr;
 
@@ -442,13 +443,16 @@ bool X86OptimizeLEAPass::isReplaceable(const MachineInstr &First,
     MachineInstr &MI = *MO.getParent();
 
     // Get the number of the first memory operand.
-    const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
+    const MCInstrDesc &Desc = MI.getDesc();
+    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
 
     // If the use instruction has no memory operand - the LEA is not
     // replaceable.
     if (MemOpNo < 0)
       return false;
 
+    MemOpNo += X86II::getOperandBias(Desc);
+
     // If the address base of the use instruction is not the LEA def register -
     // the LEA is not replaceable.
     if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
@@ -503,12 +507,15 @@ bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
       continue;
 
     // Get the number of the first memory operand.
-    const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
+    const MCInstrDesc &Desc = MI.getDesc();
+    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
 
     // If instruction has no memory operand - skip it.
     if (MemOpNo < 0)
       continue;
 
+    MemOpNo += X86II::getOperandBias(Desc);
+
     // Do not call chooseBestLEA if there was no matching LEA
     auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
     if (Insns == LEAs.end())
@@ -661,7 +668,10 @@ bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
           }
 
           // Get the number of the first memory operand.
-          const int MemOpNo = X86::getFirstAddrOperandIdx(MI);
+          const MCInstrDesc &Desc = MI.getDesc();
+          int MemOpNo =
+              X86II::getMemoryOperandNo(Desc.TSFlags) +
+              X86II::getOperandBias(Desc);
 
           // Update address base.
           MO.setReg(FirstVReg);
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index ce03432fd8cc23..4792e3388ae78e 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1358,7 +1358,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // could prune out subsequent loads.
         if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
             !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
-            MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual() &&
+            MI.getOperand(0).isReg() &&
             canHardenRegister(MI.getOperand(0).getReg()) &&
             !HardenedAddrRegs.count(BaseReg) &&
             !HardenedAddrRegs.count(IndexReg)) {
diff --git a/llvm/test/CodeGen/X86/vaargs.ll b/llvm/test/CodeGen/X86/vaargs.ll
index 60c1070984f3b1..dc1c5d97716ba9 100644
--- a/llvm/test/CodeGen/X86/vaargs.ll
+++ b/llvm/test/CodeGen/X86/vaargs.ll
@@ -1,5 +1,4 @@
-; RUN: llc -verify-machineinstrs -mcpu=corei7-avx -disable-x86-lea-opt %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
-; Disable the x86-optimize-LEAs pass, since it may introduce an intermediate LEA instruction that changes the base register of the vmovaps instructions from %rsp to something else.
+; RUN: llc -verify-machineinstrs -mcpu=corei7-avx %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=NO-FLAGS
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
 



More information about the llvm-commits mailing list