[llvm] [NVPTX] Change the alloca address space in NVPTXLowerAlloca (PR #154814)

via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 21 11:17:21 PDT 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-debuginfo

Author: Theodoros Theodoridis (thetheodor)

<details>
<summary>Changes</summary>

This patch refactors NVPTXLowerAlloca to produce simpler IR for allocas. Previously, the pass attached a pair of consecutive address space casts to each alloca: one from addrspace(0) (generic) to addrspace(5) (local), and another immediately back to addrspace(0). Downstream passes had to recognize this idiom to generate efficient PTX. With this patch, NVPTXLowerAlloca changes the address space of each alloca directly to local and inserts a single addrspacecast from local back to generic. The InferAddressSpaces pass can then remove the remaining cast. This change results in fewer address-space-conversion (cvta) instructions in the final PTX.
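
A minimal before/after IR sketch of the transformation, mirroring the example in the updated pass header comment (value names and the exact store lowering are illustrative):

```llvm
; Before this patch: the pass wrapped each alloca in a round-trip cast pair.
%A = alloca i32
%local   = addrspacecast ptr %A to ptr addrspace(5)
%generic = addrspacecast ptr addrspace(5) %local to ptr
store i32 0, ptr %generic        ; becomes st.local.u32 once later passes fold the casts

; With this patch: the alloca itself lives in addrspace(5), with a single cast back.
%A = alloca i32, addrspace(5)
%generic = addrspacecast ptr addrspace(5) %A to ptr
store i32 0, ptr %generic        ; still st.local.u32, with no cvta round trip
```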

---

Patch is 57.89 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/154814.diff


22 Files Affected:

- (modified) llvm/include/llvm/Target/TargetMachine.h (+1-1) 
- (modified) llvm/lib/IR/Verifier.cpp (+8) 
- (modified) llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp (+2-1) 
- (modified) llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp (+2-2) 
- (modified) llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp (+1-4) 
- (modified) llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp (+54-70) 
- (modified) llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp (+2-1) 
- (modified) llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp (+7-3) 
- (modified) llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp (+24) 
- (modified) llvm/lib/Target/NVPTX/NVPTXTargetMachine.h (+3) 
- (modified) llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll (+3-2) 
- (modified) llvm/test/CodeGen/NVPTX/dynamic-stackalloc-regression.ll (+7-7) 
- (modified) llvm/test/CodeGen/NVPTX/f32x2-instructions.ll (+3-4) 
- (modified) llvm/test/CodeGen/NVPTX/indirect_byval.ll (+10-12) 
- (modified) llvm/test/CodeGen/NVPTX/local-stack-frame.ll (+30-37) 
- (modified) llvm/test/CodeGen/NVPTX/lower-alloca.ll (+11-11) 
- (modified) llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll (+4-5) 
- (modified) llvm/test/CodeGen/NVPTX/lower-byval-args.ll (+104-127) 
- (modified) llvm/test/CodeGen/NVPTX/vaargs.ll (+11-11) 
- (modified) llvm/test/CodeGen/NVPTX/variadics-backend.ll (+53-56) 
- (modified) llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll (+1-2) 
- (modified) llvm/test/DebugInfo/NVPTX/dbg-value-const-byref.ll (+1-1) 


``````````diff
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index bf4e490554723..f4cd06bf93d40 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -208,7 +208,7 @@ class LLVM_ABI TargetMachine {
   /// The LLVM Module owns a DataLayout that is used for the target independent
   /// optimizations and code generation. This hook provides a target specific
   /// check on the validity of this DataLayout.
-  bool isCompatibleDataLayout(const DataLayout &Candidate) const {
+  virtual bool isCompatibleDataLayout(const DataLayout &Candidate) const {
     return DL == Candidate;
   }
 
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 9d9b51db98702..2afa2bda53b52 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -114,6 +114,7 @@
 #include "llvm/Pass.h"
 #include "llvm/ProfileData/InstrProf.h"
 #include "llvm/Support/AMDGPUAddrSpace.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
@@ -4498,6 +4499,13 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
           "alloca on amdgpu must be in addrspace(5)", &AI);
   }
 
+  if (TT.isNVPTX()) {
+    Check(AI.getAddressSpace() == NVPTXAS::ADDRESS_SPACE_LOCAL ||
+              AI.getAddressSpace() == NVPTXAS::ADDRESS_SPACE_GENERIC,
+          "AllocaInst can only be in Generic or Local address space for NVPTX.",
+          &AI);
+  }
+
   visitInstruction(AI);
 }
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 7391c2d488b57..8528cb41d9244 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1484,7 +1484,8 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
   if (NumBytes) {
     O << "\t.local .align " << MFI.getMaxAlign().value() << " .b8 \t"
       << DEPOTNAME << getFunctionNumber() << "[" << NumBytes << "];\n";
-    if (static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit()) {
+    if (static_cast<const NVPTXTargetMachine &>(MF.getTarget())
+            .getPointerSize(ADDRESS_SPACE_LOCAL) == 8) {
       O << "\t.reg .b64 \t%SP;\n"
         << "\t.reg .b64 \t%SPL;\n";
     } else {
diff --git a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 47bc15f52bb96..78f18a93b869b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -48,8 +48,8 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF,
     //   mov %SPL, %depot;
     //   cvta.local %SP, %SPL;
     // for local address accesses in MF.
-    bool Is64Bit =
-        static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit();
+    bool Is64Bit = static_cast<const NVPTXTargetMachine &>(MF.getTarget())
+                       .getPointerSize(NVPTXAS::ADDRESS_SPACE_LOCAL) == 8;
     unsigned CvtaLocalOpcode =
         (Is64Bit ? NVPTX::cvta_local_64 : NVPTX::cvta_local);
     unsigned MovDepotOpcode =
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index bb4bb1195f78b..f35407ed99106 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1803,10 +1803,7 @@ SDValue NVPTXTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                   {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
                    DAG.getTargetConstant(Align, DL, MVT::i32)});
 
-  SDValue ASC = DAG.getAddrSpaceCast(
-      DL, Op.getValueType(), Alloc, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
-
-  return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
+  return Alloc;
 }
 
 SDValue NVPTXTargetLowering::LowerSTACKRESTORE(SDValue Op,
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
index 88bc000f39bf7..03412edb9e23c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
@@ -6,16 +6,16 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// For all alloca instructions, and add a pair of cast to local address for
-// each of them. For example,
+// Change the Module's DataLayout to have the local address space for alloca's.
+// Change the address space of each alloca to local and add an addrspacecast to
+// generic address space. For example,
 //
 //   %A = alloca i32
 //   store i32 0, i32* %A ; emits st.u32
 //
 // will be transformed to
 //
-//   %A = alloca i32
-//   %Local = addrspacecast i32* %A to i32 addrspace(5)*
+//   %A = alloca i32, addrspace(5)
 //   %Generic = addrspacecast i32 addrspace(5)* %A to i32*
 //   store i32 0, i32 addrspace(5)* %Generic ; emits st.local.u32
 //
@@ -24,18 +24,24 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
 #include "NVPTX.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
 #include "llvm/Pass.h"
 
 using namespace llvm;
+using namespace NVPTXAS;
 
 namespace {
 class NVPTXLowerAlloca : public FunctionPass {
   bool runOnFunction(Function &F) override;
+  bool doInitialization(Module &M) override;
 
 public:
   static char ID; // Pass identification, replacement for typeid
@@ -58,77 +64,55 @@ bool NVPTXLowerAlloca::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
-  bool Changed = false;
+  SmallVector<AllocaInst *, 16> Allocas;
   for (auto &BB : F)
-    for (auto &I : BB) {
-      if (auto allocaInst = dyn_cast<AllocaInst>(&I)) {
-        Changed = true;
+    for (auto &I : BB)
+      if (auto *Alloca = dyn_cast<AllocaInst>(&I))
+        if (Alloca->getAddressSpace() != ADDRESS_SPACE_LOCAL)
+          Allocas.push_back(Alloca);
+
+  if (Allocas.empty())
+    return false;
 
-        PointerType *AllocInstPtrTy =
-            cast<PointerType>(allocaInst->getType()->getScalarType());
-        unsigned AllocAddrSpace = AllocInstPtrTy->getAddressSpace();
-        assert((AllocAddrSpace == ADDRESS_SPACE_GENERIC ||
-                AllocAddrSpace == ADDRESS_SPACE_LOCAL) &&
-               "AllocaInst can only be in Generic or Local address space for "
-               "NVPTX.");
+  for (AllocaInst *Alloca : Allocas) {
+    auto *NewAlloca = new AllocaInst(
+        Alloca->getAllocatedType(), ADDRESS_SPACE_LOCAL, Alloca->getArraySize(),
+        Alloca->getAlign(), Alloca->getName());
+    auto *Cast = new AddrSpaceCastInst(
+        NewAlloca,
+        PointerType::get(Alloca->getAllocatedType()->getContext(),
+                         ADDRESS_SPACE_GENERIC),
+        "");
+    Cast->insertBefore(Alloca->getIterator());
+    NewAlloca->insertBefore(Cast->getIterator());
+    for (auto &U : llvm::make_early_inc_range(Alloca->uses())) {
+      auto *II = dyn_cast<IntrinsicInst>(U.getUser());
+      if (!II || (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+                  II->getIntrinsicID() != Intrinsic::lifetime_end))
+        continue;
 
-        Instruction *AllocaInLocalAS = allocaInst;
-        auto ETy = allocaInst->getAllocatedType();
+      IRBuilder<> Builder(II);
+      Builder.CreateIntrinsic(II->getIntrinsicID(), {NewAlloca->getType()},
+                              {NewAlloca});
+      II->eraseFromParent();
+    }
 
-        // We need to make sure that LLVM has info that alloca needs to go to
-        // ADDRESS_SPACE_LOCAL for InferAddressSpace pass.
-        //
-        // For allocas in ADDRESS_SPACE_LOCAL, we add addrspacecast to
-        // ADDRESS_SPACE_LOCAL and back to ADDRESS_SPACE_GENERIC, so that
-        // the alloca's users still use a generic pointer to operate on.
-        //
-        // For allocas already in ADDRESS_SPACE_LOCAL, we just need
-        // addrspacecast to ADDRESS_SPACE_GENERIC.
-        if (AllocAddrSpace == ADDRESS_SPACE_GENERIC) {
-          auto ASCastToLocalAS = new AddrSpaceCastInst(
-              allocaInst,
-              PointerType::get(ETy->getContext(), ADDRESS_SPACE_LOCAL), "");
-          ASCastToLocalAS->insertAfter(allocaInst->getIterator());
-          AllocaInLocalAS = ASCastToLocalAS;
-        }
+    Alloca->replaceAllUsesWith(Cast);
+    Alloca->eraseFromParent();
+  }
+  return true;
+}
 
-        auto AllocaInGenericAS = new AddrSpaceCastInst(
-            AllocaInLocalAS,
-            PointerType::get(ETy->getContext(), ADDRESS_SPACE_GENERIC), "");
-        AllocaInGenericAS->insertAfter(AllocaInLocalAS->getIterator());
+bool NVPTXLowerAlloca::doInitialization(Module &M) {
+  const auto &DL = M.getDataLayout();
+  if (DL.getAllocaAddrSpace() == ADDRESS_SPACE_LOCAL)
+    return false;
+  auto DLStr = DL.getStringRepresentation();
 
-        for (Use &AllocaUse : llvm::make_early_inc_range(allocaInst->uses())) {
-          // Check Load, Store, GEP, and BitCast Uses on alloca and make them
-          // use the converted generic address, in order to expose non-generic
-          // addrspacecast to NVPTXInferAddressSpaces. For other types
-          // of instructions this is unnecessary and may introduce redundant
-          // address cast.
-          auto LI = dyn_cast<LoadInst>(AllocaUse.getUser());
-          if (LI && LI->getPointerOperand() == allocaInst &&
-              !LI->isVolatile()) {
-            LI->setOperand(LI->getPointerOperandIndex(), AllocaInGenericAS);
-            continue;
-          }
-          auto SI = dyn_cast<StoreInst>(AllocaUse.getUser());
-          if (SI && SI->getPointerOperand() == allocaInst &&
-              !SI->isVolatile()) {
-            SI->setOperand(SI->getPointerOperandIndex(), AllocaInGenericAS);
-            continue;
-          }
-          auto GI = dyn_cast<GetElementPtrInst>(AllocaUse.getUser());
-          if (GI && GI->getPointerOperand() == allocaInst) {
-            GI->setOperand(GI->getPointerOperandIndex(), AllocaInGenericAS);
-            continue;
-          }
-          auto BI = dyn_cast<BitCastInst>(AllocaUse.getUser());
-          if (BI && BI->getOperand(0) == allocaInst) {
-            BI->setOperand(0, AllocaInGenericAS);
-            continue;
-          }
-        }
-      }
-    }
-  return Changed;
+  auto AddrSpaceStr = "A" + std::to_string(ADDRESS_SPACE_LOCAL);
+  assert(!StringRef(DLStr).contains("A") && "DataLayout should not contain A");
+  M.setDataLayout(DLStr.empty() ? AddrSpaceStr : DLStr + "-" + AddrSpaceStr);
+  return true;
 }
 
 FunctionPass *llvm::createNVPTXLowerAllocaPass() {
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
index e2bbe57c0085c..2b18ca9dd774a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
@@ -529,7 +529,8 @@ void copyByValParam(Function &F, Argument &Arg) {
   // the use of the byval parameter with this alloca instruction.
   AllocA->setAlignment(
       Arg.getParamAlign().value_or(DL.getPrefTypeAlign(StructType)));
-  Arg.replaceAllUsesWith(AllocA);
+  auto *AddressSpaceCast = IRB.CreateAddrSpaceCast(AllocA, Arg.getType(), Arg.getName());
+  Arg.replaceAllUsesWith(AddressSpaceCast);
 
   CallInst *ArgInParam = createNVVMInternalAddrspaceWrap(IRB, Arg);
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 646b554878c70..0c56caeadcffe 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -119,7 +119,7 @@ bool NVPTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                      MI.getOperand(FIOperandNum + 1).getImm();
 
   // Using I0 as the frame pointer
-  MI.getOperand(FIOperandNum).ChangeToRegister(getFrameRegister(MF), false);
+  MI.getOperand(FIOperandNum).ChangeToRegister(getFrameLocalRegister(MF), false);
   MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
   return false;
 }
@@ -127,14 +127,18 @@ bool NVPTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 Register NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
   const NVPTXTargetMachine &TM =
       static_cast<const NVPTXTargetMachine &>(MF.getTarget());
-  return TM.is64Bit() ? NVPTX::VRFrame64 : NVPTX::VRFrame32;
+  return TM.getPointerSize(NVPTXAS::ADDRESS_SPACE_LOCAL) == 8
+             ? NVPTX::VRFrame64
+             : NVPTX::VRFrame32;
 }
 
 Register
 NVPTXRegisterInfo::getFrameLocalRegister(const MachineFunction &MF) const {
   const NVPTXTargetMachine &TM =
       static_cast<const NVPTXTargetMachine &>(MF.getTarget());
-  return TM.is64Bit() ? NVPTX::VRFrameLocal64 : NVPTX::VRFrameLocal32;
+  return TM.getPointerSize(NVPTXAS::ADDRESS_SPACE_LOCAL) == 8
+             ? NVPTX::VRFrameLocal64
+             : NVPTX::VRFrameLocal32;
 }
 
 void NVPTXRegisterInfo::clearDebugRegisterMap() const {
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 0603994606d71..209dd51a44a61 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -371,6 +371,8 @@ void NVPTXPassConfig::addIRPasses() {
   if (getOptLevel() != CodeGenOptLevel::None) {
     addAddressSpaceInferencePasses();
     addStraightLineScalarOptimizationPasses();
+  } else {
+    addPass(createNVPTXLowerAllocaPass());
   }
 
   addPass(createAtomicExpandLegacyPass());
@@ -502,3 +504,25 @@ void NVPTXPassConfig::addMachineSSAOptimization() {
   addPass(&PeepholeOptimizerLegacyID);
   printAndVerify("After codegen peephole optimization pass");
 }
+
+bool NVPTXTargetMachine::isCompatibleDataLayout(
+    const DataLayout &Candidate) const {
+  //XXX: Should we enforce that the Candidate DataLayout has the same address space for allocas?
+  if (DL == Candidate)
+    return true;
+
+  auto DLStr = DL.getStringRepresentation();
+  if (!StringRef(DLStr).contains("A"))
+    DLStr = DLStr.empty() ? "A" + std::to_string(ADDRESS_SPACE_LOCAL)
+                          : DLStr + "-A" + std::to_string(ADDRESS_SPACE_LOCAL);
+  auto NewDL = DataLayout(DLStr);
+
+  return NewDL == Candidate;
+}
+
+unsigned NVPTXTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
+  if (Kind == PseudoSourceValue::FixedStack) {
+    return ADDRESS_SPACE_LOCAL;
+  }
+  return CodeGenTargetMachineImpl::getAddressSpaceForPseudoSourceKind(Kind);
+}
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
index 118a01a0352f5..c2f09a89865c4 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -76,6 +76,9 @@ class NVPTXTargetMachine : public CodeGenTargetMachineImpl {
 
   std::pair<const Value *, unsigned>
   getPredicatedAddrSpace(const Value *V) const override;
+
+  bool isCompatibleDataLayout(const DataLayout &Candidate) const override;
+  unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override;
 }; // NVPTXTargetMachine.
 
 class NVPTXTargetMachine32 : public NVPTXTargetMachine {
diff --git a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
index 0eb7f6462f6fa..e825f8ef19949 100644
--- a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -25,9 +25,10 @@ entry:
 
 ; CHECK: ld.param.b64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
 ; CHECK: cvta.to.global.u64 %rd[[A1_REG:[0-9]+]], %rd[[A_REG]]
-; CHECK: add.u64 %rd[[SP_REG:[0-9]+]], %SP, 0
+; CHECK: add.u64 %rd[[SP_REG0:[0-9]+]], %SPL, 0
+; CHECK: cvta.local.u64 %rd[[SP_REG:[0-9]+]], %rd[[SP_REG0]];
 ; CHECK: ld.global.b32 %r[[A0_REG:[0-9]+]], [%rd[[A1_REG]]]
-; CHECK: st.local.b32 [{{%rd[0-9]+}}], %r[[A0_REG]]
+; CHECK: st.local.b32 [%SPL], %r[[A0_REG]]
 
   %0 = load float, ptr %a, align 4
   store float %0, ptr %buf, align 4
diff --git a/llvm/test/CodeGen/NVPTX/dynamic-stackalloc-regression.ll b/llvm/test/CodeGen/NVPTX/dynamic-stackalloc-regression.ll
index 0474d82556c1e..34702f1c177c5 100644
--- a/llvm/test/CodeGen/NVPTX/dynamic-stackalloc-regression.ll
+++ b/llvm/test/CodeGen/NVPTX/dynamic-stackalloc-regression.ll
@@ -13,13 +13,13 @@ define void @foo(i64 %a, ptr %p0, ptr %p1) {
 ; CHECK-NEXT:    add.s64 %rd2, %rd1, 7;
 ; CHECK-NEXT:    and.b64 %rd3, %rd2, -8;
 ; CHECK-NEXT:    alloca.u64 %rd4, %rd3, 16;
-; CHECK-NEXT:    cvta.local.u64 %rd5, %rd4;
-; CHECK-NEXT:    ld.param.b64 %rd6, [foo_param_1];
-; CHECK-NEXT:    alloca.u64 %rd7, %rd3, 16;
-; CHECK-NEXT:    cvta.local.u64 %rd8, %rd7;
-; CHECK-NEXT:    ld.param.b64 %rd9, [foo_param_2];
-; CHECK-NEXT:    st.b64 [%rd6], %rd5;
-; CHECK-NEXT:    st.b64 [%rd9], %rd8;
+; CHECK-NEXT:    ld.param.b64 %rd5, [foo_param_1];
+; CHECK-NEXT:    cvta.local.u64 %rd6, %rd4;
+; CHECK-NEXT:    ld.param.b64 %rd7, [foo_param_2];
+; CHECK-NEXT:    alloca.u64 %rd8, %rd3, 16;
+; CHECK-NEXT:    cvta.local.u64 %rd9, %rd8;
+; CHECK-NEXT:    st.b64 [%rd5], %rd6;
+; CHECK-NEXT:    st.b64 [%rd7], %rd9;
 ; CHECK-NEXT:    ret;
   %b = alloca i8, i64 %a, align 16
   %c = alloca i8, i64 %a, align 16
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 7ca16f702d8f3..cf00c2c9eaa3f 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -86,15 +86,14 @@ define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
 ; CHECK-NOF32X2-EMPTY:
 ; CHECK-NOF32X2-NEXT:  // %bb.0:
 ; CHECK-NOF32X2-NEXT:    mov.b64 %SPL, __local_depot3;
-; CHECK-NOF32X2-NEXT:    cvta.local.u64 %SP, %SPL;
 ; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_extract_i_param_0];
 ; CHECK-NOF32X2-NEXT:    ld.param.b64 %rd1, [test_extract_i_param_1];
-; CHECK-NOF32X2-NEXT:    st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NOF32X2-NEXT:    st.local.v2.b32 [%SPL], {%r1, %r2};
 ; CHECK-NOF32X2-NEXT:    and.b64 %rd2, %rd1, 1;
 ; CHECK-NOF32X2-NEXT:    shl.b64 %rd3, %rd2, 2;
-; CHECK-NOF32X2-NEXT:    add.u64 %rd4, %SP, 0;
+; CHECK-NOF32X2-NEXT:    add.u64 %rd4, %SPL, 0;
 ; CHECK-NOF32X2-NEXT:    or.b64 %rd5, %rd4, %rd3;
-; CHECK-NOF32X2-NEXT:    ld.b32 %r3, [%rd5];
+; CHECK-NOF32X2-NEXT:    ld.local.b32 %r3, [%rd5];
 ; CHECK-NOF32X2-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-NOF32X2-NEXT:    ret;
 ;
diff --git a/llvm/test/CodeGen/NVPTX/indirect_byval.ll b/llvm/test/CodeGen/NVPTX/indirect_byval.ll
index 673fb73948268..39813efef9b64 100644
--- a/llvm/test/CodeGen/NVPTX/indirect_byval.ll
+++ b/llvm/test/CodeGen/NVPTX/indirect_byval.ll
@@ -21,19 +21,18 @@ define internal i32 @foo() {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    mov.b64 %SPL, __local_depot0;
-; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT:    ld.global.b64 %rd1, [ptr];
+; CHECK-NEXT:    add.u64 %rd1, %SPL, 0;
+; CHECK-NEXT:    cvta.local.u64 %rd2, %rd1;
+; CHECK-NEXT:    ld.global.b64 %rd3, [ptr];
 ; CHECK-NEXT:    { // callseq 0, 0
 ; CHECK-NEXT:    .param .align 1 .b8 param0[1];
 ; CHECK-NEXT:    .param .b64 param1;
 ; CHECK-NEXT:    .param .b32 retval0;
-; CHECK-NEXT:    add.u64 %rd2, %SP, 0;
 ; CHECK-NEXT:    st.param.b64 [param1], %rd2;
-; CHECK-NEXT:    add.u64 %rd3, %SPL, 1;
-; CHECK-NEXT:    ld.local.b8 %rs1, [%rd3];
+; CHECK-NEXT:    ld.local.b8 %rs1, [%SPL+1];
 ; CHECK-NEXT:    st.param.b8 [param0], %rs1;
 ; CHECK-NEXT:    prototype_0 : .callprototype (.param .b32 _) _ (.param .align 1 .b8 _[1], .param .b64 _);
-; CHECK-NEXT:    call (retval0), %rd1, (param0, param1), prototype_0;
+; CHECK-NEXT:    call (retval0), %rd3, (param0, param1), prototype_0;
 ; CHECK-NEXT:    ld.param.b32 %r1, [retval0];
 ; CHECK-NEXT:    } // callseq 0
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
@@ -58,19 +57,18 @@ define internal i32 @bar() {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0: // %entry
 ; CHECK-NEXT:    mov.b64 %SPL, __local_depot1;
-; CHECK-NEXT:    cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT:    ld.global.b64 %rd1, [ptr];
+; CHECK-NEXT:    add.u64 %rd1, %SPL, 0;
+; CHECK-NEXT:    cvta.local.u64 %rd2, %rd1;
+; CHECK-NEXT:    ld.global.b64 %rd3, [ptr];
 ; CHECK-NEXT:    { // callseq 1, 0
 ; CHECK-NEXT:    .param .align 8 .b8 param0[8];
 ; CHECK-NEXT:    .param ...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/154814

