[llvm] 8bbef3d - [InstCombine] Iterative replacement in PtrReplacer (#137215)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 16 09:46:57 PDT 2025
Author: Anshil Gandhi
Date: 2025-06-16T12:46:54-04:00
New Revision: 8bbef3d1c9115b3c64365e9b8e4ee84275a4d001
URL: https://github.com/llvm/llvm-project/commit/8bbef3d1c9115b3c64365e9b8e4ee84275a4d001
DIFF: https://github.com/llvm/llvm-project/commit/8bbef3d1c9115b3c64365e9b8e4ee84275a4d001.diff
LOG: [InstCombine] Iterative replacement in PtrReplacer (#137215)
This patch enhances the PtrReplacer as follows:
1. Users are now collected iteratively to avoid deep recursion on the call
stack. PHIs whose incoming values have not yet been visited are pushed back
onto the worklist for reconsideration.
2. Users of the pointer root are replaced in a reverse-postorder traversal
rather than a simple traversal over the collected users (see the sketch
below). This ordering ensures that an instruction's operands are replaced
before the instruction itself.
3. When replacing a PHI, an incoming value that has no replacement is kept
unchanged.
This patch specifically fixes the case where an incoming value of a PHI is
an addrspacecast.
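To make the two phases above easier to follow, here is a minimal,
self-contained C++ sketch of the same idea (iterative collection of
transitive users, then replacement in reverse postorder) over a toy
def-use graph. The Node struct, its Users list, and the printed "replace"
action are illustrative stand-ins only, not LLVM API; the actual patch
operates on Instruction users via collectUsers() and replacePointer(), as
shown in the diff below.

// Illustrative sketch only (not LLVM code): collect the transitive users of
// a root node iteratively, then rewrite them in reverse postorder so that a
// node's operands are rewritten before the node itself.
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Node {
  std::string Name;
  std::vector<Node *> Users; // nodes that use this node as an operand
};

int main() {
  // Toy def-use graph: Root is used by GEP and ASCast; GEP by PHI and Load;
  // ASCast by PHI.
  Node Root{"root", {}}, GEP{"gep", {}}, ASCast{"ascast", {}}, PHI{"phi", {}},
      Load{"load", {}};
  Root.Users = {&GEP, &ASCast};
  GEP.Users = {&PHI, &Load};
  ASCast.Users = {&PHI};

  // Phase 1: collect all transitive users of Root with an explicit worklist
  // (no recursion), mirroring the iterative collectUsers().
  std::unordered_set<Node *> UsersToReplace;
  std::vector<Node *> Worklist(Root.Users.begin(), Root.Users.end());
  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    if (!UsersToReplace.insert(N).second)
      continue; // already collected
    for (Node *U : N->Users)
      Worklist.push_back(U);
  }

  // Phase 2: iterative postorder DFS over the collected users. A node is
  // recorded only after all of its collected users have been recorded.
  std::vector<Node *> PostOrder;
  std::unordered_set<Node *> Visited, Emitted;
  std::vector<Node *> Stack{&Root};
  while (!Stack.empty()) {
    Node *N = Stack.back();
    if (Visited.insert(N).second) {
      // First visit: keep N on the stack and push its collected users.
      for (Node *U : N->Users)
        if (UsersToReplace.count(U))
          Stack.push_back(U);
    } else {
      // Second visit: all users are done; record N in postorder.
      if (Emitted.insert(N).second)
        PostOrder.push_back(N);
      Stack.pop_back();
    }
  }

  // Reverse postorder: operands are always rewritten before their users.
  for (auto It = PostOrder.rbegin(); It != PostOrder.rend(); ++It)
    std::cout << "replace " << (*It)->Name << '\n'; // stand-in for replace(I)
  return 0;
}

In this toy example the reverse-postorder walk prints root, gep, load,
ascast, phi, so the GEP and addrspacecast feeding the PHI are rewritten
before the PHI itself, which is exactly the property point 2 relies on.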
Added:
llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll
Modified:
llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index a9751ab03e20e..9aec90120d8b0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -243,11 +243,10 @@ class PointerReplacer {
void replacePointer(Value *V);
private:
- bool collectUsersRecursive(Instruction &I);
void replace(Instruction *I);
- Value *getReplacement(Value *I);
+ Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }
bool isAvailable(Instruction *I) const {
- return I == &Root || Worklist.contains(I);
+ return I == &Root || UsersToReplace.contains(I);
}
bool isEqualOrValidAddrSpaceCast(const Instruction *I,
@@ -259,8 +258,7 @@ class PointerReplacer {
return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
}
- SmallPtrSet<Instruction *, 32> ValuesToRevisit;
- SmallSetVector<Instruction *, 4> Worklist;
+ SmallSetVector<Instruction *, 32> UsersToReplace;
MapVector<Value *, Value *> WorkMap;
InstCombinerImpl &IC;
Instruction &Root;
@@ -269,72 +267,79 @@ class PointerReplacer {
} // end anonymous namespace
bool PointerReplacer::collectUsers() {
- if (!collectUsersRecursive(Root))
- return false;
-
- // Ensure that all outstanding (indirect) users of I
- // are inserted into the Worklist. Return false
- // otherwise.
- return llvm::set_is_subset(ValuesToRevisit, Worklist);
-}
+ SmallVector<Instruction *> Worklist;
+ SmallSetVector<Instruction *, 32> ValuesToRevisit;
+
+ auto PushUsersToWorklist = [&](Instruction *Inst) {
+ for (auto *U : Inst->users())
+ if (auto *I = dyn_cast<Instruction>(U))
+ if (!isAvailable(I) && !ValuesToRevisit.contains(I))
+ Worklist.emplace_back(I);
+ };
-bool PointerReplacer::collectUsersRecursive(Instruction &I) {
- for (auto *U : I.users()) {
- auto *Inst = cast<Instruction>(&*U);
+ PushUsersToWorklist(&Root);
+ while (!Worklist.empty()) {
+ Instruction *Inst = Worklist.pop_back_val();
if (auto *Load = dyn_cast<LoadInst>(Inst)) {
if (Load->isVolatile())
return false;
- Worklist.insert(Load);
+ UsersToReplace.insert(Load);
} else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
- // All incoming values must be instructions for replacability
- if (any_of(PHI->incoming_values(),
- [](Value *V) { return !isa<Instruction>(V); }))
- return false;
-
- // If at least one incoming value of the PHI is not in Worklist,
- // store the PHI for revisiting and skip this iteration of the
- // loop.
- if (any_of(PHI->incoming_values(), [this](Value *V) {
- return !isAvailable(cast<Instruction>(V));
+ /// TODO: Handle poison and null pointers for PHI and select.
+ // If all incoming values are available, mark this PHI as
+      // replaceable and push its users into the worklist.
+ bool IsReplacable = true;
+ if (all_of(PHI->incoming_values(), [&](Value *V) {
+ if (!isa<Instruction>(V))
+ return IsReplacable = false;
+ return isAvailable(cast<Instruction>(V));
})) {
- ValuesToRevisit.insert(Inst);
+ UsersToReplace.insert(PHI);
+ PushUsersToWorklist(PHI);
continue;
}
- Worklist.insert(PHI);
- if (!collectUsersRecursive(*PHI))
- return false;
- } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
- if (!isa<Instruction>(SI->getTrueValue()) ||
- !isa<Instruction>(SI->getFalseValue()))
+ // Either an incoming value is not an instruction or not all
+ // incoming values are available. If this PHI was already
+ // visited prior to this iteration, return false.
+ if (!IsReplacable || !ValuesToRevisit.insert(PHI))
return false;
- if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
- !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
- ValuesToRevisit.insert(Inst);
- continue;
+ // Push PHI back into the stack, followed by unavailable
+ // incoming values.
+ Worklist.emplace_back(PHI);
+ for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
+ auto *IncomingValue = cast<Instruction>(PHI->getIncomingValue(Idx));
+ if (UsersToReplace.contains(IncomingValue))
+ continue;
+ if (!ValuesToRevisit.insert(IncomingValue))
+ return false;
+ Worklist.emplace_back(IncomingValue);
}
- Worklist.insert(SI);
- if (!collectUsersRecursive(*SI))
- return false;
- } else if (isa<GetElementPtrInst>(Inst)) {
- Worklist.insert(Inst);
- if (!collectUsersRecursive(*Inst))
+ } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
+ auto *TrueInst = dyn_cast<Instruction>(SI->getTrueValue());
+ auto *FalseInst = dyn_cast<Instruction>(SI->getFalseValue());
+ if (!TrueInst || !FalseInst)
return false;
+
+ UsersToReplace.insert(SI);
+ PushUsersToWorklist(SI);
+ } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+ UsersToReplace.insert(GEP);
+ PushUsersToWorklist(GEP);
} else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
if (MI->isVolatile())
return false;
- Worklist.insert(Inst);
+ UsersToReplace.insert(Inst);
} else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
- Worklist.insert(Inst);
- if (!collectUsersRecursive(*Inst))
- return false;
+ UsersToReplace.insert(Inst);
+ PushUsersToWorklist(Inst);
} else if (Inst->isLifetimeStartOrEnd()) {
continue;
} else {
// TODO: For arbitrary uses with address space mismatches, should we check
// if we can introduce a valid addrspacecast?
- LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
+ LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
return false;
}
}
@@ -342,7 +347,39 @@ bool PointerReplacer::collectUsersRecursive(Instruction &I) {
return true;
}
-Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
+void PointerReplacer::replacePointer(Value *V) {
+ assert(cast<PointerType>(Root.getType()) != cast<PointerType>(V->getType()) &&
+ "Invalid usage");
+ WorkMap[&Root] = V;
+ SmallVector<Instruction *> Worklist;
+ SetVector<Instruction *> PostOrderWorklist;
+ SmallPtrSet<Instruction *, 32> Visited;
+
+ // Perform a postorder traversal of the users of Root.
+ Worklist.push_back(&Root);
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.back();
+
+ // If I has not been processed before, push each of its
+    // replaceable users into the worklist.
+ if (Visited.insert(I).second) {
+ for (auto *U : I->users()) {
+ auto *UserInst = cast<Instruction>(U);
+ if (UsersToReplace.contains(UserInst))
+ Worklist.push_back(UserInst);
+ }
+ // Otherwise, users of I have already been pushed into
+ // the PostOrderWorklist. Push I as well.
+ } else {
+ PostOrderWorklist.insert(I);
+ Worklist.pop_back();
+ }
+ }
+
+ // Replace pointers in reverse-postorder.
+ for (Instruction *I : reverse(PostOrderWorklist))
+ replace(I);
+}
void PointerReplacer::replace(Instruction *I) {
if (getReplacement(I))
@@ -364,13 +401,15 @@ void PointerReplacer::replace(Instruction *I) {
// replacement (new value).
WorkMap[NewI] = NewI;
} else if (auto *PHI = dyn_cast<PHINode>(I)) {
- Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
- auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
- PHI->getName(), PHI->getIterator());
- for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
- NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
- PHI->getIncomingBlock(I));
- WorkMap[PHI] = NewPHI;
+ // Create a new PHI by replacing any incoming value that is a user of the
+ // root pointer and has a replacement.
+ Value *V = WorkMap.lookup(PHI->getIncomingValue(0));
+ PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());
+ for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
+ Value *V = WorkMap.lookup(PHI->getIncomingValue(I));
+ PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));
+ }
+ WorkMap[PHI] = PHI;
} else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
auto *V = getReplacement(GEP->getPointerOperand());
assert(V && "Operand not replaced");
@@ -434,18 +473,6 @@ void PointerReplacer::replace(Instruction *I) {
}
}
-void PointerReplacer::replacePointer(Value *V) {
-#ifndef NDEBUG
- auto *PT = cast<PointerType>(Root.getType());
- auto *NT = cast<PointerType>(V->getType());
- assert(PT != NT && "Invalid usage");
-#endif
- WorkMap[&Root] = V;
-
- for (Instruction *Workitem : Worklist)
- replace(Workitem);
-}
-
Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
return I;
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll b/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll
new file mode 100644
index 0000000000000..538cc19f9722e
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=instcombine -S < %s | FileCheck %s
+
+%struct.type = type { [256 x <2 x i64>] }
+@g1 = external hidden addrspace(3) global %struct.type, align 16
+
+; This test requires the PtrReplacer to replace users in an RPO traversal.
+; Furthermore, %ptr.else need not be replaced, so it must be retained in
+; %ptr.sink.
+define <2 x i64> @func(ptr addrspace(4) byref(%struct.type) align 16 %0, i1 %cmp.0) {
+; CHECK-LABEL: define <2 x i64> @func(
+; CHECK-SAME: ptr addrspace(4) byref([[STRUCT_TYPE:%.*]]) align 16 [[TMP0:%.*]], i1 [[CMP_0:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[CMP_0]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[VAL_THEN:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr
+; CHECK-NEXT: br label %[[SINK:.*]]
+; CHECK: [[IF_ELSE]]:
+; CHECK-NEXT: [[PTR_ELSE:%.*]] = load ptr, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @g1, i32 32), align 16
+; CHECK-NEXT: br label %[[SINK]]
+; CHECK: [[SINK]]:
+; CHECK-NEXT: [[PTR_SINK:%.*]] = phi ptr [ [[PTR_ELSE]], %[[IF_ELSE]] ], [ [[VAL_THEN]], %[[IF_THEN]] ]
+; CHECK-NEXT: [[VAL_SINK:%.*]] = load <2 x i64>, ptr [[PTR_SINK]], align 16
+; CHECK-NEXT: ret <2 x i64> [[VAL_SINK]]
+;
+entry:
+ %coerce = alloca %struct.type, align 16, addrspace(5)
+ call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) align 16 %coerce, ptr addrspace(4) align 16 %0, i64 4096, i1 false)
+ br i1 %cmp.0, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %ptr.then = getelementptr inbounds i8, ptr addrspace(5) %coerce, i64 0
+ %val.then = addrspacecast ptr addrspace(5) %ptr.then to ptr
+ br label %sink
+
+if.else: ; preds = %entry
+ %ptr.else = load ptr, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @g1, i32 32), align 16
+ %val.else = getelementptr inbounds nuw i8, ptr %ptr.else, i64 0
+ br label %sink
+
+sink:
+ %ptr.sink = phi ptr [ %val.else, %if.else ], [ %val.then, %if.then ]
+ %val.sink = load <2 x i64>, ptr %ptr.sink, align 16
+ ret <2 x i64> %val.sink
+}
+
+define <2 x i64> @func_phi_loop(ptr addrspace(4) byref(%struct.type) align 16 %0, i1 %cmp.0) {
+; CHECK-LABEL: define <2 x i64> @func_phi_loop(
+; CHECK-SAME: ptr addrspace(4) byref([[STRUCT_TYPE:%.*]]) align 16 [[TMP0:%.*]], i1 [[CMP_0:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[VAL_0:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[PTR_PHI_R:%.*]] = phi ptr [ [[PTR_1:%.*]], %[[LOOP]] ], [ [[VAL_0]], %[[ENTRY]] ]
+; CHECK-NEXT: [[PTR_1]] = load ptr, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @g1, i32 32), align 16
+; CHECK-NEXT: br i1 [[CMP_0]], label %[[LOOP]], label %[[SINK:.*]]
+; CHECK: [[SINK]]:
+; CHECK-NEXT: [[VAL_SINK:%.*]] = load <2 x i64>, ptr [[PTR_PHI_R]], align 16
+; CHECK-NEXT: ret <2 x i64> [[VAL_SINK]]
+;
+entry:
+ %coerce = alloca %struct.type, align 16, addrspace(5)
+ call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) align 16 %coerce, ptr addrspace(4) align 16 %0, i64 4096, i1 false)
+ %ptr.0 = getelementptr inbounds i8, ptr addrspace(5) %coerce, i64 0
+ %val.0 = addrspacecast ptr addrspace(5) %ptr.0 to ptr
+ br label %loop
+
+loop:
+ %ptr.phi = phi ptr [ %val.1, %loop ], [ %val.0, %entry ]
+ %ptr.1 = load ptr, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @g1, i32 32), align 16
+ %val.1 = getelementptr inbounds nuw i8, ptr %ptr.1, i64 0
+ br i1 %cmp.0, label %loop, label %sink
+
+sink:
+ %val.sink = load <2 x i64>, ptr %ptr.phi, align 16
+ ret <2 x i64> %val.sink
+}
+
+declare void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noalias writeonly captures(none), ptr addrspace(4) noalias readonly captures(none), i64, i1 immarg) #0