[llvm] [InferAs] Infer the address space of inttoptr (PR #175406)
Luo Yuanke via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 18 17:00:55 PST 2026
https://github.com/LuoYuanke updated https://github.com/llvm/llvm-project/pull/175406
>From 0d8c104c4a10c8da0f67723c6bbd4673a411ca7d Mon Sep 17 00:00:00 2001
From: Yuanke Luo <ykluo at birentech.com>
Date: Sun, 11 Jan 2026 11:53:49 +0800
Subject: [PATCH] [InferAs] Infer the address space of inttoptr
This patch follows the discussion of #173244.
Currently, InferAddressSpaces checks that the bit value does not
change for a <ptrtoint, inttoptr> address space cast. However, that
condition is too strict. Sometimes only the low address bits change
for swizzling, while the address space stays the same. Taking the code
below as an example, we can transform `%gp2 = inttoptr i64 %b to ptr`
to `%gp2 = inttoptr i64 %b to ptr addrspace(2)` and specify
addrspace(2) for the following store instruction.
```
%gp = addrspacecast ptr addrspace(2) %sp to ptr
%a = ptrtoint ptr %gp to i64
%b = xor i64 7, %a
%gp2 = inttoptr i64 %b to ptr
store i16 0, ptr %gp2, align 2
```
This patch adds the intrinsic `void @llvm.ptr.bit.diff(ptr, ptr, i32, i64)`.
The InferAddrSpacesPrepare pass checks whether the inttoptr is derived
from a ptrtoint and inserts llvm.ptr.bit.diff to compute the address
difference. The InstCombine pass can then deduce the diff to be the
constant zero when there are only bit operations on the low address
bits, so the InferAddrSpaces pass can use that address-diff information
to perform the address space inference. The target can specify how many
low bits are mutable with respect to the address space cast; that is,
however the low bits change, the address space remains the same.
---
.../llvm/Analysis/TargetTransformInfo.h | 13 ++
.../llvm/Analysis/TargetTransformInfoImpl.h | 5 +
llvm/include/llvm/CodeGen/BasicTTIImpl.h | 7 +
llvm/include/llvm/IR/Intrinsics.td | 4 +
llvm/include/llvm/InitializePasses.h | 1 +
llvm/include/llvm/Target/TargetMachine.h | 5 +
llvm/include/llvm/Transforms/Scalar.h | 4 +
.../Scalar/InferAddressSpacesPrepare.h | 27 +++
llvm/lib/Analysis/TargetTransformInfo.cpp | 5 +
llvm/lib/Passes/PassBuilder.cpp | 1 +
llvm/lib/Passes/PassRegistry.def | 1 +
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 10 +
llvm/lib/Target/NVPTX/NVPTXTargetMachine.h | 3 +
llvm/lib/Transforms/Scalar/CMakeLists.txt | 1 +
.../Transforms/Scalar/InferAddressSpaces.cpp | 124 +++++++++--
.../Scalar/InferAddressSpacesPrepare.cpp | 199 ++++++++++++++++++
llvm/lib/Transforms/Scalar/Scalar.cpp | 1 +
.../InferAddressSpaces/NVPTX/int2ptr.ll | 169 +++++++++++++++
.../llvm/lib/Transforms/Scalar/BUILD.gn | 1 +
19 files changed, 564 insertions(+), 17 deletions(-)
create mode 100644 llvm/include/llvm/Transforms/Scalar/InferAddressSpacesPrepare.h
create mode 100644 llvm/lib/Transforms/Scalar/InferAddressSpacesPrepare.cpp
create mode 100644 llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index ff91b24ff17e5..5c17e7c8fc192 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -567,6 +567,19 @@ class TargetTransformInfo {
LLVM_ABI KnownBits computeKnownBitsAddrSpaceCast(
unsigned FromAS, unsigned ToAS, const KnownBits &FromPtrBits) const;
+  /// Return true if the low bits of addresses in the target address spaces are
+  /// mutable, i.e. changes to those low bits do not affect the address space.
+ /// \p AsLSBSizePairs return the LSB bit size corresponding to the address
+ /// space. Take the following code on NVPTX as an example.
+ /// %gp = addrspacecast ptr addrspace(2) %sp to ptr
+ /// %a = ptrtoint ptr %gp to i64
+ /// %b = xor i64 7, %a
+ /// %gp2 = inttoptr i64 %b to ptr
+  /// Since only the low 3 bits are changed, we can infer that %gp2 retains
+  /// address space 2.
+ LLVM_ABI bool getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs) const;
+
/// Return true if globals in this address space can have initializers other
/// than `undef`.
LLVM_ABI bool
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 07b3755924fd1..7f756e6bf1bc9 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -193,6 +193,11 @@ class TargetTransformInfoImplBase {
return FromPtrBits.anyextOrTrunc(ToASBitSize);
}
+ virtual bool getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs) const {
+ return false;
+ }
+
virtual bool
canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
return AS == 0;
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index ef91c845ce9e7..15a54627fdfb5 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -432,6 +432,13 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
}
+ bool getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs)
+ const override {
+ return getTLI()->getTargetMachine().getMutableLSBSizeInAddrSpaces(
+ AsLSBSizePairs);
+ }
+
unsigned getAssumedAddrSpace(const Value *V) const override {
return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
}
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 24d51bdaf67fb..df628468bd630 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -961,6 +961,10 @@ def int_readsteadycounter : DefaultAttrsIntrinsic<[llvm_i64_ty]>;
def int_assume : DefaultAttrsIntrinsic<
[], [llvm_i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly, NoUndef<ArgIndex<0>>]>;
+def int_ptr_bit_diff
+ : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty, llvm_anyint_ty],
+ [IntrWriteMem, IntrInaccessibleMemOnly]>;
+
// 'llvm.experimental.noalias.scope.decl' intrinsic: Inserted at the location of
// noalias scope declaration. Makes it possible to identify that a noalias scope
// is only valid inside the body of a loop.
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index e9e3ca3cc93a0..5135f42ccaa58 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -144,6 +144,7 @@ LLVM_ABI void
initializeImmutableModuleSummaryIndexWrapperPassPass(PassRegistry &);
LLVM_ABI void initializeImplicitNullChecksPass(PassRegistry &);
LLVM_ABI void initializeIndirectBrExpandLegacyPassPass(PassRegistry &);
+LLVM_ABI void initializeInferAddressSpacesPreparePass(PassRegistry &);
LLVM_ABI void initializeInferAddressSpacesPass(PassRegistry &);
LLVM_ABI void initializeInstSimplifyLegacyPassPass(PassRegistry &);
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &);
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index d0fd483a8ddaa..aa47abf2f9cf3 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -367,6 +367,11 @@ class LLVM_ABI TargetMachine {
return false;
}
+ virtual bool getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs) const {
+ return false;
+ }
+
void setPGOOption(std::optional<PGOOptions> PGOOpt) { PGOOption = PGOOpt; }
const std::optional<PGOOptions> &getPGOOption() const { return PGOOption; }
diff --git a/llvm/include/llvm/Transforms/Scalar.h b/llvm/include/llvm/Transforms/Scalar.h
index 8e68b6a57e51f..3345d69c9192b 100644
--- a/llvm/include/llvm/Transforms/Scalar.h
+++ b/llvm/include/llvm/Transforms/Scalar.h
@@ -157,6 +157,10 @@ LLVM_ABI Pass *createMergeICmpsLegacyPass();
// on the target. If AddressSpace is left to its default value, it will be
// obtained from the TargetTransformInfo.
//
+LLVM_ABI FunctionPass *
+createInferAddressSpacesPreparePass(unsigned AddressSpace = ~0u);
+LLVM_ABI extern char &InferAddressSpacesPrepareID;
+
LLVM_ABI FunctionPass *
createInferAddressSpacesPass(unsigned AddressSpace = ~0u);
LLVM_ABI extern char &InferAddressSpacesID;
diff --git a/llvm/include/llvm/Transforms/Scalar/InferAddressSpacesPrepare.h b/llvm/include/llvm/Transforms/Scalar/InferAddressSpacesPrepare.h
new file mode 100644
index 0000000000000..961b0cfbb343a
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Scalar/InferAddressSpacesPrepare.h
@@ -0,0 +1,27 @@
+//===- InferAddressSpacesPrepare.h ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_INFER_ADDRESSSPACES_PREPARE_H
+#define LLVM_TRANSFORMS_SCALAR_INFER_ADDRESSSPACES_PREPARE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct InferAddressSpacesPreparePass
+ : PassInfoMixin<InferAddressSpacesPreparePass> {
+ InferAddressSpacesPreparePass();
+ InferAddressSpacesPreparePass(unsigned AddressSpace);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+ unsigned FlatAddrSpace = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_INFER_ADDRESSSPACES_PREPARE_H
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index b2b77da4914d6..b19604ab51cf2 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -339,6 +339,11 @@ KnownBits TargetTransformInfo::computeKnownBitsAddrSpaceCast(
return TTIImpl->computeKnownBitsAddrSpaceCast(FromAS, ToAS, FromPtrBits);
}
+bool TargetTransformInfo::getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs) const {
+ return TTIImpl->getMutableLSBSizeInAddrSpaces(AsLSBSizePairs);
+}
+
bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
unsigned AS) const {
return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 8bb78c8c7df63..e473ac415a3cc 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -286,6 +286,7 @@
#include "llvm/Transforms/Scalar/IndVarSimplify.h"
#include "llvm/Transforms/Scalar/InductiveRangeCheckElimination.h"
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
+#include "llvm/Transforms/Scalar/InferAddressSpacesPrepare.h"
#include "llvm/Transforms/Scalar/InferAlignment.h"
#include "llvm/Transforms/Scalar/InstSimplifyPass.h"
#include "llvm/Transforms/Scalar/JumpTableToSwitch.h"
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index 2cfb5b2592601..e26b9c07e042a 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -448,6 +448,7 @@ FUNCTION_PASS("gvn-hoist", GVNHoistPass())
FUNCTION_PASS("gvn-sink", GVNSinkPass())
FUNCTION_PASS("helloworld", HelloWorldPass())
FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass(*TM))
+FUNCTION_PASS("infer-address-spaces-prepare", InferAddressSpacesPreparePass())
FUNCTION_PASS("infer-address-spaces", InferAddressSpacesPass())
FUNCTION_PASS("infer-alignment", InferAlignmentPass())
FUNCTION_PASS("inject-tli-mappings", InjectTLIMappings())
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 74bae28044e66..bff7c7fa0cfea 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -287,6 +287,16 @@ NVPTXTargetMachine::getPredicatedAddrSpace(const Value *V) const {
return std::make_pair(nullptr, -1);
}
+bool NVPTXTargetMachine::getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs) const {
+  // An address change within a 4K range does not change the original address
+  // space, so it is safe to perform the address cast from SrcAS to DstAS.
+ AsLSBSizePairs.clear();
+ AsLSBSizePairs.push_back({llvm::ADDRESS_SPACE_GLOBAL, 12});
+ AsLSBSizePairs.push_back({llvm::ADDRESS_SPACE_SHARED, 12});
+ return true;
+}
+
void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
if (getOptLevel() == CodeGenOptLevel::Aggressive)
addPass(createGVNPass());
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
index 118a01a0352f5..1c4087b9ffe82 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -76,6 +76,9 @@ class NVPTXTargetMachine : public CodeGenTargetMachineImpl {
std::pair<const Value *, unsigned>
getPredicatedAddrSpace(const Value *V) const override;
+ bool getMutableLSBSizeInAddrSpaces(
+ SmallVectorImpl<std::pair<unsigned, unsigned>> &AsLSBSizePairs)
+ const override;
}; // NVPTXTargetMachine.
class NVPTXTargetMachine32 : public NVPTXTargetMachine {
diff --git a/llvm/lib/Transforms/Scalar/CMakeLists.txt b/llvm/lib/Transforms/Scalar/CMakeLists.txt
index 37dbb34605646..5c7464ae926ea 100644
--- a/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -22,6 +22,7 @@ add_llvm_component_library(LLVMScalarOpts
IVUsersPrinter.cpp
InductiveRangeCheckElimination.cpp
IndVarSimplify.cpp
+ InferAddressSpacesPrepare.cpp
InferAddressSpaces.cpp
InferAlignment.cpp
InstSimplifyPass.cpp
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 03efc156df1e8..b745cc19e2c84 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -136,7 +136,7 @@
using namespace llvm;
-static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
+cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
"assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
cl::desc("The default address space is assumed as the flat address space. "
"This is mainly for test purpose."));
@@ -310,6 +310,76 @@ static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
(P2IOp0AS == I2PAS || TTI->isNoopAddrSpaceCast(P2IOp0AS, I2PAS));
}
+// If AS is UninitializedAddressSpace, any address space meets the requirement;
+// returns the matching source pointer, or nullptr if none is found.
+static Value *
+getSourcePtrFromIntToPtr(const Value *I2P,
+ unsigned AS = UninitializedAddressSpace) {
+ auto *I2PInst = dyn_cast<const Instruction>(I2P);
+ if (!I2PInst)
+ return nullptr;
+
+ for (auto &U : I2P->uses()) {
+ User *I = U.getUser();
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
+ if (!II || II->getIntrinsicID() != Intrinsic::ptr_bit_diff)
+ continue;
+ if (I2P != II->getArgOperand(0))
+ continue;
+ auto *Diff = dyn_cast<ConstantInt>(II->getArgOperand(3));
+ if (Diff->getZExtValue() != 0)
+ continue;
+ if (AS != UninitializedAddressSpace) {
+ unsigned DstAS = cast<ConstantInt>(II->getArgOperand(2))->getZExtValue();
+ if (AS != DstAS)
+ continue;
+ }
+    // We found ptr_bit_diff(I2P, SrcPtr, AS, 0); return the source pointer.
+ return II->getArgOperand(1);
+ }
+
+ return nullptr;
+}
+
+static bool isSafeToCastAddrSpace(const Value *Val, unsigned AS,
+ const DataLayout &DL,
+ const TargetTransformInfo *TTI) {
+  // The llvm.ptr.bit.diff(I2P, SrcPtr, AS, diff) intrinsic may have a
+  // different diff for each address space. When collecting flat address
+  // spaces, a value is selected as a candidate if the diff is zero for any
+  // address space. When the address space is updated, we know the concrete
+  // address space and must check that the diff is still zero for it.
+ if (cast<Operator>(Val)->getOpcode() != Instruction::IntToPtr)
+ return true;
+ if (isNoopPtrIntCastPair(cast<Operator>(Val), DL, TTI))
+ return true;
+ if (!getSourcePtrFromIntToPtr(Val, AS))
+ return false;
+
+ return true;
+}
+
+static bool erasePtrBitDiffIntrinsics(Function &F) {
+ SmallVector<IntrinsicInst *, 6> DeadIntrinsics;
+ for (Instruction &I : instructions(F)) {
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
+ if (!II || II->getIntrinsicID() != Intrinsic::ptr_bit_diff)
+ continue;
+ DeadIntrinsics.push_back(II);
+ }
+ bool Changed = !DeadIntrinsics.empty();
+ for (auto *II : DeadIntrinsics) {
+ SmallVector<Value *, 4> DeadOpds = {
+ II->getArgOperand(0), II->getArgOperand(1), II->getArgOperand(2),
+ II->getArgOperand(3)};
+ II->eraseFromParent();
+ for (auto *Opd : DeadOpds)
+ RecursivelyDeleteTriviallyDeadInstructions(Opd);
+ }
+
+ return Changed;
+}
+
// Returns true if V is an address expression.
// TODO: Currently, we only consider:
// - arguments
@@ -339,8 +409,11 @@ static bool isAddressExpression(const Value &V, const DataLayout &DL,
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
return II && II->getIntrinsicID() == Intrinsic::ptrmask;
}
- case Instruction::IntToPtr:
- return isNoopPtrIntCastPair(Op, DL, TTI);
+ case Instruction::IntToPtr: {
+ if (isNoopPtrIntCastPair(Op, DL, TTI) || getSourcePtrFromIntToPtr(Op))
+ return true;
+ return false;
+ }
default:
// That value is an address expression if it has an assumed address space.
return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
@@ -375,9 +448,12 @@ getPointerOperands(const Value &V, const DataLayout &DL,
return {II.getArgOperand(0)};
}
case Instruction::IntToPtr: {
- assert(isNoopPtrIntCastPair(&Op, DL, TTI));
- auto *P2I = cast<Operator>(Op.getOperand(0));
- return {P2I->getOperand(0)};
+ if (isNoopPtrIntCastPair(&Op, DL, TTI)) {
+ auto *P2I = cast<Operator>(Op.getOperand(0));
+ return {P2I->getOperand(0)};
+ }
+ auto *Ptr = getSourcePtrFromIntToPtr(&Op);
+ return {Ptr};
}
default:
llvm_unreachable("Unexpected instruction type.");
@@ -592,6 +668,8 @@ InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
} else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
PushPtrOperand(cast<Operator>(I2P->getOperand(0))->getOperand(0));
+ if (auto *P2I = getSourcePtrFromIntToPtr(I2P))
+ PushPtrOperand(P2I);
} else if (auto *RI = dyn_cast<ReturnInst>(&I)) {
if (auto *RV = RI->getReturnValue();
RV && RV->getType()->isPtrOrPtrVectorTy())
@@ -838,15 +916,20 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
NewPointerOperands[2], "", nullptr, I);
case Instruction::IntToPtr: {
- assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
- Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
- if (Src->getType() == NewPtrType)
- return Src;
-
- // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred a
- // source address space from a generic pointer source need to insert a cast
- // back.
- return new AddrSpaceCastInst(Src, NewPtrType);
+ if (isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI)) {
+ Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
+ if (Src->getType() == NewPtrType)
+ return Src;
+
+ // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred a
+ // source address space from a generic pointer source need to insert a
+ // cast back.
+ return new AddrSpaceCastInst(Src, NewPtrType);
+ }
+ assert(getSourcePtrFromIntToPtr(I));
+ auto *Src = I->getOperand(0);
+ IntToPtrInst *NewI2P = new IntToPtrInst(Src, NewPtrType);
+ return NewI2P;
}
default:
llvm_unreachable("Unexpected opcode");
@@ -1011,8 +1094,13 @@ bool InferAddressSpacesImpl::run(Function &CurFn) {
// Changes the address spaces of the flat address expressions who are inferred
// to point to a specific address space.
- return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace,
- PredicatedAS);
+ bool Changed =
+ rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);
+
+  // The llvm.ptr.bit.diff intrinsics are useless now. Erase them from the
+  // function.
+ Changed |= erasePtrBitDiffIntrinsics(CurFn);
+ return Changed;
}
// Constants need to be tracked through RAUW to handle cases with nested
@@ -1138,6 +1226,8 @@ bool InferAddressSpacesImpl::updateAddressSpace(
return !isSafeToCastConstAddrSpace(C, NewAS);
}))
NewAS = FlatAddrSpace;
+ if (!isSafeToCastAddrSpace(&V, NewAS, *DL, TTI))
+ NewAS = FlatAddrSpace;
}
}
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpacesPrepare.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpacesPrepare.cpp
new file mode 100644
index 0000000000000..439542216dc00
--- /dev/null
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpacesPrepare.cpp
@@ -0,0 +1,199 @@
+//===- InferAddressSpacesPrepare.cpp --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Scalar/InferAddressSpacesPrepare.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Transforms/Scalar.h"
+#include <limits>
+
+#define DEBUG_TYPE "infer-address-spaces-prepare"
+
+using namespace llvm;
+
+extern cl::opt<bool> AssumeDefaultIsFlatAddressSpace;
+static const unsigned UninitializedAddressSpace =
+ std::numeric_limits<unsigned>::max();
+
+namespace {
+
+class InferAddressSpacesPrepare : public FunctionPass {
+ unsigned FlatAddrSpace = 0;
+
+public:
+ static char ID;
+
+ InferAddressSpacesPrepare()
+ : FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {
+ initializeInferAddressSpacesPass(*PassRegistry::getPassRegistry());
+ }
+ InferAddressSpacesPrepare(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {
+ initializeInferAddressSpacesPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
+
+ bool runOnFunction(Function &F) override;
+};
+
+class InferAddressSpacesPrepareImpl {
+ Function *F = nullptr;
+ const TargetTransformInfo *TTI = nullptr;
+
+ /// Target specific address space which uses of should be replaced if
+ /// possible.
+ unsigned FlatAddrSpace = 0;
+
+public:
+ InferAddressSpacesPrepareImpl(const TargetTransformInfo *TTI,
+ unsigned FlatAddrSpace)
+ : TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
+ bool run(Function &F);
+};
+
+// Find the ptrtoint instruction that the value of inttoptr is derived from.
+static Value *getPtrToIntRecursively(Value *Val, int Depth) {
+ Instruction *Inst = dyn_cast<Instruction>(Val);
+ if (!Inst)
+ return nullptr;
+ if (auto *P2I = dyn_cast<PtrToIntInst>(Val))
+ return P2I;
+
+ Depth--;
+ if (Depth <= 0)
+ return nullptr;
+
+ // Recursively look up each operand to find the ptrtoint instruction.
+ for (unsigned J = 0, E = Inst->getNumOperands(); J != E; ++J) {
+ if (auto *P2I = getPtrToIntRecursively(Inst->getOperand(J), Depth))
+ return P2I;
+ }
+ return nullptr;
+}
+
+bool InferAddressSpacesPrepareImpl::run(Function &CurFn) {
+ bool Changed = false;
+ F = &CurFn;
+
+ if (AssumeDefaultIsFlatAddressSpace)
+ FlatAddrSpace = 0;
+
+ if (FlatAddrSpace == UninitializedAddressSpace) {
+ FlatAddrSpace = TTI->getFlatAddressSpace();
+ if (FlatAddrSpace == UninitializedAddressSpace)
+ return false;
+ }
+
+ SmallVector<std::pair<unsigned, unsigned>, 2> AsLSBSizePairs;
+ if (!TTI->getMutableLSBSizeInAddrSpaces(AsLSBSizePairs))
+ return false;
+
+ SmallVector<std::pair<Instruction *, Instruction *>, 8> IntPtrPairs;
+ for (Instruction &I : instructions(F)) {
+ auto *I2P = dyn_cast<IntToPtrInst>(&I);
+ if (!I2P)
+ continue;
+ if (I2P->getAddressSpace() != FlatAddrSpace)
+ continue;
+ auto *P2I = getPtrToIntRecursively(I2P, 6);
+ if (!P2I)
+ continue;
+ IntPtrPairs.push_back({I2P, cast<PtrToIntInst>(P2I)});
+ }
+
+ if (!IntPtrPairs.empty())
+ Changed = true;
+  // Create the ptr.bit.diff intrinsic to infer whether there are any bit
+  // changes in the high address bits:
+  // call void @llvm.ptr.bit.diff.i64(ptr %derived, ptr %src, i32 AS, i64 diff)
+ // The InstCombine pass may deduce the diff to be 0 in compiling time, so that
+ // InferAddressSpaces pass knows the address space doesn't change after
+ // ptrtoint and integer computations.
+ for (auto Iter : IntPtrPairs) {
+ IntToPtrInst *I2P = cast<IntToPtrInst>(Iter.first);
+ PtrToIntInst *P2I = cast<PtrToIntInst>(Iter.second);
+ IRBuilder<> B(I2P->getParent(), ++cast<Instruction>(I2P)->getIterator());
+
+ for (auto [AS, LSBSize] : AsLSBSizePairs) {
+ // Duplicate the Xor instruction to facilitate InstCombine.
+ auto *Xor = B.CreateXor(I2P->getOperand(0), P2I);
+ auto DL = F->getDataLayout();
+ APInt Mask =
+ ~((APInt(DL.getAddressSizeInBits(FlatAddrSpace), 1) << LSBSize) - 1);
+ auto *And = B.CreateAnd(
+ Xor, ConstantInt::get(I2P->getOperand(0)->getType(), Mask));
+ B.CreateIntrinsic(
+ Type::getVoidTy(F->getContext()), Intrinsic::ptr_bit_diff,
+ {I2P, P2I->getPointerOperand(),
+ ConstantInt::get(Type::getInt32Ty(F->getContext()), AS), And});
+ }
+ }
+
+ return Changed;
+}
+
+} // end anonymous namespace
+
+char InferAddressSpacesPrepare::ID = 0;
+
+INITIALIZE_PASS_BEGIN(InferAddressSpacesPrepare, DEBUG_TYPE,
+ "Infer address spaces prepare", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(InferAddressSpacesPrepare, DEBUG_TYPE,
+ "Infer address spaces prepare", false, false)
+
+PreservedAnalyses
+InferAddressSpacesPreparePass::run(Function &F, FunctionAnalysisManager &AM) {
+ bool Changed = InferAddressSpacesPrepareImpl(
+ &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
+ .run(F);
+ if (Changed) {
+ PreservedAnalyses PA;
+ return PA;
+ }
+ return PreservedAnalyses::all();
+}
+
+// Prepare for address spaces inferring
+bool InferAddressSpacesPrepare::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
+ return InferAddressSpacesPrepareImpl(
+ &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
+ FlatAddrSpace)
+ .run(F);
+}
+
+FunctionPass *llvm::createInferAddressSpacesPreparePass(unsigned AddressSpace) {
+ return new InferAddressSpacesPrepare(AddressSpace);
+}
+
+InferAddressSpacesPreparePass::InferAddressSpacesPreparePass()
+ : FlatAddrSpace(UninitializedAddressSpace) {}
+InferAddressSpacesPreparePass::InferAddressSpacesPreparePass(
+ unsigned AddressSpace)
+ : FlatAddrSpace(AddressSpace) {}
diff --git a/llvm/lib/Transforms/Scalar/Scalar.cpp b/llvm/lib/Transforms/Scalar/Scalar.cpp
index 032a3a7792824..51105b538a446 100644
--- a/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -26,6 +26,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeEarlyCSELegacyPassPass(Registry);
initializeEarlyCSEMemSSALegacyPassPass(Registry);
initializeFlattenCFGLegacyPassPass(Registry);
+ initializeInferAddressSpacesPreparePass(Registry);
initializeInferAddressSpacesPass(Registry);
initializeInstSimplifyLegacyPassPass(Registry);
initializeLegacyLICMPassPass(Registry);
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll
new file mode 100644
index 0000000000000..605a92aba8da0
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -passes=infer-address-spaces-prepare,instcombine,infer-address-spaces %s | FileCheck %s
+
+define void @test_smem_fail(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test_smem_fail(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT: [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT: [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT: [[B:%.*]] = xor i64 [[A]], 8191
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[B]] to ptr
+; CHECK-NEXT: store i16 0, ptr [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+ %gp = addrspacecast ptr addrspace(3) %sp to ptr
+ %a = ptrtoint ptr %gp to i64
+ %b = xor i64 8191, %a
+ %gp2 = inttoptr i64 %b to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test_smem(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test_smem(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT: [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT: [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT: [[B:%.*]] = xor i64 [[A]], 4095
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[B]] to ptr addrspace(3)
+; CHECK-NEXT: store i16 0, ptr addrspace(3) [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+ %gp = addrspacecast ptr addrspace(3) %sp to ptr
+ %a = ptrtoint ptr %gp to i64
+ %b = xor i64 4095, %a
+ %gp2 = inttoptr i64 %b to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test_gmem(ptr addrspace(1) %sp) {
+; CHECK-LABEL: define void @test_gmem(
+; CHECK-SAME: ptr addrspace(1) [[SP:%.*]]) {
+; CHECK-NEXT: [[GP:%.*]] = addrspacecast ptr addrspace(1) [[SP]] to ptr
+; CHECK-NEXT: [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT: [[B:%.*]] = xor i64 [[A]], 7
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[B]] to ptr addrspace(1)
+; CHECK-NEXT: store i16 0, ptr addrspace(1) [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+ %gp = addrspacecast ptr addrspace(1) %sp to ptr
+ %a = ptrtoint ptr %gp to i64
+ %b = xor i64 7, %a
+ %gp2 = inttoptr i64 %b to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test_lmem(ptr addrspace(5) %sp) {
+; CHECK-LABEL: define void @test_lmem(
+; CHECK-SAME: ptr addrspace(5) [[SP:%.*]]) {
+; CHECK-NEXT: [[GP:%.*]] = addrspacecast ptr addrspace(5) [[SP]] to ptr
+; CHECK-NEXT: [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT: [[B:%.*]] = xor i64 [[A]], 7
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[B]] to ptr
+; CHECK-NEXT: store i16 0, ptr [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+ %gp = addrspacecast ptr addrspace(5) %sp to ptr
+ %a = ptrtoint ptr %gp to i64
+ %b = xor i64 7, %a
+ %gp2 = inttoptr i64 %b to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test3(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test3(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT: [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT: [[T1:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT: [[AND:%.*]] = lshr i64 [[T1]], 8
+; CHECK-NEXT: [[SHR:%.*]] = and i64 [[AND]], 8
+; CHECK-NEXT: [[AND1:%.*]] = lshr i64 [[T1]], 10
+; CHECK-NEXT: [[SHR2:%.*]] = and i64 [[AND1]], 4
+; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[SHR]], [[SHR2]]
+; CHECK-NEXT: [[AND3:%.*]] = lshr i64 [[T1]], 4
+; CHECK-NEXT: [[SHR4:%.*]] = and i64 [[AND3]], 112
+; CHECK-NEXT: [[OR5:%.*]] = or disjoint i64 [[OR]], [[SHR4]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[OR5]], [[T1]]
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[XOR]] to ptr addrspace(3)
+; CHECK-NEXT: store i16 0, ptr addrspace(3) [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+ %gp = addrspacecast ptr addrspace(3) %sp to ptr
+ %t1 = ptrtoint ptr %gp to i64
+ %and = lshr i64 %t1, 8
+ %shr = and i64 %and, 8
+ %and1 = lshr i64 %t1, 10
+ %shr2 = and i64 %and1, 4
+ %or = or i64 %shr, %shr2
+ %and3 = lshr i64 %t1, 4
+ %shr4 = and i64 %and3, 112
+ %or5 = or i64 %or, %shr4
+ %xor = xor i64 %or5, %t1
+ %gp2 = inttoptr i64 %xor to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test4(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test4(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[XOR:%.*]] = and i64 [[TMP0]], -32
+; CHECK-NEXT: [[OR:%.*]] = xor i64 [[XOR]], 144
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[OR]] to ptr addrspace(3)
+; CHECK-NEXT: store i16 0, ptr addrspace(3) [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = addrspacecast ptr addrspace(3) %sp to ptr
+ %0 = ptrtoint ptr %a to i64
+ %xor = xor i64 %0, 128
+ %and = and i64 %xor, -16
+ %or = or i64 %and, 16
+ %gp2 = inttoptr i64 %or to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+define void @test5(ptr %a) {
+; CHECK-LABEL: define void @test5(
+; CHECK-SAME: ptr [[A:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[XOR:%.*]] = and i64 [[TMP0]], -32
+; CHECK-NEXT: [[OR:%.*]] = xor i64 [[XOR]], 144
+; CHECK-NEXT: [[GP2:%.*]] = inttoptr i64 [[OR]] to ptr
+; CHECK-NEXT: store i16 0, ptr [[GP2]], align 2
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = ptrtoint ptr %a to i64
+ %xor = xor i64 %0, 128
+ %and = and i64 %xor, -16
+ %or = or i64 %and, 16
+ %gp2 = inttoptr i64 %or to ptr
+ store i16 0, ptr %gp2, align 2
+ ret void
+}
+
+ at g = addrspace(1) global i32 0, align 4
+
+define void @test_ce() {
+; CHECK-LABEL: define void @test_ce() {
+; CHECK-NEXT: store i32 0, ptr inttoptr (i64 xor (i64 ptrtoint (ptr addrspacecast (ptr addrspace(1) @g to ptr) to i64), i64 7) to ptr), align 4
+; CHECK-NEXT: ret void
+;
+ store i32 0, ptr inttoptr (i64
+ xor (i64
+ ptrtoint (ptr
+ addrspacecast (ptr addrspace(1) @g to ptr)
+ to i64),
+ i64 7)
+ to ptr)
+ ret void
+}
diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
index 57403e8f5ba4c..b7c53576cf476 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
@@ -34,6 +34,7 @@ static_library("Scalar") {
"IndVarSimplify.cpp",
"InductiveRangeCheckElimination.cpp",
"InferAddressSpaces.cpp",
+ "InferAddressSpacesPrepare.cpp",
"InferAlignment.cpp",
"InstSimplifyPass.cpp",
"JumpTableToSwitch.cpp",
More information about the llvm-commits
mailing list