[llvm-branch-commits] [llvm] [mlir] WIP (PR #183215)
Shilei Tian via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Feb 24 15:55:47 PST 2026
https://github.com/shiltian created https://github.com/llvm/llvm-project/pull/183215
None
>From eae5062ee09914f6d6259f26cd4cd2571bed04bc Mon Sep 17 00:00:00 2001
From: Shilei Tian <i at tianshilei.me>
Date: Mon, 16 Feb 2026 07:53:27 -0500
Subject: [PATCH] WIP
---
llvm/lib/Analysis/ConstantFolding.cpp | 7 ++
llvm/lib/AsmParser/LLParser.cpp | 5 +-
llvm/lib/IR/AsmWriter.cpp | 9 +++
llvm/lib/IR/ConstantFold.cpp | 51 ++++++++++----
llvm/lib/IR/Constants.cpp | 68 ++++++++++++++++++-
llvm/lib/TargetParser/TargetDataLayout.cpp | 5 +-
.../InstCombine/InstCombineCompares.cpp | 6 +-
.../Vectorize/LoadStoreVectorizer.cpp | 7 +-
.../ValueTracking/knownzero-addrspacecast.ll | 4 +-
...-addrspacecast-with-constantpointernull.ll | 8 +--
.../InferAddressSpaces/AMDGPU/basic.ll | 2 +-
.../InferAddressSpaces/AMDGPU/icmp.ll | 8 +--
.../InferAddressSpaces/AMDGPU/issue110433.ll | 2 +-
.../InferAddressSpaces/AMDGPU/phi-poison.ll | 4 +-
.../InferAddressSpaces/AMDGPU/ptrmask.ll | 16 ++---
.../InferAddressSpaces/AMDGPU/select.ll | 8 +--
.../Transforms/InstCombine/addrspacecast.ll | 4 +-
.../InstCombine/gep-inbounds-null.ll | 14 ++--
.../GPUToROCDL/LowerGpuOpsToROCDLOps.cpp | 2 +-
19 files changed, 168 insertions(+), 62 deletions(-)
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index efc575d424ea0..abcadbd8c593c 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1592,6 +1592,13 @@ Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
: DestTy->getPointerAddressSpace();
if (DL.isNullPointerAllZeroes(AS))
return Constant::getNullValue(DestTy, &DL);
+ // Non-zero null: ptrtoint(ConstantPointerNull) folds to the actual
+ // null integer value for this address space.
+ if (isa<ConstantPointerNull>(C)) {
+ const APInt &NullVal = DL.getNullPtrValue(AS);
+ return ConstantInt::get(
+ DestTy, NullVal.zextOrTrunc(DestTy->getIntegerBitWidth()));
+ }
}
}
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index fa9019d5851b7..9b6d3a81f68d5 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6769,7 +6769,10 @@ bool LLParser::convertValIDToValue(Type *Ty, ValID &ID, Value *&V,
if (auto *TETy = dyn_cast<TargetExtType>(Ty))
if (!TETy->hasProperty(TargetExtType::HasZeroInit))
return error(ID.Loc, "invalid type for null constant");
- V = Constant::getNullValue(Ty, &M->getDataLayout());
+ // zeroinitializer represents the all-zero-bits value. For pointer types
+ // this is distinct from null (the semantic null that may be non-zero).
+ // For non-pointer types, getZeroValue delegates to getNullValue.
+ V = Constant::getZeroValue(Ty);
return false;
case ValID::t_None:
if (!Ty->isTokenTy())
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 35a19091aa097..0770276a5ed4f 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1813,6 +1813,15 @@ static void writeConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const auto *CE = dyn_cast<ConstantExpr>(CV)) {
+ // Print inttoptr(zero to ptr) as zeroinitializer for scalar pointers.
+ // This is the canonical representation for an all-zero-bits pointer,
+ // distinct from ConstantPointerNull (null) which is the semantic null.
+ if (CE->getOpcode() == Instruction::IntToPtr &&
+ CE->getOperand(0)->isNullValue()) {
+ Out << "zeroinitializer";
+ return;
+ }
+
// Use the same shorthand for splat vector (i.e. "splat(Ty val)") as is
// permitted on IR input to reduce the output changes when enabling
// UseConstant{Int,FP}ForScalableSplat.
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index c3803141e5561..fe55e50a4cb39 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -146,19 +146,37 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
return UndefValue::get(DestTy);
}
- if (V->isNullValue() && !DestTy->isX86_AMXTy() &&
- opc != Instruction::AddrSpaceCast) {
- // If the source or destination involves pointers and DL tells us that
- // null is not zero for the relevant address space, we cannot fold here.
- // Defer to the DL-aware folding in Analysis/ConstantFolding.cpp.
- if (DL) {
- bool SrcIsPtr = V->getType()->isPtrOrPtrVectorTy();
- bool DstIsPtr = DestTy->isPtrOrPtrVectorTy();
- if (SrcIsPtr || DstIsPtr) {
- unsigned AS = SrcIsPtr ? V->getType()->getPointerAddressSpace()
- : DestTy->getPointerAddressSpace();
- if (!DL->isNullPointerAllZeroes(AS))
+ if (V->isNullValue() && !DestTy->isX86_AMXTy()) {
+ bool SrcIsPtr = V->getType()->isPtrOrPtrVectorTy();
+ bool DstIsPtr = DestTy->isPtrOrPtrVectorTy();
+ if (SrcIsPtr || DstIsPtr) {
+ // addrspacecast of semantic null -> semantic null in target AS.
+ // This is always valid regardless of bit patterns.
+ if (opc == Instruction::AddrSpaceCast)
+ return Constant::getNullValue(DestTy);
+ // For other pointer casts (inttoptr, ptrtoint), the bit representation
+ // of null matters. Without DataLayout we cannot determine whether null
+ // is zero for this address space. Defer to the DL-aware fold in
+ // ConstantFoldCastOperand.
+ if (!DL)
+ return nullptr;
+ unsigned AS = SrcIsPtr ? V->getType()->getPointerAddressSpace()
+ : DestTy->getPointerAddressSpace();
+ if (!DL->isNullPointerAllZeroes(AS)) {
+ // ptrtoint(ConstantPointerNull): fold to the null pointer's actual
+ // integer bit pattern (non-zero for this AS).
+ if (isa<ConstantPointerNull>(V)) {
+ const APInt &NullVal = DL->getNullPtrValue(AS);
+ return ConstantInt::get(
+ DestTy, NullVal.zextOrTrunc(DestTy->getIntegerBitWidth()));
+ }
+ // inttoptr of a zero-bits value (e.g., ConstantInt(0) or CAZ) to a
+ // non-zero-null AS: the all-zero bit pattern is not the null pointer,
+ // so we cannot return ConstantPointerNull. Defer.
+ if (DstIsPtr)
return nullptr;
+ // ptrtoint of a zero-bits value (e.g., CAZ of a pointer vector):
+ // all-zero bits map to integer zero. Fall through to getNullValue.
}
}
return Constant::getNullValue(DestTy, DL);
@@ -482,8 +500,13 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, 0));
// For scalable vectors, make sure this doesn't fold back into a
- // shufflevector.
- if (!MaskEltCount.isScalable() || Elt->isNullValue() || isa<UndefValue>(Elt))
+ // shufflevector. ConstantPointerNull is excluded because getSplat may
+ // not be able to represent it as CAZ (null may have non-zero bits),
+ // which would cause infinite recursion: getSplat -> getShuffleVector
+ // -> ConstantFoldShuffleVectorInstruction -> getSplat.
+ if (!MaskEltCount.isScalable() ||
+ (Elt->isNullValue() && !isa<ConstantPointerNull>(Elt)) ||
+ isa<UndefValue>(Elt))
return ConstantVector::getSplat(MaskEltCount, Elt);
}
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index 0ded9be53a003..22d6762c999a4 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -392,7 +392,65 @@ bool Constant::containsConstantExpression() const {
return false;
}
+/// Check whether this type (recursively) contains any pointer sub-types.
+static bool containsPointerType(Type *Ty) {
+ if (Ty->isPointerTy())
+ return true;
+ if (auto *STy = dyn_cast<StructType>(Ty))
+ return llvm::any_of(STy->elements(), containsPointerType);
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
+ return containsPointerType(ATy->getElementType());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return containsPointerType(VTy->getElementType());
+ return false;
+}
+
+/// Check whether all pointer sub-types in this type have an all-zero-bits null
+/// representation, meaning getZeroValue and getNullValue are equivalent.
+static bool hasAllZeroNullPointers(Type *Ty, const DataLayout &DL) {
+ if (Ty->isPointerTy())
+ return DL.isNullPointerAllZeroes(Ty->getPointerAddressSpace());
+ if (auto *STy = dyn_cast<StructType>(Ty))
+ return llvm::all_of(STy->elements(),
+ [&](Type *E) { return hasAllZeroNullPointers(E, DL); });
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
+ return hasAllZeroNullPointers(ATy->getElementType(), DL);
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return hasAllZeroNullPointers(VTy->getElementType(), DL);
+ return true;
+}
+
Constant *Constant::getNullValue(Type *Ty, const DataLayout *DL) {
+ // For pointer types, always return the semantic null pointer.
+ if (Ty->isPointerTy())
+ return ConstantPointerNull::get(cast<PointerType>(Ty));
+
+ // For aggregates/vectors containing pointers, we must ensure pointer elements
+ // get the semantic null (ConstantPointerNull). When DL is available and all
+ // pointer elements have all-zero-bits null, getZeroValue (CAZ) is equivalent
+ // and more efficient. Otherwise, construct element-by-element.
+ if (containsPointerType(Ty)) {
+ if (DL && hasAllZeroNullPointers(Ty, *DL))
+ return getZeroValue(Ty);
+
+ if (auto *STy = dyn_cast<StructType>(Ty)) {
+ SmallVector<Constant *, 8> Elts;
+ for (Type *ElemTy : STy->elements())
+ Elts.push_back(getNullValue(ElemTy, DL));
+ return ConstantStruct::get(STy, Elts);
+ }
+ if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+ SmallVector<Constant *> Elts(ATy->getNumElements(),
+ getNullValue(ATy->getElementType(), DL));
+ return ConstantArray::get(ATy, Elts);
+ }
+ if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ return ConstantVector::getSplat(
+ VTy->getElementCount(), getNullValue(VTy->getElementType(), DL), DL);
+ }
+ }
+
+ // For types without pointers, null == zero.
return getZeroValue(Ty);
}
@@ -409,8 +467,10 @@ Constant *Constant::getZeroValue(Type *Ty) {
case Type::PPC_FP128TyID:
return ConstantFP::get(Ty->getContext(),
APFloat::getZero(Ty->getFltSemantics()));
- case Type::PointerTyID:
- return ConstantPointerNull::get(cast<PointerType>(Ty));
+ case Type::PointerTyID: {
+ auto *Zero = ConstantInt::get(Type::getInt8Ty(Ty->getContext()), 0);
+ return ConstantExpr::getIntToPtr(Zero, Ty);
+ }
case Type::StructTyID:
case Type::ArrayTyID:
case Type::FixedVectorTyID:
@@ -1540,6 +1600,10 @@ Constant *ConstantVector::getSplat(ElementCount EC, Constant *V,
Type *VTy = VectorType::get(V->getType(), EC);
+ // Only map to CAZ when the value is known to be all-zero-bits. Do NOT use
+ // isNullValue() here: ConstantPointerNull may have non-zero bit pattern on
+ // some targets, and mapping it to CAZ would lose the semantic null meaning
+ // (CAZ elements extract via getZeroValue, not getNullValue).
if (V->isZeroValue(DL))
return ConstantAggregateZero::get(VTy);
if (isa<PoisonValue>(V))
diff --git a/llvm/lib/TargetParser/TargetDataLayout.cpp b/llvm/lib/TargetParser/TargetDataLayout.cpp
index b985c1eec4244..e27077fe2670c 100644
--- a/llvm/lib/TargetParser/TargetDataLayout.cpp
+++ b/llvm/lib/TargetParser/TargetDataLayout.cpp
@@ -273,7 +273,10 @@ static std::string computeAMDDataLayout(const Triple &TT) {
// (address space 7), and 128-bit non-integral buffer resources (address
// space 8) which cannot be non-trivially accessed by LLVM memory operations
// like getelementptr.
- return "e-m:e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
// Region (AS 2), local (AS 3), and private (AS 5) have an all-ones null
// pointer (0xFFFFFFFF), as indicated by the 'o' flag in the pointer spec.
+ return "e-m:e-p:64:64-p1:64:64-po2:32:32-po3:32:32-p4:64:64-po5:32:32"
+ "-p6:32:32"
"-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-"
"v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-"
"v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9";
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index eee27f13453e2..78ca1e01485ce 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6532,7 +6532,11 @@ Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
if (PtrSrc->getType() == Op0Src->getType())
NewOp1 = PtrToIntOp1->getOperand(0);
} else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
- NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
+ // Use DL-aware folding so inttoptr(0) folds to ConstantPointerNull
+ // immediately (when null is zero for the AS), avoiding fixpoint issues.
+ NewOp1 = ConstantFoldCastOperand(Instruction::IntToPtr, RHSC, SrcTy, DL);
+ if (!NewOp1)
+ NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
}
if (NewOp1)
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 9fa4af24efbc0..dc40d52f548c8 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -73,6 +73,7 @@
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
@@ -244,7 +245,7 @@ class Vectorizer {
ScalarEvolution &SE;
TargetTransformInfo &TTI;
const DataLayout &DL;
- IRBuilder<> Builder;
+ IRBuilder<TargetFolder> Builder;
/// We could erase instrs right after vectorizing them, but that can mess up
/// our BB iterators, and also can make the equivalence class keys point to
@@ -259,8 +260,8 @@ class Vectorizer {
public:
Vectorizer(Function &F, AliasAnalysis &AA, AssumptionCache &AC,
DominatorTree &DT, ScalarEvolution &SE, TargetTransformInfo &TTI)
- : F(F), AA(AA), AC(AC), DT(DT), SE(SE), TTI(TTI),
- DL(F.getDataLayout()), Builder(SE.getContext()) {}
+ : F(F), AA(AA), AC(AC), DT(DT), SE(SE), TTI(TTI), DL(F.getDataLayout()),
+ Builder(SE.getContext(), TargetFolder(DL)) {}
bool run();
diff --git a/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll b/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
index 874d68e799a7f..9bd16b9ff93f4 100644
--- a/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
+++ b/llvm/test/Analysis/ValueTracking/knownzero-addrspacecast.ll
@@ -15,8 +15,8 @@ define i64 @test_shift(ptr %p) {
}
; CHECK-LABEL: @test_null
-; A null pointer casted to another addr space may no longer have null value.
-; CHECK-NOT: ret i32 0
+; addrspacecast of semantic null is semantic null; ptrtoint(null) is 0 when DL says so.
+; CHECK: ret i32 0
define i32 @test_null() {
%g = addrspacecast ptr null to ptr addrspace(3)
%i = ptrtoint ptr addrspace(3) %g to i32
diff --git a/llvm/test/Transforms/Attributor/AMDGPU/do-not-replace-addrspacecast-with-constantpointernull.ll b/llvm/test/Transforms/Attributor/AMDGPU/do-not-replace-addrspacecast-with-constantpointernull.ll
index fb4153bac808e..7a8978d5b17b5 100644
--- a/llvm/test/Transforms/Attributor/AMDGPU/do-not-replace-addrspacecast-with-constantpointernull.ll
+++ b/llvm/test/Transforms/Attributor/AMDGPU/do-not-replace-addrspacecast-with-constantpointernull.ll
@@ -4,7 +4,7 @@
define i32 @addrspacecast_ptr(ptr %p0, ptr addrspace(5) %p5) {
; CHECK-LABEL: define i32 @addrspacecast_ptr(
; CHECK-SAME: ptr nofree readonly captures(none) [[P0:%.*]], ptr addrspace(5) nofree readonly [[P5:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ICMP:%.*]] = icmp eq ptr addrspace(5) [[P5]], addrspacecast (ptr null to ptr addrspace(5))
+; CHECK-NEXT: [[ICMP:%.*]] = icmp eq ptr addrspace(5) [[P5]], null
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[ICMP]], ptr [[P0]], ptr null
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[SELECT]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
@@ -19,7 +19,7 @@ define i32 @vec_addrspacecast_ptr(ptr %p0, ptr %p1, <2 x ptr addrspace(5)> %ptrv
; CHECK-LABEL: define i32 @vec_addrspacecast_ptr(
; CHECK-SAME: ptr nofree readonly captures(none) [[P0:%.*]], ptr nofree noundef nonnull readonly align 16 captures(none) dereferenceable(8) [[P1:%.*]], <2 x ptr addrspace(5)> [[PTRVEC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[LOADVEC:%.*]] = load <2 x ptr addrspace(5)>, ptr [[P1]], align 16
-; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], <ptr addrspace(5) addrspacecast (ptr null to ptr addrspace(5)), ptr addrspace(5) addrspacecast (ptr null to ptr addrspace(5))>
+; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], <ptr addrspace(5) null, ptr addrspace(5) null>
; CHECK-NEXT: [[ICMP:%.*]] = extractelement <2 x i1> [[ICMPVEC]], i32 1
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[ICMP]], ptr [[P0]], ptr null
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[SELECT]], align 4
@@ -37,7 +37,7 @@ define i32 @addrspacecast_vec_as1_ptr(ptr %p0, ptr %p1, <2 x ptr addrspace(5)> %
; CHECK-LABEL: define i32 @addrspacecast_vec_as1_ptr(
; CHECK-SAME: ptr nofree readonly captures(none) [[P0:%.*]], ptr nofree noundef nonnull readonly align 16 captures(none) dereferenceable(8) [[P1:%.*]], <2 x ptr addrspace(5)> [[PTRVEC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[LOADVEC:%.*]] = load <2 x ptr addrspace(5)>, ptr [[P1]], align 16
-; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], <ptr addrspace(5) addrspacecast (ptr addrspace(1) null to ptr addrspace(5)), ptr addrspace(5) addrspacecast (ptr addrspace(1) null to ptr addrspace(5))>
+; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], zeroinitializer
; CHECK-NEXT: [[ICMP:%.*]] = extractelement <2 x i1> [[ICMPVEC]], i32 1
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[ICMP]], ptr [[P0]], ptr null
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[SELECT]], align 4
@@ -55,7 +55,7 @@ define i32 @addrspacecast_vec_ptr(ptr %p0, ptr %p1, <2 x ptr addrspace(5)> %ptrv
; CHECK-LABEL: define i32 @addrspacecast_vec_ptr(
; CHECK-SAME: ptr nofree readonly captures(none) [[P0:%.*]], ptr nofree noundef nonnull readonly align 16 captures(none) dereferenceable(8) [[P1:%.*]], <2 x ptr addrspace(5)> [[PTRVEC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[LOADVEC:%.*]] = load <2 x ptr addrspace(5)>, ptr [[P1]], align 16
-; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], <ptr addrspace(5) addrspacecast (ptr null to ptr addrspace(5)), ptr addrspace(5) addrspacecast (ptr null to ptr addrspace(5))>
+; CHECK-NEXT: [[ICMPVEC:%.*]] = icmp eq <2 x ptr addrspace(5)> [[LOADVEC]], zeroinitializer
; CHECK-NEXT: [[ICMP:%.*]] = extractelement <2 x i1> [[ICMPVEC]], i32 1
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[ICMP]], ptr [[P0]], ptr null
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[SELECT]], align 4
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
index 1171c9c077b6a..48274bddc757f 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
@@ -225,7 +225,7 @@ define void @local_nullptr(ptr addrspace(1) nocapture %results, ptr addrspace(3)
; CHECK-LABEL: define void @local_nullptr(
; CHECK-SAME: ptr addrspace(1) captures(none) [[RESULTS:%.*]], ptr addrspace(3) [[A:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], addrspacecast (ptr addrspace(5) null to ptr addrspace(3))
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr addrspace(3) [[A]], null
; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
; CHECK-NEXT: store i32 [[CONV]], ptr addrspace(1) [[RESULTS]], align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
index 7e810e4909be0..7ce015d8f20f1 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
@@ -62,7 +62,7 @@ define i1 @icmp_flat_to_group_cmp(ptr %flat.ptr.0, ptr %flat.ptr.1) #0 {
; constant cast if this is OK to change if 0 is a valid pointer.
; CHECK-LABEL: @icmp_group_flat_cmp_null(
-; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, addrspacecast (ptr null to ptr addrspace(3))
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, null
define i1 @icmp_group_flat_cmp_null(ptr addrspace(3) %group.ptr.0) #0 {
%cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
%cmp = icmp eq ptr %cast0, null
@@ -78,8 +78,7 @@ define i1 @icmp_group_flat_cmp_constant_inttoptr(ptr addrspace(3) %group.ptr.0)
}
; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null(
-; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
-; CHECK: %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(5) null to ptr)
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, null
define i1 @icmp_mismatch_flat_group_private_cmp_null(ptr addrspace(3) %group.ptr.0) #0 {
%cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
%cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(5) zeroinitializer to ptr)
@@ -131,8 +130,7 @@ define i1 @icmp_group_flat_cmp_poison(ptr addrspace(3) %group.ptr.0) #0 {
; Test non-canonical orders
; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null_swap(
-; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
-; CHECK: %cmp = icmp eq ptr addrspacecast (ptr addrspace(5) null to ptr), %cast0
+; CHECK: %cmp = icmp eq ptr addrspace(3) null, %group.ptr.0
define i1 @icmp_mismatch_flat_group_private_cmp_null_swap(ptr addrspace(3) %group.ptr.0) #0 {
%cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
%cmp = icmp eq ptr addrspacecast (ptr addrspace(5) zeroinitializer to ptr), %cast0
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue110433.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue110433.ll
index 1928bb98cd2a7..edd5febbbb941 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue110433.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue110433.ll
@@ -6,7 +6,7 @@ define <8 x i1> @load_vector_of_flat_ptr_from_constant(ptr addrspace(4) %ptr) {
; CHECK-SAME: ptr addrspace(4) [[PTR:%.*]]) {
; CHECK-NEXT: [[LD:%.*]] = load <8 x ptr>, ptr addrspace(4) [[PTR]], align 128
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast <8 x ptr> [[LD]] to <8 x ptr addrspace(1)>
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x ptr addrspace(1)> [[TMP1]], <ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1))>
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x ptr addrspace(1)> [[TMP1]], zeroinitializer
; CHECK-NEXT: ret <8 x i1> [[CMP]]
;
%ld = load <8 x ptr>, ptr addrspace(4) %ptr, align 128
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/phi-poison.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/phi-poison.ll
index 0ccf7e3df8af9..e5880afb55135 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/phi-poison.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/phi-poison.ll
@@ -11,8 +11,8 @@ define void @phi_poison(ptr addrspace(1) %arg, <2 x ptr addrspace(1)> %arg1) {
; CHECK: merge:
; CHECK-NEXT: [[I:%.*]] = phi ptr addrspace(1) [ [[ARG:%.*]], [[LEADER]] ], [ poison, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[I2:%.*]] = phi <2 x ptr addrspace(1)> [ [[ARG1:%.*]], [[LEADER]] ], [ poison, [[ENTRY]] ]
-; CHECK-NEXT: [[J:%.*]] = icmp eq ptr addrspace(1) [[I]], addrspacecast (ptr null to ptr addrspace(1))
-; CHECK-NEXT: [[J1:%.*]] = icmp eq <2 x ptr addrspace(1)> [[I2]], <ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1)), ptr addrspace(1) addrspacecast (ptr null to ptr addrspace(1))>
+; CHECK-NEXT: [[J:%.*]] = icmp eq ptr addrspace(1) [[I]], null
+; CHECK-NEXT: [[J1:%.*]] = icmp eq <2 x ptr addrspace(1)> [[I2]], zeroinitializer
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
index c4bd698099bdf..3e6d3725b5d25 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
@@ -31,9 +31,7 @@ define <3 x ptr addrspace(3)> @ptrmask_vector_cast_local_to_flat(<3 x ptr addrsp
; Casting null does not necessarily result in null again.
define i8 @ptrmask_cast_local_null_to_flat(i64 %mask) {
; CHECK-LABEL: @ptrmask_cast_local_null_to_flat(
-; CHECK-NEXT: [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(3) null to ptr), i64 [[MASK:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[MASKED]] to ptr addrspace(3)
-; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
+; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr null, align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
%masked = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(3) zeroinitializer to ptr), i64 %mask)
@@ -43,9 +41,7 @@ define i8 @ptrmask_cast_local_null_to_flat(i64 %mask) {
define <3 x ptr addrspace(3)> @ptrmask_vector_cast_local_null_to_flat(<3 x i64> %mask) {
; CHECK-LABEL: @ptrmask_vector_cast_local_null_to_flat(
-; CHECK-NEXT: [[MASKED:%.*]] = call <3 x ptr> @llvm.ptrmask.v3p0.v3i64(<3 x ptr> <ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(3) null to ptr)>, <3 x i64> [[MASK:%.*]])
-; CHECK-NEXT: [[CAST:%.*]] = addrspacecast <3 x ptr> [[MASKED]] to <3 x ptr addrspace(3)>
-; CHECK-NEXT: ret <3 x ptr addrspace(3)> [[CAST]]
+; CHECK-NEXT: ret <3 x ptr addrspace(3)> zeroinitializer
;
%masked = call <3 x ptr> @llvm.ptrmask.v3p0.v3i64(<3 x ptr> addrspacecast (<3 x ptr addrspace(3)> <ptr addrspace(3) zeroinitializer, ptr addrspace(3) zeroinitializer, ptr addrspace(3) zeroinitializer> to <3 x ptr>), <3 x i64> %mask)
%cast = addrspacecast <3 x ptr> %masked to <3 x ptr addrspace(3)>
@@ -133,7 +129,7 @@ define <3 x ptr addrspace(3)> @ptrmask_vector_cast_flat_to_local(<3 x ptr> %src.
; truncating)
define i8 @ptrmask_cast_flat_null_to_local(i64 %mask) {
; CHECK-LABEL: @ptrmask_cast_flat_null_to_local(
-; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), align 1
+; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) null, align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
%masked = call ptr @llvm.ptrmask.p0.i64(ptr null, i64 %mask)
@@ -144,7 +140,7 @@ define i8 @ptrmask_cast_flat_null_to_local(i64 %mask) {
define i8 @ptrmask_vector_cast_flat_null_to_local(<3 x i64> %mask, i32 %ptridx, i32 %idx) {
; CHECK-LABEL: @ptrmask_vector_cast_flat_null_to_local(
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr addrspace(3) null, i32 [[IDX:%.*]]
; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) [[GEP]], align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
@@ -325,9 +321,7 @@ define i8 @ptrmask_cast_local_to_flat_const_mask_7fffffffffffffff(ptr addrspace(
; Do not fold: casting null does not necessarily result in null again
define i8 @ptrmask_cast_local_null_to_flat_const_mask_7fffffffffffffff() {
; CHECK-LABEL: @ptrmask_cast_local_null_to_flat_const_mask_7fffffffffffffff(
-; CHECK-NEXT: [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(3) null to ptr), i64 9223372036854775807)
-; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[MASKED]] to ptr addrspace(3)
-; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
+; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(3) null, align 1
; CHECK-NEXT: ret i8 [[LOAD]]
;
%cast = addrspacecast ptr addrspace(3) zeroinitializer to ptr
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
index 4de45ab8bea64..24913e157de51 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
@@ -55,7 +55,7 @@ define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, ptr a
@lds1 = internal addrspace(3) global i32 456, align 4
; CHECK-LABEL: @store_select_group_flat_null(
-; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3))
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) null
; CHECK: store i32 -1, ptr addrspace(3) %select
define amdgpu_kernel void @store_select_group_flat_null(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
%cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
@@ -65,7 +65,7 @@ define amdgpu_kernel void @store_select_group_flat_null(i1 %c, ptr addrspace(3)
}
; CHECK-LABEL: @store_select_group_flat_null_swap(
-; CHECK: %select = select i1 %c, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr addrspace(3) %group.ptr.0
+; CHECK: %select = select i1 %c, ptr addrspace(3) null, ptr addrspace(3) %group.ptr.0
; CHECK: store i32 -1, ptr addrspace(3) %select
define amdgpu_kernel void @store_select_group_flat_null_swap(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
%cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
@@ -95,7 +95,7 @@ define amdgpu_kernel void @store_select_group_flat_poison_swap(i1 %c, ptr addrsp
}
; CHECK-LABEL: @store_select_gep_group_flat_null(
-; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3))
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) null
; CHECK: %gep = getelementptr i32, ptr addrspace(3) %select, i64 16
; CHECK: store i32 -1, ptr addrspace(3) %gep
define amdgpu_kernel void @store_select_gep_group_flat_null(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
@@ -161,7 +161,7 @@ define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr_swa
}
; CHECK-LABEL: @store_select_group_global_mismatch_null_null(
-; CHECK: %select = select i1 %c, ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)
+; CHECK: %select = select i1 %c, ptr null, ptr null
; CHECK: store i32 7, ptr %select
define amdgpu_kernel void @store_select_group_global_mismatch_null_null(i1 %c) #0 {
%select = select i1 %c, ptr addrspacecast (ptr addrspace(3) zeroinitializer to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)
diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index 8f3270cd60609..33f3439cd366f 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -173,7 +173,7 @@ end:
define void @constant_fold_null() #0 {
; CHECK-LABEL: @constant_fold_null(
-; CHECK-NEXT: store i32 7, ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), align 4
+; CHECK-NEXT: store i32 7, ptr addrspace(4) null, align 4
; CHECK-NEXT: ret void
;
%cast = addrspacecast ptr addrspace(3) null to ptr addrspace(4)
@@ -191,7 +191,7 @@ define ptr addrspace(4) @constant_fold_undef() #0 {
define <4 x ptr addrspace(4)> @constant_fold_null_vector() #0 {
; CHECK-LABEL: @constant_fold_null_vector(
-; CHECK-NEXT: ret <4 x ptr addrspace(4)> <ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(3) null to ptr addrspace(4))>
+; CHECK-NEXT: ret <4 x ptr addrspace(4)> zeroinitializer
;
%cast = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr addrspace(4)>
ret <4 x ptr addrspace(4)> %cast
diff --git a/llvm/test/Transforms/InstCombine/gep-inbounds-null.ll b/llvm/test/Transforms/InstCombine/gep-inbounds-null.ll
index cd7eac6bcba28..39880f0b97475 100644
--- a/llvm/test/Transforms/InstCombine/gep-inbounds-null.ll
+++ b/llvm/test/Transforms/InstCombine/gep-inbounds-null.ll
@@ -205,15 +205,13 @@ entry:
ret i1 %cnd
}
-; Test for an assert from trying to create an invalid constantexpr
-; bitcast between different address spaces. The addrspacecast is
-; stripped off and the addrspace(0) null can be treated as invalid.
-; FIXME: This should be able to fold to ret i1 false
+; addrspacecast(ptr null to ptr addrspace(5)) folds to ptr addrspace(5) null,
+; so gep inbounds %ptr, 1 == null folds to false (inbounds non-zero offset
+; from a valid pointer can never produce null).
define i1 @invalid_bitcast_icmp_addrspacecast_as0_null(ptr addrspace(5) %ptr) {
; CHECK-LABEL: @invalid_bitcast_icmp_addrspacecast_as0_null(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(5) [[PTR:%.*]], addrspacecast (ptr null to ptr addrspace(5))
-; CHECK-NEXT: ret i1 [[TMP2]]
+; CHECK-NEXT: ret i1 false
;
bb:
%tmp1 = getelementptr inbounds i32, ptr addrspace(5) %ptr, i32 1
@@ -224,7 +222,9 @@ bb:
define i1 @invalid_bitcast_icmp_addrspacecast_as0_null_var(ptr addrspace(5) %ptr, i32 %idx) {
; CHECK-LABEL: @invalid_bitcast_icmp_addrspacecast_as0_null_var(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(5) [[PTR:%.*]], addrspacecast (ptr null to ptr addrspace(5))
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(5) [[PTR:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr addrspace(5) [[TMP1]], null
; CHECK-NEXT: ret i1 [[TMP2]]
;
bb:
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 096554d53e031..1f7dbc476a7c0 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -103,7 +103,7 @@ static Value getLaneId(RewriterBase &rewriter, Location loc) {
}
static constexpr StringLiteral amdgcnDataLayout =
- "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
+ "e-p:64:64-p1:64:64-po2:32:32-po3:32:32-p4:64:64-po5:32:32-p6:32:32"
"-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:"
"32-v32:"
"32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:"
More information about the llvm-branch-commits
mailing list