[llvm] [InferAS] Infer the address space of inttoptr (PR #173244)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 22 03:57:26 PST 2025


llvmbot wrote:


@llvm/pr-subscribers-llvm-analysis

Author: Luo Yuanke (LuoYuanke)

<details>
<summary>Changes</summary>

Currently, InferAddressSpaces only rewrites a <ptrtoint, inttoptr> pair when
the cast is a no-op, i.e. the bit value is provably unchanged across the
address space cast. That condition is too strict: with swizzling, only the
low address bits change, and the address space stays the same. Taking the
code below as an example, we can transform `%gp2 = inttoptr i64 %b to ptr`
into `%gp2 = inttoptr i64 %b to ptr addrspace(2)` and specify addrspace(2)
for the following store instruction.

   %gp = addrspacecast ptr addrspace(2) %sp to ptr
   %a = ptrtoint ptr %gp to i64
   %b = xor i64 7, %a
   %gp2 = inttoptr i64 %b to ptr
   store i16 0, ptr %gp2, align 2

This patch infers the number of leading address bits that are provably
unchanged and lets the target determine whether it is safe to perform the
address space cast for the inttoptr instruction.
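
To make the "unchanged leading bits" computation concrete, here is a minimal
sketch of the idea the patch builds on (the standalone helper below is mine,
not code from the patch): the high bits of `addr ^ mask` provably match those
of `addr` exactly where the mask bits are known to be zero, which is what
`KnownBits` reports.

```cpp
#include "llvm/Support/KnownBits.h"

// Hypothetical helper, not part of the patch: the top N address bits survive
// `addr ^ mask` unchanged exactly when the top N bits of `mask` are known to
// be zero, so counting leading known-zero bits of the mask gives N.
static unsigned unchangedLeadingBits(const llvm::KnownBits &MaskKnown) {
  return MaskKnown.Zero.countLeadingOnes();
}

// E.g. for the constant mask 7 in the example above, the top 61 bits of an
// i64 are known zero, so 61 leading address bits are unchanged -- comfortably
// above the 52-bit threshold the NVPTX override below uses for a 4K page.
```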


---
Full diff: https://github.com/llvm/llvm-project/pull/173244.diff


6 Files Affected:

- (modified) llvm/include/llvm/Analysis/TargetTransformInfo.h (+14) 
- (modified) llvm/include/llvm/Analysis/TargetTransformInfoImpl.h (+5) 
- (modified) llvm/lib/Analysis/TargetTransformInfo.cpp (+6) 
- (modified) llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h (+16) 
- (modified) llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp (+93-24) 
- (added) llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll (+125) 


``````````diff
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 5a4eb8daf0af6..bd81a9cecfde9 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1927,6 +1927,20 @@ class TargetTransformInfo {
   /// This should also apply to lowering for vector funnel shifts (rotates).
   LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const;
 
+  /// Return true if it is safe to cast an integer to a pointer in a new
+  /// address space. The integer form of the address may change only in its
+  /// least significant bits (e.g. within a page); in that case the target can
+  /// determine whether it is safe to cast from the generic address space back
+  /// to the original address space. For the example below, we can replace
+  /// `%gp2 = inttoptr i64 %b to ptr` with `%gp2 = inttoptr i64 %b to ptr addrspace(2)`:
+  ///   %gp = addrspacecast ptr addrspace(2) %sp to ptr
+  ///   %a = ptrtoint ptr %gp to i64
+  ///   %b = xor i64 7, %a
+  ///   %gp2 = inttoptr i64 %b to ptr
+  ///   store i16 0, ptr %gp2, align 2
+  LLVM_ABI bool isSafeToCastIntPtrWithAS(unsigned AddrUnchangedLeadingBit,
+                                         unsigned SrcAS, unsigned DstAS) const;
+
   struct VPLegalization {
     enum VPTransform {
       // keep the predicating parameter
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 74857a5b83aba..aa9ea0586a373 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -1117,6 +1117,11 @@ class TargetTransformInfoImplBase {
 
   virtual bool isVectorShiftByScalarCheap(Type *Ty) const { return false; }
 
+  virtual bool isSafeToCastIntPtrWithAS(unsigned AddrUnchangedLeadingBit,
+                                        unsigned SrcAS, unsigned DstAS) const {
+    return false;
+  }
+
   virtual TargetTransformInfo::VPLegalization
   getVPLegalizationStrategy(const VPIntrinsic &PI) const {
     return TargetTransformInfo::VPLegalization(
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 2961d9361e5fa..02de6c0eda237 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1483,6 +1483,12 @@ bool TargetTransformInfo::isVectorShiftByScalarCheap(Type *Ty) const {
   return TTIImpl->isVectorShiftByScalarCheap(Ty);
 }
 
+bool TargetTransformInfo::isSafeToCastIntPtrWithAS(
+    unsigned AddrUnchangedLeadingBit, unsigned SrcAS, unsigned DstAS) const {
+  return TTIImpl->isSafeToCastIntPtrWithAS(AddrUnchangedLeadingBit, SrcAS,
+                                           DstAS);
+}
+
 unsigned
 TargetTransformInfo::getNumBytesToPadGlobalArray(unsigned Size,
                                                  Type *ArrayType) const {
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index ae12a6ea3baa3..c5847ebe3fc0e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -180,6 +180,22 @@ class NVPTXTTIImpl final : public BasicTTIImplBase<NVPTXTTIImpl> {
     }
   }
 
+  bool isSafeToCastIntPtrWithAS(unsigned AddrUnchangedLeadingBit,
+                                unsigned SrcAS, unsigned DstAS) const override {
+    if (SrcAS != llvm::ADDRESS_SPACE_GENERIC)
+      return false;
+    if (DstAS != llvm::ADDRESS_SPACE_GLOBAL &&
+        DstAS != llvm::ADDRESS_SPACE_SHARED)
+      return false;
+
+    // An address change confined to a 4K page does not change the original
+    // address space, so it is safe to cast from SrcAS to DstAS.
+    if (AddrUnchangedLeadingBit >= 52)
+      return true;
+
+    return false;
+  }
+
   bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                   Intrinsic::ID IID) const override;
 
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 352a1b331001a..aeeb06291ee8a 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -122,6 +122,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -191,6 +192,19 @@ class InferAddressSpacesImpl {
   /// Target specific address space which uses of should be replaced if
   /// possible.
   unsigned FlatAddrSpace = 0;
+  mutable DenseMap<const Value *, Value *> PtrIntCastPairs;
+
+  bool isSafeToCastPtrIntPair(const Operator *I2P, const DataLayout &DL) const;
+  bool isAddressExpression(const Value &V, const DataLayout &DL,
+                           const TargetTransformInfo *TTI) const;
+  Value *cloneConstantExprWithNewAddressSpace(
+      ConstantExpr *CE, unsigned NewAddrSpace,
+      const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
+      const TargetTransformInfo *TTI) const;
+
+  SmallVector<Value *, 2>
+  getPointerOperands(const Value &V, const DataLayout &DL,
+                     const TargetTransformInfo *TTI) const;
 
   // Try to update the address space of V. If V is updated, returns true and
   // false otherwise.
@@ -308,8 +322,9 @@ static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
 // TODO: Currently, we only consider:
 //   - arguments
 //   - phi, bitcast, addrspacecast, and getelementptr operators
-static bool isAddressExpression(const Value &V, const DataLayout &DL,
-                                const TargetTransformInfo *TTI) {
+bool InferAddressSpacesImpl::isAddressExpression(
+    const Value &V, const DataLayout &DL,
+    const TargetTransformInfo *TTI) const {
 
   if (const Argument *Arg = dyn_cast<Argument>(&V))
     return Arg->getType()->isPointerTy() &&
@@ -334,7 +349,7 @@ static bool isAddressExpression(const Value &V, const DataLayout &DL,
     return II && II->getIntrinsicID() == Intrinsic::ptrmask;
   }
   case Instruction::IntToPtr:
-    return isNoopPtrIntCastPair(Op, DL, TTI);
+    return isNoopPtrIntCastPair(Op, DL, TTI) || isSafeToCastPtrIntPair(Op, DL);
   default:
     // That value is an address expression if it has an assumed address space.
     return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
@@ -344,9 +359,9 @@ static bool isAddressExpression(const Value &V, const DataLayout &DL,
 // Returns the pointer operands of V.
 //
 // Precondition: V is an address expression.
-static SmallVector<Value *, 2>
-getPointerOperands(const Value &V, const DataLayout &DL,
-                   const TargetTransformInfo *TTI) {
+SmallVector<Value *, 2> InferAddressSpacesImpl::getPointerOperands(
+    const Value &V, const DataLayout &DL,
+    const TargetTransformInfo *TTI) const {
   if (isa<Argument>(&V))
     return {};
 
@@ -369,15 +384,56 @@ getPointerOperands(const Value &V, const DataLayout &DL,
     return {II.getArgOperand(0)};
   }
   case Instruction::IntToPtr: {
-    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
-    auto *P2I = cast<Operator>(Op.getOperand(0));
-    return {P2I->getOperand(0)};
+    if (isNoopPtrIntCastPair(&Op, DL, TTI)) {
+      auto *P2I = cast<Operator>(Op.getOperand(0));
+      return {P2I->getOperand(0)};
+    }
+    assert(isSafeToCastPtrIntPair(&Op, DL));
+    return {PtrIntCastPairs[&Op]};
   }
   default:
     llvm_unreachable("Unexpected instruction type.");
   }
 }
 
+bool InferAddressSpacesImpl::isSafeToCastPtrIntPair(
+    const Operator *I2P, const DataLayout &DL) const {
+  assert(I2P->getOpcode() == Instruction::IntToPtr);
+  if (PtrIntCastPairs.count(I2P))
+    return true;
+
+  if (I2P->getType()->isVectorTy())
+    return false;
+
+  auto *Xor = dyn_cast<Operator>(I2P->getOperand(0));
+  if (!Xor || Xor->getOpcode() != Instruction::Xor)
+    return false;
+
+  auto *LHS = Xor->getOperand(0);
+  auto *Mask = Xor->getOperand(1);
+  auto *P2I = dyn_cast<Operator>(LHS);
+  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
+    std::swap(LHS, Mask);
+  P2I = dyn_cast<Operator>(LHS);
+  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
+    return false;
+
+  auto *ASCast = dyn_cast<Operator>(P2I->getOperand(0));
+  if (!ASCast || ASCast->getOpcode() != Instruction::AddrSpaceCast)
+    return false;
+
+  KnownBits Known = computeKnownBits(Mask, DL, &AC, nullptr, DT);
+  unsigned SrcAS = I2P->getType()->getPointerAddressSpace();
+  unsigned DstAS = ASCast->getOperand(0)->getType()->getPointerAddressSpace();
+  unsigned AddrUnchangedLeadingBit = Known.Zero.countLeadingOnes();
+  if (TTI->isSafeToCastIntPtrWithAS(AddrUnchangedLeadingBit, SrcAS, DstAS)) {
+    PtrIntCastPairs[I2P] = P2I->getOperand(0);
+    return true;
+  }
+
+  return false;
+}
+
 bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                       Value *OldV,
                                                       Value *NewV) const {
@@ -586,6 +642,8 @@ InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
     } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
       if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
         PushPtrOperand(cast<Operator>(I2P->getOperand(0))->getOperand(0));
+      else if (isSafeToCastPtrIntPair(cast<Operator>(I2P), *DL))
+        PushPtrOperand(PtrIntCastPairs[I2P]);
     } else if (auto *RI = dyn_cast<ReturnInst>(&I)) {
       if (auto *RV = RI->getReturnValue();
           RV && RV->getType()->isPtrOrPtrVectorTy())
@@ -784,15 +842,20 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
     return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                               NewPointerOperands[2], "", nullptr, I);
   case Instruction::IntToPtr: {
-    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
-    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
-    if (Src->getType() == NewPtrType)
-      return Src;
-
-    // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred a
-    // source address space from a generic pointer source need to insert a cast
-    // back.
-    return new AddrSpaceCastInst(Src, NewPtrType);
+    if (isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI)) {
+      Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
+      if (Src->getType() == NewPtrType)
+        return Src;
+
+      // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred
+      // a source address space from a generic pointer source and need to
+      // insert a cast back.
+      return new AddrSpaceCastInst(Src, NewPtrType);
+    }
+    assert(isSafeToCastPtrIntPair(cast<Operator>(I), *DL));
+    auto *Src = I->getOperand(0);
+    IntToPtrInst *NewI2P = new IntToPtrInst(Src, NewPtrType);
+    return NewI2P;
   }
   default:
     llvm_unreachable("Unexpected opcode");
@@ -802,10 +865,10 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
 // Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
 // constant expression `CE` with its operands replaced as specified in
 // ValueWithNewAddrSpace.
-static Value *cloneConstantExprWithNewAddressSpace(
+Value *InferAddressSpacesImpl::cloneConstantExprWithNewAddressSpace(
     ConstantExpr *CE, unsigned NewAddrSpace,
     const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
-    const TargetTransformInfo *TTI) {
+    const TargetTransformInfo *TTI) const {
   Type *TargetType =
       CE->getType()->isPtrOrPtrVectorTy()
           ? getPtrOrVecOfPtrsWithNewAS(CE->getType(), NewAddrSpace)
@@ -827,10 +890,15 @@ static Value *cloneConstantExprWithNewAddressSpace(
   }
 
   if (CE->getOpcode() == Instruction::IntToPtr) {
-    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
-    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
-    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
-    return Src;
+    if (isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI)) {
+      Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
+      assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
+      return Src;
+    }
+    assert(isSafeToCastPtrIntPair(cast<Operator>(CE), *DL));
+    auto *Src = CE->getOperand(0);
+    return ConstantExpr::getIntToPtr(Src, TargetType);
   }
 
   // Computes the operands of the new constant expression.
@@ -936,6 +1004,7 @@ unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
 bool InferAddressSpacesImpl::run(Function &CurFn) {
   F = &CurFn;
   DL = &F->getDataLayout();
+  PtrIntCastPairs.clear();
 
   if (AssumeDefaultIsFlatAddressSpace)
     FlatAddrSpace = 0;
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll
new file mode 100644
index 0000000000000..1b4a8114a0ca8
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/int2ptr.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -passes=infer-address-spaces %s | FileCheck %s
+
+define void @test_smem_fail(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test_smem_fail(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT:    [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT:    [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT:    [[B:%.*]] = xor i64 8191, [[A]]
+; CHECK-NEXT:    [[GP2:%.*]] = inttoptr i64 [[B]] to ptr
+; CHECK-NEXT:    store i16 0, ptr [[GP2]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gp = addrspacecast ptr addrspace(3) %sp to ptr
+  %a = ptrtoint ptr %gp to i64
+  %b = xor i64 8191, %a
+  %gp2 = inttoptr i64 %b to ptr
+  store i16 0, ptr %gp2, align 2
+  ret void
+}
+
+define void @test_smem(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test_smem(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT:    [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT:    [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT:    [[B:%.*]] = xor i64 4095, [[A]]
+; CHECK-NEXT:    [[GP2:%.*]] = inttoptr i64 [[B]] to ptr addrspace(3)
+; CHECK-NEXT:    store i16 0, ptr addrspace(3) [[GP2]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gp = addrspacecast ptr addrspace(3) %sp to ptr
+  %a = ptrtoint ptr %gp to i64
+  %b = xor i64 4095, %a
+  %gp2 = inttoptr i64 %b to ptr
+  store i16 0, ptr %gp2, align 2
+  ret void
+}
+
+define void @test_gmem(ptr addrspace(1) %sp) {
+; CHECK-LABEL: define void @test_gmem(
+; CHECK-SAME: ptr addrspace(1) [[SP:%.*]]) {
+; CHECK-NEXT:    [[GP:%.*]] = addrspacecast ptr addrspace(1) [[SP]] to ptr
+; CHECK-NEXT:    [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT:    [[B:%.*]] = xor i64 7, [[A]]
+; CHECK-NEXT:    [[GP2:%.*]] = inttoptr i64 [[B]] to ptr addrspace(1)
+; CHECK-NEXT:    store i16 0, ptr addrspace(1) [[GP2]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gp = addrspacecast ptr addrspace(1) %sp to ptr
+  %a = ptrtoint ptr %gp to i64
+  %b = xor i64 7, %a
+  %gp2 = inttoptr i64 %b to ptr
+  store i16 0, ptr %gp2, align 2
+  ret void
+}
+
+define void @test_lmem(ptr addrspace(5) %sp) {
+; CHECK-LABEL: define void @test_lmem(
+; CHECK-SAME: ptr addrspace(5) [[SP:%.*]]) {
+; CHECK-NEXT:    [[GP:%.*]] = addrspacecast ptr addrspace(5) [[SP]] to ptr
+; CHECK-NEXT:    [[A:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT:    [[B:%.*]] = xor i64 7, [[A]]
+; CHECK-NEXT:    [[GP2:%.*]] = inttoptr i64 [[B]] to ptr
+; CHECK-NEXT:    store i16 0, ptr [[GP2]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gp = addrspacecast ptr addrspace(5) %sp to ptr
+  %a = ptrtoint ptr %gp to i64
+  %b = xor i64 7, %a
+  %gp2 = inttoptr i64 %b to ptr
+  store i16 0, ptr %gp2, align 2
+  ret void
+}
+
+define void @test3(ptr addrspace(3) %sp) {
+; CHECK-LABEL: define void @test3(
+; CHECK-SAME: ptr addrspace(3) [[SP:%.*]]) {
+; CHECK-NEXT:    [[GP:%.*]] = addrspacecast ptr addrspace(3) [[SP]] to ptr
+; CHECK-NEXT:    [[T1:%.*]] = ptrtoint ptr [[GP]] to i64
+; CHECK-NEXT:    [[AND:%.*]] = lshr i64 [[T1]], 8
+; CHECK-NEXT:    [[SHR:%.*]] = and i64 [[AND]], 8
+; CHECK-NEXT:    [[AND1:%.*]] = lshr i64 [[T1]], 10
+; CHECK-NEXT:    [[SHR2:%.*]] = and i64 [[AND1]], 4
+; CHECK-NEXT:    [[OR:%.*]] = or i64 [[SHR]], [[SHR2]]
+; CHECK-NEXT:    [[AND3:%.*]] = lshr i64 [[T1]], 4
+; CHECK-NEXT:    [[SHR4:%.*]] = and i64 [[AND3]], 112
+; CHECK-NEXT:    [[OR5:%.*]] = or i64 [[OR]], [[SHR4]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i64 [[OR5]], [[T1]]
+; CHECK-NEXT:    [[GP2:%.*]] = inttoptr i64 [[XOR]] to ptr addrspace(3)
+; CHECK-NEXT:    store i16 0, ptr addrspace(3) [[GP2]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gp = addrspacecast ptr addrspace(3) %sp to ptr
+  %t1 = ptrtoint ptr %gp to i64
+  %and = lshr i64 %t1, 8
+  %shr = and i64 %and, 8
+  %and1 = lshr i64 %t1, 10
+  %shr2 = and i64 %and1, 4
+  %or = or i64 %shr, %shr2
+  %and3 = lshr i64 %t1, 4
+  %shr4 = and i64 %and3, 112
+  %or5 = or i64 %or, %shr4
+  %xor = xor i64 %or5, %t1
+  %gp2 = inttoptr i64 %xor to ptr
+  store i16 0, ptr %gp2, align 2
+  ret void
+}
+
+@g = addrspace(1) global i32 0, align 4
+
+define void @test_ce() {
+; CHECK-LABEL: define void @test_ce() {
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 xor (i64 ptrtoint (ptr addrspacecast (ptr addrspace(1) @g to ptr) to i64), i64 7) to ptr), align 4
+; CHECK-NEXT:    ret void
+;
+  store i32 0, ptr inttoptr (i64
+  xor (i64
+  ptrtoint (ptr
+  addrspacecast (ptr addrspace(1) @g to ptr)
+  to i64),
+  i64 7)
+  to ptr)
+  ret void
+}

``````````
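
For readers skimming the diff: a condensed, self-contained sketch of the
target-side contract, modeled on the NVPTX override above. The class name
`MyTargetTTIImpl` and the `GenericAS` constant are illustrative stand-ins,
not part of the patch; the 52-bit threshold mirrors NVPTX's 4K-page
assumption.

```cpp
// Illustrative sketch only (hypothetical target): mirrors the NVPTX override
// in the diff above. A target returns true when enough leading address bits
// are provably unchanged that the swizzled pointer cannot leave the address
// space it started in.
bool MyTargetTTIImpl::isSafeToCastIntPtrWithAS(unsigned AddrUnchangedLeadingBit,
                                               unsigned SrcAS,
                                               unsigned DstAS) const {
  // Only casts out of the flat/generic address space are candidates.
  if (SrcAS != GenericAS)
    return false;
  // With >= 52 unchanged leading bits of a 64-bit address, only the low
  // 12 bits may differ, so the address stays inside one 4K page; on this
  // (hypothetical) target no address-space boundary lies inside a page.
  return AddrUnchangedLeadingBit >= 52;
}
```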

</details>


https://github.com/llvm/llvm-project/pull/173244


More information about the llvm-commits mailing list