[llvm] 756166e - [AMDGPU] Improve detection of non-null addrspacecast operands (#82311)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 1 05:01:14 PST 2024


Author: Pierre van Houtryve
Date: 2024-03-01T14:01:10+01:00
New Revision: 756166e342a3692fd8de1ad1c5620df516bac33a

URL: https://github.com/llvm/llvm-project/commit/756166e342a3692fd8de1ad1c5620df516bac33a
DIFF: https://github.com/llvm/llvm-project/commit/756166e342a3692fd8de1ad1c5620df516bac33a.diff

LOG: [AMDGPU] Improve detection of non-null addrspacecast operands (#82311)

Use IR analysis to infer when an addrspacecast operand is nonnull, then
lower it to an intrinsic that the DAG can use to skip the null check.

I did this using an intrinsic as it's non-intrusive. An alternative
would have been to allow something like `!nonnull` metadata on
`addrspacecast` and then lower that to a custom opcode (or add an
operand to the addrspacecast MIR/DAG opcodes), but that's a lot of
boilerplate for just one target's use case IMO.

I'm hoping that when we switch to GISel we can move all this logic to
the MIR level without losing info, but currently the DAG doesn't see
enough, so we need to act in CGP.

Fixes: SWDEV-316445
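
For illustration, here's a minimal sketch of the rewrite this enables
(not taken from the commit; the function @example is made up, and the
.p0.p3 mangling follows the new tests below). Given a source pointer
the IR analysis proves non-null, AMDGPUCodeGenPrepare replaces the
plain addrspacecast with a call to the new intrinsic, which ISel then
lowers without the null-check select:

  ; Before AMDGPUCodeGenPrepare: a plain cast of a nonnull argument.
  define void @example(ptr addrspace(3) nonnull %p) {
    %f = addrspacecast ptr addrspace(3) %p to ptr
    store i32 0, ptr %f
    ret void
  }

  ; After: the cast is now an intrinsic call, so codegen can skip the
  ; null check (no cndmask against the null value).
  define void @example(ptr addrspace(3) nonnull %p) {
    %f = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) %p)
    store i32 0, ptr %f
    ret void
  }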

Added: 
    llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll
    llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 0f29653f1f5bec..051e603c0819d2 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3196,4 +3196,11 @@ def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
   [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
   [IntrNoMem, IntrSpeculatable]
 >;
+
+/// Emit an addrspacecast without null pointer checking.
+/// Should only be inserted by a pass based on analysis of an addrspacecast's src.
+def int_amdgcn_addrspacecast_nonnull : DefaultAttrsIntrinsic<
+  [llvm_anyptr_ty], [llvm_anyptr_ty],
+  [IntrNoMem, IntrSpeculatable]
+>;
 }

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 1c75c5a47c9d27..0edbbf7cb0af54 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -99,6 +99,7 @@ class AMDGPUCodeGenPrepareImpl
     : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
 public:
   const GCNSubtarget *ST = nullptr;
+  const AMDGPUTargetMachine *TM = nullptr;
   const TargetLibraryInfo *TLInfo = nullptr;
   AssumptionCache *AC = nullptr;
   DominatorTree *DT = nullptr;
@@ -310,6 +311,7 @@ class AMDGPUCodeGenPrepareImpl
   bool visitICmpInst(ICmpInst &I);
   bool visitSelectInst(SelectInst &I);
   bool visitPHINode(PHINode &I);
+  bool visitAddrSpaceCastInst(AddrSpaceCastInst &I);
 
   bool visitIntrinsicInst(IntrinsicInst &I);
   bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
@@ -2013,6 +2015,75 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
   return true;
 }
 
+/// \param V  Value to check
+/// \param DL DataLayout
+/// \param TM TargetMachine (TODO: remove once DL contains nullptr values)
+/// \param AS Target Address Space
+/// \return true if \p V cannot be the null value of \p AS, false otherwise.
+static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
+                                const AMDGPUTargetMachine &TM, unsigned AS) {
+  // Pointer cannot be null if it's a block address, GV or alloca.
+  // NOTE: We don't support extern_weak, but if we did, we'd need to check for
+  // it as the symbol could be null in such cases.
+  if (isa<BlockAddress>(V) || isa<GlobalValue>(V) || isa<AllocaInst>(V))
+    return true;
+
+  // Check nonnull arguments.
+  if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
+    return true;
+
+  // TODO: Calls that return nonnull?
+
+  // For all other things, use KnownBits.
+  // We either use 0 or all bits set to indicate null, so check whether the
+  // value can be zero or all ones.
+  //
+  // TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
+  // address spaces have non-zero null values.
+  auto SrcPtrKB = computeKnownBits(V, DL).trunc(DL.getPointerSizeInBits(AS));
+  const auto NullVal = TM.getNullPointerValue(AS);
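+  // If null is all-ones (NullVal == -1), we must show the value can never be
+  // all ones; if null is zero, we must show it is known non-zero.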
+  assert((NullVal == 0 || NullVal == -1) &&
+         "don't know how to check for this null value!");
+  return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
+  // Intrinsic doesn't support vectors, also it seems that it's often difficult
+  // to prove that a vector cannot have any nulls in it so it's unclear if it's
+  // worth supporting.
+  if (I.getType()->isVectorTy())
+    return false;
+
+  // Check if this can be lowered to an amdgcn.addrspacecast.nonnull.
+  // This is only worthwhile for casts between flat and priv/local.
+  const unsigned SrcAS = I.getSrcAddressSpace();
+  const unsigned DstAS = I.getDestAddressSpace();
+
+  bool CanLower = false;
+  if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
+    CanLower = (DstAS == AMDGPUAS::LOCAL_ADDRESS ||
+                DstAS == AMDGPUAS::PRIVATE_ADDRESS);
+  else if (DstAS == AMDGPUAS::FLAT_ADDRESS)
+    CanLower = (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
+                SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
+  if (!CanLower)
+    return false;
+
+  SmallVector<const Value *, 4> WorkList;
+  getUnderlyingObjects(I.getOperand(0), WorkList);
+  if (!all_of(WorkList, [&](const Value *V) {
+        return isPtrKnownNeverNull(V, *DL, *TM, SrcAS);
+      }))
+    return false;
+
+  IRBuilder<> B(&I);
+  auto *Intrin = B.CreateIntrinsic(
+      I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
+  I.replaceAllUsesWith(Intrin);
+  I.eraseFromParent();
+  return true;
+}
+
 bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
   switch (I.getIntrinsicID()) {
   case Intrinsic::bitreverse:
@@ -2196,6 +2267,7 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
     return false;
 
   const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
+  Impl.TM = &TM;
   Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
   Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
@@ -2214,6 +2286,7 @@ PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
   AMDGPUCodeGenPrepareImpl Impl;
   Impl.Mod = F.getParent();
   Impl.DL = &Impl.Mod->getDataLayout();
+  Impl.TM = static_cast<const AMDGPUTargetMachine *>(&TM);
   Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
   Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
   Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 13d7510729139b..4c3b983f2960df 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2247,10 +2247,16 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
   MachineIRBuilder &B) const {
   MachineFunction &MF = B.getMF();
 
+  // MI can either be a G_ADDRSPACE_CAST or a
+  // G_INTRINSIC @llvm.amdgcn.addrspacecast.nonnull
+  assert(MI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
+         (isa<GIntrinsic>(MI) && cast<GIntrinsic>(MI).getIntrinsicID() ==
+                                     Intrinsic::amdgcn_addrspacecast_nonnull));
+
   const LLT S32 = LLT::scalar(32);
   Register Dst = MI.getOperand(0).getReg();
-  Register Src = MI.getOperand(1).getReg();
-
+  Register Src = isa<GIntrinsic>(MI) ? MI.getOperand(2).getReg()
+                                     : MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(Dst);
   LLT SrcTy = MRI.getType(Src);
   unsigned DestAS = DstTy.getAddressSpace();
@@ -2271,7 +2277,9 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
   if (SrcAS == AMDGPUAS::FLAT_ADDRESS &&
       (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
        DestAS == AMDGPUAS::PRIVATE_ADDRESS)) {
-    if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+    // For llvm.amdgcn.addrspacecast.nonnull we can always assume non-null;
+    // for G_ADDRSPACE_CAST we need to guess.
+    if (isa<GIntrinsic>(MI) || isKnownNonNull(Src, MRI, TM, SrcAS)) {
       // Extract low 32-bits of the pointer.
       B.buildExtract(Dst, Src, 0);
       MI.eraseFromParent();
@@ -2308,7 +2316,9 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
     // avoid the ptrtoint?
     auto BuildPtr = B.buildMergeLikeInstr(DstTy, {SrcAsInt, ApertureReg});
 
-    if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+    // For llvm.amdgcn.addrspacecast.nonnull we can always assume non-null;
+    // for G_ADDRSPACE_CAST we need to guess.
+    if (isa<GIntrinsic>(MI) || isKnownNonNull(Src, MRI, TM, SrcAS)) {
       B.buildCopy(Dst, BuildPtr);
       MI.eraseFromParent();
       return true;
@@ -7020,6 +7030,8 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
 
     return false;
   }
+  case Intrinsic::amdgcn_addrspacecast_nonnull:
+    return legalizeAddrSpaceCast(MI, MRI, B);
   case Intrinsic::amdgcn_make_buffer_rsrc:
     return legalizePointerAsRsrcIntrin(MI, MRI, B);
   case Intrinsic::amdgcn_kernarg_segment_ptr:

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 84ef9679ab9563..34c6038115329f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1415,6 +1415,23 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   }
 }
 
+void SITargetLowering::CollectTargetIntrinsicOperands(
+    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
+  switch (cast<IntrinsicInst>(I).getIntrinsicID()) {
+  case Intrinsic::amdgcn_addrspacecast_nonnull: {
+    // The DAG's ValueType loses the addrspaces.
+    // Add them as 2 extra Constant operands "from" and "to".
+    unsigned SrcAS = I.getOperand(0)->getType()->getPointerAddressSpace();
+    unsigned DstAS = I.getType()->getPointerAddressSpace();
+    Ops.push_back(DAG.getTargetConstant(SrcAS, SDLoc(), MVT::i32));
+    Ops.push_back(DAG.getTargetConstant(DstAS, SDLoc(), MVT::i32));
+    break;
+  }
+  default:
+    break;
+  }
+}
+
 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                             SmallVectorImpl<Value*> &Ops,
                                             Type *&AccessTy) const {
@@ -6635,24 +6652,36 @@ static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                              SelectionDAG &DAG) const {
   SDLoc SL(Op);
-  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
-
-  SDValue Src = ASC->getOperand(0);
-  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
-  unsigned SrcAS = ASC->getSrcAddressSpace();
 
   const AMDGPUTargetMachine &TM =
     static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
 
+  unsigned DestAS, SrcAS;
+  SDValue Src;
+  bool IsNonNull = false;
+  if (const auto *ASC = dyn_cast<AddrSpaceCastSDNode>(Op)) {
+    SrcAS = ASC->getSrcAddressSpace();
+    Src = ASC->getOperand(0);
+    DestAS = ASC->getDestAddressSpace();
+  } else {
+    assert(Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+           Op.getConstantOperandVal(0) ==
+               Intrinsic::amdgcn_addrspacecast_nonnull);
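+    // Operand 0 is the intrinsic ID; operand 1 is the source pointer, and
+    // operands 2/3 are the src/dst address spaces attached by
+    // CollectTargetIntrinsicOperands above.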
+    Src = Op->getOperand(1);
+    SrcAS = Op->getConstantOperandVal(2);
+    DestAS = Op->getConstantOperandVal(3);
+    IsNonNull = true;
+  }
+
+  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+
   // flat -> local/private
   if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
-    unsigned DestAS = ASC->getDestAddressSpace();
-
     if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
         DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
       SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
 
-      if (isKnownNonNull(Src, DAG, TM, SrcAS))
+      if (IsNonNull || isKnownNonNull(Op, DAG, TM, SrcAS))
         return Ptr;
 
       unsigned NullVal = TM.getNullPointerValue(DestAS);
@@ -6665,16 +6694,16 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
   }
 
   // local/private -> flat
-  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+  if (DestAS == AMDGPUAS::FLAT_ADDRESS) {
     if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
 
-      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
+      SDValue Aperture = getSegmentAperture(SrcAS, SL, DAG);
       SDValue CvtPtr =
           DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
       CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
 
-      if (isKnownNonNull(Src, DAG, TM, SrcAS))
+      if (IsNonNull || isKnownNonNull(Op, DAG, TM, SrcAS))
         return CvtPtr;
 
       unsigned NullVal = TM.getNullPointerValue(SrcAS);
@@ -6697,7 +6726,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
   }
 
-  if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
+  if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
       Src.getValueType() == MVT::i64)
     return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
 
@@ -6708,7 +6737,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
     MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
   DAG.getContext()->diagnose(InvalidAddrSpaceCast);
 
-  return DAG.getUNDEF(ASC->getValueType(0));
+  return DAG.getUNDEF(Op->getValueType(0));
 }
 
 // This lowers an INSERT_SUBVECTOR by extracting the individual elements from
@@ -8325,6 +8354,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                         Op.getOperand(3), Op.getOperand(4), Op.getOperand(5),
                         IndexKeyi32, Op.getOperand(7)});
   }
+  case Intrinsic::amdgcn_addrspacecast_nonnull:
+    return lowerADDRSPACECAST(Op, DAG);
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index f6e1d198f40aec..fc90a208fa0b3a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -305,6 +305,10 @@ class SITargetLowering final : public AMDGPUTargetLowering {
                           MachineFunction &MF,
                           unsigned IntrinsicID) const override;
 
+  void CollectTargetIntrinsicOperands(const CallInst &I,
+                                      SmallVectorImpl<SDValue> &Ops,
+                                      SelectionDAG &DAG) const override;
+
   bool getAddrModeArguments(IntrinsicInst * /*I*/,
                             SmallVectorImpl<Value*> &/*Ops*/,
                             Type *&/*AccessTy*/) const override;

diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll
new file mode 100644
index 00000000000000..94c571a29f9911
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null-vector.ll
@@ -0,0 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+
+; Check that CGP doesn't try to create an amdgcn.addrspacecast.nonnull of a vector, as that's not supported.
+
+define <4 x ptr> @vec_of_local_to_flat_nonnull_arg() {
+; OPT-LABEL: define <4 x ptr> @vec_of_local_to_flat_nonnull_arg() {
+; OPT-NEXT:    [[X:%.*]] = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr>
+; OPT-NEXT:    ret <4 x ptr> [[X]]
+;
+  %x = addrspacecast <4 x ptr addrspace(3)> zeroinitializer to <4 x ptr>
+  ret <4 x ptr> %x
+}

diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
new file mode 100644
index 00000000000000..bcdfb75ab1ef98
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
@@ -0,0 +1,272 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
+
+; Tests that we can avoid nullptr checks for addrspacecasts from/to priv/local.
+;
+; Whenever a testcase is successful, we should see the addrspacecast replaced with the
+; intrinsic, and the resulting code should have no select/cndmask null check for the pointer.
+
+define void @local_to_flat_nonnull_arg(ptr addrspace(3) nonnull %ptr) {
+; OPT-LABEL: define void @local_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(3) nonnull [[PTR:%.*]]) {
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) [[PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: local_to_flat_nonnull_arg:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_shared_base
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %x = addrspacecast ptr addrspace(3) %ptr to ptr
+  store volatile i32 7, ptr %x
+  ret void
+}
+
+define void @private_to_flat_nonnull_arg(ptr addrspace(5) nonnull %ptr) {
+; OPT-LABEL: define void @private_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(5) nonnull [[PTR:%.*]]) {
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: private_to_flat_nonnull_arg:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %x = addrspacecast ptr addrspace(5) %ptr to ptr
+  store volatile i32 7, ptr %x
+  ret void
+}
+
+define void @flat_to_local_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_local_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr [[PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr addrspace(3) [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: flat_to_local_nonnull_arg:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    v_mov_b32_e32 v1, 7
+; ASM-NEXT:    ds_write_b32 v0, v1
+; ASM-NEXT:    s_waitcnt lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %x = addrspacecast ptr %ptr to ptr addrspace(3)
+  store volatile i32 7, ptr addrspace(3) %x
+  ret void
+}
+
+define void @flat_to_private_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_private_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: flat_to_private_nonnull_arg:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    v_mov_b32_e32 v1, 7
+; ASM-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT:    s_waitcnt vmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %x = addrspacecast ptr %ptr to ptr addrspace(5)
+  store volatile i32 7, ptr addrspace(5) %x
+  ret void
+}
+
+define void @private_alloca_to_flat(ptr %ptr) {
+; OPT-LABEL: define void @private_alloca_to_flat(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT:    [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[ALLOCA]])
+; OPT-NEXT:    store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: private_alloca_to_flat:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT:    v_lshrrev_b32_e64 v0, 6, s32
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %alloca = alloca i8, addrspace(5)
+  %x = addrspacecast ptr addrspace(5) %alloca to ptr
+  store volatile i32 7, ptr %x
+  ret void
+}
+
+@lds = internal unnamed_addr addrspace(3) global i8 poison, align 4
+
+define void @knownbits_on_flat_to_priv(ptr %ptr) {
+; OPT-LABEL: define void @knownbits_on_flat_to_priv(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT:    [[PTR_INT:%.*]] = ptrtoint ptr [[PTR]] to i64
+; OPT-NEXT:    [[PTR_OR:%.*]] = or i64 [[PTR_INT]], 15
+; OPT-NEXT:    [[KB_PTR:%.*]] = inttoptr i64 [[PTR_OR]] to ptr
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[KB_PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: knownbits_on_flat_to_priv:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    v_or_b32_e32 v0, 15, v0
+; ASM-NEXT:    v_mov_b32_e32 v1, 7
+; ASM-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT:    s_waitcnt vmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %ptr.int = ptrtoint ptr %ptr to i64
+  %ptr.or = or i64 %ptr.int, 15 ; set some low bits
+  %kb.ptr = inttoptr i64 %ptr.or to ptr
+  %x = addrspacecast ptr %kb.ptr to ptr addrspace(5)
+  store volatile i32 7, ptr addrspace(5) %x
+  ret void
+}
+
+define void @knownbits_on_priv_to_flat(ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @knownbits_on_priv_to_flat(
+; OPT-SAME: ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT:    [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT:    [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT:    [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT:    [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[KB_PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr [[TMP1]], align 4
+; OPT-NEXT:    ret void
+;
+; ASM-LABEL: knownbits_on_priv_to_flat:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+  %ptr.or = and i32 %ptr.int, 65535 ; ensure only lower 16 bits can be set.
+  %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+  %x = addrspacecast ptr addrspace(5) %kb.ptr to ptr
+  store volatile i32 7, ptr %x
+  ret void
+}
+
+define void @recursive_phis(i1 %cond, ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @recursive_phis(
+; OPT-SAME: i1 [[COND:%.*]], ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT:    br i1 [[COND]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; OPT:       then:
+; OPT-NEXT:    [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT:    [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT:    [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT:    br label [[FINALLY:%.*]]
+; OPT:       else:
+; OPT-NEXT:    [[OTHER_PHI:%.*]] = phi ptr addrspace(5) [ [[ALLOCA]], [[ENTRY:%.*]] ], [ [[PHI_PTR:%.*]], [[FINALLY]] ]
+; OPT-NEXT:    br label [[FINALLY]]
+; OPT:       finally:
+; OPT-NEXT:    [[PHI_PTR]] = phi ptr addrspace(5) [ [[KB_PTR]], [[THEN]] ], [ [[OTHER_PHI]], [[ELSE]] ]
+; OPT-NEXT:    [[TMP0:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[PHI_PTR]])
+; OPT-NEXT:    store volatile i32 7, ptr [[TMP0]], align 4
+; OPT-NEXT:    br i1 [[COND]], label [[ELSE]], label [[END:%.*]]
+; OPT:       end:
+; OPT-NEXT:    ret void
+;
+; DAGISEL-ASM-LABEL: recursive_phis:
+; DAGISEL-ASM:       ; %bb.0: ; %entry
+; DAGISEL-ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT:    v_and_b32_e32 v0, 1, v0
+; DAGISEL-ASM-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; DAGISEL-ASM-NEXT:    v_lshrrev_b32_e64 v0, 6, s32
+; DAGISEL-ASM-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; DAGISEL-ASM-NEXT:  ; %bb.1: ; %then
+; DAGISEL-ASM-NEXT:    v_and_b32_e32 v0, 0xffff, v1
+; DAGISEL-ASM-NEXT:  ; %bb.2: ; %finallyendcf.split
+; DAGISEL-ASM-NEXT:    s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT:    s_xor_b64 s[6:7], vcc, -1
+; DAGISEL-ASM-NEXT:    s_mov_b64 s[4:5], 0
+; DAGISEL-ASM-NEXT:    s_mov_b64 s[8:9], src_private_base
+; DAGISEL-ASM-NEXT:    v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT:  .LBB7_3: ; %finally
+; DAGISEL-ASM-NEXT:    ; =>This Inner Loop Header: Depth=1
+; DAGISEL-ASM-NEXT:    s_and_b64 s[10:11], exec, s[6:7]
+; DAGISEL-ASM-NEXT:    s_or_b64 s[4:5], s[10:11], s[4:5]
+; DAGISEL-ASM-NEXT:    v_mov_b32_e32 v1, s9
+; DAGISEL-ASM-NEXT:    flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT:    s_waitcnt vmcnt(0)
+; DAGISEL-ASM-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT:    s_cbranch_execnz .LBB7_3
+; DAGISEL-ASM-NEXT:  ; %bb.4: ; %end
+; DAGISEL-ASM-NEXT:    s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT:    s_waitcnt lgkmcnt(0)
+; DAGISEL-ASM-NEXT:    s_setpc_b64 s[30:31]
+;
+; GISEL-ASM-LABEL: recursive_phis:
+; GISEL-ASM:       ; %bb.0: ; %entry
+; GISEL-ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT:    v_and_b32_e32 v0, 1, v0
+; GISEL-ASM-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-ASM-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GISEL-ASM-NEXT:    v_lshrrev_b32_e64 v0, 6, s32
+; GISEL-ASM-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GISEL-ASM-NEXT:  ; %bb.1: ; %then
+; GISEL-ASM-NEXT:    v_and_b32_e32 v0, 0xffff, v1
+; GISEL-ASM-NEXT:  ; %bb.2: ; %finallyendcf.split
+; GISEL-ASM-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT:    s_mov_b64 s[8:9], src_private_base
+; GISEL-ASM-NEXT:    s_mov_b64 s[6:7], 0
+; GISEL-ASM-NEXT:    v_mov_b32_e32 v1, s9
+; GISEL-ASM-NEXT:    v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT:  .LBB7_3: ; %finally
+; GISEL-ASM-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GISEL-ASM-NEXT:    s_and_b64 s[8:9], exec, s[4:5]
+; GISEL-ASM-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GISEL-ASM-NEXT:    flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT:    s_waitcnt vmcnt(0)
+; GISEL-ASM-NEXT:    s_andn2_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT:    s_cbranch_execnz .LBB7_3
+; GISEL-ASM-NEXT:  ; %bb.4: ; %end
+; GISEL-ASM-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT:    s_waitcnt lgkmcnt(0)
+; GISEL-ASM-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %alloca = alloca i8, addrspace(5)
+  br i1 %cond, label %then, label %else
+
+then:
+  %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+  %ptr.or = and i32 %ptr.int, 65535 ; ensure only lower 16 bits can be set.
+  %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+  br label %finally
+
+else:
+  %other.phi = phi ptr addrspace(5) [%alloca, %entry], [%phi.ptr, %finally]
+  br label %finally
+
+finally:
+  %phi.ptr = phi ptr addrspace(5) [%kb.ptr, %then], [%other.phi, %else]
+  %x = addrspacecast ptr addrspace(5) %phi.ptr to ptr
+  store volatile i32 7, ptr %x
+  br i1 %cond, label %else, label %end
+
+end:
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll
new file mode 100644
index 00000000000000..265353675b349c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.addrspacecast.nonnull.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
+
+define void @local_to_flat(ptr addrspace(3) %ptr) {
+; ASM-LABEL: local_to_flat:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_shared_base
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) %ptr)
+  store volatile i32 7, ptr %1, align 4
+  ret void
+}
+
+define void @private_to_flat(ptr addrspace(5) %ptr) {
+; ASM-LABEL: private_to_flat:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT:    v_mov_b32_e32 v1, s5
+; ASM-NEXT:    v_mov_b32_e32 v2, 7
+; ASM-NEXT:    flat_store_dword v[0:1], v2
+; ASM-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %ptr)
+  store volatile i32 7, ptr %1, align 4
+  ret void
+}
+
+define void @flat_to_local(ptr %ptr) {
+; ASM-LABEL: flat_to_local:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    v_mov_b32_e32 v1, 7
+; ASM-NEXT:    ds_write_b32 v0, v1
+; ASM-NEXT:    s_waitcnt lgkmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %1 = call ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr %ptr)
+  store volatile i32 7, ptr addrspace(3) %1, align 4
+  ret void
+}
+
+define void @flat_to_private(ptr %ptr) {
+; ASM-LABEL: flat_to_private:
+; ASM:       ; %bb.0:
+; ASM-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT:    v_mov_b32_e32 v1, 7
+; ASM-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT:    s_waitcnt vmcnt(0)
+; ASM-NEXT:    s_setpc_b64 s[30:31]
+  %1 = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr %ptr)
+  store volatile i32 7, ptr addrspace(5) %1, align 4
+  ret void
+}
+
+declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3))
+declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5))
+declare ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr)
+declare ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr)
+
+declare <4 x ptr> @llvm.amdgcn.addrspacecast.nonnull.v4p0.v4p3(<4 x ptr addrspace(3)>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; DAGISEL-ASM: {{.*}}
+; GISEL-ASM: {{.*}}


        

