[llvm] r371009 - AMDGPU: Add intrinsics for address space identification
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 4 19:20:39 PDT 2019
Author: arsenm
Date: Wed Sep 4 19:20:39 2019
New Revision: 371009
URL: http://llvm.org/viewvc/llvm-project?rev=371009&view=rev
Log:
AMDGPU: Add intrinsics for address space identification
The library currently uses ptrtoint and directly checks the queue ptr
for this, which counts as a pointer capture.
Added:
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
llvm/trunk/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
Modified:
llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
Modified: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td Wed Sep 4 19:20:39 2019
@@ -1469,6 +1469,18 @@ def int_amdgcn_set_inactive :
LLVMMatchType<0>], // value for the inactive lanes to take
[IntrNoMem, IntrConvergent]>;
+// Return true if the given flat pointer points to a local memory address.
+def int_amdgcn_is_shared : GCCBuiltin<"__builtin_amdgcn_is_shared">,
+ Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
+// Return true if the given flat pointer points to a private memory address.
+def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">,
+ Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp Wed Sep 4 19:20:39 2019
@@ -173,6 +173,9 @@ static StringRef intrinsicToAttrName(Int
case Intrinsic::amdgcn_implicitarg_ptr:
return "amdgpu-implicitarg-ptr";
case Intrinsic::amdgcn_queue_ptr:
+ case Intrinsic::amdgcn_is_shared:
+ case Intrinsic::amdgcn_is_private:
+ // TODO: Does not require queue ptr on gfx9+
case Intrinsic::trap:
case Intrinsic::debugtrap:
IsQueuePtr = true;
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp Wed Sep 4 19:20:39 2019
@@ -1436,6 +1436,18 @@ bool AMDGPULegalizerInfo::legalizeImplic
return true;
}
+bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B,
+ unsigned AddrSpace) const {
+ B.setInstr(MI);
+ Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
+ auto Hi32 = B.buildExtract(LLT::scalar(32), MI.getOperand(2).getReg(), 32);
+ B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
+ MI.eraseFromParent();
+ return true;
+}
+
bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
@@ -1518,6 +1530,10 @@ bool AMDGPULegalizerInfo::legalizeIntrin
AMDGPUFunctionArgInfo::DISPATCH_ID);
case Intrinsic::amdgcn_fdiv_fast:
return legalizeFDIVFast(MI, MRI, B);
+ case Intrinsic::amdgcn_is_shared:
+ return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
+ case Intrinsic::amdgcn_is_private:
+ return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
default:
return true;
}
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h Wed Sep 4 19:20:39 2019
@@ -72,6 +72,8 @@ public:
bool legalizeImplicitArgPtr(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
+ bool legalizeIsAddrSpace(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B, unsigned AddrSpace) const;
bool legalizeIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder) const override;
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Wed Sep 4 19:20:39 2019
@@ -598,6 +598,8 @@ bool GCNTTIImpl::collectFlatAddressOpera
case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax:
+ case Intrinsic::amdgcn_is_shared:
+ case Intrinsic::amdgcn_is_private:
OpIndexes.push_back(0);
return true;
default:
@@ -607,7 +609,8 @@ bool GCNTTIImpl::collectFlatAddressOpera
bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const {
- switch (II->getIntrinsicID()) {
+ auto IntrID = II->getIntrinsicID();
+ switch (IntrID) {
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_fadd:
@@ -625,6 +628,18 @@ bool GCNTTIImpl::rewriteIntrinsicWithAdd
II->setCalledFunction(NewDecl);
return true;
}
+ case Intrinsic::amdgcn_is_shared:
+ case Intrinsic::amdgcn_is_private: {
+ unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
+ AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
+ unsigned NewAS = NewV->getType()->getPointerAddressSpace();
+ LLVMContext &Ctx = NewV->getType()->getContext();
+ ConstantInt *NewVal = (TrueAS == NewAS) ?
+ ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
+ II->replaceAllUsesWith(NewVal);
+ II->eraseFromParent();
+ return true;
+ }
default:
return false;
}
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Wed Sep 4 19:20:39 2019
@@ -6059,6 +6059,19 @@ SDValue SITargetLowering::LowerINTRINSIC
SIInstrInfo::MO_ABS32_LO);
return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
}
+ case Intrinsic::amdgcn_is_shared:
+ case Intrinsic::amdgcn_is_private: {
+ SDLoc SL(Op);
+ unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
+ AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
+ SDValue Aperture = getSegmentAperture(AS, SL, DAG);
+ SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32,
+ Op.getOperand(1));
+
+ SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
+ DAG.getConstant(1, SL, MVT::i32));
+ return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
+ }
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
Added: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll?rev=371009&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll Wed Sep 4 19:20:39 2019
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=CI %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
+
+; TODO: Merge with DAG test
+
+define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
+; CI-LABEL: is_private_vgpr:
+; CI: ; %bb.0:
+; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; CI-NEXT: v_mul_lo_u32 v2, 0, v0
+; CI-NEXT: v_mul_lo_u32 v1, 8, v1
+; CI-NEXT: v_mul_lo_u32 v3, 8, v0
+; CI-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT: v_mul_hi_u32 v0, 8, v0
+; CI-NEXT: v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT: v_add_i32_e32 v1, vcc, v1, v0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_add_i32_e32 v0, vcc, s0, v3
+; CI-NEXT: v_mov_b32_e32 v2, s1
+; CI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; CI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; CI-NEXT: s_load_dword s0, s[4:5], 0x11
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, s0, v1
+; CI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CI-NEXT: flat_store_dword v[0:1], v0
+; CI-NEXT: s_endpgm
+;
+; GFX9-LABEL: is_private_vgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
+; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
+; GFX9-NEXT: v_mul_lo_u32 v0, 8, v0
+; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9-NEXT: s_lshl_b32 s0, s0, 16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s0, v1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: s_endpgm
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+ %ptr = load volatile i8*, i8* addrspace(1)* %gep
+ %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+ %ext = zext i1 %val to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+define amdgpu_kernel void @is_private_sgpr(i8* %ptr) {
+; CI-LABEL: is_private_sgpr:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dword s0, s[4:5], 0x11
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_cmp_eq_u32 s1, s0
+; CI-NEXT: s_cbranch_scc0 BB1_2
+; CI-NEXT: ; %bb.1: ; %bb0
+; CI-NEXT: v_mov_b32_e32 v0, 0
+; CI-NEXT: flat_store_dword v[0:1], v0
+; CI-NEXT: BB1_2: ; %bb1
+; CI-NEXT: s_endpgm
+;
+; GFX9-LABEL: is_private_sgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9-NEXT: s_lshl_b32 s0, s0, 16
+; GFX9-NEXT: s_cmp_eq_u32 s1, s0
+; GFX9-NEXT: s_cbranch_scc0 BB1_2
+; GFX9-NEXT: ; %bb.1: ; %bb0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: BB1_2: ; %bb1
+; GFX9-NEXT: s_endpgm
+ %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+ br i1 %val, label %bb0, label %bb1
+
+bb0:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %bb1
+
+bb1:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Added: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll?rev=371009&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll Wed Sep 4 19:20:39 2019
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=CI %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
+
+; TODO: Merge with DAG test
+
+define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
+; CI-LABEL: is_local_vgpr:
+; CI: ; %bb.0:
+; CI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; CI-NEXT: v_mul_lo_u32 v2, 0, v0
+; CI-NEXT: v_mul_lo_u32 v1, 8, v1
+; CI-NEXT: v_mul_lo_u32 v3, 8, v0
+; CI-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT: v_mul_hi_u32 v0, 8, v0
+; CI-NEXT: v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT: v_add_i32_e32 v1, vcc, v1, v0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_add_i32_e32 v0, vcc, s0, v3
+; CI-NEXT: v_mov_b32_e32 v2, s1
+; CI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; CI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; CI-NEXT: s_load_dword s0, s[4:5], 0x10
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, s0, v1
+; CI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; CI-NEXT: flat_store_dword v[0:1], v0
+; CI-NEXT: s_endpgm
+;
+; GFX9-LABEL: is_local_vgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT: v_mul_lo_u32 v2, 0, v0
+; GFX9-NEXT: v_mul_lo_u32 v1, 8, v1
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT: v_mul_hi_u32 v3, 8, v0
+; GFX9-NEXT: v_mul_lo_u32 v0, 8, v0
+; GFX9-NEXT: v_add_u32_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT: s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-NEXT: s_lshl_b32 s0, s0, 16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, s0, v1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: s_endpgm
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+ %ptr = load volatile i8*, i8* addrspace(1)* %gep
+ %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+ %ext = zext i1 %val to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+define amdgpu_kernel void @is_local_sgpr(i8* %ptr) {
+; CI-LABEL: is_local_sgpr:
+; CI: ; %bb.0:
+; CI-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dword s0, s[4:5], 0x10
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_cmp_eq_u32 s1, s0
+; CI-NEXT: s_cbranch_scc0 BB1_2
+; CI-NEXT: ; %bb.1: ; %bb0
+; CI-NEXT: v_mov_b32_e32 v0, 0
+; CI-NEXT: flat_store_dword v[0:1], v0
+; CI-NEXT: BB1_2: ; %bb1
+; CI-NEXT: s_endpgm
+;
+; GFX9-LABEL: is_local_sgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-NEXT: s_lshl_b32 s0, s0, 16
+; GFX9-NEXT: s_cmp_eq_u32 s1, s0
+; GFX9-NEXT: s_cbranch_scc0 BB1_2
+; GFX9-NEXT: ; %bb.1: ; %bb0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: BB1_2: ; %bb1
+; GFX9-NEXT: s_endpgm
+ %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+ br i1 %val, label %bb0, label %bb1
+
+bb0:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %bb1
+
+bb1:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Modified: llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll?rev=371009&r1=371008&r2=371009&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll Wed Sep 4 19:20:39 2019
@@ -12,6 +12,9 @@ declare i8 addrspace(4)* @llvm.amdgcn.di
declare i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #2
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #2
+
; HSA: define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
%val = call i32 @llvm.amdgcn.workgroup.id.x()
@@ -231,6 +234,22 @@ define amdgpu_kernel void @use_flat_to_c
ret void
}
+; HSA: define amdgpu_kernel void @use_is_shared(i8* %ptr) #11 {
+define amdgpu_kernel void @use_is_shared(i8* %ptr) #1 {
+ %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+ %ext = zext i1 %is.shared to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+; HSA: define amdgpu_kernel void @use_is_private(i8* %ptr) #11 {
+define amdgpu_kernel void @use_is_private(i8* %ptr) #1 {
+ %is.private = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+ %ext = zext i1 %is.private to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind }
Added: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll?rev=371009&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll Wed Sep 4 19:20:39 2019
@@ -0,0 +1,50 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+
+; GCN-LABEL: {{^}}is_private_vgpr:
+; GCN-DAG: {{flat|global}}_load_dwordx2 v{{\[[0-9]+}}:[[PTR_HI:[0-9]+]]{{\]}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+; GCN: v_cmp_eq_u32_e32 vcc, [[APERTURE]], v[[PTR_HI]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+ %ptr = load volatile i8*, i8* addrspace(1)* %gep
+ %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+ %ext = zext i1 %val to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+; FIXME: setcc (zero_extend (setcc)), 1) not folded out, resulting in
+; select and vcc branch.
+
+; GCN-LABEL: {{^}}is_private_sgpr:
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+
+; CI-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x1{{$}}
+; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
+; GCN: s_cbranch_vccnz
+define amdgpu_kernel void @is_private_sgpr(i8* %ptr) {
+ %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+ br i1 %val, label %bb0, label %bb1
+
+bb0:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %bb1
+
+bb1:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Added: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll?rev=371009&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll Wed Sep 4 19:20:39 2019
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+
+; GCN-LABEL: {{^}}is_local_vgpr:
+; GCN-DAG: {{flat|global}}_load_dwordx2 v{{\[[0-9]+}}:[[PTR_HI:[0-9]+]]{{\]}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; GCN: v_cmp_eq_u32_e32 vcc, [[APERTURE]], v[[PTR_HI]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+ %ptr = load volatile i8*, i8* addrspace(1)* %gep
+ %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+ %ext = zext i1 %val to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+; FIXME: setcc (zero_extend (setcc)), 1) not folded out, resulting in
+; select and vcc branch.
+
+; GCN-LABEL: {{^}}is_local_sgpr:
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-DAG: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; CI-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x1{{$}}
+; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
+
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
+; GCN: s_cbranch_vccnz
+define amdgpu_kernel void @is_local_sgpr(i8* %ptr) {
+ %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+ br i1 %val, label %bb0, label %bb1
+
+bb0:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %bb1
+
+bb1:
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Added: llvm/trunk/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll?rev=371009&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll (added)
+++ llvm/trunk/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll Wed Sep 4 19:20:39 2019
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -instsimplify %s | FileCheck %s
+
+define amdgpu_kernel void @is_local_true(i8 addrspace(3)* %lptr) {
+; CHECK-LABEL: @is_local_true(
+; CHECK-NEXT: store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT: ret void
+;
+ %cast = addrspacecast i8 addrspace(3)* %lptr to i8*
+ %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+ %ext = zext i1 %is.shared to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+define amdgpu_kernel void @is_local_false(i8 addrspace(1)* %gptr) {
+; CHECK-LABEL: @is_local_false(
+; CHECK-NEXT: store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT: ret void
+;
+ %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
+ %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+ %ext = zext i1 %is.shared to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+define void @is_private_true(i8 addrspace(5)* %lptr) {
+; CHECK-LABEL: @is_private_true(
+; CHECK-NEXT: store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT: ret void
+;
+ %cast = addrspacecast i8 addrspace(5)* %lptr to i8*
+ %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+ %ext = zext i1 %is.private to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+define void @is_private_false(i8 addrspace(1)* %gptr) {
+; CHECK-LABEL: @is_private_false(
+; CHECK-NEXT: store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT: ret void
+;
+ %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
+ %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+ %ext = zext i1 %is.private to i32
+ store i32 %ext, i32 addrspace(1)* undef
+ ret void
+}
+
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
More information about the llvm-commits
mailing list