[llvm] r308226 - AMDGPU: Annotate features from x work item/group IDs.
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 17 15:35:50 PDT 2017
Author: arsenm
Date: Mon Jul 17 15:35:50 2017
New Revision: 308226
URL: http://llvm.org/viewvc/llvm-project?rev=308226&view=rev
Log:
AMDGPU: Annotate features from x work item/group IDs.
This wasn't necessary before, since the x IDs are always
enabled for kernels, but it is needed once they have to be
forwarded to a callable function.
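
As a rough sketch of the intended effect (the function names below are
hypothetical, not taken from the test): running the pass, e.g. with
opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-annotate-kernel-features,
over IR such as

declare i32 @llvm.amdgcn.workitem.id.x()

; A callable function reading the x work item ID should now get the
; "amdgpu-work-item-id-x" attribute, telling callers that v0 must be
; forwarded to it.
define void @callable_use_id_x() {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  store volatile i32 %id, i32 addrspace(1)* undef
  ret void
}

; A kernel reading the x ID needs no attribute, since the x IDs are
; always enabled for entry functions.
define amdgpu_kernel void @kernel_use_id_x() {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  store volatile i32 %id, i32 addrspace(1)* undef
  ret void
}

annotates @callable_use_id_x with "amdgpu-work-item-id-x" and leaves
@kernel_use_id_x without it.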
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
    llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h
    llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp?rev=308226&r1=308225&r2=308226&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp Mon Jul 17 15:35:50 2017
@@ -129,8 +129,16 @@ bool AMDGPUAnnotateKernelFeatures::visit
 //
 // TODO: We should not add the attributes if the known compile time workgroup
 // size is 1 for y/z.
-static StringRef intrinsicToAttrName(Intrinsic::ID ID, bool &IsQueuePtr) {
+static StringRef intrinsicToAttrName(Intrinsic::ID ID,
+                                     bool &NonKernelOnly,
+                                     bool &IsQueuePtr) {
   switch (ID) {
+  case Intrinsic::amdgcn_workitem_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-item-id-x";
+  case Intrinsic::amdgcn_workgroup_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-group-id-x";
   case Intrinsic::amdgcn_workitem_id_y:
   case Intrinsic::r600_read_tidig_y:
     return "amdgpu-work-item-id-y";
@@ -172,12 +180,12 @@ static bool handleAttr(Function &Parent,
 
 static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                    bool &NeedQueuePtr) {
-
+  // X ids unnecessarily propagated to kernels.
   static const StringRef AttrNames[] = {
-    // .x omitted
+    { "amdgpu-work-item-id-x" },
     { "amdgpu-work-item-id-y" },
     { "amdgpu-work-item-id-z" },
-    // .x omitted
+    { "amdgpu-work-group-id-x" },
     { "amdgpu-work-group-id-y" },
     { "amdgpu-work-group-id-z" },
     { "amdgpu-dispatch-ptr" },
@@ -198,6 +206,7 @@ bool AMDGPUAnnotateKernelFeatures::addFe
 
   bool Changed = false;
   bool NeedQueuePtr = false;
+  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());
 
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
@@ -214,8 +223,10 @@ bool AMDGPUAnnotateKernelFeatures::addFe
         copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
         Changed = true;
       } else {
-        StringRef AttrName = intrinsicToAttrName(IID, NeedQueuePtr);
-        if (!AttrName.empty()) {
+        bool NonKernelOnly = false;
+        StringRef AttrName = intrinsicToAttrName(IID,
+                                                 NonKernelOnly, NeedQueuePtr);
+        if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
           F.addFnAttr(AttrName);
           Changed = true;
         }
Modified: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp?rev=308226&r1=308225&r2=308226&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp Mon Jul 17 15:35:50 2017
@@ -42,6 +42,9 @@ SIMachineFunctionInfo::SIMachineFunction
     WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
     WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
     PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
+    WorkItemIDXVGPR(AMDGPU::NoRegister),
+    WorkItemIDYVGPR(AMDGPU::NoRegister),
+    WorkItemIDZVGPR(AMDGPU::NoRegister),
     PSInputAddr(0),
     PSInputEnable(0),
     ReturnsVoid(true),
@@ -87,7 +90,6 @@ SIMachineFunctionInfo::SIMachineFunction
     ScratchWaveOffsetReg = AMDGPU::SGPR4;
     FrameOffsetReg = AMDGPU::SGPR5;
     StackPtrOffsetReg = AMDGPU::SGPR32;
-    return;
   }
 
   CallingConv::ID CC = F->getCallingConv();
@@ -101,17 +103,25 @@ SIMachineFunctionInfo::SIMachineFunction
 
   if (ST.debuggerEmitPrologue()) {
     // Enable everything.
+    WorkGroupIDX = true;
     WorkGroupIDY = true;
     WorkGroupIDZ = true;
+    WorkItemIDX = true;
     WorkItemIDY = true;
     WorkItemIDZ = true;
   } else {
+    if (F->hasFnAttribute("amdgpu-work-group-id-x"))
+      WorkGroupIDX = true;
+
     if (F->hasFnAttribute("amdgpu-work-group-id-y"))
       WorkGroupIDY = true;
 
     if (F->hasFnAttribute("amdgpu-work-group-id-z"))
       WorkGroupIDZ = true;
 
+    if (F->hasFnAttribute("amdgpu-work-item-id-x"))
+      WorkItemIDX = true;
+
    if (F->hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;
 
@@ -119,22 +129,24 @@ SIMachineFunctionInfo::SIMachineFunction
     WorkItemIDZ = true;
   }
 
-  // X, XY, and XYZ are the only supported combinations, so make sure Y is
-  // enabled if Z is.
-  if (WorkItemIDZ)
-    WorkItemIDY = true;
-
   const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
   bool MaySpill = ST.isVGPRSpillingEnabled(*F);
   bool HasStackObjects = FrameInfo.hasStackObjects() || FrameInfo.hasCalls();
 
-  if (HasStackObjects || MaySpill) {
-    PrivateSegmentWaveByteOffset = true;
+  if (isEntryFunction()) {
+    // X, XY, and XYZ are the only supported combinations, so make sure Y is
+    // enabled if Z is.
+    if (WorkItemIDZ)
+      WorkItemIDY = true;
+
+    if (HasStackObjects || MaySpill) {
+      PrivateSegmentWaveByteOffset = true;
 
-    // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
-    if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
-        (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
-      PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
+      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
+          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
+        PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+    }
   }
 
   if (ST.isAmdCodeObjectV2(MF)) {
@@ -160,7 +172,8 @@ SIMachineFunctionInfo::SIMachineFunction
 
   // We don't need to worry about accessing spills with flat instructions.
   // TODO: On VI where we must use flat for global, we should be able to omit
   // this if it is never used for generic access.
-  if (HasStackObjects && ST.hasFlatAddressSpace() && ST.isAmdHsaOS())
+  if (HasStackObjects && ST.hasFlatAddressSpace() && ST.isAmdHsaOS() &&
+      isEntryFunction())
     FlatScratchInit = true;
 }
Modified: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h?rev=308226&r1=308225&r2=308226&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h Mon Jul 17 15:35:50 2017
@@ -119,6 +119,11 @@ class SIMachineFunctionInfo final : publ
   unsigned WorkGroupInfoSystemSGPR;
   unsigned PrivateSegmentWaveByteOffsetSystemSGPR;
 
+  // VGPR inputs. These are always v0, v1 and v2 for entry functions.
+  unsigned WorkItemIDXVGPR;
+  unsigned WorkItemIDYVGPR;
+  unsigned WorkItemIDZVGPR;
+
   // Graphics info.
   unsigned PSInputAddr;
   unsigned PSInputEnable;
Modified: llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll?rev=308226&r1=308225&r2=308226&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll Mon Jul 17 15:35:50 2017
@@ -1,8 +1,10 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-annotate-kernel-features %s | FileCheck -check-prefix=HSA %s
 
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
 declare i32 @llvm.amdgcn.workgroup.id.y() #0
 declare i32 @llvm.amdgcn.workgroup.id.z() #0
 
+declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 declare i32 @llvm.amdgcn.workitem.id.z() #0
 
@@ -12,56 +14,70 @@ declare i8 addrspace(2)* @llvm.amdgcn.ke
 declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #0
 declare i64 @llvm.amdgcn.dispatch.id() #0
 
-; HSA: define void @use_workitem_id_y() #1 {
+; HSA: define void @use_workitem_id_x() #1 {
+define void @use_workitem_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workitem.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workitem_id_y() #2 {
 define void @use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workitem_id_z() #2 {
+; HSA: define void @use_workitem_id_z() #3 {
 define void @use_workitem_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y() #3 {
+; HSA: define void @use_workgroup_id_x() #4 {
+define void @use_workgroup_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workgroup.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workgroup_id_y() #5 {
 define void @use_workgroup_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_z() #4 {
+; HSA: define void @use_workgroup_id_z() #6 {
 define void @use_workgroup_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_ptr() #5 {
+; HSA: define void @use_dispatch_ptr() #7 {
 define void @use_dispatch_ptr() #1 {
   %dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
   store volatile i8 addrspace(2)* %dispatch.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_queue_ptr() #6 {
+; HSA: define void @use_queue_ptr() #8 {
 define void @use_queue_ptr() #1 {
   %queue.ptr = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
   store volatile i8 addrspace(2)* %queue.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_id() #7 {
+; HSA: define void @use_dispatch_id() #9 {
 define void @use_dispatch_id() #1 {
   %val = call i64 @llvm.amdgcn.dispatch.id()
   store volatile i64 %val, i64 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y_workgroup_id_z() #8 {
+; HSA: define void @use_workgroup_id_y_workgroup_id_z() #10 {
 define void @use_workgroup_id_y_workgroup_id_z() #1 {
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
@@ -70,67 +86,91 @@ define void @use_workgroup_id_y_workgrou
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_y() #1 {
+; HSA: define void @func_indirect_use_workitem_id_x() #1 {
+define void @func_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workitem_id_x() #1 {
+define void @kernel_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_y() #2 {
 define void @func_indirect_use_workitem_id_y() #1 {
   call void @use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_z() #2 {
+; HSA: define void @func_indirect_use_workitem_id_z() #3 {
 define void @func_indirect_use_workitem_id_z() #1 {
   call void @use_workitem_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_use_workgroup_id_x() #4 {
+define void @func_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workgroup_id_x() #4 {
+define void @kernel_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_use_workgroup_id_y() #1 {
   call void @use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_z() #4 {
+; HSA: define void @func_indirect_use_workgroup_id_z() #6 {
 define void @func_indirect_use_workgroup_id_z() #1 {
   call void @use_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_indirect_use_workgroup_id_y() #1 {
   call void @func_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @indirect_x2_use_workgroup_id_y() #3 {
+; HSA: define void @indirect_x2_use_workgroup_id_y() #5 {
 define void @indirect_x2_use_workgroup_id_y() #1 {
   call void @func_indirect_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_ptr() #5 {
+; HSA: define void @func_indirect_use_dispatch_ptr() #7 {
 define void @func_indirect_use_dispatch_ptr() #1 {
   call void @use_dispatch_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_queue_ptr() #6 {
+; HSA: define void @func_indirect_use_queue_ptr() #8 {
 define void @func_indirect_use_queue_ptr() #1 {
   call void @use_queue_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_id() #7 {
+; HSA: define void @func_indirect_use_dispatch_id() #9 {
 define void @func_indirect_use_dispatch_id() #1 {
   call void @use_dispatch_id()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #9 {
+; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #11 {
 define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #1 {
   call void @func_indirect_use_workgroup_id_y_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @recursive_use_workitem_id_y() #1 {
+; HSA: define void @recursive_use_workitem_id_y() #2 {
 define void @recursive_use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
@@ -138,27 +178,27 @@ define void @recursive_use_workitem_id_y
   ret void
 }
 
-; HSA: define void @call_recursive_use_workitem_id_y() #1 {
+; HSA: define void @call_recursive_use_workitem_id_y() #2 {
 define void @call_recursive_use_workitem_id_y() #1 {
   call void @recursive_use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #6 {
+; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #8 {
 define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #10 {
+; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #12 {
 define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #11 {
+; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #13 {
 define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
@@ -166,45 +206,45 @@ define void @use_group_to_flat_addrspace
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast() #8 {
 define void @indirect_use_group_to_flat_addrspacecast() #1 {
   call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #9 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #11 {
 define void @indirect_use_group_to_flat_addrspacecast_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #8 {
 define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @use_kernarg_segment_ptr() #12 {
+; HSA: define void @use_kernarg_segment_ptr() #14 {
 define void @use_kernarg_segment_ptr() #1 {
   %kernarg.segment.ptr = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
   store volatile i8 addrspace(2)* %kernarg.segment.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @func_indirect_use_kernarg_segment_ptr() #12 {
+; HSA: define void @func_indirect_use_kernarg_segment_ptr() #14 {
 define void @func_indirect_use_kernarg_segment_ptr() #1 {
   call void @use_kernarg_segment_ptr()
   ret void
 }
 
-; HSA: define void @use_implicitarg_ptr() #12 {
+; HSA: define void @use_implicitarg_ptr() #14 {
 define void @use_implicitarg_ptr() #1 {
   %implicitarg.ptr = call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
   store volatile i8 addrspace(2)* %implicitarg.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @func_indirect_use_implicitarg_ptr() #12 {
+; HSA: define void @func_indirect_use_implicitarg_ptr() #14 {
 define void @func_indirect_use_implicitarg_ptr() #1 {
   call void @use_implicitarg_ptr()
   ret void
@@ -215,15 +255,17 @@ attributes #1 = { nounwind "target-cpu"=
 attributes #2 = { nounwind "target-cpu"="gfx900" }
 
 ; HSA: attributes #0 = { nounwind readnone speculatable }
-; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
-; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
-; HSA: attributes #3 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
-; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #5 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
-; HSA: attributes #6 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
-; HSA: attributes #7 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
-; HSA: attributes #8 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #9 = { nounwind "target-cpu"="fiji" }
-; HSA: attributes #10 = { nounwind "target-cpu"="gfx900" }
-; HSA: attributes #11 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
-; HSA: attributes #12 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }
+; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-x" "target-cpu"="fiji" }
+; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
+; HSA: attributes #3 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
+; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-x" "target-cpu"="fiji" }
+; HSA: attributes #5 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
+; HSA: attributes #6 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #7 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
+; HSA: attributes #8 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
+; HSA: attributes #9 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
+; HSA: attributes #10 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #11 = { nounwind "target-cpu"="fiji" }
+; HSA: attributes #12 = { nounwind "target-cpu"="gfx900" }
+; HSA: attributes #13 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
+; HSA: attributes #14 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }