[llvm] [AMDGPU] Add new llvm.amdgcn.subgroup.shuffle intrinsic (PR #167372)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 10 14:52:08 PST 2025
https://github.com/saxlungs updated https://github.com/llvm/llvm-project/pull/167372
From 4467218dac14fbc34e00bb3a3fddca8369e6ce1d Mon Sep 17 00:00:00 2001
From: Domenic Nutile <domenic.nutile at gmail.com>
Date: Mon, 10 Nov 2025 14:11:23 -0500
Subject: [PATCH 1/2] [AMDGPU] Add new llvm.amdgcn.subgroup.shuffle intrinsic
This intrinsic will be useful for implementing the OpGroupNonUniformShuffle operation in the SPIR-V reference
Signed-off-by: Domenic Nutile <domenic.nutile at gmail.com>
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 9 ++
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 4 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 89 +++++++++++++++++++
.../AMDGPU/llvm.amdgcn.subgroup.shuffle.ll | 76 ++++++++++++++++
4 files changed, 178 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 8e35109061792..a41723e1e9db8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2718,6 +2718,15 @@ def int_amdgcn_call_whole_wave:
llvm_vararg_ty], // The arguments to the callee.
[IntrConvergent]>;
+// <result>
+// llvm.amdgcn.subgroup.shuffle <value> <id>
+// value and result can be any scalar of floating-point, integer,
+// or Boolean types, but must be the same type
+def int_amdgcn_subgroup_shuffle :
+ Intrinsic<[llvm_any_ty], // return types
+ [LLVMMatchType<0>, llvm_i32_ty], // arg types
+ [IntrConvergent, IntrNoMem, IntrNoFree, IntrWillReturn, IntrNoCallback]>; // flags
+
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index da4bd878b8853..7c38f474cc38d 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1868,6 +1868,10 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool requiresWaitsBeforeSystemScopeStores() const {
return RequiresWaitsBeforeSystemScopeStores;
}
+
+ bool supportsWaveWideBPermute() const {
+ return ((getGeneration() == AMDGPUSubtarget::GFX12) || isWave32());
+ }
};
class GCNUserSGPRUsageInfo {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8bb28084159e8..b6dedc0c1de89 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7269,6 +7269,93 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
return DAG.getBitcast(VT, UnrolledLaneOp);
}
+// Right now, only subgroup.shuffle implemented, but other
+// future subgroup ops can use this function too
+static SDValue lowerSubgroupOp(const SITargetLowering &TLI, SDNode *N,
+ SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+ unsigned ValSize = VT.getSizeInBits();
+ unsigned IID = N->getConstantOperandVal(0);
+ SDLoc SL(N);
+
+ SDValue Value = N->getOperand(1);
+ SDValue Index = N->getOperand(2);
+
+ // ds_bpermute requires index to be multiplied by 4
+ SDValue ShiftAmount = DAG.getTargetConstant(2, SL, MVT::i32);
+ SDValue ShiftedIndex = DAG.getNode(ISD::SHL, SL, Index.getValueType(), Index,
+ ShiftAmount);
+
+ // Intrinsics will require i32 to operate on
+ SDValue Value32 = Value;
+ if ((ValSize != 32) || (VT.isFloatingPoint()))
+ Value32 = DAG.getBitcast(MVT::i32, Value);
+
+ auto MakeIntrinsic = [&DAG, &SL](unsigned IID, MVT RetVT,
+ SmallVector<SDValue> IntrinArgs) -> SDValue {
+ SmallVector<SDValue> Operands(1);
+ Operands[0] = DAG.getTargetConstant(IID, SL, MVT::i32);
+ Operands.append(IntrinArgs);
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, RetVT, Operands);
+ };
+
+ switch (IID) {
+ case Intrinsic::amdgcn_subgroup_shuffle:
+ if (TLI.getSubtarget()->supportsWaveWideBPermute()) {
+ // If we can bpermute across the whole wave, then just do that
+ SDValue BPermute = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+ MVT::i32, {ShiftedIndex, Value32});
+ return DAG.getBitcast(VT, BPermute);
+ } else {
+ assert(TLI.getSubtarget()->isWave64());
+
+ // Otherwise, we need to make use of whole wave mode
+ SDValue PoisonVal = DAG.getPOISON(Value32->getValueType(0));
+ SDValue PoisonIndex = DAG.getPOISON(ShiftedIndex->getValueType(0));
+
+ // Set inactive lanes to poison
+ SDValue WWMValue = MakeIntrinsic(Intrinsic::amdgcn_set_inactive,
+ MVT::i32, {Value32, PoisonVal});
+ SDValue WWMIndex = MakeIntrinsic(Intrinsic::amdgcn_set_inactive,
+ MVT::i32, {ShiftedIndex, PoisonIndex});
+
+ SDValue Swapped = MakeIntrinsic(Intrinsic::amdgcn_permlane64,
+ MVT::i32, {WWMValue});
+
+ // Get permutation of each half, then we'll select which one to use
+ SDValue BPermSameHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+ MVT::i32, {WWMIndex, WWMValue});
+ SDValue BPermOtherHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+ MVT::i32, {WWMIndex, Swapped});
+ SDValue BPermOtherHalfWWM = MakeIntrinsic(Intrinsic::amdgcn_wwm,
+ MVT::i32, {BPermOtherHalf});
+
+ // Select which side to take the permute from
+ SDValue ThreadIDMask = DAG.getTargetConstant(UINT32_MAX, SL, MVT::i32);
+ SDValue ThreadIDLo = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_lo, MVT::i32,
+ {ThreadIDMask,
+ DAG.getTargetConstant(0, SL,
+ MVT::i32)});
+ SDValue ThreadID = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_hi, MVT::i32,
+ {ThreadIDMask, ThreadIDLo});
+
+ SDValue SameOrOtherHalf = DAG.getNode(ISD::AND, SL, MVT::i32,
+ DAG.getNode(ISD::XOR, SL, MVT::i32,
+ ThreadID, Index),
+ DAG.getTargetConstant(32, SL,
+ MVT::i32));
+ SDValue UseSameHalf = DAG.getSetCC(SL, MVT::i1, SameOrOtherHalf,
+ DAG.getConstant(0, SL, MVT::i32),
+ ISD::SETEQ);
+ SDValue Result = DAG.getSelect(SL, MVT::i32, UseSameHalf,
+ BPermSameHalf, BPermOtherHalfWWM);
+ return DAG.getBitcast(VT, Result);
+ }
+ default:
+ return SDValue();
+ }
+}
+
void SITargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
@@ -10176,6 +10263,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Poisons.push_back(DAG.getPOISON(ValTy));
return DAG.getMergeValues(Poisons, SDLoc(Op));
}
+ case Intrinsic::amdgcn_subgroup_shuffle:
+ return lowerSubgroupOp(*this, Op.getNode(), DAG);
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
new file mode 100644
index 0000000000000..e31894c6cfa18
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11-64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-64 %s
+
+declare float @llvm.amdgcn.subgroup.shuffle.float(float, i32)
+
+define float @test_subgroup_shuffle_scalar(float %val, i32 %idx) {
+; GFX11-LABEL: test_subgroup_shuffle_scalar:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_subgroup_shuffle_scalar:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX12-NEXT: s_wait_dscnt 0x0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-64-LABEL: test_subgroup_shuffle_scalar:
+; GFX11-64: ; %bb.0: ; %entry
+; GFX11-64-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-64-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX11-64-NEXT: scratch_store_b32 off, v2, s32 ; 4-byte Folded Spill
+; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-64-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GFX11-64-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $exec
+; GFX11-64-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $exec
+; GFX11-64-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX11-64-NEXT: v_permlane64_b32 v2, v0
+; GFX11-64-NEXT: ds_bpermute_b32 v2, v3, v2
+; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-64-NEXT: v_mbcnt_lo_u32_b32 v4, -1, 0
+; GFX11-64-NEXT: ds_bpermute_b32 v0, v3, v0
+; GFX11-64-NEXT: v_mbcnt_hi_u32_b32 v3, -1, v4
+; GFX11-64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-64-NEXT: v_xor_b32_e32 v1, v3, v1
+; GFX11-64-NEXT: s_waitcnt lgkmcnt(1)
+; GFX11-64-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-64-NEXT: v_and_b32_e32 v1, 32, v1
+; GFX11-64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-64-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX11-64-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-64-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX11-64-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX11-64-NEXT: scratch_load_b32 v2, off, s32 ; 4-byte Folded Reload
+; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-64-NEXT: s_waitcnt vmcnt(0)
+; GFX11-64-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-64-LABEL: test_subgroup_shuffle_scalar:
+; GFX12-64: ; %bb.0: ; %entry
+; GFX12-64-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-64-NEXT: s_wait_expcnt 0x0
+; GFX12-64-NEXT: s_wait_samplecnt 0x0
+; GFX12-64-NEXT: s_wait_bvhcnt 0x0
+; GFX12-64-NEXT: s_wait_kmcnt 0x0
+; GFX12-64-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-64-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX12-64-NEXT: s_wait_dscnt 0x0
+; GFX12-64-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %0 = tail call float @llvm.amdgcn.subgroup.shuffle(float %val, i32 %idx)
+ ret float %0
+}
From eadfd8b8954a491b7df4b4318641d06e426cbb8c Mon Sep 17 00:00:00 2001
From: Domenic Nutile <domenic.nutile at gmail.com>
Date: Mon, 10 Nov 2025 17:51:57 -0500
Subject: [PATCH 2/2] PR feedback
Update test prefixes, refactor lower function for just Subgroup Shuffle, clang format
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 110 +++++++--------
.../AMDGPU/llvm.amdgcn.subgroup.shuffle.ll | 126 +++++++++---------
2 files changed, 113 insertions(+), 123 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b6dedc0c1de89..6d867f0eaf9ae 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7269,13 +7269,10 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
return DAG.getBitcast(VT, UnrolledLaneOp);
}
-// Right now, only subgroup.shuffle implemented, but other
-// future subgroup ops can use this function too
-static SDValue lowerSubgroupOp(const SITargetLowering &TLI, SDNode *N,
- SelectionDAG &DAG) {
+static SDValue lowerSubgroupShuffle(const SITargetLowering &TLI, SDNode *N,
+ SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
unsigned ValSize = VT.getSizeInBits();
- unsigned IID = N->getConstantOperandVal(0);
SDLoc SL(N);
SDValue Value = N->getOperand(1);
@@ -7299,60 +7296,53 @@ static SDValue lowerSubgroupOp(const SITargetLowering &TLI, SDNode *N,
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, RetVT, Operands);
};
- switch (IID) {
- case Intrinsic::amdgcn_subgroup_shuffle:
- if (TLI.getSubtarget()->supportsWaveWideBPermute()) {
- // If we can bpermute across the whole wave, then just do that
- SDValue BPermute = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
- MVT::i32, {ShiftedIndex, Value32});
- return DAG.getBitcast(VT, BPermute);
- } else {
- assert(TLI.getSubtarget()->isWave64());
-
- // Otherwise, we need to make use of whole wave mode
- SDValue PoisonVal = DAG.getPOISON(Value32->getValueType(0));
- SDValue PoisonIndex = DAG.getPOISON(ShiftedIndex->getValueType(0));
-
- // Set inactive lanes to poison
- SDValue WWMValue = MakeIntrinsic(Intrinsic::amdgcn_set_inactive,
- MVT::i32, {Value32, PoisonVal});
- SDValue WWMIndex = MakeIntrinsic(Intrinsic::amdgcn_set_inactive,
- MVT::i32, {ShiftedIndex, PoisonIndex});
-
- SDValue Swapped = MakeIntrinsic(Intrinsic::amdgcn_permlane64,
- MVT::i32, {WWMValue});
-
- // Get permutation of each half, then we'll select which one to use
- SDValue BPermSameHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
- MVT::i32, {WWMIndex, WWMValue});
- SDValue BPermOtherHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
- MVT::i32, {WWMIndex, Swapped});
- SDValue BPermOtherHalfWWM = MakeIntrinsic(Intrinsic::amdgcn_wwm,
- MVT::i32, {BPermOtherHalf});
-
- // Select which side to take the permute from
- SDValue ThreadIDMask = DAG.getTargetConstant(UINT32_MAX, SL, MVT::i32);
- SDValue ThreadIDLo = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_lo, MVT::i32,
- {ThreadIDMask,
- DAG.getTargetConstant(0, SL,
- MVT::i32)});
- SDValue ThreadID = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_hi, MVT::i32,
- {ThreadIDMask, ThreadIDLo});
-
- SDValue SameOrOtherHalf = DAG.getNode(ISD::AND, SL, MVT::i32,
- DAG.getNode(ISD::XOR, SL, MVT::i32,
- ThreadID, Index),
- DAG.getTargetConstant(32, SL,
- MVT::i32));
- SDValue UseSameHalf = DAG.getSetCC(SL, MVT::i1, SameOrOtherHalf,
- DAG.getConstant(0, SL, MVT::i32),
- ISD::SETEQ);
- SDValue Result = DAG.getSelect(SL, MVT::i32, UseSameHalf,
- BPermSameHalf, BPermOtherHalfWWM);
- return DAG.getBitcast(VT, Result);
- }
- default:
- return SDValue();
+ if (TLI.getSubtarget()->supportsWaveWideBPermute()) {
+ // If we can bpermute across the whole wave, then just do that
+ SDValue BPermute = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute, MVT::i32,
+ {ShiftedIndex, Value32});
+ return DAG.getBitcast(VT, BPermute);
+ } else {
+ assert(TLI.getSubtarget()->isWave64());
+
+ // Otherwise, we need to make use of whole wave mode
+ SDValue PoisonVal = DAG.getPOISON(Value32->getValueType(0));
+ SDValue PoisonIndex = DAG.getPOISON(ShiftedIndex->getValueType(0));
+
+ // Set inactive lanes to poison
+ SDValue WWMValue = MakeIntrinsic(Intrinsic::amdgcn_set_inactive, MVT::i32,
+ {Value32, PoisonVal});
+ SDValue WWMIndex = MakeIntrinsic(Intrinsic::amdgcn_set_inactive, MVT::i32,
+ {ShiftedIndex, PoisonIndex});
+
+ SDValue Swapped =
+ MakeIntrinsic(Intrinsic::amdgcn_permlane64, MVT::i32, {WWMValue});
+
+ // Get permutation of each half, then we'll select which one to use
+ SDValue BPermSameHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+ MVT::i32, {WWMIndex, WWMValue});
+ SDValue BPermOtherHalf = MakeIntrinsic(Intrinsic::amdgcn_ds_bpermute,
+ MVT::i32, {WWMIndex, Swapped});
+ SDValue BPermOtherHalfWWM =
+ MakeIntrinsic(Intrinsic::amdgcn_wwm, MVT::i32, {BPermOtherHalf});
+
+ // Select which side to take the permute from
+ SDValue ThreadIDMask = DAG.getTargetConstant(UINT32_MAX, SL, MVT::i32);
+ SDValue ThreadIDLo =
+ MakeIntrinsic(Intrinsic::amdgcn_mbcnt_lo, MVT::i32,
+ {ThreadIDMask, DAG.getTargetConstant(0, SL, MVT::i32)});
+ SDValue ThreadID = MakeIntrinsic(Intrinsic::amdgcn_mbcnt_hi, MVT::i32,
+ {ThreadIDMask, ThreadIDLo});
+
+ SDValue SameOrOtherHalf =
+ DAG.getNode(ISD::AND, SL, MVT::i32,
+ DAG.getNode(ISD::XOR, SL, MVT::i32, ThreadID, Index),
+ DAG.getTargetConstant(32, SL, MVT::i32));
+ SDValue UseSameHalf =
+ DAG.getSetCC(SL, MVT::i1, SameOrOtherHalf,
+ DAG.getConstant(0, SL, MVT::i32), ISD::SETEQ);
+ SDValue Result = DAG.getSelect(SL, MVT::i32, UseSameHalf, BPermSameHalf,
+ BPermOtherHalfWWM);
+ return DAG.getBitcast(VT, Result);
}
}
@@ -10264,7 +10254,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getMergeValues(Poisons, SDLoc(Op));
}
case Intrinsic::amdgcn_subgroup_shuffle:
- return lowerSubgroupOp(*this, Op.getNode(), DAG);
+ return lowerSubgroupShuffle(*this, Op.getNode(), DAG);
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
index e31894c6cfa18..4572c0ff9a2f1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.subgroup.shuffle.ll
@@ -1,75 +1,75 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11-W32 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-W32 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11-64 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-W64 %s
declare float @llvm.amdgcn.subgroup.shuffle.float(float, i32)
define float @test_subgroup_shuffle_scalar(float %val, i32 %idx) {
-; GFX11-LABEL: test_subgroup_shuffle_scalar:
-; GFX11: ; %bb.0: ; %entry
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v1, 2, v1
-; GFX11-NEXT: ds_bpermute_b32 v0, v1, v0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-W32-LABEL: test_subgroup_shuffle_scalar:
+; GFX11-W32: ; %bb.0: ; %entry
+; GFX11-W32-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-W32-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-W32-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX11-W32-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-W32-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: test_subgroup_shuffle_scalar:
-; GFX12: ; %bb.0: ; %entry
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_lshlrev_b32_e32 v1, 2, v1
-; GFX12-NEXT: ds_bpermute_b32 v0, v1, v0
-; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX12-W32-LABEL: test_subgroup_shuffle_scalar:
+; GFX12-W32: ; %bb.0: ; %entry
+; GFX12-W32-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-W32-NEXT: s_wait_expcnt 0x0
+; GFX12-W32-NEXT: s_wait_samplecnt 0x0
+; GFX12-W32-NEXT: s_wait_bvhcnt 0x0
+; GFX12-W32-NEXT: s_wait_kmcnt 0x0
+; GFX12-W32-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-W32-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX12-W32-NEXT: s_wait_dscnt 0x0
+; GFX12-W32-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11-64-LABEL: test_subgroup_shuffle_scalar:
-; GFX11-64: ; %bb.0: ; %entry
-; GFX11-64-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-64-NEXT: s_xor_saveexec_b64 s[0:1], -1
-; GFX11-64-NEXT: scratch_store_b32 off, v2, s32 ; 4-byte Folded Spill
-; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
-; GFX11-64-NEXT: v_lshlrev_b32_e32 v3, 2, v1
-; GFX11-64-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $exec
-; GFX11-64-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $exec
-; GFX11-64-NEXT: s_or_saveexec_b64 s[0:1], -1
-; GFX11-64-NEXT: v_permlane64_b32 v2, v0
-; GFX11-64-NEXT: ds_bpermute_b32 v2, v3, v2
-; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
-; GFX11-64-NEXT: v_mbcnt_lo_u32_b32 v4, -1, 0
-; GFX11-64-NEXT: ds_bpermute_b32 v0, v3, v0
-; GFX11-64-NEXT: v_mbcnt_hi_u32_b32 v3, -1, v4
-; GFX11-64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-64-NEXT: v_xor_b32_e32 v1, v3, v1
-; GFX11-64-NEXT: s_waitcnt lgkmcnt(1)
-; GFX11-64-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-64-NEXT: v_and_b32_e32 v1, 32, v1
-; GFX11-64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-64-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; GFX11-64-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-64-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX11-64-NEXT: s_xor_saveexec_b64 s[0:1], -1
-; GFX11-64-NEXT: scratch_load_b32 v2, off, s32 ; 4-byte Folded Reload
-; GFX11-64-NEXT: s_mov_b64 exec, s[0:1]
-; GFX11-64-NEXT: s_waitcnt vmcnt(0)
-; GFX11-64-NEXT: s_setpc_b64 s[30:31]
+; GFX11-W64-LABEL: test_subgroup_shuffle_scalar:
+; GFX11-W64: ; %bb.0: ; %entry
+; GFX11-W64-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-W64-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT: scratch_store_b32 off, v2, s32 ; 4-byte Folded Spill
+; GFX11-W64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT: v_lshlrev_b32_e32 v3, 2, v1
+; GFX11-W64-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $exec
+; GFX11-W64-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $exec
+; GFX11-W64-NEXT: s_or_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT: v_permlane64_b32 v2, v0
+; GFX11-W64-NEXT: ds_bpermute_b32 v2, v3, v2
+; GFX11-W64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT: v_mbcnt_lo_u32_b32 v4, -1, 0
+; GFX11-W64-NEXT: ds_bpermute_b32 v0, v3, v0
+; GFX11-W64-NEXT: v_mbcnt_hi_u32_b32 v3, -1, v4
+; GFX11-W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-W64-NEXT: v_xor_b32_e32 v1, v3, v1
+; GFX11-W64-NEXT: s_waitcnt lgkmcnt(1)
+; GFX11-W64-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-W64-NEXT: v_and_b32_e32 v1, 32, v1
+; GFX11-W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-W64-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX11-W64-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-W64-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX11-W64-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; GFX11-W64-NEXT: scratch_load_b32 v2, off, s32 ; 4-byte Folded Reload
+; GFX11-W64-NEXT: s_mov_b64 exec, s[0:1]
+; GFX11-W64-NEXT: s_waitcnt vmcnt(0)
+; GFX11-W64-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-64-LABEL: test_subgroup_shuffle_scalar:
-; GFX12-64: ; %bb.0: ; %entry
-; GFX12-64-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-64-NEXT: s_wait_expcnt 0x0
-; GFX12-64-NEXT: s_wait_samplecnt 0x0
-; GFX12-64-NEXT: s_wait_bvhcnt 0x0
-; GFX12-64-NEXT: s_wait_kmcnt 0x0
-; GFX12-64-NEXT: v_lshlrev_b32_e32 v1, 2, v1
-; GFX12-64-NEXT: ds_bpermute_b32 v0, v1, v0
-; GFX12-64-NEXT: s_wait_dscnt 0x0
-; GFX12-64-NEXT: s_setpc_b64 s[30:31]
+; GFX12-W64-LABEL: test_subgroup_shuffle_scalar:
+; GFX12-W64: ; %bb.0: ; %entry
+; GFX12-W64-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-W64-NEXT: s_wait_expcnt 0x0
+; GFX12-W64-NEXT: s_wait_samplecnt 0x0
+; GFX12-W64-NEXT: s_wait_bvhcnt 0x0
+; GFX12-W64-NEXT: s_wait_kmcnt 0x0
+; GFX12-W64-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX12-W64-NEXT: ds_bpermute_b32 v0, v1, v0
+; GFX12-W64-NEXT: s_wait_dscnt 0x0
+; GFX12-W64-NEXT: s_setpc_b64 s[30:31]
entry:
%0 = tail call float @llvm.amdgcn.subgroup.shuffle(float %val, i32 %idx)
ret float %0
More information about the llvm-commits
mailing list