[llvm] [AMDGPU] Select v_bfe_u32 for i8/i16 (and (srl x, c), mask) (PR #182446)
Vigneshwar Jayakumar via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 4 09:13:23 PST 2026
https://github.com/VigneshwarJ updated https://github.com/llvm/llvm-project/pull/182446
>From 216d2093aa76fd7427c007e28de77e6f4028e97f Mon Sep 17 00:00:00 2001
From: Vigneshwar Jayakumar <vjayakum at amd.com>
Date: Fri, 20 Feb 2026 00:53:42 -0600
Subject: [PATCH 1/5] [AMDGPU] Lower i8/i16 (and (srl x, c), mask) to v_bfe_u32
Optimize i8 and i16 (and (srl x, c), mask) instructions to v_bfe_u32 in
PerformAndCombine. Skips this optimization for mask <= 1 (to prevent
loss of folding for vector i1 promoted to i8), for true16 targets, and
for SDWA targets
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 38 +++++++++
llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll | 84 +++++++++++++++++++
.../CodeGen/AMDGPU/bitcast_vector_bigint.ll | 9 +-
...ffer-fat-pointers-contents-legalization.ll | 12 ++-
4 files changed, 131 insertions(+), 12 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b043d5354042d..0f90c7fff7491 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13608,6 +13608,44 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
return Split;
}
+ // and (srl x, c), mask => bfe x, c, popcount(mask)
+ // where mask is a contiguous set of bits starting from bit 0
+ // This handles i8 and i16 types by promoting to i32 for BFE.
+ // Skip for True16 targets where native i16 ops are more efficient.
+ if (CRHS && (VT == MVT::i16 || VT == MVT::i8) && N->isDivergent() &&
+ !getSubtarget()->useRealTrue16Insts()) {
+ uint64_t Mask = CRHS->getZExtValue();
+ unsigned Bits = llvm::popcount(Mask);
+ // Mask > 1: skip single-bit extracts. Scalarized <N x i1> vectors produce
+ // adjacent 1-bit (srl + and) pairs that later combine into a single wider
+ // AND mask; converting individual bits to BFE prevents that fold.
+ if (LHS->getOpcode() == ISD::SRL && isMask_64(Mask) && Mask > 1) {
+ if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
+ unsigned Shift = CShift->getZExtValue();
+ unsigned SrcBits = LHS->getOperand(0).getValueType().getSizeInBits();
+ if (Shift + Bits > SrcBits)
+ return SDValue();
+ // 8 or 16 bit fields starting at byte boundary are better handled
+ // by SDWA for GFX8+ in the SDWA peephole pass.
+ if (getSubtarget()->hasSDWA() && (Bits == 8 || Bits == 16))
+ return SDValue();
+ SDLoc SL(N);
+
+ SDValue Src = DAG.getZExtOrTrunc(LHS->getOperand(0), SL, MVT::i32);
+ DCI.AddToWorklist(Src.getNode());
+
+ SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, Src,
+ DAG.getConstant(Shift, SL, MVT::i32),
+ DAG.getConstant(Bits, SL, MVT::i32));
+ DCI.AddToWorklist(BFE.getNode());
+
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, BFE);
+ DCI.AddToWorklist(Trunc.getNode());
+ return Trunc;
+ }
+ }
+ }
+
if (CRHS && VT == MVT::i32) {
// and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
// nb = number of trailing zeroes in mask
diff --git a/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
new file mode 100644
index 0000000000000..126c89c9e6d2d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s --check-prefix=GFX9
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefix=GFX12-TRUE16
+
+; Test that the DAG combine in performAndCombine recognizes
+; (and (lshr x, C), mask) for i8/i16 and lowers it to v_bfe_u32.
+
+define i16 @bfe_i16(i16 %a) {
+; GFX9-LABEL: bfe_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_bfe_u32 v0, v0, 4, 4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-LABEL: bfe_i16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b16 v0.l, 4, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b16 v0.l, v0.l, 15
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+ %shr = lshr i16 %a, 4
+ %and = and i16 %shr, 15
+ ret i16 %and
+}
+
+; i8: 4 bits at offset 4.
+define i8 @bfe_i8(i8 %a) {
+; GFX9-LABEL: bfe_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_bfe_u32 v0, v0, 4, 4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-LABEL: bfe_i8:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b16 v0.l, 4, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b16 v0.l, v0.l, 15
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+ %shr = lshr i8 %a, 4
+ %and = and i8 %shr, 15
+ ret i8 %and
+}
+
+; Negative: <2 x i1> element extractions should combine into a single AND
+; mask (v_and 3), not produce individual BFE instructions.
+define void @no_bfe_v2i1(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; GFX9-LABEL: no_bfe_v2i1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_load_ubyte v0, v[0:1], off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_and_b32_e32 v0, 3, v0
+; GFX9-NEXT: global_store_byte v[2:3], v0, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-LABEL: no_bfe_v2i1:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: global_load_d16_u8 v0, v[0:1], off
+; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_and_b16 v0.l, v0.l, 3
+; GFX12-TRUE16-NEXT: global_store_b8 v[2:3], v0, off
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+ %a = load <2 x i1>, ptr addrspace(1) %in
+ %freeze = freeze <2 x i1> %a
+ store <2 x i1> %freeze, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/bitcast_vector_bigint.ll b/llvm/test/CodeGen/AMDGPU/bitcast_vector_bigint.ll
index f788cd663b309..740d6ba8fe549 100644
--- a/llvm/test/CodeGen/AMDGPU/bitcast_vector_bigint.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitcast_vector_bigint.ll
@@ -281,8 +281,7 @@ define <2 x i6> @bitcast_i12_to_v2i6(i12 %int) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v2, 63, v0
-; GFX9-NEXT: v_lshrrev_b16_e32 v0, 6, v0
-; GFX9-NEXT: v_and_b32_e32 v1, 63, v0
+; GFX9-NEXT: v_bfe_u32 v1, v0, 6, 6
; GFX9-NEXT: v_mov_b32_e32 v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -306,10 +305,10 @@ define <2 x i6> @bitcast_i12_to_v2i6(i12 %int) {
; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b16 v1, 6, v0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 63, v0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 63, v0
+; GFX12-FAKE16-NEXT: v_bfe_u32 v1, v0, 6, 6
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 63, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
%bitcast = bitcast i12 %int to <2 x i6>
ret <2 x i6> %bitcast
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
index 867ec0488d199..0f5c566e1e2f6 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
@@ -3262,16 +3262,14 @@ define <4 x i4> @load_v4i4(ptr addrspace(8) inreg %buf) {
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: buffer_load_ushort v0, off, s[16:19], 0
-; SDAG-NEXT: v_mov_b32_e32 v2, 15
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: buffer_store_short v0, off, s[0:3], s32
-; SDAG-NEXT: buffer_load_ushort v1, off, s[0:3], s32
+; SDAG-NEXT: buffer_load_ushort v2, off, s[0:3], s32
; SDAG-NEXT: s_waitcnt vmcnt(0)
-; SDAG-NEXT: v_lshrrev_b16_e32 v4, 4, v1
-; SDAG-NEXT: v_and_b32_e32 v0, 15, v1
-; SDAG-NEXT: v_lshrrev_b16_e32 v3, 12, v1
-; SDAG-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; SDAG-NEXT: v_and_b32_e32 v1, 15, v4
+; SDAG-NEXT: v_and_b32_e32 v0, 15, v2
+; SDAG-NEXT: v_lshrrev_b16_e32 v3, 12, v2
+; SDAG-NEXT: v_bfe_u32 v1, v2, 4, 4
+; SDAG-NEXT: v_bfe_u32 v2, v2, 8, 4
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: load_v4i4:
>From ddee5e72727cd5565dcc976163fc712497ba69c7 Mon Sep 17 00:00:00 2001
From: Vigneshwar Jayakumar <vjayakum at amd.com>
Date: Mon, 23 Feb 2026 16:45:51 -0600
Subject: [PATCH 2/5] change to td
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 38 -------
llvm/lib/Target/AMDGPU/SIInstructions.td | 19 ++++
llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll | 119 ++++++++++++++++++----
llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 34 +++----
llvm/test/CodeGen/AMDGPU/permute_i8.ll | 16 +--
5 files changed, 143 insertions(+), 83 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0f90c7fff7491..b043d5354042d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13608,44 +13608,6 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
return Split;
}
- // and (srl x, c), mask => bfe x, c, popcount(mask)
- // where mask is a contiguous set of bits starting from bit 0
- // This handles i8 and i16 types by promoting to i32 for BFE.
- // Skip for True16 targets where native i16 ops are more efficient.
- if (CRHS && (VT == MVT::i16 || VT == MVT::i8) && N->isDivergent() &&
- !getSubtarget()->useRealTrue16Insts()) {
- uint64_t Mask = CRHS->getZExtValue();
- unsigned Bits = llvm::popcount(Mask);
- // Mask > 1: skip single-bit extracts. Scalarized <N x i1> vectors produce
- // adjacent 1-bit (srl + and) pairs that later combine into a single wider
- // AND mask; converting individual bits to BFE prevents that fold.
- if (LHS->getOpcode() == ISD::SRL && isMask_64(Mask) && Mask > 1) {
- if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
- unsigned Shift = CShift->getZExtValue();
- unsigned SrcBits = LHS->getOperand(0).getValueType().getSizeInBits();
- if (Shift + Bits > SrcBits)
- return SDValue();
- // 8 or 16 bit fields starting at byte boundary are better handled
- // by SDWA for GFX8+ in the SDWA peephole pass.
- if (getSubtarget()->hasSDWA() && (Bits == 8 || Bits == 16))
- return SDValue();
- SDLoc SL(N);
-
- SDValue Src = DAG.getZExtOrTrunc(LHS->getOperand(0), SL, MVT::i32);
- DCI.AddToWorklist(Src.getNode());
-
- SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, Src,
- DAG.getConstant(Shift, SL, MVT::i32),
- DAG.getConstant(Bits, SL, MVT::i32));
- DCI.AddToWorklist(BFE.getNode());
-
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, BFE);
- DCI.AddToWorklist(Trunc.getNode());
- return Trunc;
- }
- }
- }
-
if (CRHS && VT == MVT::i32) {
// and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
// nb = number of trailing zeroes in mask
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d30e7fd0523a5..7fab1a5a436fb 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4140,17 +4140,36 @@ def IMMZeroBasedBitfieldMask : ImmLeaf <i32, [{
return isMask_32(Imm);
}]>;
+def IMMZeroBasedBitfieldMask16 : ImmLeaf <i16, [{
+ return isUInt<16>(Imm) && isMask_32(Imm);
+}]>;
+
+def ShiftAmt16Imm : ImmLeaf <i16, [{
+ return Imm < 16;
+}]>;
+
def IMMPopCount : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(llvm::popcount(N->getZExtValue()), SDLoc(N),
MVT::i32);
}]>;
+def IMMAsI32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
def : AMDGPUPat <
(DivergentBinFrag<and> (i32 (srl i32:$src, i32:$rshift)),
IMMZeroBasedBitfieldMask:$mask),
(V_BFE_U32_e64 $src, $rshift, (i32 (IMMPopCount $mask)))
>;
+let True16Predicate = NotUseRealTrue16Insts in
+def : AMDGPUPat <
+ (i16 (DivergentBinFrag<and> (srl_oneuse i16:$src, (i16 ShiftAmt16Imm:$rshift)),
+ IMMZeroBasedBitfieldMask16:$mask)),
+ (V_BFE_U32_e64 $src, (i32 (IMMAsI32 $rshift)), (i32 (IMMPopCount $mask)))
+>;
+
// x & ((1 << y) - 1)
def : AMDGPUPat <
(DivergentBinFrag<and> i32:$src, (add_oneuse (shl_oneuse 1, i32:$width), -1)),
diff --git a/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
index 126c89c9e6d2d..b303772510179 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
@@ -2,8 +2,8 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s --check-prefix=GFX9
; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck %s --check-prefix=GFX12-TRUE16
-; Test that the DAG combine in performAndCombine recognizes
-; (and (lshr x, C), mask) for i8/i16 and lowers it to v_bfe_u32.
+; Test that isel patterns recognize (and (lshr x, C), mask) for i8/i16
+; and lower it to v_bfe_u32 when real true16 instructions are not used.
define i16 @bfe_i16(i16 %a) {
; GFX9-LABEL: bfe_i16:
@@ -52,33 +52,118 @@ define i8 @bfe_i8(i8 %a) {
ret i8 %and
}
-; Negative: <2 x i1> element extractions should combine into a single AND
-; mask (v_and 3), not produce individual BFE instructions.
-define void @no_bfe_v2i1(ptr addrspace(1) %in, ptr addrspace(1) %out) {
-; GFX9-LABEL: no_bfe_v2i1:
+; Negative: multiple uses of the shifted value should not match the one-use
+; narrow BFE pattern.
+define i16 @no_bfe_i16_multi_use(i16 %a) {
+; GFX9-LABEL: no_bfe_i16_multi_use:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_ubyte v0, v[0:1], off
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 4, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 15, v0
; GFX9-NEXT: v_and_b32_e32 v0, 3, v0
-; GFX9-NEXT: global_store_byte v[2:3], v0, off
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-TRUE16-LABEL: no_bfe_v2i1:
+; GFX12-TRUE16-LABEL: no_bfe_i16_multi_use:
; GFX12-TRUE16: ; %bb.0:
; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: global_load_d16_u8 v0, v[0:1], off
-; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-TRUE16-NEXT: v_lshrrev_b16 v0.l, 4, v0.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b16 v0.h, v0.l, 15
; GFX12-TRUE16-NEXT: v_and_b16 v0.l, v0.l, 3
-; GFX12-TRUE16-NEXT: global_store_b8 v[2:3], v0, off
+; GFX12-TRUE16-NEXT: v_xor_b16 v0.l, v0.h, v0.l
; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
- %a = load <2 x i1>, ptr addrspace(1) %in
- %freeze = freeze <2 x i1> %a
- store <2 x i1> %freeze, ptr addrspace(1) %out
+ %shr = lshr i16 %a, 4
+ %and0 = and i16 %shr, 15
+ %and1 = and i16 %shr, 3
+ %xor = xor i16 %and0, %and1
+ ret i16 %xor
+}
+
+; Pure uniform (SGPR) case.
+define amdgpu_kernel void @bfe_i16_uniform(i16 %a, ptr addrspace(1) %out) {
+; GFX9-LABEL: bfe_i16_uniform:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dword s2, s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x40004
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: global_store_short v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX12-TRUE16-LABEL: bfe_i16_uniform:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_clause 0x1
+; GFX12-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX12-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_bfe_u32 s2, s2, 0x40004
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-TRUE16-NEXT: global_store_b16 v0, v1, s[0:1]
+; GFX12-TRUE16-NEXT: s_endpgm
+ %shr = lshr i16 %a, 4
+ %and = and i16 %shr, 15
+ store i16 %and, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @bfe_i8_uniform(i8 %a, ptr addrspace(1) %out) {
+; GFX9-LABEL: bfe_i8_uniform:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dword s2, s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x40004
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: global_store_byte v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX12-TRUE16-LABEL: bfe_i8_uniform:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_clause 0x1
+; GFX12-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX12-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: s_bfe_u32 s2, s2, 0x40004
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12-TRUE16-NEXT: global_store_b8 v0, v1, s[0:1]
+; GFX12-TRUE16-NEXT: s_endpgm
+ %shr = lshr i8 %a, 4
+ %and = and i8 %shr, 15
+ store i8 %and, ptr addrspace(1) %out
+ ret void
+}
+
+; Vector case: keep the packed shift/and lowering.
+define <2 x i16> @bfe_v2i16(<2 x i16> %a) {
+; GFX9-LABEL: bfe_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_lshrrev_b16 v0, 4, v0 op_sel_hi:[0,1]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xf000f, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-LABEL: bfe_v2i16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_pk_lshrrev_b16 v0, 4, v0 op_sel_hi:[0,1]
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 0xf000f, v0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+ %shr = lshr <2 x i16> %a, <i16 4, i16 4>
+ %and = and <2 x i16> %shr, <i16 15, i16 15>
+ ret <2 x i16> %and
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 200fbf5d220b4..2d260870ad902 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -1363,18 +1363,17 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG: ; %bb.0: ; %fp-to-i-entry
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, v0
-; SDAG-NEXT: v_lshrrev_b16_e32 v7, 7, v4
+; SDAG-NEXT: v_bfe_u32 v7, v4, 7, 8
; SDAG-NEXT: s_movk_i32 s4, 0x7e
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
; SDAG-NEXT: v_mov_b32_e32 v1, 0
; SDAG-NEXT: v_mov_b32_e32 v3, 0
-; SDAG-NEXT: v_cmp_gt_u16_sdwa s[4:5], v7, s4 src0_sel:BYTE_0 src1_sel:DWORD
-; SDAG-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: v_cmp_lt_u16_e32 vcc, s4, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB6_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-check.saturate
-; SDAG-NEXT: s_movk_i32 s4, 0xff01
-; SDAG-NEXT: v_add_u16_sdwa v0, v7, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; SDAG-NEXT: v_add_u16_e32 v0, 0xff01, v7
; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_cmp_lt_i16_e32 vcc, -1, v4
; SDAG-NEXT: v_cmp_lt_u16_e64 s[4:5], s4, v0
@@ -1390,7 +1389,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: v_cndmask_b32_e64 v6, -1, 0, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v5, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v4, 0x80, v0
-; SDAG-NEXT: v_cmp_gt_u16_sdwa s[4:5], v7, s4 src0_sel:BYTE_0 src1_sel:DWORD
+; SDAG-NEXT: v_cmp_lt_u16_e64 s[4:5], s4, v7
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; SDAG-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
@@ -1398,9 +1397,8 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_cbranch_execz .LBB6_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-exp.large
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_u16_e32 v10, 0xff7a, v7
; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
-; SDAG-NEXT: s_movk_i32 s4, 0xff7a
-; SDAG-NEXT: v_add_u16_sdwa v10, v7, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v4
; SDAG-NEXT: v_mov_b32_e32 v1, s6
; SDAG-NEXT: v_sub_u32_e32 v2, 64, v10
@@ -1440,8 +1438,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: .LBB6_4: ; %Flow
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[12:13]
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-exp.small
-; SDAG-NEXT: s_movk_i32 s6, 0x86
-; SDAG-NEXT: v_sub_u16_sdwa v0, s6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; SDAG-NEXT: v_sub_u16_e32 v0, 0x86, v7
; SDAG-NEXT: v_lshrrev_b16_e32 v0, v0, v4
; SDAG-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v5
; SDAG-NEXT: v_ashrrev_i32_e32 v2, 31, v1
@@ -1684,18 +1681,17 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG: ; %bb.0: ; %fp-to-i-entry
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: v_mov_b32_e32 v4, v0
-; SDAG-NEXT: v_lshrrev_b16_e32 v7, 7, v4
+; SDAG-NEXT: v_bfe_u32 v7, v4, 7, 8
; SDAG-NEXT: s_movk_i32 s4, 0x7e
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
; SDAG-NEXT: v_mov_b32_e32 v1, 0
; SDAG-NEXT: v_mov_b32_e32 v3, 0
-; SDAG-NEXT: v_cmp_gt_u16_sdwa s[4:5], v7, s4 src0_sel:BYTE_0 src1_sel:DWORD
-; SDAG-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; SDAG-NEXT: v_cmp_lt_u16_e32 vcc, s4, v7
+; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB7_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-check.saturate
-; SDAG-NEXT: s_movk_i32 s4, 0xff01
-; SDAG-NEXT: v_add_u16_sdwa v0, v7, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; SDAG-NEXT: v_add_u16_e32 v0, 0xff01, v7
; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_cmp_lt_i16_e32 vcc, -1, v4
; SDAG-NEXT: v_cmp_lt_u16_e64 s[4:5], s4, v0
@@ -1711,7 +1707,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: v_cndmask_b32_e64 v6, -1, 0, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v5, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v4, 0x80, v0
-; SDAG-NEXT: v_cmp_gt_u16_sdwa s[4:5], v7, s4 src0_sel:BYTE_0 src1_sel:DWORD
+; SDAG-NEXT: v_cmp_lt_u16_e64 s[4:5], s4, v7
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; SDAG-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
@@ -1719,9 +1715,8 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_cbranch_execz .LBB7_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-exp.large
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_u16_e32 v10, 0xff7a, v7
; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
-; SDAG-NEXT: s_movk_i32 s4, 0xff7a
-; SDAG-NEXT: v_add_u16_sdwa v10, v7, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v4
; SDAG-NEXT: v_mov_b32_e32 v1, s6
; SDAG-NEXT: v_sub_u32_e32 v2, 64, v10
@@ -1761,8 +1756,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: .LBB7_4: ; %Flow
; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[12:13]
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-exp.small
-; SDAG-NEXT: s_movk_i32 s6, 0x86
-; SDAG-NEXT: v_sub_u16_sdwa v0, s6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; SDAG-NEXT: v_sub_u16_e32 v0, 0x86, v7
; SDAG-NEXT: v_lshrrev_b16_e32 v0, v0, v4
; SDAG-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v5
; SDAG-NEXT: v_ashrrev_i32_e32 v2, 31, v1
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 58317966a4295..08109212c0ff4 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -2146,12 +2146,12 @@ define hidden void @trunc_store_div(ptr addrspace(1) %in0, ptr addrspace(1) %in1
; GFX10-NEXT: global_load_dword v9, v[2:3], off
; GFX10-NEXT: v_mov_b32_e32 v0, 1
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_and_b32_sdwa v1, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_and_b32_sdwa v0, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX10-NEXT: v_lshlrev_b16 v1, 1, v1
-; GFX10-NEXT: v_lshlrev_b16 v2, 2, v0
-; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: v_bfe_u32 v1, v9, 8, 1
+; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
+; GFX10-NEXT: v_lshlrev_b16 v2, 2, v1
+; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
; GFX10-NEXT: v_lshlrev_b16 v1, 3, v4
; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
@@ -2175,12 +2175,12 @@ define hidden void @trunc_store_div(ptr addrspace(1) %in0, ptr addrspace(1) %in1
; GFX9-NEXT: v_mov_b32_e32 v0, 1
; GFX9-NEXT: s_mov_b32 s4, 0x50205
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b16_e32 v3, 3, v4
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_sdwa v2, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_bfe_u32 v2, v9, 8, 1
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX9-NEXT: v_perm_b32 v1, v9, v4, s4
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 3, v4
; GFX9-NEXT: v_lshlrev_b16_e32 v4, 2, v2
; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
; GFX9-NEXT: v_or_b32_e32 v0, v0, v4
>From 86ac34980b9b6a086fcd6f7ce858dd85ba62ff89 Mon Sep 17 00:00:00 2001
From: vigneshwar jayakumar <vigneshwar.jayakumar at amd.com>
Date: Mon, 2 Mar 2026 14:24:25 -0600
Subject: [PATCH 3/5] review comments #2
---
llvm/lib/Target/AMDGPU/SIInstructions.td | 6 +--
llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll | 52 ++++++------------------
2 files changed, 13 insertions(+), 45 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 68803700e118f..ac83c3d7c5adb 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4148,10 +4148,6 @@ def IMMZeroBasedBitfieldMask16 : ImmLeaf <i16, [{
return isUInt<16>(Imm) && isMask_32(Imm);
}]>;
-def ShiftAmt16Imm : ImmLeaf <i16, [{
- return Imm < 16;
-}]>;
-
def IMMPopCount : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(llvm::popcount(N->getZExtValue()), SDLoc(N),
MVT::i32);
@@ -4169,7 +4165,7 @@ def : AMDGPUPat <
let True16Predicate = NotUseRealTrue16Insts in
def : AMDGPUPat <
- (i16 (DivergentBinFrag<and> (srl_oneuse i16:$src, (i16 ShiftAmt16Imm:$rshift)),
+ (i16 (DivergentBinFrag<and> (srl_oneuse i16:$src, (i16 imm:$rshift)),
IMMZeroBasedBitfieldMask16:$mask)),
(V_BFE_U32_e64 $src, (i32 (IMMAsI32 $rshift)), (i32 (IMMPopCount $mask)))
>;
diff --git a/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
index b303772510179..d2bf50d125e7c 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-i8-i16.ll
@@ -85,62 +85,34 @@ define i16 @no_bfe_i16_multi_use(i16 %a) {
}
; Pure uniform (SGPR) case.
-define amdgpu_kernel void @bfe_i16_uniform(i16 %a, ptr addrspace(1) %out) {
+define amdgpu_ps i16 @bfe_i16_uniform(i16 inreg %a) {
; GFX9-LABEL: bfe_i16_uniform:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dword s2, s[4:5], 0x24
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_bfe_u32 s2, s2, 0x40004
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: global_store_short v0, v1, s[0:1]
-; GFX9-NEXT: s_endpgm
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x40004
+; GFX9-NEXT: ; return to shader part epilog
;
; GFX12-TRUE16-LABEL: bfe_i16_uniform:
; GFX12-TRUE16: ; %bb.0:
-; GFX12-TRUE16-NEXT: s_clause 0x1
-; GFX12-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x24
-; GFX12-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
-; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: s_bfe_u32 s2, s2, 0x40004
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; GFX12-TRUE16-NEXT: global_store_b16 v0, v1, s[0:1]
-; GFX12-TRUE16-NEXT: s_endpgm
+; GFX12-TRUE16-NEXT: s_bfe_u32 s0, s0, 0x40004
+; GFX12-TRUE16-NEXT: ; return to shader part epilog
%shr = lshr i16 %a, 4
%and = and i16 %shr, 15
- store i16 %and, ptr addrspace(1) %out
- ret void
+ ret i16 %and
}
-define amdgpu_kernel void @bfe_i8_uniform(i8 %a, ptr addrspace(1) %out) {
+define amdgpu_ps i8 @bfe_i8_uniform(i8 inreg %a) {
; GFX9-LABEL: bfe_i8_uniform:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dword s2, s[4:5], 0x24
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_bfe_u32 s2, s2, 0x40004
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: global_store_byte v0, v1, s[0:1]
-; GFX9-NEXT: s_endpgm
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x40004
+; GFX9-NEXT: ; return to shader part epilog
;
; GFX12-TRUE16-LABEL: bfe_i8_uniform:
; GFX12-TRUE16: ; %bb.0:
-; GFX12-TRUE16-NEXT: s_clause 0x1
-; GFX12-TRUE16-NEXT: s_load_b32 s2, s[4:5], 0x24
-; GFX12-TRUE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
-; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: s_bfe_u32 s2, s2, 0x40004
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; GFX12-TRUE16-NEXT: global_store_b8 v0, v1, s[0:1]
-; GFX12-TRUE16-NEXT: s_endpgm
+; GFX12-TRUE16-NEXT: s_bfe_u32 s0, s0, 0x40004
+; GFX12-TRUE16-NEXT: ; return to shader part epilog
%shr = lshr i8 %a, 4
%and = and i8 %shr, 15
- store i8 %and, ptr addrspace(1) %out
- ret void
+ ret i8 %and
}
; Vector case: keep the packed shift/and lowering.
>From 85ac5fd8a12eece73dde9f0a9467710d8f02aa41 Mon Sep 17 00:00:00 2001
From: vigneshwar jayakumar <vigneshwar.jayakumar at amd.com>
Date: Tue, 3 Mar 2026 12:02:23 -0600
Subject: [PATCH 4/5] review comments#3
---
llvm/lib/Target/AMDGPU/SIInstructions.td | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 806020a9d4df1..eecde75ac4bf7 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4166,10 +4166,6 @@ def IMMPopCount : SDNodeXForm<imm, [{
MVT::i32);
}]>;
-def IMMAsI32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
def : AMDGPUPat <
(DivergentBinFrag<and> (i32 (srl i32:$src, i32:$rshift)),
IMMZeroBasedBitfieldMask:$mask),
@@ -4180,7 +4176,7 @@ let True16Predicate = NotUseRealTrue16Insts in
def : AMDGPUPat <
(i16 (DivergentBinFrag<and> (srl_oneuse i16:$src, (i16 imm:$rshift)),
IMMZeroBasedBitfieldMask16:$mask)),
- (V_BFE_U32_e64 $src, (i32 (IMMAsI32 $rshift)), (i32 (IMMPopCount $mask)))
+ (V_BFE_U32_e64 $src, (i32 (as_i32timm $rshift)), (i32 (IMMPopCount $mask)))
>;
// x & ((1 << y) - 1)
>From e08bca79a831a2c7f5ba080e6314489f8a572f37 Mon Sep 17 00:00:00 2001
From: vigneshwar jayakumar <vigneshwar.jayakumar at amd.com>
Date: Wed, 4 Mar 2026 11:02:07 -0600
Subject: [PATCH 5/5] change popcount
---
llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 4 ++--
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp | 7 +++----
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h | 4 ++--
llvm/lib/Target/AMDGPU/SIInstructions.td | 10 +++++-----
4 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index cfef04644835c..84c0348c1d611 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -441,8 +441,8 @@ def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastFPImm32">,
def gi_bitcast_fpimm_to_i64 : GICustomOperandRenderer<"renderBitcastFPImm64">,
GISDNodeXFormEquiv<bitcast_fpimm_to_i64>;
-def gi_IMMPopCount : GICustomOperandRenderer<"renderPopcntImm">,
- GISDNodeXFormEquiv<IMMPopCount>;
+def gi_IMMCountTrailingOnes : GICustomOperandRenderer<"renderCountTrailingOnesImm">,
+ GISDNodeXFormEquiv<IMMCountTrailingOnes>;
def gi_extract_cpol : GICustomOperandRenderer<"renderExtractCPol">,
GISDNodeXFormEquiv<extract_cpol>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 3ae638f14ee40..23d01f8f4a00d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -7224,12 +7224,11 @@ void AMDGPUInstructionSelector::renderBitcastFPImm(MachineInstrBuilder &MIB,
MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
}
-void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
- const MachineInstr &MI,
- int OpIdx) const {
+void AMDGPUInstructionSelector::renderCountTrailingOnesImm(
+ MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
- MIB.addImm(MI.getOperand(1).getCImm()->getValue().popcount());
+ MIB.addImm(MI.getOperand(1).getCImm()->getValue().countTrailingOnes());
}
/// This only really exists to satisfy DAG type checking machinery, so is a
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index 98c4e7837a1ff..cc121632e101d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -400,8 +400,8 @@ class AMDGPUInstructionSelector final : public InstructionSelector {
renderBitcastFPImm(MIB, MI, OpIdx);
}
- void renderPopcntImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
- int OpIdx) const;
+ void renderCountTrailingOnesImm(MachineInstrBuilder &MIB,
+ const MachineInstr &MI, int OpIdx) const;
void renderExtractCPol(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderExtractSWZ(MachineInstrBuilder &MIB, const MachineInstr &MI,
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index eecde75ac4bf7..bc2dcdf9b591b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4161,22 +4161,22 @@ def IMMZeroBasedBitfieldMask16 : ImmLeaf <i16, [{
return isUInt<16>(Imm) && isMask_32(Imm);
}]>;
-def IMMPopCount : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(llvm::popcount(N->getZExtValue()), SDLoc(N),
- MVT::i32);
+def IMMCountTrailingOnes : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(llvm::countr_one(N->getZExtValue()),
+ SDLoc(N), MVT::i32);
}]>;
def : AMDGPUPat <
(DivergentBinFrag<and> (i32 (srl i32:$src, i32:$rshift)),
IMMZeroBasedBitfieldMask:$mask),
- (V_BFE_U32_e64 $src, $rshift, (i32 (IMMPopCount $mask)))
+ (V_BFE_U32_e64 $src, $rshift, (i32 (IMMCountTrailingOnes $mask)))
>;
let True16Predicate = NotUseRealTrue16Insts in
def : AMDGPUPat <
(i16 (DivergentBinFrag<and> (srl_oneuse i16:$src, (i16 imm:$rshift)),
IMMZeroBasedBitfieldMask16:$mask)),
- (V_BFE_U32_e64 $src, (i32 (as_i32timm $rshift)), (i32 (IMMPopCount $mask)))
+ (V_BFE_U32_e64 $src, (i32 (as_i32timm $rshift)), (i32 (IMMCountTrailingOnes $mask)))
>;
// x & ((1 << y) - 1)
More information about the llvm-commits
mailing list