[llvm] [AMDGPU] s_quadmask* implicitly defines SCC (PR #161582)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 1 13:42:15 PDT 2025
https://github.com/LU-JOHN created https://github.com/llvm/llvm-project/pull/161582
Fix the s_quadmask* instruction descriptions so that they are marked as defining SCC.
>From 6af0eee389c21dc901708249e6c4c1cd60e668ad Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 1 Oct 2025 15:40:44 -0500
Subject: [PATCH] s_quadmask* implicitly defines SCC
Signed-off-by: John Lu <John.Lu at amd.com>
---
llvm/lib/Target/AMDGPU/SOPInstructions.td | 2 +
llvm/test/CodeGen/AMDGPU/scc_quadmask.ll | 57 +++++++++++++++++++++++
2 files changed, 59 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index b3fd8c70dd045..84287b621fe78 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -352,10 +352,12 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">;
} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
+let Defs = [SCC] in {
def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32",
[(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>;
def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64",
[(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>;
+}
let Uses = [M0] in {
def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
diff --git a/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll b/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
new file mode 100644
index 0000000000000..34fb0a19f2ab4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+;; Ensure that the AND/ICMP pair cannot be fused into a single SCC-defining AND,
+;; because s_quadmask_b32 implicitly defines (clobbers) SCC between them.
+
+define amdgpu_kernel void @quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: quadmask_32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_and_b32 s0, s0, 1
+; CHECK-NEXT: s_quadmask_b32 s1, s1
+; CHECK-NEXT: s_cmp_eq_u32 s0, 0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: global_store_dword v2, v3, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+ store i32 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
+
+define amdgpu_kernel void @quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: quadmask_64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dword s6, s[4:5], 0x0
+; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x8
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_and_b32 s4, s6, 1
+; CHECK-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-NEXT: s_cmp_eq_u32 s4, 0
+; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; CHECK-NEXT: global_store_dword v[0:1], v2, off
+; CHECK-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+ store i64 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
More information about the llvm-commits
mailing list