[llvm] [AMDGPU] s_quadmask* implicitly defines SCC (PR #161582)

via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 2 07:51:30 PDT 2025


https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/161582

>From 6af0eee389c21dc901708249e6c4c1cd60e668ad Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 1 Oct 2025 15:40:44 -0500
Subject: [PATCH 1/2] s_quadmask* implicitly defines SCC

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SOPInstructions.td |  2 +
 llvm/test/CodeGen/AMDGPU/scc_quadmask.ll  | 57 +++++++++++++++++++++++
 2 files changed, 59 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/scc_quadmask.ll

diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index b3fd8c70dd045..84287b621fe78 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -352,10 +352,12 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">;
 
 } // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
 
+let Defs = [SCC] in {
 def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32",
   [(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>;
 def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64",
   [(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>;
+}
 
 let Uses = [M0] in {
 def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
diff --git a/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll b/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
new file mode 100644
index 0000000000000..34fb0a19f2ab4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+;; Ensure that AND/ICMP cannot be fused into a single SCC-setting AND because s_quadmask_b32 implicitly defines SCC.
+
+define amdgpu_kernel void @quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: quadmask_32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_and_b32 s0, s0, 1
+; CHECK-NEXT:    s_quadmask_b32 s1, s1
+; CHECK-NEXT:    s_cmp_eq_u32 s0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    global_store_dword v2, v3, s[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+  store i32 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}
+
+define amdgpu_kernel void @quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: quadmask_64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x0
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_and_b32 s4, s6, 1
+; CHECK-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-NEXT:    s_cmp_eq_u32 s4, 0
+; CHECK-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+  store i64 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}

>From 023596e534f75abb254a757b698345f61482c506 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 2 Oct 2025 09:51:17 -0500
Subject: [PATCH 2/2] Move new tests to old file

Signed-off-by: John Lu <John.Lu at amd.com>
---
 .../CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll    | 92 ++++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/scc_quadmask.ll      | 57 ------------
 2 files changed, 90 insertions(+), 59 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/scc_quadmask.ll

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
index de7d2346a0b42..b9bf76c1423b6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck  -check-prefixes=GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck  -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck  -check-prefixes=GFX11,GFX11-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck  -check-prefixes=GFX11,GFX11-SDAG %s
 
 declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
 declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
@@ -172,3 +172,91 @@ entry:
   %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
   ret i64 %qm
 }
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b32 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_32:
+; GFX11-GISEL:       ; %bb.0:
+; GFX11-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT:    s_and_b32 s0, s0, 1
+; GFX11-GISEL-NEXT:    s_quadmask_b32 s1, s1
+; GFX11-GISEL-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-GISEL-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
+; GFX11-GISEL-NEXT:    global_store_b32 v2, v3, s[2:3]
+; GFX11-GISEL-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX11-GISEL-NEXT:    s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_32:
+; GFX11-SDAG:       ; %bb.0:
+; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT:    s_and_b32 s0, s0, 1
+; GFX11-SDAG-NEXT:    s_quadmask_b32 s1, s1
+; GFX11-SDAG-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT:    s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s0
+; GFX11-SDAG-NEXT:    global_store_b32 v2, v3, s[2:3]
+; GFX11-SDAG-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX11-SDAG-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+  store i32 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}
+
+;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b64 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_64:
+; GFX11-GISEL:       ; %bb.0:
+; GFX11-GISEL-NEXT:    s_clause 0x1
+; GFX11-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-GISEL-NEXT:    s_load_b32 s4, s[4:5], 0x24
+; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-GISEL-NEXT:    s_and_b32 s4, s4, 1
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-GISEL-NEXT:    s_cmp_eq_u32 s4, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1
+; GFX11-GISEL-NEXT:    s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0
+; GFX11-GISEL-NEXT:    v_mov_b32_e32 v3, 0
+; GFX11-GISEL-NEXT:    global_store_b64 v4, v[0:1], s[2:3]
+; GFX11-GISEL-NEXT:    global_store_b32 v[2:3], v5, off
+; GFX11-GISEL-NEXT:    s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_64:
+; GFX11-SDAG:       ; %bb.0:
+; GFX11-SDAG-NEXT:    s_clause 0x1
+; GFX11-SDAG-NEXT:    s_load_b32 s6, s[4:5], 0x24
+; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v4, 0
+; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT:    s_and_b32 s4, s6, 1
+; GFX11-SDAG-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-SDAG-NEXT:    s_cmp_eq_u32 s4, 0
+; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-SDAG-NEXT:    s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s0
+; GFX11-SDAG-NEXT:    global_store_b64 v4, v[2:3], s[2:3]
+; GFX11-SDAG-NEXT:    global_store_b32 v[0:1], v5, off
+; GFX11-SDAG-NEXT:    s_endpgm
+  %and = and i32 %val0, 1
+  %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+  store i64 %result, ptr addrspace(1) %ptr
+  %cmp = icmp eq i32 %and, 0
+  %sel = select i1 %cmp, i32 1, i32 0
+  store i32 %sel, ptr addrspace(1) null, align 4
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll b/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
deleted file mode 100644
index 34fb0a19f2ab4..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/scc_quadmask.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
-;; Ensure that AND/ICMP cannot be fused into a single SCC-setting AND because s_quadmask_b32 implicitly defines SCC.
-
-define amdgpu_kernel void @quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: quadmask_32:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CHECK-NEXT:    v_mov_b32_e32 v2, 0
-; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    s_and_b32 s0, s0, 1
-; CHECK-NEXT:    s_quadmask_b32 s1, s1
-; CHECK-NEXT:    s_cmp_eq_u32 s0, 0
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT:    global_store_dword v2, v3, s[2:3]
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; CHECK-NEXT:    global_store_dword v[0:1], v2, off
-; CHECK-NEXT:    s_endpgm
-  %and = and i32 %val0, 1
-  %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
-  store i32 %result, ptr addrspace(1) %ptr
-  %cmp = icmp eq i32 %and, 0
-  %sel = select i1 %cmp, i32 1, i32 0
-  store i32 %sel, ptr addrspace(1) null, align 4
-  ret void
-}
-
-define amdgpu_kernel void @quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
-; CHECK-LABEL: quadmask_64:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x0
-; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
-; CHECK-NEXT:    v_mov_b32_e32 v2, 0
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    s_and_b32 s4, s6, 1
-; CHECK-NEXT:    s_quadmask_b64 s[0:1], s[0:1]
-; CHECK-NEXT:    v_mov_b32_e32 v0, s0
-; CHECK-NEXT:    v_mov_b32_e32 v1, s1
-; CHECK-NEXT:    s_cmp_eq_u32 s4, 0
-; CHECK-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; CHECK-NEXT:    global_store_dword v[0:1], v2, off
-; CHECK-NEXT:    s_endpgm
-  %and = and i32 %val0, 1
-  %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
-  store i64 %result, ptr addrspace(1) %ptr
-  %cmp = icmp eq i32 %and, 0
-  %sel = select i1 %cmp, i32 1, i32 0
-  store i32 %sel, ptr addrspace(1) null, align 4
-  ret void
-}



More information about the llvm-commits mailing list