[llvm] f7a034d - [AMDGPU] (x or y) xor -1 -> x nor y (#130264)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 28 03:20:20 PDT 2025


Author: Ana Mihajlovic
Date: 2025-03-28T11:20:17+01:00
New Revision: f7a034d400860501a26e3429e1c6a9f310f07f76

URL: https://github.com/llvm/llvm-project/commit/f7a034d400860501a26e3429e1c6a9f310f07f76
DIFF: https://github.com/llvm/llvm-project/commit/f7a034d400860501a26e3429e1c6a9f310f07f76.diff

LOG: [AMDGPU] (x or y) xor -1 -> x nor y (#130264)

Added a pattern so that s_nor is selected for ((i1 x or i1 y) xor -1) instead
of s_or and s_xor. This patch is for divergent i1 values. The ballot in the
test is added for the retrieval of the lanemask. The control flow is needed
because the combiner can't pass through phi instructions.

Added: 
    llvm/test/CodeGen/AMDGPU/nor-divergent-lanemask.ll

Modified: 
    llvm/lib/Target/AMDGPU/SOPInstructions.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index eeac9c1ad1084..73f4655f735a2 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1925,6 +1925,20 @@ def : ScalarNot2Pat<S_ORN2_B32, or, v2i16>;
 def : ScalarNot2Pat<S_ORN2_B64, or, v4i16>;
 def : ScalarNot2Pat<S_ORN2_B64, or, v2i32>;
 
+let WaveSizePredicate = isWave32 in {
+def : GCNPat<
+  (i1 (not (or_oneuse i1:$src0, i1:$src1))),
+  (S_NOR_B32 i1:$src0, i1:$src1)
+>;
+}
+
+let WaveSizePredicate = isWave64 in {
+def : GCNPat<
+  (i1 (not (or_oneuse i1:$src0, i1:$src1))),
+  (S_NOR_B64 i1:$src0, i1:$src1)
+>;
+}
+
 //===----------------------------------------------------------------------===//
 // Target-specific instruction encodings.
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/test/CodeGen/AMDGPU/nor-divergent-lanemask.ll b/llvm/test/CodeGen/AMDGPU/nor-divergent-lanemask.ll
new file mode 100644
index 0000000000000..420539346b400
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/nor-divergent-lanemask.ll
@@ -0,0 +1,107 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr="-wavefrontsize32,+wavefrontsize64" -o - < %s | FileCheck -check-prefixes=SDAG-W64 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr="-wavefrontsize32,+wavefrontsize64" -o - < %s | FileCheck -check-prefixes=GISEL-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr="+wavefrontsize32,-wavefrontsize64" -o - < %s | FileCheck -check-prefixes=SDAG-W32 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr="+wavefrontsize32,-wavefrontsize64" -o - < %s | FileCheck -check-prefixes=GISEL-W32 %s
+
+; Use ballot for easy access to lanemask
+
+define amdgpu_ps i64 @test_nor(i64 inreg %a, i64 inreg %b) {
+; SDAG-W64-LABEL: test_nor:
+; SDAG-W64:       ; %bb.0:
+; SDAG-W64-NEXT:    s_nor_b64 s[0:1], s[0:1], s[2:3]
+; SDAG-W64-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; SDAG-W64-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; SDAG-W64-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v0
+; SDAG-W64-NEXT:    ; return to shader part epilog
+;
+; GISEL-W64-LABEL: test_nor:
+; GISEL-W64:       ; %bb.0:
+; GISEL-W64-NEXT:    s_nor_b64 s[0:1], s[0:1], s[2:3]
+; GISEL-W64-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-W64-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; GISEL-W64-NEXT:    ; return to shader part epilog
+;
+; SDAG-W32-LABEL: test_nor:
+; SDAG-W32:       ; %bb.0:
+; SDAG-W32-NEXT:    s_nor_b32 s0, s0, s2
+; SDAG-W32-NEXT:    s_mov_b32 s1, 0
+; SDAG-W32-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; SDAG-W32-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; SDAG-W32-NEXT:    v_cmp_ne_u32_e64 s0, 0, v0
+; SDAG-W32-NEXT:    ; return to shader part epilog
+;
+; GISEL-W32-LABEL: test_nor:
+; GISEL-W32:       ; %bb.0:
+; GISEL-W32-NEXT:    s_nor_b32 s0, s0, s2
+; GISEL-W32-NEXT:    s_mov_b32 s1, 0
+; GISEL-W32-NEXT:    s_and_b32 s0, s0, exec_lo
+; GISEL-W32-NEXT:    ; return to shader part epilog
+  %a.lanemask = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %a)
+  %b.lanemask = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %b)
+  %or = or i1 %a.lanemask, %b.lanemask
+  %xor = xor i1 %or, true
+  %r = call i64 @llvm.amdgcn.ballot.i64(i1 %xor)
+  ret i64 %r
+}
+
+define amdgpu_ps i64 @test_or_two_uses(i64 inreg %a, i64 inreg %b) {
+; SDAG-W64-LABEL: test_or_two_uses:
+; SDAG-W64:       ; %bb.0:
+; SDAG-W64-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; SDAG-W64-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
+; SDAG-W64-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; SDAG-W64-NEXT:    s_xor_b64 s[0:1], s[0:1], -1
+; SDAG-W64-NEXT:    s_waitcnt_depctr 0xfffe
+; SDAG-W64-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; SDAG-W64-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SDAG-W64-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; SDAG-W64-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
+; SDAG-W64-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
+; SDAG-W64-NEXT:    s_waitcnt_depctr 0xfffe
+; SDAG-W64-NEXT:    ; return to shader part epilog
+;
+; GISEL-W64-LABEL: test_or_two_uses:
+; GISEL-W64:       ; %bb.0:
+; GISEL-W64-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; GISEL-W64-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GISEL-W64-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
+; GISEL-W64-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; GISEL-W64-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; GISEL-W64-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GISEL-W64-NEXT:    ; return to shader part epilog
+;
+; SDAG-W32-LABEL: test_or_two_uses:
+; SDAG-W32:       ; %bb.0:
+; SDAG-W32-NEXT:    s_or_b32 s0, s0, s2
+; SDAG-W32-NEXT:    s_mov_b32 s3, 0
+; SDAG-W32-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; SDAG-W32-NEXT:    s_xor_b32 s0, s0, -1
+; SDAG-W32-NEXT:    s_mov_b32 s1, s3
+; SDAG-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; SDAG-W32-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; SDAG-W32-NEXT:    v_cmp_ne_u32_e64 s0, 0, v0
+; SDAG-W32-NEXT:    v_cmp_ne_u32_e64 s2, 0, v1
+; SDAG-W32-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; SDAG-W32-NEXT:    ; return to shader part epilog
+;
+; GISEL-W32-LABEL: test_or_two_uses:
+; GISEL-W32:       ; %bb.0:
+; GISEL-W32-NEXT:    s_or_b32 s0, s0, s2
+; GISEL-W32-NEXT:    s_mov_b32 s1, 0
+; GISEL-W32-NEXT:    s_xor_b32 s4, s0, -1
+; GISEL-W32-NEXT:    s_and_b32 s2, s0, exec_lo
+; GISEL-W32-NEXT:    s_mov_b32 s3, s1
+; GISEL-W32-NEXT:    s_and_b32 s0, s4, exec_lo
+; GISEL-W32-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-W32-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; GISEL-W32-NEXT:    ; return to shader part epilog
+  %a.lanemask = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %a)
+  %b.lanemask = call i1 @llvm.amdgcn.inverse.ballot.i64(i64 %b)
+  %or = or i1 %a.lanemask, %b.lanemask
+  %xor = xor i1 %or, true
+  %r0 = call i64 @llvm.amdgcn.ballot.i64(i1 %xor)
+  %r1 = call i64 @llvm.amdgcn.ballot.i64(i1 %or)
+  %r = and i64 %r0, %r1
+  ret i64 %r
+}
\ No newline at end of file


        


More information about the llvm-commits mailing list