[llvm] [AMDGPU][NFC] Pre-commit test for redundant s_cmp_lg_* sX, 0 removal (PR #162351)

via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 7 21:12:42 PDT 2025


================
@@ -0,0 +1,585 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+
+declare i32 @llvm.ctpop.i32(i32)
+declare i64 @llvm.ctpop.i64(i64)
+declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
+declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
+
+define amdgpu_ps i32 @shl32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: shl32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_lshl_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = shl i32 %val0, %val1
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @shl64(i64 inreg %val0, i64 inreg %val1) {
+; CHECK-LABEL: shl64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_lshl_b64 s[0:1], s[0:1], s2
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = shl i64 %val0, %val1
+  %cmp = icmp ne i64 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @lshr32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: lshr32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_lshr_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = lshr i32 %val0, %val1
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @lshr64(i64 inreg %val0, i64 inreg %val1) {
+; CHECK-LABEL: lshr64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_lshr_b64 s[0:1], s[0:1], s2
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = lshr i64 %val0, %val1
+  %cmp = icmp ne i64 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @and32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: and32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = and i32 %val0, %val1
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @and64(i64 inreg %val0, i64 inreg %val1) {
+; CHECK-LABEL: and64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = and i64 %val0, %val1
+  %cmp = icmp ne i64 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @or32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: or32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = or i32 %val0, %val1
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @or64(i64 inreg %val0, i64 inreg %val1) {
+; CHECK-LABEL: or64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = or i64 %val0, %val1
+  %cmp = icmp ne i64 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @xor32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: xor32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_xor_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = xor i32 %val0, %val1
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @xor64(i64 inreg %val0, i64 inreg %val1) {
+; CHECK-LABEL: xor64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = xor i64 %val0, %val1
+  %cmp = icmp ne i64 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
+define amdgpu_ps i32 @nand32(i32 inreg %val0, i32 inreg %val1, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: nand32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_nand_b32 s0, s0, s1
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ; return to shader part epilog
+  %result = and i32 %val0, %val1
+  %result2 = xor i32 %result, -1
+  store i32 %result2, ptr addrspace(1) %ptr
----------------
LU-JOHN wrote:

> Using VALU stores for tests for scalar handling is suspicious, are these just for adding an extra use?

The stores are there to ensure the compiler cannot fold the operation into another instruction.
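
For illustration, a hypothetical variant without the store (not part of this patch) is sketched below: with no second use of the intermediate result, the `xor ..., -1` can be combined into the compare (checking `X != -1` instead of `not X != 0`), so the `s_nand_b32` + `s_cmp_lg_u32` pair this test is meant to exercise may never appear in the output.

```llvm
; Hypothetical counter-example, not in the patch: without the store keeping
; %result2 alive, the not/compare may be folded together and the scalar
; s_nand_b32 under test could disappear from the generated code.
define amdgpu_ps i32 @nand32_no_store(i32 inreg %val0, i32 inreg %val1) {
  %result = and i32 %val0, %val1
  %result2 = xor i32 %result, -1
  %cmp = icmp ne i32 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}
```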


https://github.com/llvm/llvm-project/pull/162351

