[PATCH] D138993: [AMDGPU] Use s_cmp instead of s_cmpk

Jay Foad via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 30 10:04:08 PST 2022


This revision was automatically updated to reflect the committed changes.
Closed by commit rG3d9e226081cf: [AMDGPU] Use s_cmp instead of s_cmpk (authored by foad).

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D138993/new/

https://reviews.llvm.org/D138993

Files:
  llvm/lib/Target/AMDGPU/SIISelLowering.cpp
  llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
  llvm/test/CodeGen/AMDGPU/udiv.ll


Index: llvm/test/CodeGen/AMDGPU/udiv.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/udiv.ll
+++ llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -2807,7 +2807,7 @@
 ; GFX1030-NEXT:    s_add_u32 s4, 0x4237, s4
 ; GFX1030-NEXT:    s_addc_u32 s5, 0, 0
 ; GFX1030-NEXT:    v_add_co_u32 v2, s4, 0xa9000000, s4
-; GFX1030-NEXT:    s_cmpk_lg_u32 s4, 0x0
+; GFX1030-NEXT:    s_cmp_lg_u32 s4, 0
 ; GFX1030-NEXT:    s_addc_u32 s5, s5, 0xa7c5
 ; GFX1030-NEXT:    v_readfirstlane_b32 s4, v2
 ; GFX1030-NEXT:    s_mul_i32 s6, s5, 0xfffe7960
@@ -2830,7 +2830,7 @@
 ; GFX1030-NEXT:    s_add_u32 s4, s4, s7
 ; GFX1030-NEXT:    s_addc_u32 s6, 0, s6
 ; GFX1030-NEXT:    v_add_co_u32 v4, s4, v2, s4
-; GFX1030-NEXT:    s_cmpk_lg_u32 s4, 0x0
+; GFX1030-NEXT:    s_cmp_lg_u32 s4, 0
 ; GFX1030-NEXT:    s_addc_u32 s4, s5, s6
 ; GFX1030-NEXT:    v_mul_hi_u32 v8, v0, v4
 ; GFX1030-NEXT:    v_mad_u64_u32 v[2:3], null, v0, s4, 0
Index: llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
+++ llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -50,7 +50,7 @@
 ; GFX10-NEXT:    s_load_dword s4, s[4:5], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    v_add_co_u32 v0, s5, s4, s4
-; GFX10-NEXT:    s_cmpk_lg_u32 s5, 0x0
+; GFX10-NEXT:    s_cmp_lg_u32 s5, 0
 ; GFX10-NEXT:    s_addc_u32 s5, s4, 0
 ; GFX10-NEXT:    s_cselect_b32 s6, -1, 0
 ; GFX10-NEXT:    s_and_b32 s6, s6, exec_lo
@@ -69,7 +69,7 @@
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_add_co_u32 v0, s1, s0, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_cmpk_lg_u32 s1, 0x0
+; GFX11-NEXT:    s_cmp_lg_u32 s1, 0
 ; GFX11-NEXT:    s_addc_u32 s1, s0, 0
 ; GFX11-NEXT:    s_cselect_b32 s2, -1, 0
 ; GFX11-NEXT:    s_and_b32 s2, s2, exec_lo
@@ -154,7 +154,7 @@
 ; GFX10-NEXT:    s_cmp_lt_u32 s1, s0
 ; GFX10-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s1
-; GFX10-NEXT:    s_cmpk_lg_u32 s1, 0x0
+; GFX10-NEXT:    s_cmp_lg_u32 s1, 0
 ; GFX10-NEXT:    s_addc_u32 s0, s0, 0
 ; GFX10-NEXT:    v_cmp_ge_u32_e32 vcc_lo, s0, v0
 ; GFX10-NEXT:    s_cbranch_vccnz .LBB1_2
@@ -181,7 +181,7 @@
 ; GFX11-NEXT:    s_cmp_lt_u32 s1, s0
 ; GFX11-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s1
-; GFX11-NEXT:    s_cmpk_lg_u32 s1, 0x0
+; GFX11-NEXT:    s_cmp_lg_u32 s1, 0
 ; GFX11-NEXT:    s_addc_u32 s0, s0, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    v_cmp_ge_u32_e32 vcc_lo, s0, v0
Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4202,7 +4202,7 @@
             .addImm(0);
       }
     } else {
-      BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32))
+      BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
           .addReg(Src2.getReg())
           .addImm(0);
     }
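
For context, both compare forms set SCC to (src != 0) when the immediate is zero, so the s_addc_u32 instructions that follow in the tests above see the same carry-in before and after this change. Below is a minimal C++ sketch, not part of the patch, modelling that SCC/carry-in behaviour; the loop values are borrowed loosely from the udiv.ll checks and are illustrative only.

#include <cassert>
#include <cstdint>

// SCC = (src != imm), as computed by s_cmp_lg_u32 and s_cmpk_lg_u32.
// For a zero immediate the two forms give the same result.
static bool cmp_lg_u32(uint32_t src, uint32_t imm) { return src != imm; }

// Model of s_addc_u32: dst = src0 + src1 + SCC, SCC = unsigned carry-out.
static uint32_t addc_u32(uint32_t src0, uint32_t src1, bool &scc) {
  uint64_t sum = uint64_t(src0) + src1 + (scc ? 1u : 0u);
  scc = (sum >> 32) != 0; // carry-out written back to SCC
  return uint32_t(sum);
}

int main() {
  for (uint32_t s4 : {0u, 1u, 0xa9000000u, 0xffffffffu}) {
    bool scc_old = cmp_lg_u32(s4, 0x0); // before: s_cmpk_lg_u32 s4, 0x0
    bool scc_new = cmp_lg_u32(s4, 0);   // after:  s_cmp_lg_u32  s4, 0
    assert(scc_old == scc_new);         // same carry-in either way
    bool scc = scc_new;
    (void)addc_u32(0xa7c5u, 0u, scc);   // cf. "s_addc_u32 s5, s5, 0xa7c5"
  }
  return 0;
}

Compiled with any C++11 compiler, the asserts pass for every value tested, reflecting that the test updates in this patch change only the printed mnemonic and immediate form, not the carry behaviour.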

