[llvm] 7d14733 - [AMDGPU] Generate s_absdiff_i32 (#164835)

via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 27 12:41:00 PDT 2025


Author: LU-JOHN
Date: 2025-10-27T14:40:56-05:00
New Revision: 7d14733c12d909e1bdd1499c9557e78565aca4ae

URL: https://github.com/llvm/llvm-project/commit/7d14733c12d909e1bdd1499c9557e78565aca4ae
DIFF: https://github.com/llvm/llvm-project/commit/7d14733c12d909e1bdd1499c9557e78565aca4ae.diff

LOG: [AMDGPU] Generate s_absdiff_i32 (#164835)

Generate s_absdiff_i32. Tested in absdiff.ll. Also update s_cmp_0.ll to
test that s_absdiff_i32 is foldable with a s_cmp_lg_u32 sX, 0.

---------

Signed-off-by: John Lu <John.Lu at amd.com>

Added: 
    llvm/test/CodeGen/AMDGPU/absdiff.ll

Modified: 
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/test/CodeGen/AMDGPU/s_cmp_0.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 84287b621fe78..1931e0be15152 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -838,9 +838,10 @@ def S_CBRANCH_G_FORK : SOP2_Pseudo <
   let SubtargetPredicate = isGFX6GFX7GFX8GFX9;
 }
 
-let Defs = [SCC] in {
-def S_ABSDIFF_I32 : SOP2_32 <"s_absdiff_i32">;
-} // End Defs = [SCC]
+let isCommutable = 1, Defs = [SCC] in
+def S_ABSDIFF_I32 : SOP2_32 <"s_absdiff_i32",
+  [(set i32:$sdst, (UniformUnaryFrag<abs> (sub_oneuse i32:$src0, i32:$src1)))]
+>;
 
 let SubtargetPredicate = isGFX8GFX9 in {
   def S_RFE_RESTORE_B64 : SOP2_Pseudo <

diff  --git a/llvm/test/CodeGen/AMDGPU/absdiff.ll b/llvm/test/CodeGen/AMDGPU/absdiff.ll
new file mode 100644
index 0000000000000..9cb397fb9d1c6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/absdiff.ll
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+
+define amdgpu_ps i16 @absdiff_i16_false(i16 inreg %arg0, i16 inreg %arg1) {
+; CHECK-LABEL: absdiff_i16_false:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_sub_i32 s0, s0, s1
+; CHECK-NEXT:    s_sext_i32_i16 s1, s0
+; CHECK-NEXT:    s_sub_i32 s0, 0, s0
+; CHECK-NEXT:    s_sext_i32_i16 s0, s0
+; CHECK-NEXT:    s_max_i32 s0, s1, s0
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i16 %arg0, %arg1
+  %res = call i16 @llvm.abs.i16(i16 %diff, i1 false) ; INT_MIN input returns INT_MIN
+  ret i16 %res
+}
+
+define amdgpu_ps i16 @absdiff_i16_true(i16 inreg %arg0, i16 inreg %arg1) {
+; CHECK-LABEL: absdiff_i16_true:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_sub_i32 s0, s0, s1
+; CHECK-NEXT:    s_sext_i32_i16 s1, s0
+; CHECK-NEXT:    s_sub_i32 s0, 0, s0
+; CHECK-NEXT:    s_sext_i32_i16 s0, s0
+; CHECK-NEXT:    s_max_i32 s0, s1, s0
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i16 %arg0, %arg1
+  %res = call i16 @llvm.abs.i16(i16 %diff, i1 true) ; INT_MIN input returns poison
+  ret i16 %res
+}
+
+define amdgpu_ps i32 @absdiff_i32_false(i32 inreg %arg0, i32 inreg %arg1) {
+; CHECK-LABEL: absdiff_i32_false:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_absdiff_i32 s0, s0, s1
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i32 %arg0, %arg1
+  %res = call i32 @llvm.abs.i32(i32 %diff, i1 false) ; INT_MIN input returns INT_MIN
+  ret i32 %res
+}
+
+define amdgpu_ps i32 @absdiff_i32_true(i32 inreg %arg0, i32 inreg %arg1) {
+; CHECK-LABEL: absdiff_i32_true:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_absdiff_i32 s0, s0, s1
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i32 %arg0, %arg1
+  %res = call i32 @llvm.abs.i32(i32 %diff, i1 true) ; INT_MIN input returns poison
+  ret i32 %res
+}
+
+; Multiple uses of %diff.  No benefit for using s_absdiff_i32.
+define amdgpu_ps i32 @absdiff_i32_false_multi_use(i32 inreg %arg0, i32 inreg %arg1) {
+; CHECK-LABEL: absdiff_i32_false_multi_use:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_sub_i32 s1, s0, s1
+; CHECK-NEXT:    s_abs_i32 s0, s1
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; use s1
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i32 %arg0, %arg1
+  %res = call i32 @llvm.abs.i32(i32 %diff, i1 false) ; INT_MIN input returns INT_MIN
+  call void asm "; use $0", "s"(i32 %diff)
+  ret i32 %res
+}
+
+define <2 x i32> @absdiff_2xi32_false(<2 x i32> %arg0, <2 x i32> %arg1) {
+; CHECK-LABEL: absdiff_2xi32_false:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_sub_u32_e32 v0, v0, v2
+; CHECK-NEXT:    v_sub_u32_e32 v1, v1, v3
+; CHECK-NEXT:    v_sub_u32_e32 v2, 0, v0
+; CHECK-NEXT:    v_max_i32_e32 v0, v2, v0
+; CHECK-NEXT:    v_sub_u32_e32 v2, 0, v1
+; CHECK-NEXT:    v_max_i32_e32 v1, v2, v1
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %diff = sub <2 x i32> %arg0, %arg1
+  %res = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %diff, i1 false) ; INT_MIN input returns INT_MIN
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @absdiff_4xi32_false(<4 x i32> %arg0, <4 x i32> %arg1) {
+; CHECK-LABEL: absdiff_4xi32_false:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_sub_u32_e32 v0, v0, v4
+; CHECK-NEXT:    v_sub_u32_e32 v1, v1, v5
+; CHECK-NEXT:    v_sub_u32_e32 v4, 0, v0
+; CHECK-NEXT:    v_sub_u32_e32 v2, v2, v6
+; CHECK-NEXT:    v_max_i32_e32 v0, v4, v0
+; CHECK-NEXT:    v_sub_u32_e32 v4, 0, v1
+; CHECK-NEXT:    v_sub_u32_e32 v3, v3, v7
+; CHECK-NEXT:    v_max_i32_e32 v1, v4, v1
+; CHECK-NEXT:    v_sub_u32_e32 v4, 0, v2
+; CHECK-NEXT:    v_max_i32_e32 v2, v4, v2
+; CHECK-NEXT:    v_sub_u32_e32 v4, 0, v3
+; CHECK-NEXT:    v_max_i32_e32 v3, v4, v3
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %diff = sub <4 x i32> %arg0, %arg1
+  %res = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %diff, i1 false) ; INT_MIN input returns INT_MIN
+  ret <4 x i32> %res
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll b/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
index dd5f838b4a206..0166d7ac7ddc2 100644
--- a/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
@@ -110,6 +110,21 @@ define amdgpu_ps i32 @abs32(i32 inreg %val0) {
   ret i32 %zext
 }
 
+define amdgpu_ps i32 @absdiff32(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: absdiff32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_absdiff_i32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
+; CHECK-NEXT:    ; return to shader part epilog
+  %diff = sub i32 %val0, %val1
+  %result = call i32 @llvm.abs.i32(i32 %diff, i1 false)
+  %cmp = icmp ne i32 %result, 0
+  %zext = zext i1 %cmp to i32
+  ret i32 %zext
+}
+
 define amdgpu_ps i32 @and32(i32 inreg %val0, i32 inreg %val1) {
 ; CHECK-LABEL: and32:
 ; CHECK:       ; %bb.0:
@@ -608,14 +623,14 @@ define amdgpu_ps i32 @si_pc_add_rel_offset_must_not_optimize() {
 ; CHECK-NEXT:    s_add_u32 s0, s0, __unnamed_1 at rel32@lo+4
 ; CHECK-NEXT:    s_addc_u32 s1, s1, __unnamed_1 at rel32@hi+12
 ; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; CHECK-NEXT:    s_cbranch_scc0 .LBB35_2
+; CHECK-NEXT:    s_cbranch_scc0 .LBB36_2
 ; CHECK-NEXT:  ; %bb.1: ; %endif
 ; CHECK-NEXT:    s_mov_b32 s0, 1
-; CHECK-NEXT:    s_branch .LBB35_3
-; CHECK-NEXT:  .LBB35_2: ; %if
+; CHECK-NEXT:    s_branch .LBB36_3
+; CHECK-NEXT:  .LBB36_2: ; %if
 ; CHECK-NEXT:    s_mov_b32 s0, 0
-; CHECK-NEXT:    s_branch .LBB35_3
-; CHECK-NEXT:  .LBB35_3:
+; CHECK-NEXT:    s_branch .LBB36_3
+; CHECK-NEXT:  .LBB36_3:
   %cmp = icmp ne ptr addrspace(4) @1, null
   br i1 %cmp, label %endif, label %if
 


        


More information about the llvm-commits mailing list