[llvm] 9ffbc8a - AMDGPU: Add overflow operations to isBoolSGPR (#141803)

via llvm-commits llvm-commits at lists.llvm.org
Wed May 28 12:35:07 PDT 2025


Author: Matt Arsenault
Date: 2025-05-28T21:35:04+02:00
New Revision: 9ffbc8a4ce456c75d425dcfd5cf2d32181a5290c

URL: https://github.com/llvm/llvm-project/commit/9ffbc8a4ce456c75d425dcfd5cf2d32181a5290c
DIFF: https://github.com/llvm/llvm-project/commit/9ffbc8a4ce456c75d425dcfd5cf2d32181a5290c.diff

LOG: AMDGPU: Add overflow operations to isBoolSGPR (#141803)

The particular use in the test doesn't appear to change the generated code
for the expanded cases (i.e. the signed add/sub or the multiplies).
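
For context, the combine exercised by this test rewrites (and (sext i1 %b), %y)
into (select i1 %b, i32 %y, i32 0), which lowers to a single v_cndmask once the
i1 is known to be a boolean SGPR value. A minimal IR sketch of that equivalence
for the unsigned-add carry (illustrative only, not part of this commit):

    declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

    ; The carry bit is result 1 of the overflow node, now recognized by
    ; isBoolSGPR, so the sext/and pair is foldable to the select form below.
    define i32 @fold_sketch(i32 %x, i32 %y) {
      %op    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
      %carry = extractvalue { i32, i1 } %op, 1
      ; sext/and form:  %sext = sext i1 %carry to i32
      ;                 %and  = and i32 %sext, %y
      ; equivalent select form:
      %sel   = select i1 %carry, i32 %y, i32 0
      ret i32 %sel
    }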

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 70205195891e1..fca94725e9cb8 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11908,6 +11908,13 @@ bool llvm::isBoolSGPR(SDValue V) {
   case ISD::OR:
   case ISD::XOR:
     return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1));
+  case ISD::SADDO:
+  case ISD::UADDO:
+  case ISD::SSUBO:
+  case ISD::USUBO:
+  case ISD::SMULO:
+  case ISD::UMULO:
+    return V.getResNo() == 1;
   }
   return false;
 }
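
Each of these ISD nodes produces two results: result 0 is the arithmetic value
and result 1 is the overflow flag, and only the flag is a boolean, hence the
getResNo() == 1 check. At the IR level the two results correspond to the two
fields of the with.overflow aggregate; a hypothetical example (not taken from
the commit):

    declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)

    define i1 @only_the_flag_is_bool(i32 %a, i32 %b) {
      %r   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
      %val = extractvalue { i32, i1 } %r, 0  ; result 0: the i32 difference, not a boolean
      %ovf = extractvalue { i32, i1 } %r, 1  ; result 1: the i1 overflow bit
      ret i1 %ovf
    }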

diff --git a/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll b/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
index bdad6f40480d3..b98c81db5da99 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
@@ -45,6 +45,95 @@ define i32 @and_sext_bool_fpclass(float %x, i32 %y) {
   ret i32 %and
 }
 
+; GCN-LABEL: {{^}}and_sext_bool_uadd_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_uadd_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+; GCN-LABEL: {{^}}and_sext_bool_usub_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_usub_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+; GCN-LABEL: {{^}}and_sext_bool_sadd_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
+; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
+; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
+; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_sadd_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+; GCN-LABEL: {{^}}and_sext_bool_ssub_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
+; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
+; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
+; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_ssub_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+; GCN-LABEL: {{^}}and_sext_bool_smul_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_hi_i32 v2, v0, v1
+; GCN-NEXT: v_mul_lo_u32 v0, v0, v1
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v2, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_smul_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+; GCN-LABEL: {{^}}and_sext_bool_umul_w_overflow:
+; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_hi_u32 v0, v0, v1
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64
+define i32 @and_sext_bool_umul_w_overflow(i32 %x, i32 %y) {
+  %uadd = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+  %carry = extractvalue { i32, i1 } %uadd, 1
+  %sext = sext i1 %carry to i32
+  %and = and i32 %sext, %y
+  ret i32 %and
+}
+
+
 declare i32 @llvm.amdgcn.workitem.id.x() #0
 
 declare i32 @llvm.amdgcn.workitem.id.y() #0
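
For reference, the checks above are driven by the RUN line at the top of
combine-and-sext-bool.ll, which is outside the hunks shown here. A plausible
invocation has roughly this shape (the triple and flags are assumptions, not
copied from the file):

    ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s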