[llvm] Remove the uaddo-only specification (PR #160392)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 23 13:50:52 PDT 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-x86

Author: AZero13 (AZero13)




---
Full diff: https://github.com/llvm/llvm-project/pull/160392.diff


4 Files Affected:

- (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+3-5) 
- (modified) llvm/lib/CodeGen/CodeGenPrepare.cpp (+6) 
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+2-4) 
- (modified) llvm/test/CodeGen/X86/usub_inc_iv.ll (+5-5) 


``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 4c2d991308d30..7ec876cd11d9d 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3455,16 +3455,14 @@ class LLVM_ABI TargetLoweringBase {
   /// matching of other patterns.
   virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                     bool MathUsed) const {
-    // TODO: The default logic is inherited from code in CodeGenPrepare.
-    // The opcode should not make a difference by default?
-    if (Opcode != ISD::UADDO)
-      return false;
-
     // Allow the transform as long as we have an integer type that is not
     // obviously illegal and unsupported and if the math result is used
     // besides the overflow check. On some targets (e.g. SPARC), it is
     // not profitable to form an overflow op if the math result has no
     // concrete users.
+    if (MathUsed && isOperationLegal(Opcode, VT))
+      return true;
+
     if (VT.isVector())
       return false;
     return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index d290f202f3cca..eb73d01b3558c 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1749,6 +1749,12 @@ bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
                                  Sub->hasNUsesOrMore(1)))
     return false;
 
+  // We don't want to move around uses of condition values this late, so we
+  // check if it is legal to create the call to the intrinsic in the basic
+  // block containing the icmp.
+  if (Sub->getParent() != Cmp->getParent() && !Sub->hasOneUse())
+    return false;
+
   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
                                    Cmp, Intrinsic::usub_with_overflow))
     return false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2feb76e0eb7b4..39a3fa98a8579 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3405,10 +3405,8 @@ bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
 
 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                              bool) const {
-  // TODO: Allow vectors?
-  if (VT.isVector())
-    return false;
-  return VT.isSimple() || !isOperationExpand(Opcode, VT);
+
+  return TargetLowering::shouldFormOverflowOp(Opcode, VT, true);
 }
 
 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
diff --git a/llvm/test/CodeGen/X86/usub_inc_iv.ll b/llvm/test/CodeGen/X86/usub_inc_iv.ll
index 88bfddb51f2d4..ff06aaabd1b0c 100644
--- a/llvm/test/CodeGen/X86/usub_inc_iv.ll
+++ b/llvm/test/CodeGen/X86/usub_inc_iv.ll
@@ -303,14 +303,14 @@ define i32 @test_06(ptr %p, i64 %len, i32 %x) {
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[MATH:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[IV]], i64 1)
-; CHECK-NEXT:    [[MATH]] = extractvalue { i64, i1 } [[TMP0]], 0
-; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
+; CHECK-NEXT:    [[OV:%.*]] = icmp eq i64 [[IV]], 0
 ; CHECK-NEXT:    br i1 [[OV]], label [[EXIT:%.*]], label [[BACKEDGE]]
 ; CHECK:       backedge:
-; CHECK-NEXT:    [[SUNKADDR:%.*]] = mul i64 [[MATH]], 4
+; CHECK-NEXT:    [[SUNKADDR:%.*]] = mul i64 [[IV]], 4
 ; CHECK-NEXT:    [[SUNKADDR1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[SUNKADDR]]
-; CHECK-NEXT:    [[LOADED:%.*]] = load atomic i32, ptr [[SUNKADDR1]] unordered, align 4
+; CHECK-NEXT:    [[SUNKADDR2:%.*]] = getelementptr i8, ptr [[SUNKADDR1]], i64 -4
+; CHECK-NEXT:    [[LOADED:%.*]] = load atomic i32, ptr [[SUNKADDR2]] unordered, align 4
+; CHECK-NEXT:    [[MATH]] = add i64 [[IV]], -1
 ; CHECK-NEXT:    [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]
 ; CHECK-NEXT:    br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]]
 ; CHECK:       exit:

``````````



https://github.com/llvm/llvm-project/pull/160392

