[llvm] [X86] Optimized ADD + ADC to ADC (PR #173543)

via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 25 03:15:10 PST 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-x86

Author: None (JaydeepChauhan14)

<details>
<summary>Changes</summary>

- This PR fixes issue https://github.com/llvm/llvm-project/issues/173408.

---
Full diff: https://github.com/llvm/llvm-project/pull/173543.diff


2 Files Affected:

- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+7) 
- (modified) llvm/test/CodeGen/X86/combine-adc.ll (+30) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 811ffb090d751..fd6e7da196d2f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58164,6 +58164,13 @@ static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  // Fold ADD(ADC(Y,0,CF), C) -> ADC(Y, C, CF)
+  if (!IsSub && LHS.getOpcode() == X86ISD::ADC &&
+      X86::isZeroNode(LHS.getOperand(1)) && isa<ConstantSDNode>(RHS)) {
+    return DAG.getNode(X86ISD::ADC, DL, N->getVTList(), LHS.getOperand(0), RHS,
+                       LHS.getOperand(2));
+  }
+
   // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
   // EFLAGS result doesn't change.
   return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
diff --git a/llvm/test/CodeGen/X86/combine-adc.ll b/llvm/test/CodeGen/X86/combine-adc.ll
index a2aaea31aa6ff..03e60c10cec4b 100644
--- a/llvm/test/CodeGen/X86/combine-adc.ll
+++ b/llvm/test/CodeGen/X86/combine-adc.ll
@@ -136,5 +136,35 @@ define i32 @adc_merge_sub(i32 %a0) nounwind {
   ret i32 %result
 }
 
+define i32 @optimize_adc(i32 %0, i32 %1, i32 %2) {
+; X86-LABEL: optimize_adc:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    adcl $42, %edx
+; X86-NEXT:    js .LBB4_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB4_2:
+; X86-NEXT:    retl
+;
+; X64-LABEL: optimize_adc:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    adcl $42, %edx
+; X64-NEXT:    cmovsl %esi, %eax
+; X64-NEXT:    retq
+  %4 = icmp ult i32 %0, %1
+  %5 = zext i1 %4 to i32
+  %6 = add i32 %2, 42
+  %7 = add i32 %6, %5
+  %8 = icmp slt i32 %7, 0
+  %9 = select i1 %8, i32 %1, i32 %0
+  ret i32 %9
+}
+
 declare { i8, i32 } @llvm.x86.addcarry.32(i8, i32, i32)
 declare void @use(i8)

``````````

</details>


https://github.com/llvm/llvm-project/pull/173543


More information about the llvm-commits mailing list