[llvm] [AArch64] Support commuted operands in performFlagSettingCombine (PR #162496)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 8 08:13:58 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Cullen Rhodes (c-rhodes)
Changes
Fixes #122624.
Assisted-by: gpt-5-codex
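
For reference, a minimal sketch of the kind of input this affects, adapted from the `adds_cmn.ll` test updated below (function and value names here are illustrative): the generic `add` and the flag-setting node compute the same sum, but with their operands in the opposite order, so the previous combine, which only queried `getNodeIfExists` with the original operand order, failed to reuse the ADDS result and emitted a separate `cmn`.

```llvm
; Illustrative reduced example: the generic add uses (%y, %x) while the
; overflow intrinsic uses (%x, %y). With this patch the commuted generic add
; is combined into the flag-setting ADDS node, so a single adds is emitted
; instead of a cmn plus a separate add.
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

define { i32, i32 } @adds_cmn_commuted(i32 %x, i32 %y) {
entry:
  %ov = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %carry = extractvalue { i32, i1 } %ov, 1
  %sum = add i32 %y, %x              ; operands commuted vs. the intrinsic
  %c = zext i1 %carry to i32
  %r0 = insertvalue { i32, i32 } poison, i32 %c, 0
  %r1 = insertvalue { i32, i32 } %r0, i32 %sum, 1
  ret { i32, i32 } %r1
}
```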
---
Full diff: https://github.com/llvm/llvm-project/pull/162496.diff
3 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+15-3)
- (modified) llvm/test/CodeGen/AArch64/adds_cmn.ll (+2-4)
- (modified) llvm/test/CodeGen/AArch64/sat-add.ll (+2-4)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index dc8e7c84f5e2c..b8d81291a85d8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -26193,9 +26193,21 @@ static SDValue performFlagSettingCombine(SDNode *N,
}
// Combine identical generic nodes into this node, re-using the result.
- if (SDNode *Generic = DCI.DAG.getNodeIfExists(
- GenericOpcode, DCI.DAG.getVTList(VT), {LHS, RHS}))
- DCI.CombineTo(Generic, SDValue(N, 0));
+ auto CombineWithExistingGeneric = [&](SDValue Op0, SDValue Op1) {
+ if (SDNode *Generic = DCI.DAG.getNodeIfExists(
+ GenericOpcode, DCI.DAG.getVTList(VT), {Op0, Op1})) {
+ DCI.CombineTo(Generic, SDValue(N, 0));
+ return true;
+ }
+ return false;
+ };
+
+ if (CombineWithExistingGeneric(LHS, RHS))
+ return SDValue();
+
+ if (DCI.DAG.getTargetLoweringInfo().isCommutativeBinOp(GenericOpcode) &&
+ CombineWithExistingGeneric(RHS, LHS))
+ return SDValue();
return SDValue();
}
diff --git a/llvm/test/CodeGen/AArch64/adds_cmn.ll b/llvm/test/CodeGen/AArch64/adds_cmn.ll
index aa070b7886ba5..9b456a5419d61 100644
--- a/llvm/test/CodeGen/AArch64/adds_cmn.ll
+++ b/llvm/test/CodeGen/AArch64/adds_cmn.ll
@@ -22,10 +22,8 @@ entry:
define { i32, i32 } @adds_cmn_c(i32 noundef %x, i32 noundef %y) {
; CHECK-LABEL: adds_cmn_c:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: add w1, w1, w0
-; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: adds w1, w0, w1
+; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
entry:
%0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll
index ecd48d6b7c65b..149b4c4fd26c9 100644
--- a/llvm/test/CodeGen/AArch64/sat-add.ll
+++ b/llvm/test/CodeGen/AArch64/sat-add.ll
@@ -290,8 +290,7 @@ define i32 @unsigned_sat_variable_i32_using_cmp_sum(i32 %x, i32 %y) {
define i32 @unsigned_sat_variable_i32_using_cmp_notval(i32 %x, i32 %y) {
; CHECK-LABEL: unsigned_sat_variable_i32_using_cmp_notval:
; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: cmn w1, w0
+; CHECK-NEXT: adds w8, w1, w0
; CHECK-NEXT: csinv w0, w8, wzr, lo
; CHECK-NEXT: ret
%noty = xor i32 %y, -1
@@ -331,8 +330,7 @@ define i64 @unsigned_sat_variable_i64_using_cmp_sum(i64 %x, i64 %y) {
define i64 @unsigned_sat_variable_i64_using_cmp_notval(i64 %x, i64 %y) {
; CHECK-LABEL: unsigned_sat_variable_i64_using_cmp_notval:
; CHECK: // %bb.0:
-; CHECK-NEXT: add x8, x0, x1
-; CHECK-NEXT: cmn x1, x0
+; CHECK-NEXT: adds x8, x1, x0
; CHECK-NEXT: csinv x0, x8, xzr, lo
; CHECK-NEXT: ret
%noty = xor i64 %y, -1
``````````
https://github.com/llvm/llvm-project/pull/162496
More information about the llvm-commits mailing list