[llvm] 1f58cbe - [DAG] Fold (umin (sub a b) a) -> (usubo a b); (select usubo.1 a usubo.0) (#161651)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 12 06:38:25 PST 2025
Author: Chaitanya Koparkar
Date: 2025-11-12T14:38:21Z
New Revision: 1f58cbe60a01d4651cec3ada480ab8b63586b680
URL: https://github.com/llvm/llvm-project/commit/1f58cbe60a01d4651cec3ada480ab8b63586b680
DIFF: https://github.com/llvm/llvm-project/commit/1f58cbe60a01d4651cec3ada480ab8b63586b680.diff
LOG: [DAG] Fold (umin (sub a b) a) -> (usubo a b); (select usubo.1 a usubo.0) (#161651)
Fixes #161036.
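Why the fold is sound: for unsigned a and b, (sub a, b) wraps exactly
when b > a, and the wrapped value is then always greater than a, so
umin(sub(a,b), a) returns a on overflow and the difference otherwise.
That is precisely a USUBO feeding a select on its overflow bit. A
minimal example of the pattern (the function name is illustrative; the
shape matches the new tests below):

    define i64 @sub_umin(i64 %a, i64 %b) {
      %sub = sub i64 %a, %b
      %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
      ret i64 %cond
    }

On targets where USUBO is legal but UMIN is not, this now lowers to a
flag-setting subtract plus a conditional select (subs + csel on
AArch64, sub + cmovb on x86-64) instead of materializing the compare
separately.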
Added:
llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll
llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index df353c4d91b1a..d9d3a3ec01757 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6219,6 +6219,25 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
SDLoc(N), VT, N0, N1))
return SD;
+  if (Opcode == ISD::UMIN && TLI.isOperationLegalOrCustom(ISD::USUBO, VT) &&
+      !TLI.isOperationLegalOrCustom(ISD::UMIN, VT)) {
+    SDValue B;
+
+    // (umin (sub a, b), a) -> (usubo a, b); (select usubo.1, a, usubo.0)
+    if (sd_match(N0, m_Sub(m_Specific(N1), m_Value(B)))) {
+      SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT));
+      SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N1, B);
+      return DAG.getSelect(DL, VT, USO.getValue(1), N1, USO.getValue(0));
+    }
+
+    // (umin a, (sub a, b)) -> (usubo a, b); (select usubo.1, a, usubo.0)
+    if (sd_match(N1, m_Sub(m_Specific(N0), m_Value(B)))) {
+      SDVTList VTs = DAG.getVTList(VT, getSetCCResultType(VT));
+      SDValue USO = DAG.getNode(ISD::USUBO, DL, VTs, N0, B);
+      return DAG.getSelect(DL, VT, USO.getValue(1), N0, USO.getValue(0));
+    }
+  }
+
// Simplify the operands using demanded-bits information.
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
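In IR terms, the node sequence built above corresponds to the
usub.with.overflow intrinsic feeding a select (a sketch for intuition;
the combiner constructs the equivalent DAG nodes directly rather than
emitting this IR):

    define i64 @sub_umin_expanded(i64 %a, i64 %b) {
      %agg = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
      %sub = extractvalue { i64, i1 } %agg, 0   ; usubo.0: a - b
      %ovf = extractvalue { i64, i1 } %agg, 1   ; usubo.1: did it wrap?
      %cond = select i1 %ovf, i64 %a, i64 %sub
      ret i64 %cond
    }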
diff --git a/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll
new file mode 100644
index 0000000000000..fe3eee06db65e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/umin-sub-to-usubo-select-combine.ll
@@ -0,0 +1,151 @@
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+; GitHub issue #161036
+
+; Positive test: umin(sub(a,b),a) with scalar types should be folded
+define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: underflow_compare_fold_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: csel x0, x0, x8, lo
+; CHECK-NEXT: ret
+ %sub = sub i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a)
+ ret i64 %cond
+}
+
+; Positive test: umin(a,sub(a,b)) with scalar types should be folded
+define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) {
+; CHECK-LABEL: underflow_compare_fold_i64_commute:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: csel x0, x0, x8, lo
+; CHECK-NEXT: ret
+ %sub = sub i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub)
+ ret i64 %cond
+}
+
+; Positive test: multi-use of the sub is fine since it is still computed only once
+define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i64_multi_use:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x8, x0, x1
+; CHECK-NEXT: csel x0, x0, x8, lo
+; CHECK-NEXT: str x8, [x2]
+; CHECK-NEXT: ret
+ %sub = sub i64 %a, %b
+ store i64 %sub, ptr addrspace(1) %ptr
+ %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
+ ret i64 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: underflow_compare_fold_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: csel w0, w0, w8, lo
+; CHECK-NEXT: ret
+ %sub = sub i32 %a, %b
+ %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a)
+ ret i32 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) {
+; CHECK-LABEL: underflow_compare_fold_i32_commute:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: csel w0, w0, w8, lo
+; CHECK-NEXT: ret
+ %sub = sub i32 %a, %b
+ %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub)
+ ret i32 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i32_multi_use:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs w8, w0, w1
+; CHECK-NEXT: csel w0, w0, w8, lo
+; CHECK-NEXT: str w8, [x2]
+; CHECK-NEXT: ret
+ %sub = sub i32 %a, %b
+ store i32 %sub, ptr addrspace(1) %ptr
+ %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a)
+ ret i32 %cond
+}
+
+; Negative test: i16 (i16 is not a legal type on AArch64, so the fold does not fire)
+define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) {
+; CHECK-LABEL: underflow_compare_fold_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, w1
+; CHECK-NEXT: and w9, w0, #0xffff
+; CHECK-NEXT: and w8, w8, #0xffff
+; CHECK-NEXT: cmp w8, w9
+; CHECK-NEXT: csel w0, w8, w9, lo
+; CHECK-NEXT: ret
+ %sub = sub i16 %a, %b
+ %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a)
+ ret i16 %cond
+}
+
+; Negative test: i16
+define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) {
+; CHECK-LABEL: underflow_compare_fold_i16_commute:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, w1
+; CHECK-NEXT: and w9, w0, #0xffff
+; CHECK-NEXT: and w8, w8, #0xffff
+; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: csel w0, w9, w8, lo
+; CHECK-NEXT: ret
+ %sub = sub i16 %a, %b
+ %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub)
+ ret i16 %cond
+}
+
+; Negative test: i16
+define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i16_multi_use:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, w1
+; CHECK-NEXT: and w9, w0, #0xffff
+; CHECK-NEXT: and w10, w8, #0xffff
+; CHECK-NEXT: strh w8, [x2]
+; CHECK-NEXT: cmp w10, w9
+; CHECK-NEXT: csel w0, w10, w9, lo
+; CHECK-NEXT: ret
+ %sub = sub i16 %a, %b
+ store i16 %sub, ptr addrspace(1) %ptr
+ %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a)
+ ret i16 %cond
+}
+
+; Negative test, vector types: umin(sub(a,b),a) with vectors (vector UMIN is legal here, so no fold)
+define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: underflow_compare_dontfold_vectors:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub v1.16b, v0.16b, v1.16b
+; CHECK-NEXT: umin v0.16b, v1.16b, v0.16b
+; CHECK-NEXT: ret
+ %sub = sub <16 x i8> %a, %b
+ %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a)
+ ret <16 x i8> %cond
+}
+
+; Negative test, pattern mismatch: umin(add(a,b),a)
+define i64 @umin_add(i64 %a, i64 %b) {
+; CHECK-LABEL: umin_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x8, x0, x1
+; CHECK-NEXT: cmp x8, x0
+; CHECK-NEXT: csel x0, x8, x0, lo
+; CHECK-NEXT: ret
+ %add = add i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a)
+ ret i64 %cond
+}
diff --git a/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll
new file mode 100644
index 0000000000000..e9756b411eb2c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/umin-sub-to-usubo-select-combine.ll
@@ -0,0 +1,156 @@
+; RUN: llc < %s -mtriple=x86_64 | FileCheck %s
+
+; GitHub issue #161036
+
+; Positive test: umin(sub(a,b),a) with scalar types should be folded
+define i64 @underflow_compare_fold_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: underflow_compare_fold_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: subq %rsi, %rax
+; CHECK-NEXT: cmovbq %rdi, %rax
+; CHECK-NEXT: retq
+ %sub = sub i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %sub, i64 %a)
+ ret i64 %cond
+}
+
+; Positive test: umin(a,sub(a,b)) with scalar types should be folded
+define i64 @underflow_compare_fold_i64_commute(i64 %a, i64 %b) {
+; CHECK-LABEL: underflow_compare_fold_i64_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: subq %rsi, %rax
+; CHECK-NEXT: cmovbq %rdi, %rax
+; CHECK-NEXT: retq
+ %sub = sub i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %a, i64 %sub)
+ ret i64 %cond
+}
+
+; Positive test: multi-use of the sub is fine since it is still computed only once
+define i64 @underflow_compare_fold_i64_multi_use(i64 %a, i64 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i64_multi_use:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: subq %rsi, %rax
+; CHECK-NEXT: movq %rax, (%rdx)
+; CHECK-NEXT: cmovbq %rdi, %rax
+; CHECK-NEXT: retq
+ %sub = sub i64 %a, %b
+ store i64 %sub, ptr addrspace(1) %ptr
+ %cond = call i64 @llvm.umin.i64(i64 %sub, i64 %a)
+ ret i64 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: underflow_compare_fold_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subl %esi, %eax
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: retq
+ %sub = sub i32 %a, %b
+ %cond = tail call i32 @llvm.umin.i32(i32 %sub, i32 %a)
+ ret i32 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32_commute(i32 %a, i32 %b) {
+; CHECK-LABEL: underflow_compare_fold_i32_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subl %esi, %eax
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: retq
+ %sub = sub i32 %a, %b
+ %cond = tail call i32 @llvm.umin.i32(i32 %a, i32 %sub)
+ ret i32 %cond
+}
+
+; Positive test: i32
+define i32 @underflow_compare_fold_i32_multi_use(i32 %a, i32 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i32_multi_use:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subl %esi, %eax
+; CHECK-NEXT: movl %eax, (%rdx)
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: retq
+ %sub = sub i32 %a, %b
+ store i32 %sub, ptr addrspace(1) %ptr
+ %cond = call i32 @llvm.umin.i32(i32 %sub, i32 %a)
+ ret i32 %cond
+}
+
+; Positive test: i16
+define i16 @underflow_compare_fold_i16(i16 %a, i16 %b) {
+; CHECK-LABEL: underflow_compare_fold_i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subw %si, %ax
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %sub = sub i16 %a, %b
+ %cond = tail call i16 @llvm.umin.i16(i16 %sub, i16 %a)
+ ret i16 %cond
+}
+
+; Positive test: i16
+define i16 @underflow_compare_fold_i16_commute(i16 %a, i16 %b) {
+; CHECK-LABEL: underflow_compare_fold_i16_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subw %si, %ax
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %sub = sub i16 %a, %b
+ %cond = tail call i16 @llvm.umin.i16(i16 %a, i16 %sub)
+ ret i16 %cond
+}
+
+; Positive test: i16
+define i16 @underflow_compare_fold_i16_multi_use(i16 %a, i16 %b, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: underflow_compare_fold_i16_multi_use:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: subw %si, %ax
+; CHECK-NEXT: movw %ax, (%rdx)
+; CHECK-NEXT: cmovbl %edi, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+ %sub = sub i16 %a, %b
+ store i16 %sub, ptr addrspace(1) %ptr
+ %cond = call i16 @llvm.umin.i16(i16 %sub, i16 %a)
+ ret i16 %cond
+}
+
+
+; Negative test, vector types: umin(sub(a,b),a) but with vectors
+define <16 x i8> @underflow_compare_dontfold_vectors(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: underflow_compare_dontfold_vectors:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movdqa %xmm0, %xmm2
+; CHECK-NEXT: psubb %xmm1, %xmm2
+; CHECK-NEXT: pminub %xmm2, %xmm0
+; CHECK-NEXT: retq
+ %sub = sub <16 x i8> %a, %b
+ %cond = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %sub, <16 x i8> %a)
+ ret <16 x i8> %cond
+}
+
+; Negative test, pattern mismatch: umin(add(a,b),a)
+define i64 @umin_add(i64 %a, i64 %b) {
+; CHECK-LABEL: umin_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: leaq (%rsi,%rdi), %rax
+; CHECK-NEXT: cmpq %rdi, %rax
+; CHECK-NEXT: cmovaeq %rdi, %rax
+; CHECK-NEXT: retq
+ %add = add i64 %a, %b
+ %cond = tail call i64 @llvm.umin.i64(i64 %add, i64 %a)
+ ret i64 %cond
+}