[llvm] [DAGCombiner] Fold subtraction if above threshold to `umin` (PR #134235)

Piotr Fusik via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 8 06:57:13 PDT 2025


https://github.com/pfusik updated https://github.com/llvm/llvm-project/pull/134235

From 530a213ec57a08ecf8477f30a3fe250412295204 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Thu, 3 Apr 2025 13:14:30 +0200
Subject: [PATCH 1/5] [RISCV][test] Add tests for subtraction if above
 threshold

---
 llvm/test/CodeGen/RISCV/rv32zbb.ll | 186 +++++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rv64zbb.ll | 217 +++++++++++++++++++++++++++++
 2 files changed, 403 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 90a8eadb3f974..8103ddcc427cd 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1479,3 +1479,189 @@ entry:
   %cmp = icmp ne i32 %popcnt, 1
   ret i1 %cmp
 }
+
+define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
+; CHECK-LABEL: sub_if_uge_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    zext.b a2, a1
+; CHECK-NEXT:    zext.b a3, a0
+; CHECK-NEXT:    sltu a2, a3, a2
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a1, a2, a1
+; CHECK-NEXT:    sub a0, a0, a1
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i8 %x, %y
+  %select = select i1 %cmp, i8 0, i8 %y
+  %sub = sub nuw i8 %x, %select
+  ret i8 %sub
+}
+
+define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
+; RV32I-LABEL: sub_if_uge_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a3, a1, a2
+; RV32I-NEXT:    and a2, a0, a2
+; RV32I-NEXT:    sltu a2, a2, a3
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a2, a1
+; RV32ZBB-NEXT:    zext.h a3, a0
+; RV32ZBB-NEXT:    sltu a2, a3, a2
+; RV32ZBB-NEXT:    addi a2, a2, -1
+; RV32ZBB-NEXT:    and a1, a2, a1
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    ret
+  %cmp = icmp ult i16 %x, %y
+  %select = select i1 %cmp, i16 0, i16 %y
+  %sub = sub nuw i16 %x, %select
+  ret i16 %sub
+}
+
+define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sltu a2, a0, a1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a1, a2, a1
+; CHECK-NEXT:    sub a0, a0, a1
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  ret i32 %sub
+}
+
+define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
+; CHECK-LABEL: sub_if_uge_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beq a1, a3, .LBB52_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sltu a4, a1, a3
+; CHECK-NEXT:    j .LBB52_3
+; CHECK-NEXT:  .LBB52_2:
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:  .LBB52_3:
+; CHECK-NEXT:    addi a4, a4, -1
+; CHECK-NEXT:    and a3, a4, a3
+; CHECK-NEXT:    and a2, a4, a2
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:    sub a1, a1, a3
+; CHECK-NEXT:    sub a1, a1, a4
+; CHECK-NEXT:    sub a0, a0, a2
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i64 %x, %y
+  %select = select i1 %cmp, i64 0, i64 %y
+  %sub = sub nuw i64 %x, %select
+  ret i64 %sub
+}
+
+define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
+; CHECK-LABEL: sub_if_uge_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lw a7, 4(a2)
+; CHECK-NEXT:    lw a6, 8(a2)
+; CHECK-NEXT:    lw t0, 12(a2)
+; CHECK-NEXT:    lw a4, 12(a1)
+; CHECK-NEXT:    lw a3, 4(a1)
+; CHECK-NEXT:    lw a5, 8(a1)
+; CHECK-NEXT:    beq a4, t0, .LBB53_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sltu t1, a4, t0
+; CHECK-NEXT:    j .LBB53_3
+; CHECK-NEXT:  .LBB53_2:
+; CHECK-NEXT:    sltu t1, a5, a6
+; CHECK-NEXT:  .LBB53_3:
+; CHECK-NEXT:    lw a2, 0(a2)
+; CHECK-NEXT:    lw a1, 0(a1)
+; CHECK-NEXT:    beq a3, a7, .LBB53_5
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    sltu t2, a3, a7
+; CHECK-NEXT:    j .LBB53_6
+; CHECK-NEXT:  .LBB53_5:
+; CHECK-NEXT:    sltu t2, a1, a2
+; CHECK-NEXT:  .LBB53_6:
+; CHECK-NEXT:    xor t3, a4, t0
+; CHECK-NEXT:    xor t4, a5, a6
+; CHECK-NEXT:    or t3, t4, t3
+; CHECK-NEXT:    beqz t3, .LBB53_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    mv t2, t1
+; CHECK-NEXT:  .LBB53_8:
+; CHECK-NEXT:    addi t2, t2, -1
+; CHECK-NEXT:    and t1, t2, t0
+; CHECK-NEXT:    and t0, t2, a2
+; CHECK-NEXT:    and a7, t2, a7
+; CHECK-NEXT:    sltu a2, a1, t0
+; CHECK-NEXT:    and t2, t2, a6
+; CHECK-NEXT:    mv a6, a2
+; CHECK-NEXT:    beq a3, a7, .LBB53_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    sltu a6, a3, a7
+; CHECK-NEXT:  .LBB53_10:
+; CHECK-NEXT:    sub t3, a5, t2
+; CHECK-NEXT:    sltu a5, a5, t2
+; CHECK-NEXT:    sub a4, a4, t1
+; CHECK-NEXT:    sub a3, a3, a7
+; CHECK-NEXT:    sub a1, a1, t0
+; CHECK-NEXT:    sltu a7, t3, a6
+; CHECK-NEXT:    sub a4, a4, a5
+; CHECK-NEXT:    sub a5, t3, a6
+; CHECK-NEXT:    sub a3, a3, a2
+; CHECK-NEXT:    sub a2, a4, a7
+; CHECK-NEXT:    sw a1, 0(a0)
+; CHECK-NEXT:    sw a3, 4(a0)
+; CHECK-NEXT:    sw a5, 8(a0)
+; CHECK-NEXT:    sw a2, 12(a0)
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i128 %x, %y
+  %select = select i1 %cmp, i128 0, i128 %y
+  %sub = sub nuw i128 %x, %select
+  ret i128 %sub
+}
+
+define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_multiuse_select_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sltu a2, a0, a1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a1, a2, a1
+; CHECK-NEXT:    sub a0, a0, a1
+; CHECK-NEXT:    sll a0, a0, a1
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  %shl = shl i32 %sub, %select
+  ret i32 %shl
+}
+
+define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
+; CHECK-LABEL: sub_if_uge_multiuse_cmp_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sltu a2, a0, a1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a2, a2, a1
+; CHECK-NEXT:    sub a2, a0, a2
+; CHECK-NEXT:    bltu a0, a1, .LBB55_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 4
+; CHECK-NEXT:    sll a0, a2, a0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB55_2:
+; CHECK-NEXT:    li a0, 2
+; CHECK-NEXT:    sll a0, a2, a0
+; CHECK-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  %select2 = select i1 %cmp, i32 2, i32 4
+  %shl = shl i32 %sub, %select2
+  ret i32 %shl
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index d67db77c04a8e..106c3049c6818 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1699,3 +1699,220 @@ entry:
   %cmp = icmp eq i32 %popcnt, 1
   ret i1 %cmp
 }
+
+define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
+; RV64I-LABEL: sub_if_uge_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    zext.b a2, a1
+; RV64I-NEXT:    zext.b a3, a0
+; RV64I-NEXT:    sltu a2, a3, a2
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.b a2, a1
+; RV64ZBB-NEXT:    zext.b a3, a0
+; RV64ZBB-NEXT:    sltu a2, a3, a2
+; RV64ZBB-NEXT:    addi a2, a2, -1
+; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i8 %x, %y
+  %select = select i1 %cmp, i8 0, i8 %y
+  %sub = sub nuw i8 %x, %select
+  ret i8 %sub
+}
+
+define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
+; RV64I-LABEL: sub_if_uge_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a3, a1, a2
+; RV64I-NEXT:    and a2, a0, a2
+; RV64I-NEXT:    sltu a2, a2, a3
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a2, a1
+; RV64ZBB-NEXT:    zext.h a3, a0
+; RV64ZBB-NEXT:    sltu a2, a3, a2
+; RV64ZBB-NEXT:    addi a2, a2, -1
+; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i16 %x, %y
+  %select = select i1 %cmp, i16 0, i16 %y
+  %sub = sub nuw i16 %x, %select
+  ret i16 %sub
+}
+
+define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
+; RV64I-LABEL: sub_if_uge_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    sltu a2, a3, a2
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    sltu a2, a3, a2
+; RV64ZBB-NEXT:    addi a2, a2, -1
+; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  ret i32 %sub
+}
+
+define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
+; RV64I-LABEL: sub_if_uge_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a2, a0, a1
+; RV64ZBB-NEXT:    addi a2, a2, -1
+; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i64 %x, %y
+  %select = select i1 %cmp, i64 0, i64 %y
+  %sub = sub nuw i64 %x, %select
+  ret i64 %sub
+}
+
+define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
+; RV64I-LABEL: sub_if_uge_i128:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a1, a3, .LBB66_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    sltu a4, a1, a3
+; RV64I-NEXT:    j .LBB66_3
+; RV64I-NEXT:  .LBB66_2:
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:  .LBB66_3:
+; RV64I-NEXT:    addi a4, a4, -1
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    and a2, a4, a2
+; RV64I-NEXT:    sltu a4, a0, a2
+; RV64I-NEXT:    sub a1, a1, a3
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    sub a0, a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_i128:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    beq a1, a3, .LBB66_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    sltu a4, a1, a3
+; RV64ZBB-NEXT:    j .LBB66_3
+; RV64ZBB-NEXT:  .LBB66_2:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:  .LBB66_3:
+; RV64ZBB-NEXT:    addi a4, a4, -1
+; RV64ZBB-NEXT:    and a3, a4, a3
+; RV64ZBB-NEXT:    and a2, a4, a2
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i128 %x, %y
+  %select = select i1 %cmp, i128 0, i128 %y
+  %sub = sub nuw i128 %x, %select
+  ret i128 %sub
+}
+
+define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
+; RV64I-LABEL: sub_if_uge_multiuse_select_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    sltu a2, a3, a2
+; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    and a1, a2, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_multiuse_select_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    sltu a2, a3, a2
+; RV64ZBB-NEXT:    addi a2, a2, -1
+; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  %shl = shl i32 %sub, %select
+  ret i32 %shl
+}
+
+define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
+; RV64I-LABEL: sub_if_uge_multiuse_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a2, a1
+; RV64I-NEXT:    sext.w a3, a0
+; RV64I-NEXT:    sltu a4, a3, a2
+; RV64I-NEXT:    addi a4, a4, -1
+; RV64I-NEXT:    and a1, a4, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    bltu a3, a2, .LBB68_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    li a1, 4
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB68_2:
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_multiuse_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sext.w a2, a1
+; RV64ZBB-NEXT:    sext.w a3, a0
+; RV64ZBB-NEXT:    sltu a4, a3, a2
+; RV64ZBB-NEXT:    addi a4, a4, -1
+; RV64ZBB-NEXT:    and a1, a4, a1
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    bltu a3, a2, .LBB68_2
+; RV64ZBB-NEXT:  # %bb.1:
+; RV64ZBB-NEXT:    li a1, 4
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+; RV64ZBB-NEXT:  .LBB68_2:
+; RV64ZBB-NEXT:    li a1, 2
+; RV64ZBB-NEXT:    sllw a0, a0, a1
+; RV64ZBB-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %select = select i1 %cmp, i32 0, i32 %y
+  %sub = sub nuw i32 %x, %select
+  %select2 = select i1 %cmp, i32 2, i32 4
+  %shl = shl i32 %sub, %select2
+  ret i32 %shl
+}

From ff51ddf01e5c85a102d1ddef872da090ed887990 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Thu, 3 Apr 2025 13:14:40 +0200
Subject: [PATCH 2/5] [RISCV] Fold subtraction if above threshold to Zbb `minu`

Folds patterns such as:

    unsigned foo(unsigned x, unsigned y) {
      return x >= y ? x - y : x;
    }

Before:

    sltu    a2, a0, a1
    addi    a2, a2, -1
    and     a1, a1, a2
    subw    a0, a0, a1

Or, with Zicond:

    sltu    a2, a0, a1
    czero.nez       a1, a1, a2
    subw    a0, a0, a1

After:

    subw    a1, a0, a1
    minu    a0, a0, a1

This fold applies only to unsigned comparisons: if `x >= y`, then
`x - y` is less than or equal to `x`, so the `umin` selects the
difference; otherwise, `x - y` wraps around and is greater than `x`,
so the `umin` selects the original `x`.
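
A minimal, self-contained sketch (illustrative only, not part of the
patch) that exhaustively checks this identity over all i8 values:

    /* Hypothetical harness: verifies that  x >= y ? x - y : x  equals
       umin(x, x - y)  for every pair of i8 values, relying on unsigned
       wrap-around when x < y. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t umin8(uint8_t a, uint8_t b) { return a < b ? a : b; }

    int main(void) {
      for (unsigned x = 0; x < 256; ++x) {
        for (unsigned y = 0; y < 256; ++y) {
          uint8_t sub = (uint8_t)(x - y);  /* wraps when x < y */
          uint8_t expected = x >= y ? sub : (uint8_t)x;
          assert(expected == umin8((uint8_t)x, sub));
        }
      }
      return 0;
    }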
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  13 +
 llvm/test/CodeGen/RISCV/rv32zbb.ll          | 326 +++++++++++++-------
 llvm/test/CodeGen/RISCV/rv64zbb.ll          |  55 ++--
 3 files changed, 252 insertions(+), 142 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2a1dd2b2def17..e76d4dac8985c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14895,6 +14895,19 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineSubShiftToOrcB(N, DAG, Subtarget))
     return V;
 
+  if (Subtarget.hasStdExtZbb()) {
+    // fold (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y))
+    using namespace llvm::SDPatternMatch;
+    SDValue Y;
+    if (sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
+                                               m_SpecificCondCode(ISD::SETULT)),
+                                       m_Zero(), m_Deferred(Y))))) {
+      SDLoc DL(N);
+      return DAG.getNode(ISD::UMIN, DL, VT, N0,
+                         DAG.getNode(ISD::SUB, DL, VT, N0, Y));
+    }
+  }
+
   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
   //      (select lhs, rhs, cc, x, (sub x, y))
   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 8103ddcc427cd..50b198443b3a8 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1481,15 +1481,23 @@ entry:
 }
 
 define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
-; CHECK-LABEL: sub_if_uge_i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    zext.b a2, a1
-; CHECK-NEXT:    zext.b a3, a0
-; CHECK-NEXT:    sltu a2, a3, a2
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a1, a2, a1
-; CHECK-NEXT:    sub a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    zext.b a2, a1
+; RV32I-NEXT:    zext.b a3, a0
+; RV32I-NEXT:    sltu a2, a3, a2
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.b a2, a0
+; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    zext.b a0, a0
+; RV32ZBB-NEXT:    minu a0, a2, a0
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i8 %x, %y
   %select = select i1 %cmp, i8 0, i8 %y
   %sub = sub nuw i8 %x, %select
@@ -1511,12 +1519,10 @@ define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
 ;
 ; RV32ZBB-LABEL: sub_if_uge_i16:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    zext.h a2, a1
-; RV32ZBB-NEXT:    zext.h a3, a0
-; RV32ZBB-NEXT:    sltu a2, a3, a2
-; RV32ZBB-NEXT:    addi a2, a2, -1
-; RV32ZBB-NEXT:    and a1, a2, a1
+; RV32ZBB-NEXT:    zext.h a2, a0
 ; RV32ZBB-NEXT:    sub a0, a0, a1
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    minu a0, a2, a0
 ; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i16 %x, %y
   %select = select i1 %cmp, i16 0, i16 %y
@@ -1525,13 +1531,19 @@ define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
 }
 
 define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
-; CHECK-LABEL: sub_if_uge_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltu a2, a0, a1
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a1, a2, a1
-; CHECK-NEXT:    sub a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a1, a0, a1
+; RV32ZBB-NEXT:    minu a0, a0, a1
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i32 %x, %y
   %select = select i1 %cmp, i32 0, i32 %y
   %sub = sub nuw i32 %x, %select
@@ -1539,23 +1551,43 @@ define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
 }
 
 define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
-; CHECK-LABEL: sub_if_uge_i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    beq a1, a3, .LBB52_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    sltu a4, a1, a3
-; CHECK-NEXT:    j .LBB52_3
-; CHECK-NEXT:  .LBB52_2:
-; CHECK-NEXT:    sltu a4, a0, a2
-; CHECK-NEXT:  .LBB52_3:
-; CHECK-NEXT:    addi a4, a4, -1
-; CHECK-NEXT:    and a3, a4, a3
-; CHECK-NEXT:    and a2, a4, a2
-; CHECK-NEXT:    sltu a4, a0, a2
-; CHECK-NEXT:    sub a1, a1, a3
-; CHECK-NEXT:    sub a1, a1, a4
-; CHECK-NEXT:    sub a0, a0, a2
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    beq a1, a3, .LBB52_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a4, a1, a3
+; RV32I-NEXT:    j .LBB52_3
+; RV32I-NEXT:  .LBB52_2:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:  .LBB52_3:
+; RV32I-NEXT:    addi a4, a4, -1
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a1, a1, a3
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    sub a3, a1, a3
+; RV32ZBB-NEXT:    sub a3, a3, a4
+; RV32ZBB-NEXT:    sub a2, a0, a2
+; RV32ZBB-NEXT:    beq a1, a3, .LBB52_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu a4, a1, a3
+; RV32ZBB-NEXT:    beqz a4, .LBB52_3
+; RV32ZBB-NEXT:    j .LBB52_4
+; RV32ZBB-NEXT:  .LBB52_2:
+; RV32ZBB-NEXT:    sltu a4, a0, a2
+; RV32ZBB-NEXT:    bnez a4, .LBB52_4
+; RV32ZBB-NEXT:  .LBB52_3:
+; RV32ZBB-NEXT:    mv a0, a2
+; RV32ZBB-NEXT:    mv a1, a3
+; RV32ZBB-NEXT:  .LBB52_4:
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i64 %x, %y
   %select = select i1 %cmp, i64 0, i64 %y
   %sub = sub nuw i64 %x, %select
@@ -1563,63 +1595,123 @@ define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
 }
 
 define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
-; CHECK-LABEL: sub_if_uge_i128:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lw a7, 4(a2)
-; CHECK-NEXT:    lw a6, 8(a2)
-; CHECK-NEXT:    lw t0, 12(a2)
-; CHECK-NEXT:    lw a4, 12(a1)
-; CHECK-NEXT:    lw a3, 4(a1)
-; CHECK-NEXT:    lw a5, 8(a1)
-; CHECK-NEXT:    beq a4, t0, .LBB53_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    sltu t1, a4, t0
-; CHECK-NEXT:    j .LBB53_3
-; CHECK-NEXT:  .LBB53_2:
-; CHECK-NEXT:    sltu t1, a5, a6
-; CHECK-NEXT:  .LBB53_3:
-; CHECK-NEXT:    lw a2, 0(a2)
-; CHECK-NEXT:    lw a1, 0(a1)
-; CHECK-NEXT:    beq a3, a7, .LBB53_5
-; CHECK-NEXT:  # %bb.4:
-; CHECK-NEXT:    sltu t2, a3, a7
-; CHECK-NEXT:    j .LBB53_6
-; CHECK-NEXT:  .LBB53_5:
-; CHECK-NEXT:    sltu t2, a1, a2
-; CHECK-NEXT:  .LBB53_6:
-; CHECK-NEXT:    xor t3, a4, t0
-; CHECK-NEXT:    xor t4, a5, a6
-; CHECK-NEXT:    or t3, t4, t3
-; CHECK-NEXT:    beqz t3, .LBB53_8
-; CHECK-NEXT:  # %bb.7:
-; CHECK-NEXT:    mv t2, t1
-; CHECK-NEXT:  .LBB53_8:
-; CHECK-NEXT:    addi t2, t2, -1
-; CHECK-NEXT:    and t1, t2, t0
-; CHECK-NEXT:    and t0, t2, a2
-; CHECK-NEXT:    and a7, t2, a7
-; CHECK-NEXT:    sltu a2, a1, t0
-; CHECK-NEXT:    and t2, t2, a6
-; CHECK-NEXT:    mv a6, a2
-; CHECK-NEXT:    beq a3, a7, .LBB53_10
-; CHECK-NEXT:  # %bb.9:
-; CHECK-NEXT:    sltu a6, a3, a7
-; CHECK-NEXT:  .LBB53_10:
-; CHECK-NEXT:    sub t3, a5, t2
-; CHECK-NEXT:    sltu a5, a5, t2
-; CHECK-NEXT:    sub a4, a4, t1
-; CHECK-NEXT:    sub a3, a3, a7
-; CHECK-NEXT:    sub a1, a1, t0
-; CHECK-NEXT:    sltu a7, t3, a6
-; CHECK-NEXT:    sub a4, a4, a5
-; CHECK-NEXT:    sub a5, t3, a6
-; CHECK-NEXT:    sub a3, a3, a2
-; CHECK-NEXT:    sub a2, a4, a7
-; CHECK-NEXT:    sw a1, 0(a0)
-; CHECK-NEXT:    sw a3, 4(a0)
-; CHECK-NEXT:    sw a5, 8(a0)
-; CHECK-NEXT:    sw a2, 12(a0)
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_i128:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a7, 4(a2)
+; RV32I-NEXT:    lw a6, 8(a2)
+; RV32I-NEXT:    lw t0, 12(a2)
+; RV32I-NEXT:    lw a4, 12(a1)
+; RV32I-NEXT:    lw a3, 4(a1)
+; RV32I-NEXT:    lw a5, 8(a1)
+; RV32I-NEXT:    beq a4, t0, .LBB53_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu t1, a4, t0
+; RV32I-NEXT:    j .LBB53_3
+; RV32I-NEXT:  .LBB53_2:
+; RV32I-NEXT:    sltu t1, a5, a6
+; RV32I-NEXT:  .LBB53_3:
+; RV32I-NEXT:    lw a2, 0(a2)
+; RV32I-NEXT:    lw a1, 0(a1)
+; RV32I-NEXT:    beq a3, a7, .LBB53_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    sltu t2, a3, a7
+; RV32I-NEXT:    j .LBB53_6
+; RV32I-NEXT:  .LBB53_5:
+; RV32I-NEXT:    sltu t2, a1, a2
+; RV32I-NEXT:  .LBB53_6:
+; RV32I-NEXT:    xor t3, a4, t0
+; RV32I-NEXT:    xor t4, a5, a6
+; RV32I-NEXT:    or t3, t4, t3
+; RV32I-NEXT:    beqz t3, .LBB53_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    mv t2, t1
+; RV32I-NEXT:  .LBB53_8:
+; RV32I-NEXT:    addi t2, t2, -1
+; RV32I-NEXT:    and t1, t2, t0
+; RV32I-NEXT:    and t0, t2, a2
+; RV32I-NEXT:    and a7, t2, a7
+; RV32I-NEXT:    sltu a2, a1, t0
+; RV32I-NEXT:    and t2, t2, a6
+; RV32I-NEXT:    mv a6, a2
+; RV32I-NEXT:    beq a3, a7, .LBB53_10
+; RV32I-NEXT:  # %bb.9:
+; RV32I-NEXT:    sltu a6, a3, a7
+; RV32I-NEXT:  .LBB53_10:
+; RV32I-NEXT:    sub t3, a5, t2
+; RV32I-NEXT:    sltu a5, a5, t2
+; RV32I-NEXT:    sub a4, a4, t1
+; RV32I-NEXT:    sub a3, a3, a7
+; RV32I-NEXT:    sub a1, a1, t0
+; RV32I-NEXT:    sltu a7, t3, a6
+; RV32I-NEXT:    sub a4, a4, a5
+; RV32I-NEXT:    sub a5, t3, a6
+; RV32I-NEXT:    sub a3, a3, a2
+; RV32I-NEXT:    sub a2, a4, a7
+; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a3, 4(a0)
+; RV32I-NEXT:    sw a5, 8(a0)
+; RV32I-NEXT:    sw a2, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_i128:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lw a7, 0(a2)
+; RV32ZBB-NEXT:    lw t0, 4(a2)
+; RV32ZBB-NEXT:    lw a5, 8(a2)
+; RV32ZBB-NEXT:    lw a6, 12(a2)
+; RV32ZBB-NEXT:    lw a2, 8(a1)
+; RV32ZBB-NEXT:    lw a3, 12(a1)
+; RV32ZBB-NEXT:    lw a4, 0(a1)
+; RV32ZBB-NEXT:    lw a1, 4(a1)
+; RV32ZBB-NEXT:    sltu t1, a2, a5
+; RV32ZBB-NEXT:    sub a6, a3, a6
+; RV32ZBB-NEXT:    sltu t2, a4, a7
+; RV32ZBB-NEXT:    sub a6, a6, t1
+; RV32ZBB-NEXT:    mv t1, t2
+; RV32ZBB-NEXT:    beq a1, t0, .LBB53_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    sltu t1, a1, t0
+; RV32ZBB-NEXT:  .LBB53_2:
+; RV32ZBB-NEXT:    sub t3, a2, a5
+; RV32ZBB-NEXT:    sltu a5, t3, t1
+; RV32ZBB-NEXT:    sub a5, a6, a5
+; RV32ZBB-NEXT:    sub a6, t3, t1
+; RV32ZBB-NEXT:    beq a3, a5, .LBB53_4
+; RV32ZBB-NEXT:  # %bb.3:
+; RV32ZBB-NEXT:    sltu t1, a3, a5
+; RV32ZBB-NEXT:    j .LBB53_5
+; RV32ZBB-NEXT:  .LBB53_4:
+; RV32ZBB-NEXT:    sltu t1, a2, a6
+; RV32ZBB-NEXT:  .LBB53_5:
+; RV32ZBB-NEXT:    sub t0, a1, t0
+; RV32ZBB-NEXT:    sub t0, t0, t2
+; RV32ZBB-NEXT:    sub a7, a4, a7
+; RV32ZBB-NEXT:    beq a1, t0, .LBB53_7
+; RV32ZBB-NEXT:  # %bb.6:
+; RV32ZBB-NEXT:    sltu t2, a1, t0
+; RV32ZBB-NEXT:    j .LBB53_8
+; RV32ZBB-NEXT:  .LBB53_7:
+; RV32ZBB-NEXT:    sltu t2, a4, a7
+; RV32ZBB-NEXT:  .LBB53_8:
+; RV32ZBB-NEXT:    xor t3, a3, a5
+; RV32ZBB-NEXT:    xor t4, a2, a6
+; RV32ZBB-NEXT:    or t3, t4, t3
+; RV32ZBB-NEXT:    beqz t3, .LBB53_10
+; RV32ZBB-NEXT:  # %bb.9:
+; RV32ZBB-NEXT:    mv t2, t1
+; RV32ZBB-NEXT:  .LBB53_10:
+; RV32ZBB-NEXT:    bnez t2, .LBB53_12
+; RV32ZBB-NEXT:  # %bb.11:
+; RV32ZBB-NEXT:    mv a4, a7
+; RV32ZBB-NEXT:    mv a1, t0
+; RV32ZBB-NEXT:    mv a2, a6
+; RV32ZBB-NEXT:    mv a3, a5
+; RV32ZBB-NEXT:  .LBB53_12:
+; RV32ZBB-NEXT:    sw a4, 0(a0)
+; RV32ZBB-NEXT:    sw a1, 4(a0)
+; RV32ZBB-NEXT:    sw a2, 8(a0)
+; RV32ZBB-NEXT:    sw a3, 12(a0)
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i128 %x, %y
   %select = select i1 %cmp, i128 0, i128 %y
   %sub = sub nuw i128 %x, %select
@@ -1643,21 +1735,35 @@ define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
 }
 
 define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
-; CHECK-LABEL: sub_if_uge_multiuse_cmp_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltu a2, a0, a1
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a1
-; CHECK-NEXT:    sub a2, a0, a2
-; CHECK-NEXT:    bltu a0, a1, .LBB55_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a0, 4
-; CHECK-NEXT:    sll a0, a2, a0
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB55_2:
-; CHECK-NEXT:    li a0, 2
-; CHECK-NEXT:    sll a0, a2, a0
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_multiuse_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a2, a2, a1
+; RV32I-NEXT:    sub a2, a0, a2
+; RV32I-NEXT:    bltu a0, a1, .LBB55_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    li a0, 4
+; RV32I-NEXT:    sll a0, a2, a0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB55_2:
+; RV32I-NEXT:    li a0, 2
+; RV32I-NEXT:    sll a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_multiuse_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    sub a2, a0, a1
+; RV32ZBB-NEXT:    minu a2, a0, a2
+; RV32ZBB-NEXT:    bltu a0, a1, .LBB55_2
+; RV32ZBB-NEXT:  # %bb.1:
+; RV32ZBB-NEXT:    li a0, 4
+; RV32ZBB-NEXT:    sll a0, a2, a0
+; RV32ZBB-NEXT:    ret
+; RV32ZBB-NEXT:  .LBB55_2:
+; RV32ZBB-NEXT:    li a0, 2
+; RV32ZBB-NEXT:    sll a0, a2, a0
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i32 %x, %y
   %select = select i1 %cmp, i32 0, i32 %y
   %sub = sub nuw i32 %x, %select
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 106c3049c6818..49cf84e22e6c7 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1713,12 +1713,10 @@ define i8 @sub_if_uge_i8(i8 %x, i8 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i8:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    zext.b a2, a1
-; RV64ZBB-NEXT:    zext.b a3, a0
-; RV64ZBB-NEXT:    sltu a2, a3, a2
-; RV64ZBB-NEXT:    addi a2, a2, -1
-; RV64ZBB-NEXT:    and a1, a2, a1
-; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    zext.b a2, a0
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    zext.b a0, a0
+; RV64ZBB-NEXT:    minu a0, a2, a0
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i8 %x, %y
   %select = select i1 %cmp, i8 0, i8 %y
@@ -1741,12 +1739,10 @@ define i16 @sub_if_uge_i16(i16 %x, i16 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i16:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    zext.h a2, a1
-; RV64ZBB-NEXT:    zext.h a3, a0
-; RV64ZBB-NEXT:    sltu a2, a3, a2
-; RV64ZBB-NEXT:    addi a2, a2, -1
-; RV64ZBB-NEXT:    and a1, a2, a1
-; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    zext.h a2, a0
+; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    minu a0, a2, a0
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i16 %x, %y
   %select = select i1 %cmp, i16 0, i16 %y
@@ -1767,12 +1763,9 @@ define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sext.w a2, a1
-; RV64ZBB-NEXT:    sext.w a3, a0
-; RV64ZBB-NEXT:    sltu a2, a3, a2
-; RV64ZBB-NEXT:    addi a2, a2, -1
-; RV64ZBB-NEXT:    and a1, a2, a1
+; RV64ZBB-NEXT:    sext.w a2, a0
 ; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    minu a0, a2, a0
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i32 %x, %y
   %select = select i1 %cmp, i32 0, i32 %y
@@ -1791,10 +1784,8 @@ define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i64:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sltu a2, a0, a1
-; RV64ZBB-NEXT:    addi a2, a2, -1
-; RV64ZBB-NEXT:    and a1, a2, a1
-; RV64ZBB-NEXT:    sub a0, a0, a1
+; RV64ZBB-NEXT:    sub a1, a0, a1
+; RV64ZBB-NEXT:    minu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i64 %x, %y
   %select = select i1 %cmp, i64 0, i64 %y
@@ -1823,20 +1814,22 @@ define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i128:
 ; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a3, a1, a3
+; RV64ZBB-NEXT:    sub a3, a3, a4
+; RV64ZBB-NEXT:    sub a2, a0, a2
 ; RV64ZBB-NEXT:    beq a1, a3, .LBB66_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    sltu a4, a1, a3
-; RV64ZBB-NEXT:    j .LBB66_3
+; RV64ZBB-NEXT:    beqz a4, .LBB66_3
+; RV64ZBB-NEXT:    j .LBB66_4
 ; RV64ZBB-NEXT:  .LBB66_2:
 ; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    bnez a4, .LBB66_4
 ; RV64ZBB-NEXT:  .LBB66_3:
-; RV64ZBB-NEXT:    addi a4, a4, -1
-; RV64ZBB-NEXT:    and a3, a4, a3
-; RV64ZBB-NEXT:    and a2, a4, a2
-; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    sub a1, a1, a3
-; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a0, a2
+; RV64ZBB-NEXT:    mv a0, a2
+; RV64ZBB-NEXT:    mv a1, a3
+; RV64ZBB-NEXT:  .LBB66_4:
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i128 %x, %y
   %select = select i1 %cmp, i128 0, i128 %y
@@ -1896,10 +1889,8 @@ define i32 @sub_if_uge_multiuse_cmp_i32(i32 %x, i32 %y) {
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    sext.w a2, a1
 ; RV64ZBB-NEXT:    sext.w a3, a0
-; RV64ZBB-NEXT:    sltu a4, a3, a2
-; RV64ZBB-NEXT:    addi a4, a4, -1
-; RV64ZBB-NEXT:    and a1, a4, a1
 ; RV64ZBB-NEXT:    subw a0, a0, a1
+; RV64ZBB-NEXT:    minu a0, a3, a0
 ; RV64ZBB-NEXT:    bltu a3, a2, .LBB68_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    li a1, 4

From 9e71239f0d5b8930e14931b2a4e7c00cd0108653 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Thu, 3 Apr 2025 19:15:27 +0200
Subject: [PATCH 3/5] [RISCV] Prevent transform on >XLEN types

Restrict the fold to types that are legal, or promote to a legal
integer type, with a legal `umin` after legalization. For wider types
(i64 on RV32, i128) the `umin` would be expanded again, yielding worse
code than the original compare-and-mask sequence, as the updated tests
show.

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |   6 +-
 llvm/test/CodeGen/RISCV/rv32zbb.ll          | 228 +++++++-------------
 llvm/test/CodeGen/RISCV/rv64zbb.ll          |  18 +-
 3 files changed, 87 insertions(+), 165 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e76d4dac8985c..97d252ff045e6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14895,7 +14895,11 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineSubShiftToOrcB(N, DAG, Subtarget))
     return V;
 
-  if (Subtarget.hasStdExtZbb()) {
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
+  if ((LK.first == TargetLoweringBase::TypeLegal ||
+       LK.first == TargetLoweringBase::TypePromoteInteger) &&
+      TLI.isOperationLegal(ISD::UMIN, LK.second)) {
     // fold (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y))
     using namespace llvm::SDPatternMatch;
     SDValue Y;
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 50b198443b3a8..f87057358f384 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1551,43 +1551,23 @@ define i32 @sub_if_uge_i32(i32 %x, i32 %y) {
 }
 
 define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
-; RV32I-LABEL: sub_if_uge_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    beq a1, a3, .LBB52_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a4, a1, a3
-; RV32I-NEXT:    j .LBB52_3
-; RV32I-NEXT:  .LBB52_2:
-; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:  .LBB52_3:
-; RV32I-NEXT:    addi a4, a4, -1
-; RV32I-NEXT:    and a3, a4, a3
-; RV32I-NEXT:    and a2, a4, a2
-; RV32I-NEXT:    sltu a4, a0, a2
-; RV32I-NEXT:    sub a1, a1, a3
-; RV32I-NEXT:    sub a1, a1, a4
-; RV32I-NEXT:    sub a0, a0, a2
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: sub_if_uge_i64:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    sltu a4, a0, a2
-; RV32ZBB-NEXT:    sub a3, a1, a3
-; RV32ZBB-NEXT:    sub a3, a3, a4
-; RV32ZBB-NEXT:    sub a2, a0, a2
-; RV32ZBB-NEXT:    beq a1, a3, .LBB52_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu a4, a1, a3
-; RV32ZBB-NEXT:    beqz a4, .LBB52_3
-; RV32ZBB-NEXT:    j .LBB52_4
-; RV32ZBB-NEXT:  .LBB52_2:
-; RV32ZBB-NEXT:    sltu a4, a0, a2
-; RV32ZBB-NEXT:    bnez a4, .LBB52_4
-; RV32ZBB-NEXT:  .LBB52_3:
-; RV32ZBB-NEXT:    mv a0, a2
-; RV32ZBB-NEXT:    mv a1, a3
-; RV32ZBB-NEXT:  .LBB52_4:
-; RV32ZBB-NEXT:    ret
+; CHECK-LABEL: sub_if_uge_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beq a1, a3, .LBB52_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sltu a4, a1, a3
+; CHECK-NEXT:    j .LBB52_3
+; CHECK-NEXT:  .LBB52_2:
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:  .LBB52_3:
+; CHECK-NEXT:    addi a4, a4, -1
+; CHECK-NEXT:    and a3, a4, a3
+; CHECK-NEXT:    and a2, a4, a2
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:    sub a1, a1, a3
+; CHECK-NEXT:    sub a1, a1, a4
+; CHECK-NEXT:    sub a0, a0, a2
+; CHECK-NEXT:    ret
   %cmp = icmp ult i64 %x, %y
   %select = select i1 %cmp, i64 0, i64 %y
   %sub = sub nuw i64 %x, %select
@@ -1595,123 +1575,63 @@ define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
 }
 
 define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
-; RV32I-LABEL: sub_if_uge_i128:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lw a7, 4(a2)
-; RV32I-NEXT:    lw a6, 8(a2)
-; RV32I-NEXT:    lw t0, 12(a2)
-; RV32I-NEXT:    lw a4, 12(a1)
-; RV32I-NEXT:    lw a3, 4(a1)
-; RV32I-NEXT:    lw a5, 8(a1)
-; RV32I-NEXT:    beq a4, t0, .LBB53_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu t1, a4, t0
-; RV32I-NEXT:    j .LBB53_3
-; RV32I-NEXT:  .LBB53_2:
-; RV32I-NEXT:    sltu t1, a5, a6
-; RV32I-NEXT:  .LBB53_3:
-; RV32I-NEXT:    lw a2, 0(a2)
-; RV32I-NEXT:    lw a1, 0(a1)
-; RV32I-NEXT:    beq a3, a7, .LBB53_5
-; RV32I-NEXT:  # %bb.4:
-; RV32I-NEXT:    sltu t2, a3, a7
-; RV32I-NEXT:    j .LBB53_6
-; RV32I-NEXT:  .LBB53_5:
-; RV32I-NEXT:    sltu t2, a1, a2
-; RV32I-NEXT:  .LBB53_6:
-; RV32I-NEXT:    xor t3, a4, t0
-; RV32I-NEXT:    xor t4, a5, a6
-; RV32I-NEXT:    or t3, t4, t3
-; RV32I-NEXT:    beqz t3, .LBB53_8
-; RV32I-NEXT:  # %bb.7:
-; RV32I-NEXT:    mv t2, t1
-; RV32I-NEXT:  .LBB53_8:
-; RV32I-NEXT:    addi t2, t2, -1
-; RV32I-NEXT:    and t1, t2, t0
-; RV32I-NEXT:    and t0, t2, a2
-; RV32I-NEXT:    and a7, t2, a7
-; RV32I-NEXT:    sltu a2, a1, t0
-; RV32I-NEXT:    and t2, t2, a6
-; RV32I-NEXT:    mv a6, a2
-; RV32I-NEXT:    beq a3, a7, .LBB53_10
-; RV32I-NEXT:  # %bb.9:
-; RV32I-NEXT:    sltu a6, a3, a7
-; RV32I-NEXT:  .LBB53_10:
-; RV32I-NEXT:    sub t3, a5, t2
-; RV32I-NEXT:    sltu a5, a5, t2
-; RV32I-NEXT:    sub a4, a4, t1
-; RV32I-NEXT:    sub a3, a3, a7
-; RV32I-NEXT:    sub a1, a1, t0
-; RV32I-NEXT:    sltu a7, t3, a6
-; RV32I-NEXT:    sub a4, a4, a5
-; RV32I-NEXT:    sub a5, t3, a6
-; RV32I-NEXT:    sub a3, a3, a2
-; RV32I-NEXT:    sub a2, a4, a7
-; RV32I-NEXT:    sw a1, 0(a0)
-; RV32I-NEXT:    sw a3, 4(a0)
-; RV32I-NEXT:    sw a5, 8(a0)
-; RV32I-NEXT:    sw a2, 12(a0)
-; RV32I-NEXT:    ret
-;
-; RV32ZBB-LABEL: sub_if_uge_i128:
-; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    lw a7, 0(a2)
-; RV32ZBB-NEXT:    lw t0, 4(a2)
-; RV32ZBB-NEXT:    lw a5, 8(a2)
-; RV32ZBB-NEXT:    lw a6, 12(a2)
-; RV32ZBB-NEXT:    lw a2, 8(a1)
-; RV32ZBB-NEXT:    lw a3, 12(a1)
-; RV32ZBB-NEXT:    lw a4, 0(a1)
-; RV32ZBB-NEXT:    lw a1, 4(a1)
-; RV32ZBB-NEXT:    sltu t1, a2, a5
-; RV32ZBB-NEXT:    sub a6, a3, a6
-; RV32ZBB-NEXT:    sltu t2, a4, a7
-; RV32ZBB-NEXT:    sub a6, a6, t1
-; RV32ZBB-NEXT:    mv t1, t2
-; RV32ZBB-NEXT:    beq a1, t0, .LBB53_2
-; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu t1, a1, t0
-; RV32ZBB-NEXT:  .LBB53_2:
-; RV32ZBB-NEXT:    sub t3, a2, a5
-; RV32ZBB-NEXT:    sltu a5, t3, t1
-; RV32ZBB-NEXT:    sub a5, a6, a5
-; RV32ZBB-NEXT:    sub a6, t3, t1
-; RV32ZBB-NEXT:    beq a3, a5, .LBB53_4
-; RV32ZBB-NEXT:  # %bb.3:
-; RV32ZBB-NEXT:    sltu t1, a3, a5
-; RV32ZBB-NEXT:    j .LBB53_5
-; RV32ZBB-NEXT:  .LBB53_4:
-; RV32ZBB-NEXT:    sltu t1, a2, a6
-; RV32ZBB-NEXT:  .LBB53_5:
-; RV32ZBB-NEXT:    sub t0, a1, t0
-; RV32ZBB-NEXT:    sub t0, t0, t2
-; RV32ZBB-NEXT:    sub a7, a4, a7
-; RV32ZBB-NEXT:    beq a1, t0, .LBB53_7
-; RV32ZBB-NEXT:  # %bb.6:
-; RV32ZBB-NEXT:    sltu t2, a1, t0
-; RV32ZBB-NEXT:    j .LBB53_8
-; RV32ZBB-NEXT:  .LBB53_7:
-; RV32ZBB-NEXT:    sltu t2, a4, a7
-; RV32ZBB-NEXT:  .LBB53_8:
-; RV32ZBB-NEXT:    xor t3, a3, a5
-; RV32ZBB-NEXT:    xor t4, a2, a6
-; RV32ZBB-NEXT:    or t3, t4, t3
-; RV32ZBB-NEXT:    beqz t3, .LBB53_10
-; RV32ZBB-NEXT:  # %bb.9:
-; RV32ZBB-NEXT:    mv t2, t1
-; RV32ZBB-NEXT:  .LBB53_10:
-; RV32ZBB-NEXT:    bnez t2, .LBB53_12
-; RV32ZBB-NEXT:  # %bb.11:
-; RV32ZBB-NEXT:    mv a4, a7
-; RV32ZBB-NEXT:    mv a1, t0
-; RV32ZBB-NEXT:    mv a2, a6
-; RV32ZBB-NEXT:    mv a3, a5
-; RV32ZBB-NEXT:  .LBB53_12:
-; RV32ZBB-NEXT:    sw a4, 0(a0)
-; RV32ZBB-NEXT:    sw a1, 4(a0)
-; RV32ZBB-NEXT:    sw a2, 8(a0)
-; RV32ZBB-NEXT:    sw a3, 12(a0)
-; RV32ZBB-NEXT:    ret
+; CHECK-LABEL: sub_if_uge_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lw a7, 4(a2)
+; CHECK-NEXT:    lw a6, 8(a2)
+; CHECK-NEXT:    lw t0, 12(a2)
+; CHECK-NEXT:    lw a4, 12(a1)
+; CHECK-NEXT:    lw a3, 4(a1)
+; CHECK-NEXT:    lw a5, 8(a1)
+; CHECK-NEXT:    beq a4, t0, .LBB53_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sltu t1, a4, t0
+; CHECK-NEXT:    j .LBB53_3
+; CHECK-NEXT:  .LBB53_2:
+; CHECK-NEXT:    sltu t1, a5, a6
+; CHECK-NEXT:  .LBB53_3:
+; CHECK-NEXT:    lw a2, 0(a2)
+; CHECK-NEXT:    lw a1, 0(a1)
+; CHECK-NEXT:    beq a3, a7, .LBB53_5
+; CHECK-NEXT:  # %bb.4:
+; CHECK-NEXT:    sltu t2, a3, a7
+; CHECK-NEXT:    j .LBB53_6
+; CHECK-NEXT:  .LBB53_5:
+; CHECK-NEXT:    sltu t2, a1, a2
+; CHECK-NEXT:  .LBB53_6:
+; CHECK-NEXT:    xor t3, a4, t0
+; CHECK-NEXT:    xor t4, a5, a6
+; CHECK-NEXT:    or t3, t4, t3
+; CHECK-NEXT:    beqz t3, .LBB53_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    mv t2, t1
+; CHECK-NEXT:  .LBB53_8:
+; CHECK-NEXT:    addi t2, t2, -1
+; CHECK-NEXT:    and t1, t2, t0
+; CHECK-NEXT:    and t0, t2, a2
+; CHECK-NEXT:    and a7, t2, a7
+; CHECK-NEXT:    sltu a2, a1, t0
+; CHECK-NEXT:    and t2, t2, a6
+; CHECK-NEXT:    mv a6, a2
+; CHECK-NEXT:    beq a3, a7, .LBB53_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    sltu a6, a3, a7
+; CHECK-NEXT:  .LBB53_10:
+; CHECK-NEXT:    sub t3, a5, t2
+; CHECK-NEXT:    sltu a5, a5, t2
+; CHECK-NEXT:    sub a4, a4, t1
+; CHECK-NEXT:    sub a3, a3, a7
+; CHECK-NEXT:    sub a1, a1, t0
+; CHECK-NEXT:    sltu a7, t3, a6
+; CHECK-NEXT:    sub a4, a4, a5
+; CHECK-NEXT:    sub a5, t3, a6
+; CHECK-NEXT:    sub a3, a3, a2
+; CHECK-NEXT:    sub a2, a4, a7
+; CHECK-NEXT:    sw a1, 0(a0)
+; CHECK-NEXT:    sw a3, 4(a0)
+; CHECK-NEXT:    sw a5, 8(a0)
+; CHECK-NEXT:    sw a2, 12(a0)
+; CHECK-NEXT:    ret
   %cmp = icmp ult i128 %x, %y
   %select = select i1 %cmp, i128 0, i128 %y
   %sub = sub nuw i128 %x, %select
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 49cf84e22e6c7..64830e64b39b5 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1814,22 +1814,20 @@ define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
 ;
 ; RV64ZBB-LABEL: sub_if_uge_i128:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    sub a3, a1, a3
-; RV64ZBB-NEXT:    sub a3, a3, a4
-; RV64ZBB-NEXT:    sub a2, a0, a2
 ; RV64ZBB-NEXT:    beq a1, a3, .LBB66_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    sltu a4, a1, a3
-; RV64ZBB-NEXT:    beqz a4, .LBB66_3
-; RV64ZBB-NEXT:    j .LBB66_4
+; RV64ZBB-NEXT:    j .LBB66_3
 ; RV64ZBB-NEXT:  .LBB66_2:
 ; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    bnez a4, .LBB66_4
 ; RV64ZBB-NEXT:  .LBB66_3:
-; RV64ZBB-NEXT:    mv a0, a2
-; RV64ZBB-NEXT:    mv a1, a3
-; RV64ZBB-NEXT:  .LBB66_4:
+; RV64ZBB-NEXT:    addi a4, a4, -1
+; RV64ZBB-NEXT:    and a3, a4, a3
+; RV64ZBB-NEXT:    and a2, a4, a2
+; RV64ZBB-NEXT:    sltu a4, a0, a2
+; RV64ZBB-NEXT:    sub a1, a1, a3
+; RV64ZBB-NEXT:    sub a1, a1, a4
+; RV64ZBB-NEXT:    sub a0, a0, a2
 ; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i128 %x, %y
   %select = select i1 %cmp, i128 0, i128 %y

From 63201050ebdcc9288619f54df8909f04a55f84e0 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Mon, 7 Apr 2025 16:17:37 +0200
Subject: [PATCH 4/5] [DAGCombiner] Move the transform from RISCVISelLowering

The fold is not RISC-V specific and is already guarded by a generic
`umin` legality check, so move it into DAGCombiner::visitSUB where any
target with a legal `umin` can benefit.

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 13 +++++++++++++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 17 -----------------
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 38376de5783ae..c39a03d16163e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4251,6 +4251,19 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
       sd_match(N1, m_UMaxLike(m_Specific(A), m_Specific(B))))
     return DAG.getNegative(DAG.getNode(ISD::ABDU, DL, VT, A, B), DL, VT);
 
+  // (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y))
+  auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
+  if ((LK.first == TargetLoweringBase::TypeLegal ||
+       LK.first == TargetLoweringBase::TypePromoteInteger) &&
+      TLI.isOperationLegal(ISD::UMIN, LK.second)) {
+    SDValue Y;
+    if (sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
+                                               m_SpecificCondCode(ISD::SETULT)),
+                                       m_Zero(), m_Deferred(Y)))))
+      return DAG.getNode(ISD::UMIN, DL, VT, N0,
+                         DAG.getNode(ISD::SUB, DL, VT, N0, Y));
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 97d252ff045e6..2a1dd2b2def17 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14895,23 +14895,6 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineSubShiftToOrcB(N, DAG, Subtarget))
     return V;
 
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
-  if ((LK.first == TargetLoweringBase::TypeLegal ||
-       LK.first == TargetLoweringBase::TypePromoteInteger) &&
-      TLI.isOperationLegal(ISD::UMIN, LK.second)) {
-    // fold (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y))
-    using namespace llvm::SDPatternMatch;
-    SDValue Y;
-    if (sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
-                                               m_SpecificCondCode(ISD::SETULT)),
-                                       m_Zero(), m_Deferred(Y))))) {
-      SDLoc DL(N);
-      return DAG.getNode(ISD::UMIN, DL, VT, N0,
-                         DAG.getNode(ISD::SUB, DL, VT, N0, Y));
-    }
-  }
-
   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
   //      (select lhs, rhs, cc, x, (sub x, y))
   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);

From aa72145ab2b7fb40b21675e42e711de93b2657bc Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Tue, 8 Apr 2025 15:56:37 +0200
Subject: [PATCH 5/5] [RISCV][test] Add a common CHECK prefix

---
 llvm/test/CodeGen/RISCV/rv64zbb.ll | 138 ++++++++++-------------------
 1 file changed, 45 insertions(+), 93 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 64830e64b39b5..4d3c28c733778 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
 ; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV64ZBB
+; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBB
 
 declare i32 @llvm.ctlz.i32(i32, i1)
 
@@ -1562,17 +1562,11 @@ define i64 @orc_b_i64(i64 %a) {
 }
 
 define i64 @srai_slli(i16 signext %0) {
-; RV64I-LABEL: srai_slli:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 57
-; RV64I-NEXT:    srai a0, a0, 63
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: srai_slli:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 57
-; RV64ZBB-NEXT:    srai a0, a0, 63
-; RV64ZBB-NEXT:    ret
+; CHECK-LABEL: srai_slli:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    slli a0, a0, 57
+; CHECK-NEXT:    srai a0, a0, 63
+; CHECK-NEXT:    ret
   %2 = shl i16 %0, 9
   %sext = ashr i16 %2, 15
   %3 = sext i16 %sext to i64
@@ -1580,17 +1574,11 @@ define i64 @srai_slli(i16 signext %0) {
 }
 
 define i64 @srai_slli2(i16 signext %0) {
-; RV64I-LABEL: srai_slli2:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 57
-; RV64I-NEXT:    srai a0, a0, 62
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: srai_slli2:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    slli a0, a0, 57
-; RV64ZBB-NEXT:    srai a0, a0, 62
-; RV64ZBB-NEXT:    ret
+; CHECK-LABEL: srai_slli2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    slli a0, a0, 57
+; CHECK-NEXT:    srai a0, a0, 62
+; CHECK-NEXT:    ret
   %2 = shl i16 %0, 9
   %sext = ashr i16 %2, 14
   %3 = sext i16 %sext to i64
@@ -1598,19 +1586,12 @@ define i64 @srai_slli2(i16 signext %0) {
 }
 
 define signext i32 @func0000000000000001(i32 signext %0, i8 signext %1) #0 {
-; RV64I-LABEL: func0000000000000001:
-; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    slli a1, a1, 59
-; RV64I-NEXT:    srai a1, a1, 63
-; RV64I-NEXT:    addw a0, a1, a0
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: func0000000000000001:
-; RV64ZBB:       # %bb.0: # %entry
-; RV64ZBB-NEXT:    slli a1, a1, 59
-; RV64ZBB-NEXT:    srai a1, a1, 63
-; RV64ZBB-NEXT:    addw a0, a1, a0
-; RV64ZBB-NEXT:    ret
+; CHECK-LABEL: func0000000000000001:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    slli a1, a1, 59
+; CHECK-NEXT:    srai a1, a1, 63
+; CHECK-NEXT:    addw a0, a1, a0
+; CHECK-NEXT:    ret
 entry:
   %2 = shl i8 %1, 3
   %3 = ashr i8 %2, 7
@@ -1794,41 +1775,23 @@ define i64 @sub_if_uge_i64(i64 %x, i64 %y) {
 }
 
 define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
-; RV64I-LABEL: sub_if_uge_i128:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    beq a1, a3, .LBB66_2
-; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    sltu a4, a1, a3
-; RV64I-NEXT:    j .LBB66_3
-; RV64I-NEXT:  .LBB66_2:
-; RV64I-NEXT:    sltu a4, a0, a2
-; RV64I-NEXT:  .LBB66_3:
-; RV64I-NEXT:    addi a4, a4, -1
-; RV64I-NEXT:    and a3, a4, a3
-; RV64I-NEXT:    and a2, a4, a2
-; RV64I-NEXT:    sltu a4, a0, a2
-; RV64I-NEXT:    sub a1, a1, a3
-; RV64I-NEXT:    sub a1, a1, a4
-; RV64I-NEXT:    sub a0, a0, a2
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: sub_if_uge_i128:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    beq a1, a3, .LBB66_2
-; RV64ZBB-NEXT:  # %bb.1:
-; RV64ZBB-NEXT:    sltu a4, a1, a3
-; RV64ZBB-NEXT:    j .LBB66_3
-; RV64ZBB-NEXT:  .LBB66_2:
-; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:  .LBB66_3:
-; RV64ZBB-NEXT:    addi a4, a4, -1
-; RV64ZBB-NEXT:    and a3, a4, a3
-; RV64ZBB-NEXT:    and a2, a4, a2
-; RV64ZBB-NEXT:    sltu a4, a0, a2
-; RV64ZBB-NEXT:    sub a1, a1, a3
-; RV64ZBB-NEXT:    sub a1, a1, a4
-; RV64ZBB-NEXT:    sub a0, a0, a2
-; RV64ZBB-NEXT:    ret
+; CHECK-LABEL: sub_if_uge_i128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    beq a1, a3, .LBB66_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    sltu a4, a1, a3
+; CHECK-NEXT:    j .LBB66_3
+; CHECK-NEXT:  .LBB66_2:
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:  .LBB66_3:
+; CHECK-NEXT:    addi a4, a4, -1
+; CHECK-NEXT:    and a3, a4, a3
+; CHECK-NEXT:    and a2, a4, a2
+; CHECK-NEXT:    sltu a4, a0, a2
+; CHECK-NEXT:    sub a1, a1, a3
+; CHECK-NEXT:    sub a1, a1, a4
+; CHECK-NEXT:    sub a0, a0, a2
+; CHECK-NEXT:    ret
   %cmp = icmp ult i128 %x, %y
   %select = select i1 %cmp, i128 0, i128 %y
   %sub = sub nuw i128 %x, %select
@@ -1836,27 +1799,16 @@ define i128 @sub_if_uge_i128(i128 %x, i128 %y) {
 }
 
 define i32 @sub_if_uge_multiuse_select_i32(i32 %x, i32 %y) {
-; RV64I-LABEL: sub_if_uge_multiuse_select_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a2, a1
-; RV64I-NEXT:    sext.w a3, a0
-; RV64I-NEXT:    sltu a2, a3, a2
-; RV64I-NEXT:    addi a2, a2, -1
-; RV64I-NEXT:    and a1, a2, a1
-; RV64I-NEXT:    subw a0, a0, a1
-; RV64I-NEXT:    sllw a0, a0, a1
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: sub_if_uge_multiuse_select_i32:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    sext.w a2, a1
-; RV64ZBB-NEXT:    sext.w a3, a0
-; RV64ZBB-NEXT:    sltu a2, a3, a2
-; RV64ZBB-NEXT:    addi a2, a2, -1
-; RV64ZBB-NEXT:    and a1, a2, a1
-; RV64ZBB-NEXT:    subw a0, a0, a1
-; RV64ZBB-NEXT:    sllw a0, a0, a1
-; RV64ZBB-NEXT:    ret
+; CHECK-LABEL: sub_if_uge_multiuse_select_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sext.w a2, a1
+; CHECK-NEXT:    sext.w a3, a0
+; CHECK-NEXT:    sltu a2, a3, a2
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    and a1, a2, a1
+; CHECK-NEXT:    subw a0, a0, a1
+; CHECK-NEXT:    sllw a0, a0, a1
+; CHECK-NEXT:    ret
   %cmp = icmp ult i32 %x, %y
   %select = select i1 %cmp, i32 0, i32 %y
   %sub = sub nuw i32 %x, %select


