[llvm] [RISCV] Support umin/umax in tryFoldSelectIntoOp (PR #157548)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 9 18:25:34 PDT 2025


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/157548

From 5417701f758f9a409da4dba1990481956d0f8285 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 8 Sep 2025 13:09:34 -0700
Subject: [PATCH 1/2] [RISCV] Add coverage for select with minmax arm [nfc]

This adds coverage for an upcoming change; the choice of configurations
to check also seemed worth a moment of consideration in its own right.
---
 llvm/test/CodeGen/RISCV/select-zbb.ll | 1622 +++++++++++++++++++++++++
 1 file changed, 1622 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/select-zbb.ll

diff --git a/llvm/test/CodeGen/RISCV/select-zbb.ll b/llvm/test/CodeGen/RISCV/select-zbb.ll
new file mode 100644
index 0000000000000..13e637909b43b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-zbb.ll
@@ -0,0 +1,1622 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IM %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZBB %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZBB %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMBOTH %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMBOTH %s
+
+
+define i32 @select_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bgeu a1, a2, .LBB0_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB0_4
+; RV32IM-NEXT:  .LBB0_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB0_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB0_2
+; RV32IM-NEXT:  .LBB0_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    bgeu a1, a3, .LBB0_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB0_4
+; RV64IM-NEXT:  .LBB0_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB0_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB0_2
+; RV64IM-NEXT:  .LBB0_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    minu a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB0_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    minu a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB0_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltu a3, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltu a4, a1, a3
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    minu a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    minu a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB1_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB1_4
+; RV32IM-NEXT:  .LBB1_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB1_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB1_2
+; RV32IM-NEXT:  .LBB1_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB1_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB1_4
+; RV64IM-NEXT:  .LBB1_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB1_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB1_2
+; RV64IM-NEXT:  .LBB1_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB1_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB1_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umin_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB2_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB2_4
+; RV32IM-NEXT:  .LBB2_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB2_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB2_2
+; RV32IM-NEXT:  .LBB2_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB2_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB2_4
+; RV64IM-NEXT:  .LBB2_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB2_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB2_2
+; RV64IM-NEXT:  .LBB2_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB2_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB2_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umin_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_umin_4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 128
+; RV32IM-NEXT:    bgeu a1, a2, .LBB3_3
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    beqz a0, .LBB3_4
+; RV32IM-NEXT:  .LBB3_2:
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB3_3:
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bnez a0, .LBB3_2
+; RV32IM-NEXT:  .LBB3_4:
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a1, 128
+; RV64IM-NEXT:    bgeu a2, a1, .LBB3_3
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    beqz a0, .LBB3_4
+; RV64IM-NEXT:  .LBB3_2:
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB3_3:
+; RV64IM-NEXT:    li a2, 128
+; RV64IM-NEXT:    bnez a0, .LBB3_2
+; RV64IM-NEXT:  .LBB3_4:
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_4:
+; RV32IMZBB:       # %bb.0:
+; RV32IMZBB-NEXT:    mv a2, a0
+; RV32IMZBB-NEXT:    li a0, 128
+; RV32IMZBB-NEXT:    bnez a2, .LBB3_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    minu a0, a1, a0
+; RV32IMZBB-NEXT:  .LBB3_2:
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_4:
+; RV64IMZBB:       # %bb.0:
+; RV64IMZBB-NEXT:    mv a2, a0
+; RV64IMZBB-NEXT:    li a0, 128
+; RV64IMZBB-NEXT:    bnez a2, .LBB3_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    minu a0, a1, a0
+; RV64IMZBB-NEXT:  .LBB3_2:
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 128
+; RV32IMZICOND-NEXT:    addi a1, a1, -128
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, 128
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltiu a2, a1, 128
+; RV64IMZICOND-NEXT:    addi a1, a1, -128
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    addi a0, a0, 128
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_4:
+; RV32IMBOTH:       # %bb.0:
+; RV32IMBOTH-NEXT:    li a2, 128
+; RV32IMBOTH-NEXT:    minu a1, a1, a2
+; RV32IMBOTH-NEXT:    addi a1, a1, -128
+; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_4:
+; RV64IMBOTH:       # %bb.0:
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    li a2, 128
+; RV64IMBOTH-NEXT:    minu a1, a1, a2
+; RV64IMBOTH-NEXT:    addi a1, a1, -128
+; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    ret
+  %add = call i32 @llvm.umin(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
+  ret i32 %sel
+}
+
+define i32 @select_umax_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bgeu a2, a1, .LBB4_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB4_4
+; RV32IM-NEXT:  .LBB4_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB4_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB4_2
+; RV32IM-NEXT:  .LBB4_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    bgeu a3, a1, .LBB4_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB4_4
+; RV64IM-NEXT:  .LBB4_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB4_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB4_2
+; RV64IM-NEXT:  .LBB4_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB4_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    maxu a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB4_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB4_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    maxu a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB4_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltu a3, a2, a1
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sltu a4, a3, a1
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    maxu a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umax_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    maxu a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umax(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_umax_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a3, a1, .LBB5_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB5_4
+; RV32IM-NEXT:  .LBB5_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB5_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB5_2
+; RV32IM-NEXT:  .LBB5_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a3, a2, .LBB5_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB5_4
+; RV64IM-NEXT:  .LBB5_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB5_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB5_2
+; RV64IM-NEXT:  .LBB5_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB5_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    maxu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB5_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB5_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    maxu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB5_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 33
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 33
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    maxu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umax_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    maxu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umax(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umax_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umax_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a3, a1, .LBB6_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB6_4
+; RV32IM-NEXT:  .LBB6_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB6_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB6_2
+; RV32IM-NEXT:  .LBB6_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a3, a2, .LBB6_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB6_4
+; RV64IM-NEXT:  .LBB6_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB6_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB6_2
+; RV64IM-NEXT:  .LBB6_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB6_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    maxu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB6_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB6_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    maxu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB6_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 33
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 33
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    maxu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umax_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    maxu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umax(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umax_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_umax_4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 128
+; RV32IM-NEXT:    bgeu a2, a1, .LBB7_3
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    beqz a0, .LBB7_4
+; RV32IM-NEXT:  .LBB7_2:
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB7_3:
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bnez a0, .LBB7_2
+; RV32IM-NEXT:  .LBB7_4:
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a1, 128
+; RV64IM-NEXT:    bgeu a1, a2, .LBB7_3
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    beqz a0, .LBB7_4
+; RV64IM-NEXT:  .LBB7_2:
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB7_3:
+; RV64IM-NEXT:    li a2, 128
+; RV64IM-NEXT:    bnez a0, .LBB7_2
+; RV64IM-NEXT:  .LBB7_4:
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_4:
+; RV32IMZBB:       # %bb.0:
+; RV32IMZBB-NEXT:    mv a2, a0
+; RV32IMZBB-NEXT:    li a0, 128
+; RV32IMZBB-NEXT:    bnez a2, .LBB7_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    maxu a0, a1, a0
+; RV32IMZBB-NEXT:  .LBB7_2:
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_4:
+; RV64IMZBB:       # %bb.0:
+; RV64IMZBB-NEXT:    mv a2, a0
+; RV64IMZBB-NEXT:    li a0, 128
+; RV64IMZBB-NEXT:    bnez a2, .LBB7_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    maxu a0, a1, a0
+; RV64IMZBB-NEXT:  .LBB7_2:
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 129
+; RV32IMZICOND-NEXT:    addi a1, a1, -128
+; RV32IMZICOND-NEXT:    czero.nez a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, 128
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltiu a2, a1, 129
+; RV64IMZICOND-NEXT:    addi a1, a1, -128
+; RV64IMZICOND-NEXT:    czero.nez a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    addi a0, a0, 128
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_4:
+; RV32IMBOTH:       # %bb.0:
+; RV32IMBOTH-NEXT:    li a2, 128
+; RV32IMBOTH-NEXT:    maxu a1, a1, a2
+; RV32IMBOTH-NEXT:    addi a1, a1, -128
+; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umax_4:
+; RV64IMBOTH:       # %bb.0:
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    li a2, 128
+; RV64IMBOTH-NEXT:    maxu a1, a1, a2
+; RV64IMBOTH-NEXT:    addi a1, a1, -128
+; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    ret
+  %add = call i32 @llvm.umax(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
+  ret i32 %sel
+}
+
+define i32 @select_smin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smin_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bge a1, a2, .LBB8_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB8_4
+; RV32IM-NEXT:  .LBB8_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB8_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB8_2
+; RV32IM-NEXT:  .LBB8_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smin_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    bge a1, a3, .LBB8_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB8_4
+; RV64IM-NEXT:  .LBB8_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB8_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB8_2
+; RV64IM-NEXT:  .LBB8_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smin_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB8_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    min a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB8_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smin_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB8_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    min a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB8_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smin_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slt a3, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smin_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    slt a4, a1, a3
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smin_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    min a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smin_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    min a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smin(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_smin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smin_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bge a1, a3, .LBB9_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB9_4
+; RV32IM-NEXT:  .LBB9_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB9_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB9_2
+; RV32IM-NEXT:  .LBB9_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smin_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bge a2, a3, .LBB9_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB9_4
+; RV64IM-NEXT:  .LBB9_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB9_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB9_2
+; RV64IM-NEXT:  .LBB9_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smin_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB9_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    min a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB9_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smin_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB9_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    min a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB9_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smin_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slti a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smin_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    slti a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smin_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    min a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smin_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    min a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_smin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_smin_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bge a1, a3, .LBB10_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB10_4
+; RV32IM-NEXT:  .LBB10_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB10_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB10_2
+; RV32IM-NEXT:  .LBB10_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smin_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bge a2, a3, .LBB10_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB10_4
+; RV64IM-NEXT:  .LBB10_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB10_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB10_2
+; RV64IM-NEXT:  .LBB10_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smin_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB10_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    min a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB10_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smin_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB10_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    min a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB10_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smin_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slti a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smin_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    slti a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smin_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    min a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smin_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    min a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_smin_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_smin_4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 128
+; RV32IM-NEXT:    bge a1, a2, .LBB11_3
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    beqz a0, .LBB11_4
+; RV32IM-NEXT:  .LBB11_2:
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB11_3:
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bnez a0, .LBB11_2
+; RV32IM-NEXT:  .LBB11_4:
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smin_4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a1, 128
+; RV64IM-NEXT:    bge a2, a1, .LBB11_3
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    beqz a0, .LBB11_4
+; RV64IM-NEXT:  .LBB11_2:
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB11_3:
+; RV64IM-NEXT:    li a2, 128
+; RV64IM-NEXT:    bnez a0, .LBB11_2
+; RV64IM-NEXT:  .LBB11_4:
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smin_4:
+; RV32IMZBB:       # %bb.0:
+; RV32IMZBB-NEXT:    mv a2, a0
+; RV32IMZBB-NEXT:    li a0, 128
+; RV32IMZBB-NEXT:    bnez a2, .LBB11_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    min a0, a1, a0
+; RV32IMZBB-NEXT:  .LBB11_2:
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smin_4:
+; RV64IMZBB:       # %bb.0:
+; RV64IMZBB-NEXT:    mv a2, a0
+; RV64IMZBB-NEXT:    li a0, 128
+; RV64IMZBB-NEXT:    bnez a2, .LBB11_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    min a0, a1, a0
+; RV64IMZBB-NEXT:  .LBB11_2:
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smin_4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    slti a2, a1, 128
+; RV32IMZICOND-NEXT:    addi a1, a1, -128
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, 128
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smin_4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    slti a2, a1, 128
+; RV64IMZICOND-NEXT:    addi a1, a1, -128
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    addi a0, a0, 128
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smin_4:
+; RV32IMBOTH:       # %bb.0:
+; RV32IMBOTH-NEXT:    li a2, 128
+; RV32IMBOTH-NEXT:    min a1, a1, a2
+; RV32IMBOTH-NEXT:    addi a1, a1, -128
+; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smin_4:
+; RV64IMBOTH:       # %bb.0:
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    li a2, 128
+; RV64IMBOTH-NEXT:    min a1, a1, a2
+; RV64IMBOTH-NEXT:    addi a1, a1, -128
+; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    ret
+  %add = call i32 @llvm.smin(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
+  ret i32 %sel
+}
+
+define i32 @select_smax_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smax_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bge a2, a1, .LBB12_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB12_4
+; RV32IM-NEXT:  .LBB12_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB12_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB12_2
+; RV32IM-NEXT:  .LBB12_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smax_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    bge a3, a1, .LBB12_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB12_4
+; RV64IM-NEXT:  .LBB12_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB12_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB12_2
+; RV64IM-NEXT:  .LBB12_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smax_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB12_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    max a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB12_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smax_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB12_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    max a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB12_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smax_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slt a3, a2, a1
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smax_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    slt a4, a3, a1
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smax_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    max a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smax_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    max a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smax(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_smax_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smax_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bge a3, a1, .LBB13_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB13_4
+; RV32IM-NEXT:  .LBB13_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB13_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB13_2
+; RV32IM-NEXT:  .LBB13_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smax_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bge a3, a2, .LBB13_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB13_4
+; RV64IM-NEXT:  .LBB13_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB13_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB13_2
+; RV64IM-NEXT:  .LBB13_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smax_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB13_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    max a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB13_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smax_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB13_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    max a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB13_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smax_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slti a2, a1, 33
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smax_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    slti a3, a2, 33
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smax_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    max a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smax_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    max a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smax(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_smax_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_smax_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bge a3, a1, .LBB14_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB14_4
+; RV32IM-NEXT:  .LBB14_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB14_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB14_2
+; RV32IM-NEXT:  .LBB14_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smax_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bge a3, a2, .LBB14_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB14_4
+; RV64IM-NEXT:  .LBB14_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB14_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB14_2
+; RV64IM-NEXT:  .LBB14_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smax_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB14_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    max a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB14_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smax_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB14_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    max a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB14_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smax_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    slti a2, a1, 33
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smax_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    slti a3, a2, 33
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smax_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    max a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smax_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    max a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.smax(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_smax_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_smax_4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 128
+; RV32IM-NEXT:    bge a2, a1, .LBB15_3
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    beqz a0, .LBB15_4
+; RV32IM-NEXT:  .LBB15_2:
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB15_3:
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bnez a0, .LBB15_2
+; RV32IM-NEXT:  .LBB15_4:
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_smax_4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a1, 128
+; RV64IM-NEXT:    bge a1, a2, .LBB15_3
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    beqz a0, .LBB15_4
+; RV64IM-NEXT:  .LBB15_2:
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB15_3:
+; RV64IM-NEXT:    li a2, 128
+; RV64IM-NEXT:    bnez a0, .LBB15_2
+; RV64IM-NEXT:  .LBB15_4:
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_smax_4:
+; RV32IMZBB:       # %bb.0:
+; RV32IMZBB-NEXT:    mv a2, a0
+; RV32IMZBB-NEXT:    li a0, 128
+; RV32IMZBB-NEXT:    bnez a2, .LBB15_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    max a0, a1, a0
+; RV32IMZBB-NEXT:  .LBB15_2:
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_smax_4:
+; RV64IMZBB:       # %bb.0:
+; RV64IMZBB-NEXT:    mv a2, a0
+; RV64IMZBB-NEXT:    li a0, 128
+; RV64IMZBB-NEXT:    bnez a2, .LBB15_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    max a0, a1, a0
+; RV64IMZBB-NEXT:  .LBB15_2:
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_smax_4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    slti a2, a1, 129
+; RV32IMZICOND-NEXT:    addi a1, a1, -128
+; RV32IMZICOND-NEXT:    czero.nez a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, 128
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_smax_4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    slti a2, a1, 129
+; RV64IMZICOND-NEXT:    addi a1, a1, -128
+; RV64IMZICOND-NEXT:    czero.nez a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    addi a0, a0, 128
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_smax_4:
+; RV32IMBOTH:       # %bb.0:
+; RV32IMBOTH-NEXT:    li a2, 128
+; RV32IMBOTH-NEXT:    max a1, a1, a2
+; RV32IMBOTH-NEXT:    addi a1, a1, -128
+; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_smax_4:
+; RV64IMBOTH:       # %bb.0:
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    li a2, 128
+; RV64IMBOTH-NEXT:    max a1, a1, a2
+; RV64IMBOTH-NEXT:    addi a1, a1, -128
+; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    ret
+  %add = call i32 @llvm.smax(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
+  ret i32 %sel
+}

From 79573fa3b2b7c2a340e2fcc8645118af8ea14b54 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 8 Sep 2025 13:29:12 -0700
Subject: [PATCH 2/2] [RISCV] Support umin/umax in tryFoldSelectIntoOp

The neutral values for these are -1U and 0, respectively.  We already
have good arithmetic lowerings for selects with one arm equal to these
values.  smin/smax are a bit harder and will be a separate change.
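
As a rough sketch of the fold (hypothetical IR, not taken from the
tests below), for umin:

  %m   = call i32 @llvm.umin(i32 %a, i32 %b)
  %res = select i1 %cond, i32 %m, i32 %b
    ==>
  %s   = call i32 @llvm.umin(i32 %x, i32 %b)
         ; where %x = select i1 %cond, i32 %a, i32 -1
         ; (that select lowers to addi+or, as seen in the diffs below)
  ret i32 %s

and likewise for umax with 0 as the constant arm, since umin(-1, %b)
and umax(0, %b) are both just %b.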

Somewhat surprisingly, this looks to be a net code improvement in all
of the configurations.  With both zbb and zicond, it's a clear win.
With only zicond, we still seem to come out ahead because we reduce
the number of zicond sequences needed (since min/max itself lowers to
them).  Without either zbb or zicond, it's more of a wash, but the
available arithmetic sequences are good enough that doing the select
unconditionally before using branches for the min/max is probably
still worthwhile.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |   2 +
 llvm/test/CodeGen/RISCV/select-zbb.ll       | 720 ++++++++------------
 2 files changed, 282 insertions(+), 440 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5f01633126c7b..1fed0721c994d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18835,6 +18835,8 @@ static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
   case ISD::ADD:
   case ISD::OR:
   case ISD::XOR:
+  case ISD::UMIN:
+  case ISD::UMAX:
     break;
   }
 
diff --git a/llvm/test/CodeGen/RISCV/select-zbb.ll b/llvm/test/CodeGen/RISCV/select-zbb.ll
index 13e637909b43b..6bf4009eceea1 100644
--- a/llvm/test/CodeGen/RISCV/select-zbb.ll
+++ b/llvm/test/CodeGen/RISCV/select-zbb.ll
@@ -12,96 +12,80 @@
 define i32 @select_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV32IM-LABEL: select_umin_1:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    bgeu a1, a2, .LBB0_3
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    or a1, a0, a1
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    bltu a2, a1, .LBB0_2
 ; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB0_4
-; RV32IM-NEXT:  .LBB0_2: # %entry
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB0_3: # %entry
-; RV32IM-NEXT:    mv a1, a2
-; RV32IM-NEXT:    bnez a0, .LBB0_2
-; RV32IM-NEXT:  .LBB0_4: # %entry
-; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB0_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umin_1:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    mv a3, a0
+; RV64IM-NEXT:    sext.w a0, a2
+; RV64IM-NEXT:    addi a3, a3, -1
+; RV64IM-NEXT:    or a1, a3, a1
 ; RV64IM-NEXT:    sext.w a1, a1
-; RV64IM-NEXT:    bgeu a1, a3, .LBB0_3
+; RV64IM-NEXT:    bltu a0, a1, .LBB0_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB0_4
-; RV64IM-NEXT:  .LBB0_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB0_3: # %entry
-; RV64IM-NEXT:    mv a1, a3
-; RV64IM-NEXT:    bnez a0, .LBB0_2
-; RV64IM-NEXT:  .LBB0_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB0_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umin_1:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    beqz a0, .LBB0_2
-; RV32IMZBB-NEXT:  # %bb.1:
-; RV32IMZBB-NEXT:    minu a2, a1, a2
-; RV32IMZBB-NEXT:  .LBB0_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    addi a0, a0, -1
+; RV32IMZBB-NEXT:    or a0, a0, a1
+; RV32IMZBB-NEXT:    minu a0, a2, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umin_1:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    beqz a0, .LBB0_2
-; RV64IMZBB-NEXT:  # %bb.1:
 ; RV64IMZBB-NEXT:    sext.w a2, a2
-; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    minu a2, a1, a2
-; RV64IMZBB-NEXT:  .LBB0_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    addi a0, a0, -1
+; RV64IMZBB-NEXT:    or a0, a0, a1
+; RV64IMZBB-NEXT:    sext.w a0, a0
+; RV64IMZBB-NEXT:    minu a0, a2, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umin_1:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltu a3, a1, a2
-; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
-; RV32IMZICOND-NEXT:    or a1, a1, a4
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, -1
+; RV32IMZICOND-NEXT:    or a0, a0, a1
+; RV32IMZICOND-NEXT:    sltu a1, a2, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a1
+; RV32IMZICOND-NEXT:    czero.eqz a1, a2, a1
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umin_1:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a3, a2
-; RV64IMZICOND-NEXT:    sext.w a1, a1
-; RV64IMZICOND-NEXT:    sltu a4, a1, a3
-; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
-; RV64IMZICOND-NEXT:    or a1, a1, a3
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a2, a2
+; RV64IMZICOND-NEXT:    addi a0, a0, -1
+; RV64IMZICOND-NEXT:    or a0, a0, a1
+; RV64IMZICOND-NEXT:    sext.w a0, a0
+; RV64IMZICOND-NEXT:    sltu a1, a2, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a1
+; RV64IMZICOND-NEXT:    czero.eqz a1, a2, a1
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umin_1:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    minu a1, a1, a2
-; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
-; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
-; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    addi a0, a0, -1
+; RV32IMBOTH-NEXT:    or a0, a0, a1
+; RV32IMBOTH-NEXT:    minu a0, a2, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umin_1:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a3, a2
-; RV64IMBOTH-NEXT:    sext.w a1, a1
-; RV64IMBOTH-NEXT:    minu a1, a1, a3
-; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
-; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
-; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    sext.w a2, a2
+; RV64IMBOTH-NEXT:    addi a0, a0, -1
+; RV64IMBOTH-NEXT:    or a0, a0, a1
+; RV64IMBOTH-NEXT:    sext.w a0, a0
+; RV64IMBOTH-NEXT:    minu a0, a2, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umin(i32 %a, i32 %b)
@@ -112,99 +96,76 @@ entry:
 define i32 @select_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV32IM-LABEL: select_umin_2:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    li a3, 32
-; RV32IM-NEXT:    mv a2, a1
-; RV32IM-NEXT:    bgeu a1, a3, .LBB1_3
-; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB1_4
-; RV32IM-NEXT:  .LBB1_2: # %entry
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    ori a2, a0, 32
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB1_3: # %entry
-; RV32IM-NEXT:    li a2, 32
-; RV32IM-NEXT:    bnez a0, .LBB1_2
-; RV32IM-NEXT:  .LBB1_4: # %entry
+; RV32IM-NEXT:    bltu a1, a2, .LBB1_2
+; RV32IM-NEXT:  # %bb.1: # %entry
 ; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB1_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umin_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    sext.w a2, a1
-; RV64IM-NEXT:    li a3, 32
-; RV64IM-NEXT:    bgeu a2, a3, .LBB1_3
+; RV64IM-NEXT:    mv a2, a0
+; RV64IM-NEXT:    sext.w a0, a1
+; RV64IM-NEXT:    neg a1, a2
+; RV64IM-NEXT:    ori a1, a1, 32
+; RV64IM-NEXT:    bltu a0, a1, .LBB1_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB1_4
-; RV64IM-NEXT:  .LBB1_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB1_3: # %entry
-; RV64IM-NEXT:    li a2, 32
-; RV64IM-NEXT:    bnez a0, .LBB1_2
-; RV64IM-NEXT:  .LBB1_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB1_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umin_2:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    bnez a0, .LBB1_2
-; RV32IMZBB-NEXT:  # %bb.1: # %entry
-; RV32IMZBB-NEXT:    li a0, 32
-; RV32IMZBB-NEXT:    minu a1, a1, a0
-; RV32IMZBB-NEXT:  .LBB1_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    neg a0, a0
+; RV32IMZBB-NEXT:    ori a0, a0, 32
+; RV32IMZBB-NEXT:    minu a0, a1, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umin_2:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    bnez a0, .LBB1_2
-; RV64IMZBB-NEXT:  # %bb.1: # %entry
 ; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    li a0, 32
-; RV64IMZBB-NEXT:    minu a1, a1, a0
-; RV64IMZBB-NEXT:  .LBB1_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    neg a0, a0
+; RV64IMZBB-NEXT:    ori a0, a0, 32
+; RV64IMZBB-NEXT:    minu a0, a1, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umin_2:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
-; RV32IMZICOND-NEXT:    addi a3, a1, -32
-; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
-; RV32IMZICOND-NEXT:    addi a2, a2, 32
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    neg a0, a0
+; RV32IMZICOND-NEXT:    ori a0, a0, 32
+; RV32IMZICOND-NEXT:    sltu a2, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umin_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a2, a1
-; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
-; RV64IMZICOND-NEXT:    addi a2, a2, -32
-; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
-; RV64IMZICOND-NEXT:    addi a2, a2, 32
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    neg a0, a0
+; RV64IMZICOND-NEXT:    ori a0, a0, 32
+; RV64IMZICOND-NEXT:    sltu a2, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umin_2:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    li a2, 32
-; RV32IMBOTH-NEXT:    minu a2, a1, a2
-; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    neg a0, a0
+; RV32IMBOTH-NEXT:    ori a0, a0, 32
+; RV32IMBOTH-NEXT:    minu a0, a1, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umin_2:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a2, a1
-; RV64IMBOTH-NEXT:    li a3, 32
-; RV64IMBOTH-NEXT:    minu a2, a2, a3
-; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    neg a0, a0
+; RV64IMBOTH-NEXT:    ori a0, a0, 32
+; RV64IMBOTH-NEXT:    minu a0, a1, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umin(i32 %a, i32 32)
@@ -215,99 +176,76 @@ entry:
 define i32 @select_umin_3(i1 zeroext %cond, i32 %a) {
 ; RV32IM-LABEL: select_umin_3:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    li a3, 32
-; RV32IM-NEXT:    mv a2, a1
-; RV32IM-NEXT:    bgeu a1, a3, .LBB2_3
-; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB2_4
-; RV32IM-NEXT:  .LBB2_2: # %entry
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    ori a2, a0, 32
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB2_3: # %entry
-; RV32IM-NEXT:    li a2, 32
-; RV32IM-NEXT:    bnez a0, .LBB2_2
-; RV32IM-NEXT:  .LBB2_4: # %entry
+; RV32IM-NEXT:    bltu a1, a2, .LBB2_2
+; RV32IM-NEXT:  # %bb.1: # %entry
 ; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB2_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umin_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    sext.w a2, a1
-; RV64IM-NEXT:    li a3, 32
-; RV64IM-NEXT:    bgeu a2, a3, .LBB2_3
+; RV64IM-NEXT:    mv a2, a0
+; RV64IM-NEXT:    sext.w a0, a1
+; RV64IM-NEXT:    neg a1, a2
+; RV64IM-NEXT:    ori a1, a1, 32
+; RV64IM-NEXT:    bltu a0, a1, .LBB2_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB2_4
-; RV64IM-NEXT:  .LBB2_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB2_3: # %entry
-; RV64IM-NEXT:    li a2, 32
-; RV64IM-NEXT:    bnez a0, .LBB2_2
-; RV64IM-NEXT:  .LBB2_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB2_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umin_3:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    bnez a0, .LBB2_2
-; RV32IMZBB-NEXT:  # %bb.1: # %entry
-; RV32IMZBB-NEXT:    li a0, 32
-; RV32IMZBB-NEXT:    minu a1, a1, a0
-; RV32IMZBB-NEXT:  .LBB2_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    neg a0, a0
+; RV32IMZBB-NEXT:    ori a0, a0, 32
+; RV32IMZBB-NEXT:    minu a0, a1, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umin_3:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    bnez a0, .LBB2_2
-; RV64IMZBB-NEXT:  # %bb.1: # %entry
 ; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    li a0, 32
-; RV64IMZBB-NEXT:    minu a1, a1, a0
-; RV64IMZBB-NEXT:  .LBB2_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    neg a0, a0
+; RV64IMZBB-NEXT:    ori a0, a0, 32
+; RV64IMZBB-NEXT:    minu a0, a1, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umin_3:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
-; RV32IMZICOND-NEXT:    addi a3, a1, -32
-; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
-; RV32IMZICOND-NEXT:    addi a2, a2, 32
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    neg a0, a0
+; RV32IMZICOND-NEXT:    ori a0, a0, 32
+; RV32IMZICOND-NEXT:    sltu a2, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umin_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a2, a1
-; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
-; RV64IMZICOND-NEXT:    addi a2, a2, -32
-; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
-; RV64IMZICOND-NEXT:    addi a2, a2, 32
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    neg a0, a0
+; RV64IMZICOND-NEXT:    ori a0, a0, 32
+; RV64IMZICOND-NEXT:    sltu a2, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umin_3:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    li a2, 32
-; RV32IMBOTH-NEXT:    minu a2, a1, a2
-; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    neg a0, a0
+; RV32IMBOTH-NEXT:    ori a0, a0, 32
+; RV32IMBOTH-NEXT:    minu a0, a1, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umin_3:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a2, a1
-; RV64IMBOTH-NEXT:    li a3, 32
-; RV64IMBOTH-NEXT:    minu a2, a2, a3
-; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    neg a0, a0
+; RV64IMBOTH-NEXT:    ori a0, a0, 32
+; RV64IMBOTH-NEXT:    minu a0, a1, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umin(i32 %a, i32 32)
@@ -318,94 +256,80 @@ entry:
 define i32 @select_umin_4(i1 zeroext %cond, i32 %x) {
 ; RV32IM-LABEL: select_umin_4:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    li a2, 128
-; RV32IM-NEXT:    bgeu a1, a2, .LBB3_3
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bltu a0, a1, .LBB3_2
 ; RV32IM-NEXT:  # %bb.1:
-; RV32IM-NEXT:    beqz a0, .LBB3_4
+; RV32IM-NEXT:    li a0, 128
 ; RV32IM-NEXT:  .LBB3_2:
-; RV32IM-NEXT:    mv a0, a2
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB3_3:
-; RV32IM-NEXT:    li a1, 128
-; RV32IM-NEXT:    bnez a0, .LBB3_2
-; RV32IM-NEXT:  .LBB3_4:
-; RV32IM-NEXT:    mv a0, a1
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umin_4:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    neg a0, a0
+; RV64IM-NEXT:    or a0, a0, a1
+; RV64IM-NEXT:    sext.w a0, a0
 ; RV64IM-NEXT:    li a1, 128
-; RV64IM-NEXT:    bgeu a2, a1, .LBB3_3
+; RV64IM-NEXT:    bltu a0, a1, .LBB3_2
 ; RV64IM-NEXT:  # %bb.1:
-; RV64IM-NEXT:    beqz a0, .LBB3_4
+; RV64IM-NEXT:    li a0, 128
 ; RV64IM-NEXT:  .LBB3_2:
-; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB3_3:
-; RV64IM-NEXT:    li a2, 128
-; RV64IM-NEXT:    bnez a0, .LBB3_2
-; RV64IM-NEXT:  .LBB3_4:
-; RV64IM-NEXT:    mv a0, a2
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umin_4:
 ; RV32IMZBB:       # %bb.0:
-; RV32IMZBB-NEXT:    mv a2, a0
-; RV32IMZBB-NEXT:    li a0, 128
-; RV32IMZBB-NEXT:    bnez a2, .LBB3_2
-; RV32IMZBB-NEXT:  # %bb.1:
-; RV32IMZBB-NEXT:    minu a0, a1, a0
-; RV32IMZBB-NEXT:  .LBB3_2:
+; RV32IMZBB-NEXT:    neg a0, a0
+; RV32IMZBB-NEXT:    or a0, a0, a1
+; RV32IMZBB-NEXT:    li a1, 128
+; RV32IMZBB-NEXT:    minu a0, a0, a1
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umin_4:
 ; RV64IMZBB:       # %bb.0:
-; RV64IMZBB-NEXT:    mv a2, a0
-; RV64IMZBB-NEXT:    li a0, 128
-; RV64IMZBB-NEXT:    bnez a2, .LBB3_2
-; RV64IMZBB-NEXT:  # %bb.1:
-; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    minu a0, a1, a0
-; RV64IMZBB-NEXT:  .LBB3_2:
+; RV64IMZBB-NEXT:    neg a0, a0
+; RV64IMZBB-NEXT:    or a0, a0, a1
+; RV64IMZBB-NEXT:    sext.w a0, a0
+; RV64IMZBB-NEXT:    li a1, 128
+; RV64IMZBB-NEXT:    minu a0, a0, a1
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umin_4:
 ; RV32IMZICOND:       # %bb.0:
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 128
-; RV32IMZICOND-NEXT:    addi a1, a1, -128
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
-; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    neg a0, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a1
+; RV32IMZICOND-NEXT:    sltiu a1, a0, 128
+; RV32IMZICOND-NEXT:    addi a0, a0, -128
+; RV32IMZICOND-NEXT:    czero.eqz a0, a0, a1
 ; RV32IMZICOND-NEXT:    addi a0, a0, 128
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umin_4:
 ; RV64IMZICOND:       # %bb.0:
-; RV64IMZICOND-NEXT:    sext.w a1, a1
-; RV64IMZICOND-NEXT:    sltiu a2, a1, 128
-; RV64IMZICOND-NEXT:    addi a1, a1, -128
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
-; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    neg a0, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a1
+; RV64IMZICOND-NEXT:    sext.w a0, a0
+; RV64IMZICOND-NEXT:    sltiu a1, a0, 128
+; RV64IMZICOND-NEXT:    addi a0, a0, -128
+; RV64IMZICOND-NEXT:    czero.eqz a0, a0, a1
 ; RV64IMZICOND-NEXT:    addi a0, a0, 128
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umin_4:
 ; RV32IMBOTH:       # %bb.0:
-; RV32IMBOTH-NEXT:    li a2, 128
-; RV32IMBOTH-NEXT:    minu a1, a1, a2
-; RV32IMBOTH-NEXT:    addi a1, a1, -128
-; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
-; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    neg a0, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a1
+; RV32IMBOTH-NEXT:    li a1, 128
+; RV32IMBOTH-NEXT:    minu a0, a0, a1
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umin_4:
 ; RV64IMBOTH:       # %bb.0:
-; RV64IMBOTH-NEXT:    sext.w a1, a1
-; RV64IMBOTH-NEXT:    li a2, 128
-; RV64IMBOTH-NEXT:    minu a1, a1, a2
-; RV64IMBOTH-NEXT:    addi a1, a1, -128
-; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
-; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    neg a0, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a1
+; RV64IMBOTH-NEXT:    sext.w a0, a0
+; RV64IMBOTH-NEXT:    li a1, 128
+; RV64IMBOTH-NEXT:    minu a0, a0, a1
 ; RV64IMBOTH-NEXT:    ret
   %add = call i32 @llvm.umin(i32 %x, i32 128)
   %sel = select i1 %cond, i32 128, i32 %add
@@ -415,96 +339,76 @@ define i32 @select_umin_4(i1 zeroext %cond, i32 %x) {
 define i32 @select_umax_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV32IM-LABEL: select_umax_1:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    bgeu a2, a1, .LBB4_3
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    and a1, a0, a1
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    bltu a1, a2, .LBB4_2
 ; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB4_4
-; RV32IM-NEXT:  .LBB4_2: # %entry
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB4_3: # %entry
-; RV32IM-NEXT:    mv a1, a2
-; RV32IM-NEXT:    bnez a0, .LBB4_2
-; RV32IM-NEXT:  .LBB4_4: # %entry
-; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB4_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umax_1:
 ; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    mv a3, a0
+; RV64IM-NEXT:    sext.w a0, a2
+; RV64IM-NEXT:    neg a2, a3
+; RV64IM-NEXT:    and a1, a2, a1
 ; RV64IM-NEXT:    sext.w a1, a1
-; RV64IM-NEXT:    sext.w a3, a2
-; RV64IM-NEXT:    bgeu a3, a1, .LBB4_3
+; RV64IM-NEXT:    bltu a1, a0, .LBB4_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB4_4
-; RV64IM-NEXT:  .LBB4_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB4_3: # %entry
-; RV64IM-NEXT:    mv a1, a3
-; RV64IM-NEXT:    bnez a0, .LBB4_2
-; RV64IM-NEXT:  .LBB4_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB4_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umax_1:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    beqz a0, .LBB4_2
-; RV32IMZBB-NEXT:  # %bb.1:
-; RV32IMZBB-NEXT:    maxu a2, a1, a2
-; RV32IMZBB-NEXT:  .LBB4_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    neg a0, a0
+; RV32IMZBB-NEXT:    and a0, a0, a1
+; RV32IMZBB-NEXT:    maxu a0, a2, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umax_1:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    beqz a0, .LBB4_2
-; RV64IMZBB-NEXT:  # %bb.1:
 ; RV64IMZBB-NEXT:    sext.w a2, a2
-; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    maxu a2, a1, a2
-; RV64IMZBB-NEXT:  .LBB4_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    neg a0, a0
+; RV64IMZBB-NEXT:    and a0, a0, a1
+; RV64IMZBB-NEXT:    sext.w a0, a0
+; RV64IMZBB-NEXT:    maxu a0, a2, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umax_1:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltu a3, a2, a1
-; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
-; RV32IMZICOND-NEXT:    or a1, a1, a4
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    sltu a1, a0, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a1
+; RV32IMZICOND-NEXT:    czero.eqz a1, a2, a1
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umax_1:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a1, a1
-; RV64IMZICOND-NEXT:    sext.w a3, a2
-; RV64IMZICOND-NEXT:    sltu a4, a3, a1
-; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
-; RV64IMZICOND-NEXT:    or a1, a1, a3
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a2, a2
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    sext.w a0, a0
+; RV64IMZICOND-NEXT:    sltu a1, a0, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a1
+; RV64IMZICOND-NEXT:    czero.eqz a1, a2, a1
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umax_1:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    maxu a1, a1, a2
-; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
 ; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
-; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    maxu a0, a2, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umax_1:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a3, a2
-; RV64IMBOTH-NEXT:    sext.w a1, a1
-; RV64IMBOTH-NEXT:    maxu a1, a1, a3
-; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    sext.w a2, a2
 ; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
-; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    sext.w a0, a0
+; RV64IMBOTH-NEXT:    maxu a0, a2, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umax(i32 %a, i32 %b)
@@ -515,99 +419,76 @@ entry:
 define i32 @select_umax_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV32IM-LABEL: select_umax_2:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    li a3, 32
-; RV32IM-NEXT:    mv a2, a1
-; RV32IM-NEXT:    bgeu a3, a1, .LBB5_3
-; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB5_4
-; RV32IM-NEXT:  .LBB5_2: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    andi a2, a0, 32
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB5_3: # %entry
-; RV32IM-NEXT:    li a2, 32
-; RV32IM-NEXT:    bnez a0, .LBB5_2
-; RV32IM-NEXT:  .LBB5_4: # %entry
+; RV32IM-NEXT:    bltu a2, a1, .LBB5_2
+; RV32IM-NEXT:  # %bb.1: # %entry
 ; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB5_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umax_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    sext.w a2, a1
-; RV64IM-NEXT:    li a3, 32
-; RV64IM-NEXT:    bgeu a3, a2, .LBB5_3
+; RV64IM-NEXT:    mv a2, a0
+; RV64IM-NEXT:    sext.w a0, a1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    andi a1, a2, 32
+; RV64IM-NEXT:    bltu a1, a0, .LBB5_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB5_4
-; RV64IM-NEXT:  .LBB5_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB5_3: # %entry
-; RV64IM-NEXT:    li a2, 32
-; RV64IM-NEXT:    bnez a0, .LBB5_2
-; RV64IM-NEXT:  .LBB5_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB5_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umax_2:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    bnez a0, .LBB5_2
-; RV32IMZBB-NEXT:  # %bb.1: # %entry
-; RV32IMZBB-NEXT:    li a0, 32
-; RV32IMZBB-NEXT:    maxu a1, a1, a0
-; RV32IMZBB-NEXT:  .LBB5_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    addi a0, a0, -1
+; RV32IMZBB-NEXT:    andi a0, a0, 32
+; RV32IMZBB-NEXT:    maxu a0, a1, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umax_2:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    bnez a0, .LBB5_2
-; RV64IMZBB-NEXT:  # %bb.1: # %entry
 ; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    li a0, 32
-; RV64IMZBB-NEXT:    maxu a1, a1, a0
-; RV64IMZBB-NEXT:  .LBB5_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    addi a0, a0, -1
+; RV64IMZBB-NEXT:    andi a0, a0, 32
+; RV64IMZBB-NEXT:    maxu a0, a1, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umax_2:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 33
-; RV32IMZICOND-NEXT:    addi a3, a1, -32
-; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
-; RV32IMZICOND-NEXT:    addi a2, a2, 32
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, -1
+; RV32IMZICOND-NEXT:    andi a0, a0, 32
+; RV32IMZICOND-NEXT:    sltu a2, a0, a1
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umax_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a2, a1
-; RV64IMZICOND-NEXT:    sltiu a3, a2, 33
-; RV64IMZICOND-NEXT:    addi a2, a2, -32
-; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
-; RV64IMZICOND-NEXT:    addi a2, a2, 32
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    addi a0, a0, -1
+; RV64IMZICOND-NEXT:    andi a0, a0, 32
+; RV64IMZICOND-NEXT:    sltu a2, a0, a1
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umax_2:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    li a2, 32
-; RV32IMBOTH-NEXT:    maxu a2, a1, a2
-; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, -1
+; RV32IMBOTH-NEXT:    andi a0, a0, 32
+; RV32IMBOTH-NEXT:    maxu a0, a1, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umax_2:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a2, a1
-; RV64IMBOTH-NEXT:    li a3, 32
-; RV64IMBOTH-NEXT:    maxu a2, a2, a3
-; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    addi a0, a0, -1
+; RV64IMBOTH-NEXT:    andi a0, a0, 32
+; RV64IMBOTH-NEXT:    maxu a0, a1, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umax(i32 %a, i32 32)
@@ -618,99 +499,76 @@ entry:
 define i32 @select_umax_3(i1 zeroext %cond, i32 %a) {
 ; RV32IM-LABEL: select_umax_3:
 ; RV32IM:       # %bb.0: # %entry
-; RV32IM-NEXT:    li a3, 32
-; RV32IM-NEXT:    mv a2, a1
-; RV32IM-NEXT:    bgeu a3, a1, .LBB6_3
-; RV32IM-NEXT:  # %bb.1: # %entry
-; RV32IM-NEXT:    beqz a0, .LBB6_4
-; RV32IM-NEXT:  .LBB6_2: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    andi a2, a0, 32
 ; RV32IM-NEXT:    mv a0, a1
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB6_3: # %entry
-; RV32IM-NEXT:    li a2, 32
-; RV32IM-NEXT:    bnez a0, .LBB6_2
-; RV32IM-NEXT:  .LBB6_4: # %entry
+; RV32IM-NEXT:    bltu a2, a1, .LBB6_2
+; RV32IM-NEXT:  # %bb.1: # %entry
 ; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:  .LBB6_2: # %entry
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umax_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    sext.w a2, a1
-; RV64IM-NEXT:    li a3, 32
-; RV64IM-NEXT:    bgeu a3, a2, .LBB6_3
+; RV64IM-NEXT:    mv a2, a0
+; RV64IM-NEXT:    sext.w a0, a1
+; RV64IM-NEXT:    addi a2, a2, -1
+; RV64IM-NEXT:    andi a1, a2, 32
+; RV64IM-NEXT:    bltu a1, a0, .LBB6_2
 ; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB6_4
-; RV64IM-NEXT:  .LBB6_2: # %entry
 ; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB6_3: # %entry
-; RV64IM-NEXT:    li a2, 32
-; RV64IM-NEXT:    bnez a0, .LBB6_2
-; RV64IM-NEXT:  .LBB6_4: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:  .LBB6_2: # %entry
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umax_3:
 ; RV32IMZBB:       # %bb.0: # %entry
-; RV32IMZBB-NEXT:    bnez a0, .LBB6_2
-; RV32IMZBB-NEXT:  # %bb.1: # %entry
-; RV32IMZBB-NEXT:    li a0, 32
-; RV32IMZBB-NEXT:    maxu a1, a1, a0
-; RV32IMZBB-NEXT:  .LBB6_2: # %entry
-; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    addi a0, a0, -1
+; RV32IMZBB-NEXT:    andi a0, a0, 32
+; RV32IMZBB-NEXT:    maxu a0, a1, a0
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umax_3:
 ; RV64IMZBB:       # %bb.0: # %entry
-; RV64IMZBB-NEXT:    bnez a0, .LBB6_2
-; RV64IMZBB-NEXT:  # %bb.1: # %entry
 ; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    li a0, 32
-; RV64IMZBB-NEXT:    maxu a1, a1, a0
-; RV64IMZBB-NEXT:  .LBB6_2: # %entry
-; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    addi a0, a0, -1
+; RV64IMZBB-NEXT:    andi a0, a0, 32
+; RV64IMZBB-NEXT:    maxu a0, a1, a0
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umax_3:
 ; RV32IMZICOND:       # %bb.0: # %entry
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 33
-; RV32IMZICOND-NEXT:    addi a3, a1, -32
-; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
-; RV32IMZICOND-NEXT:    addi a2, a2, 32
-; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, -1
+; RV32IMZICOND-NEXT:    andi a0, a0, 32
+; RV32IMZICOND-NEXT:    sltu a2, a0, a1
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV32IMZICOND-NEXT:    or a0, a1, a0
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umax_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    sext.w a2, a1
-; RV64IMZICOND-NEXT:    sltiu a3, a2, 33
-; RV64IMZICOND-NEXT:    addi a2, a2, -32
-; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
-; RV64IMZICOND-NEXT:    addi a2, a2, 32
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    addi a0, a0, -1
+; RV64IMZICOND-NEXT:    andi a0, a0, 32
+; RV64IMZICOND-NEXT:    sltu a2, a0, a1
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
 ; RV64IMZICOND-NEXT:    or a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umax_3:
 ; RV32IMBOTH:       # %bb.0: # %entry
-; RV32IMBOTH-NEXT:    li a2, 32
-; RV32IMBOTH-NEXT:    maxu a2, a1, a2
-; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, -1
+; RV32IMBOTH-NEXT:    andi a0, a0, 32
+; RV32IMBOTH-NEXT:    maxu a0, a1, a0
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umax_3:
 ; RV64IMBOTH:       # %bb.0: # %entry
-; RV64IMBOTH-NEXT:    sext.w a2, a1
-; RV64IMBOTH-NEXT:    li a3, 32
-; RV64IMBOTH-NEXT:    maxu a2, a2, a3
-; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
-; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
-; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    addi a0, a0, -1
+; RV64IMBOTH-NEXT:    andi a0, a0, 32
+; RV64IMBOTH-NEXT:    maxu a0, a1, a0
 ; RV64IMBOTH-NEXT:    ret
 entry:
   %c = call i32 @llvm.umax(i32 %a, i32 32)
@@ -721,94 +579,76 @@ entry:
 define i32 @select_umax_4(i1 zeroext %cond, i32 %x) {
 ; RV32IM-LABEL: select_umax_4:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    li a2, 128
-; RV32IM-NEXT:    bgeu a2, a1, .LBB7_3
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bltu a1, a0, .LBB7_2
 ; RV32IM-NEXT:  # %bb.1:
-; RV32IM-NEXT:    beqz a0, .LBB7_4
+; RV32IM-NEXT:    li a0, 128
 ; RV32IM-NEXT:  .LBB7_2:
-; RV32IM-NEXT:    mv a0, a2
-; RV32IM-NEXT:    ret
-; RV32IM-NEXT:  .LBB7_3:
-; RV32IM-NEXT:    li a1, 128
-; RV32IM-NEXT:    bnez a0, .LBB7_2
-; RV32IM-NEXT:  .LBB7_4:
-; RV32IM-NEXT:    mv a0, a1
 ; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_umax_4:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    addi a0, a0, -1
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    sext.w a0, a0
 ; RV64IM-NEXT:    li a1, 128
-; RV64IM-NEXT:    bgeu a1, a2, .LBB7_3
+; RV64IM-NEXT:    bltu a1, a0, .LBB7_2
 ; RV64IM-NEXT:  # %bb.1:
-; RV64IM-NEXT:    beqz a0, .LBB7_4
+; RV64IM-NEXT:    li a0, 128
 ; RV64IM-NEXT:  .LBB7_2:
-; RV64IM-NEXT:    mv a0, a1
-; RV64IM-NEXT:    ret
-; RV64IM-NEXT:  .LBB7_3:
-; RV64IM-NEXT:    li a2, 128
-; RV64IM-NEXT:    bnez a0, .LBB7_2
-; RV64IM-NEXT:  .LBB7_4:
-; RV64IM-NEXT:    mv a0, a2
 ; RV64IM-NEXT:    ret
 ;
 ; RV32IMZBB-LABEL: select_umax_4:
 ; RV32IMZBB:       # %bb.0:
-; RV32IMZBB-NEXT:    mv a2, a0
-; RV32IMZBB-NEXT:    li a0, 128
-; RV32IMZBB-NEXT:    bnez a2, .LBB7_2
-; RV32IMZBB-NEXT:  # %bb.1:
-; RV32IMZBB-NEXT:    maxu a0, a1, a0
-; RV32IMZBB-NEXT:  .LBB7_2:
+; RV32IMZBB-NEXT:    addi a0, a0, -1
+; RV32IMZBB-NEXT:    and a0, a0, a1
+; RV32IMZBB-NEXT:    li a1, 128
+; RV32IMZBB-NEXT:    maxu a0, a0, a1
 ; RV32IMZBB-NEXT:    ret
 ;
 ; RV64IMZBB-LABEL: select_umax_4:
 ; RV64IMZBB:       # %bb.0:
-; RV64IMZBB-NEXT:    mv a2, a0
-; RV64IMZBB-NEXT:    li a0, 128
-; RV64IMZBB-NEXT:    bnez a2, .LBB7_2
-; RV64IMZBB-NEXT:  # %bb.1:
-; RV64IMZBB-NEXT:    sext.w a1, a1
-; RV64IMZBB-NEXT:    maxu a0, a1, a0
-; RV64IMZBB-NEXT:  .LBB7_2:
+; RV64IMZBB-NEXT:    addi a0, a0, -1
+; RV64IMZBB-NEXT:    and a0, a0, a1
+; RV64IMZBB-NEXT:    sext.w a0, a0
+; RV64IMZBB-NEXT:    li a1, 128
+; RV64IMZBB-NEXT:    maxu a0, a0, a1
 ; RV64IMZBB-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_umax_4:
 ; RV32IMZICOND:       # %bb.0:
-; RV32IMZICOND-NEXT:    sltiu a2, a1, 129
-; RV32IMZICOND-NEXT:    addi a1, a1, -128
-; RV32IMZICOND-NEXT:    czero.nez a1, a1, a2
 ; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    sltiu a1, a0, 129
+; RV32IMZICOND-NEXT:    addi a0, a0, -128
+; RV32IMZICOND-NEXT:    czero.nez a0, a0, a1
 ; RV32IMZICOND-NEXT:    addi a0, a0, 128
 ; RV32IMZICOND-NEXT:    ret
 ;
 ; RV64IMZICOND-LABEL: select_umax_4:
 ; RV64IMZICOND:       # %bb.0:
-; RV64IMZICOND-NEXT:    sext.w a1, a1
-; RV64IMZICOND-NEXT:    sltiu a2, a1, 129
-; RV64IMZICOND-NEXT:    addi a1, a1, -128
-; RV64IMZICOND-NEXT:    czero.nez a1, a1, a2
 ; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    sext.w a0, a0
+; RV64IMZICOND-NEXT:    sltiu a1, a0, 129
+; RV64IMZICOND-NEXT:    addi a0, a0, -128
+; RV64IMZICOND-NEXT:    czero.nez a0, a0, a1
 ; RV64IMZICOND-NEXT:    addi a0, a0, 128
 ; RV64IMZICOND-NEXT:    ret
 ;
 ; RV32IMBOTH-LABEL: select_umax_4:
 ; RV32IMBOTH:       # %bb.0:
-; RV32IMBOTH-NEXT:    li a2, 128
-; RV32IMBOTH-NEXT:    maxu a1, a1, a2
-; RV32IMBOTH-NEXT:    addi a1, a1, -128
 ; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
-; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    li a1, 128
+; RV32IMBOTH-NEXT:    maxu a0, a0, a1
 ; RV32IMBOTH-NEXT:    ret
 ;
 ; RV64IMBOTH-LABEL: select_umax_4:
 ; RV64IMBOTH:       # %bb.0:
-; RV64IMBOTH-NEXT:    sext.w a1, a1
-; RV64IMBOTH-NEXT:    li a2, 128
-; RV64IMBOTH-NEXT:    maxu a1, a1, a2
-; RV64IMBOTH-NEXT:    addi a1, a1, -128
 ; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
-; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    sext.w a0, a0
+; RV64IMBOTH-NEXT:    li a1, 128
+; RV64IMBOTH-NEXT:    maxu a0, a0, a1
 ; RV64IMBOTH-NEXT:    ret
   %add = call i32 @llvm.umax(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
   ret i32 %sel
