[llvm] fa21fcb - [RISCV] Add short forward branch support for `min`, `max`, `maxu` and `minu` (#164394)

via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 30 21:49:29 PDT 2025


Author: quic_hchandel
Date: 2025-10-31T10:19:25+05:30
New Revision: fa21fcbb5bbe26a70547625bd0bf929768f6ac43

URL: https://github.com/llvm/llvm-project/commit/fa21fcbb5bbe26a70547625bd0bf929768f6ac43
DIFF: https://github.com/llvm/llvm-project/commit/fa21fcbb5bbe26a70547625bd0bf929768f6ac43.diff

LOG: [RISCV] Add short forward branch support for `min`, `max`, `maxu` and `minu` (#164394)
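A minimal illustration of the pattern this enables (taken from the test file added below, not part of the original commit message): a Zbb min/max feeding a select, e.g.

    define i32 @select_example_smin(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
    entry:
      %res = call i32 @llvm.smin.i32(i32 %a, i32 %y)
      %sel = select i1 %x, i32 %res, i32 %b
      ret i32 %sel
    }

With -mattr=+zbb,+short-forward-branch-i-minmax, llc can predicate the `min` itself under the short forward branch (via the new PseudoCCMIN/PseudoCCMAX pseudos); with only +short-forward-branch-opt, min/max are intentionally not folded, as gated by the new check in canFoldAsPredicatedOp.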

Added: 
    llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
    llvm/lib/Target/RISCV/RISCVFeatures.td
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
    llvm/test/CodeGen/RISCV/features-info.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 410561855e181..526675a682d86 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -127,6 +127,10 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case RISCV::PseudoCCAND:
   case RISCV::PseudoCCOR:
   case RISCV::PseudoCCXOR:
+  case RISCV::PseudoCCMAX:
+  case RISCV::PseudoCCMAXU:
+  case RISCV::PseudoCCMIN:
+  case RISCV::PseudoCCMINU:
   case RISCV::PseudoCCADDW:
   case RISCV::PseudoCCSUBW:
   case RISCV::PseudoCCSLL:
@@ -217,6 +221,7 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
         .addImm(0);
   } else {
     unsigned NewOpc;
+    // clang-format off
     switch (MI.getOpcode()) {
     default:
       llvm_unreachable("Unexpected opcode!");
@@ -228,6 +233,10 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
     case RISCV::PseudoCCAND:   NewOpc = RISCV::AND;   break;
     case RISCV::PseudoCCOR:    NewOpc = RISCV::OR;    break;
     case RISCV::PseudoCCXOR:   NewOpc = RISCV::XOR;   break;
+    case RISCV::PseudoCCMAX:   NewOpc = RISCV::MAX;   break;
+    case RISCV::PseudoCCMIN:   NewOpc = RISCV::MIN;   break;
+    case RISCV::PseudoCCMAXU:  NewOpc = RISCV::MAXU;  break;
+    case RISCV::PseudoCCMINU:  NewOpc = RISCV::MINU;  break;
     case RISCV::PseudoCCADDI:  NewOpc = RISCV::ADDI;  break;
     case RISCV::PseudoCCSLLI:  NewOpc = RISCV::SLLI;  break;
     case RISCV::PseudoCCSRLI:  NewOpc = RISCV::SRLI;  break;
@@ -250,6 +259,7 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
     case RISCV::PseudoCCNDS_BFOS: NewOpc = RISCV::NDS_BFOS; break;
     case RISCV::PseudoCCNDS_BFOZ: NewOpc = RISCV::NDS_BFOZ; break;
     }
+    // clang-format on
 
     if (NewOpc == RISCV::NDS_BFOZ || NewOpc == RISCV::NDS_BFOS) {
       BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg)

diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index b4556f66473d6..cfee6ab22d4ff 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1851,6 +1851,11 @@ def TuneShortForwardBranchOpt
 def HasShortForwardBranchOpt : Predicate<"Subtarget->hasShortForwardBranchOpt()">;
 def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()">;
 
+def TuneShortForwardBranchIMinMax
+    : SubtargetFeature<"short-forward-branch-i-minmax", "HasShortForwardBranchIMinMax",
+                       "true", "Enable short forward branch optimization for min,max instructions in Zbb",
+                       [TuneShortForwardBranchOpt]>;
+
 // Some subtargets require a S2V transfer buffer to move scalars into vectors.
 // FIXME: Forming .vx/.vf/.wx/.wf can reduce register pressure.
 def TuneNoSinkSplatOperands

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 912b82d294f44..3a7013d9efae6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1699,6 +1699,10 @@ unsigned getPredicatedOpcode(unsigned Opcode) {
   case RISCV::AND:   return RISCV::PseudoCCAND;
   case RISCV::OR:    return RISCV::PseudoCCOR;
   case RISCV::XOR:   return RISCV::PseudoCCXOR;
+  case RISCV::MAX:   return RISCV::PseudoCCMAX;
+  case RISCV::MAXU:  return RISCV::PseudoCCMAXU;
+  case RISCV::MIN:   return RISCV::PseudoCCMIN;
+  case RISCV::MINU:  return RISCV::PseudoCCMINU;
 
   case RISCV::ADDI:  return RISCV::PseudoCCADDI;
   case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
@@ -1735,7 +1739,8 @@ unsigned getPredicatedOpcode(unsigned Opcode) {
 /// return the defining instruction.
 static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                            const MachineRegisterInfo &MRI,
-                                           const TargetInstrInfo *TII) {
+                                           const TargetInstrInfo *TII,
+                                           const RISCVSubtarget &STI) {
   if (!Reg.isVirtual())
     return nullptr;
   if (!MRI.hasOneNonDBGUse(Reg))
@@ -1743,6 +1748,12 @@ static MachineInstr *canFoldAsPredicatedOp(Register Reg,
   MachineInstr *MI = MRI.getVRegDef(Reg);
   if (!MI)
     return nullptr;
+
+  if (!STI.hasShortForwardBranchIMinMax() &&
+      (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
+       MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))
+    return nullptr;
+
   // Check if MI can be predicated and folded into the CCMOV.
   if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
     return nullptr;
@@ -1806,10 +1817,10 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
 
   MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   MachineInstr *DefMI =
-      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
+      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this, STI);
   bool Invert = !DefMI;
   if (!DefMI)
-    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
+    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this, STI);
   if (!DefMI)
     return nullptr;
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
index 0114fbdc56302..5a67a5aaba293 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
@@ -106,6 +106,10 @@ def PseudoCCSRA : SFBALU_rr;
 def PseudoCCAND : SFBALU_rr;
 def PseudoCCOR  : SFBALU_rr;
 def PseudoCCXOR : SFBALU_rr;
+def PseudoCCMAX : SFBALU_rr;
+def PseudoCCMIN : SFBALU_rr;
+def PseudoCCMAXU : SFBALU_rr;
+def PseudoCCMINU : SFBALU_rr;
 
 def PseudoCCADDI : SFBALU_ri;
 def PseudoCCANDI : SFBALU_ri;

diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 37e11dbb12731..988d0490afeb6 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -136,6 +136,7 @@
 ; CHECK-NEXT:   shgatpa                          - 'Shgatpa' (SvNNx4 mode supported for all modes supported by satp, as well as Bare).
 ; CHECK-NEXT:   shifted-zextw-fusion             - Enable SLLI+SRLI to be fused when computing (shifted) word zero extension.
 ; CHECK-NEXT:   shlcofideleg                     - 'Shlcofideleg' (Delegating LCOFI Interrupts to VS-mode).
+; CHECK-NEXT:   short-forward-branch-i-minmax    - Enable short forward branch optimization for min,max instructions in Zbb.
 ; CHECK-NEXT:   short-forward-branch-opt         - Enable short forward branch optimization.
 ; CHECK-NEXT:   shtvala                          - 'Shtvala' (htval provides all needed values).
 ; CHECK-NEXT:   shvsatpa                         - 'Shvsatpa' (vsatp supports all modes supported by satp).

diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll
new file mode 100644
index 0000000000000..05e06cea9967a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll
@@ -0,0 +1,703 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=RV32I-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=RV64I-ZBB
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb,+short-forward-branch-opt | \
+; RUN:   FileCheck %s --check-prefixes=RV32I-SFB-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb,+short-forward-branch-opt | \
+; RUN:   FileCheck %s --check-prefixes=RV64I-SFB-ZBB
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb,+short-forward-branch-i-minmax | \
+; RUN:   FileCheck %s --check-prefixes=RV32I-SFBIMinMax-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb,+short-forward-branch-i-minmax | \
+; RUN:   FileCheck %s --check-prefixes=RV64I-SFBIMinMax-ZBB
+
+define i32 @select_example_smax(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_smax:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beqz a2, .LBB0_2
+; RV32I-ZBB-NEXT:  # %bb.1:
+; RV32I-ZBB-NEXT:    max a1, a0, a3
+; RV32I-ZBB-NEXT:  .LBB0_2: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a1
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_smax:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB0_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    sext.w a3, a3
+; RV64I-ZBB-NEXT:    sext.w a0, a0
+; RV64I-ZBB-NEXT:    max a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB0_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smax:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    max a0, a0, a3
+; RV32I-SFB-ZBB-NEXT:    bnez a2, .LBB0_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a1
+; RV32I-SFB-ZBB-NEXT:  .LBB0_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smax:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT:    max a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB0_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB0_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smax:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB0_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    max a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB0_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smax:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB0_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    max a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB0_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i32 @llvm.smax.i32(i32 %a, i32 %y)
+  %sel = select i1 %x, i32 %res, i32 %b
+  ret i32 %sel
+}
+
+define i32 @select_example_smin(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_smin:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beqz a2, .LBB1_2
+; RV32I-ZBB-NEXT:  # %bb.1:
+; RV32I-ZBB-NEXT:    min a1, a0, a3
+; RV32I-ZBB-NEXT:  .LBB1_2: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a1
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_smin:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB1_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    sext.w a3, a3
+; RV64I-ZBB-NEXT:    sext.w a0, a0
+; RV64I-ZBB-NEXT:    min a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB1_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smin:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    min a0, a0, a3
+; RV32I-SFB-ZBB-NEXT:    bnez a2, .LBB1_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a1
+; RV32I-SFB-ZBB-NEXT:  .LBB1_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smin:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT:    min a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB1_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB1_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smin:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB1_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    min a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB1_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smin:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB1_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    min a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB1_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i32 @llvm.smin.i32(i32 %a, i32 %y)
+  %sel = select i1 %x, i32 %res, i32 %b
+  ret i32 %sel
+}
+
+define i32 @select_example_umax(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_umax:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beqz a2, .LBB2_2
+; RV32I-ZBB-NEXT:  # %bb.1:
+; RV32I-ZBB-NEXT:    maxu a1, a0, a3
+; RV32I-ZBB-NEXT:  .LBB2_2: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a1
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_umax:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB2_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    sext.w a3, a3
+; RV64I-ZBB-NEXT:    sext.w a0, a0
+; RV64I-ZBB-NEXT:    maxu a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB2_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umax:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    maxu a0, a0, a3
+; RV32I-SFB-ZBB-NEXT:    bnez a2, .LBB2_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a1
+; RV32I-SFB-ZBB-NEXT:  .LBB2_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umax:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT:    maxu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB2_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB2_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umax:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB2_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    maxu a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB2_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umax:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB2_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    maxu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB2_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i32 @llvm.umax.i32(i32 %a, i32 %y)
+  %sel = select i1 %x, i32 %res, i32 %b
+  ret i32 %sel
+}
+
+define i32 @select_example_umin(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_umin:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beqz a2, .LBB3_2
+; RV32I-ZBB-NEXT:  # %bb.1:
+; RV32I-ZBB-NEXT:    minu a1, a0, a3
+; RV32I-ZBB-NEXT:  .LBB3_2: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a1
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_umin:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB3_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    sext.w a3, a3
+; RV64I-ZBB-NEXT:    sext.w a0, a0
+; RV64I-ZBB-NEXT:    minu a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB3_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umin:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    minu a0, a0, a3
+; RV32I-SFB-ZBB-NEXT:    bnez a2, .LBB3_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a1
+; RV32I-SFB-ZBB-NEXT:  .LBB3_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umin:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT:    minu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB3_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB3_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umin:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB3_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    minu a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB3_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umin:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:    sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB3_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    minu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB3_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i32 @llvm.umin.i32(i32 %a, i32 %y)
+  %sel = select i1 %x, i32 %res, i32 %b
+  ret i32 %sel
+}
+
+define i64 @select_example_smax_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_smax_1:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beq a1, a6, .LBB4_2
+; RV32I-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-ZBB-NEXT:    slt a7, a6, a1
+; RV32I-ZBB-NEXT:    beqz a7, .LBB4_3
+; RV32I-ZBB-NEXT:    j .LBB4_4
+; RV32I-ZBB-NEXT:  .LBB4_2:
+; RV32I-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-ZBB-NEXT:    bnez a7, .LBB4_4
+; RV32I-ZBB-NEXT:  .LBB4_3: # %entry
+; RV32I-ZBB-NEXT:    mv a1, a6
+; RV32I-ZBB-NEXT:    mv a0, a5
+; RV32I-ZBB-NEXT:  .LBB4_4: # %entry
+; RV32I-ZBB-NEXT:    beqz a4, .LBB4_6
+; RV32I-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-ZBB-NEXT:    ret
+; RV32I-ZBB-NEXT:  .LBB4_6: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a2
+; RV32I-ZBB-NEXT:    mv a1, a3
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_smax_1:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB4_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    max a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB4_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smax_1:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-SFB-ZBB-NEXT:    slt t0, a6, a1
+; RV32I-SFB-ZBB-NEXT:    bne a1, a6, .LBB4_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv t0, a7
+; RV32I-SFB-ZBB-NEXT:  .LBB4_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB4_4
+; RV32I-SFB-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a6
+; RV32I-SFB-ZBB-NEXT:  .LBB4_4: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB4_6
+; RV32I-SFB-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a5
+; RV32I-SFB-ZBB-NEXT:  .LBB4_6: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB4_8
+; RV32I-SFB-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a2
+; RV32I-SFB-ZBB-NEXT:  .LBB4_8: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB4_10
+; RV32I-SFB-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a3
+; RV32I-SFB-ZBB-NEXT:  .LBB4_10: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smax_1:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    max a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB4_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB4_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smax_1:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-SFBIMinMax-ZBB-NEXT:    slt t0, a6, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    bne a1, a6, .LBB4_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB4_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB4_4
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB4_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB4_6
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB4_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB4_8
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB4_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB4_10
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB4_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smax_1:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB4_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    max a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB4_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i64 @llvm.smax.i64(i64 %a, i64 %y)
+  %sel = select i1 %x, i64 %res, i64 %b
+  ret i64 %sel
+}
+
+define i64 @select_example_smin_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_smin_1:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beq a1, a6, .LBB5_2
+; RV32I-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-ZBB-NEXT:    slt a7, a1, a6
+; RV32I-ZBB-NEXT:    beqz a7, .LBB5_3
+; RV32I-ZBB-NEXT:    j .LBB5_4
+; RV32I-ZBB-NEXT:  .LBB5_2:
+; RV32I-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-ZBB-NEXT:    bnez a7, .LBB5_4
+; RV32I-ZBB-NEXT:  .LBB5_3: # %entry
+; RV32I-ZBB-NEXT:    mv a1, a6
+; RV32I-ZBB-NEXT:    mv a0, a5
+; RV32I-ZBB-NEXT:  .LBB5_4: # %entry
+; RV32I-ZBB-NEXT:    beqz a4, .LBB5_6
+; RV32I-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-ZBB-NEXT:    ret
+; RV32I-ZBB-NEXT:  .LBB5_6: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a2
+; RV32I-ZBB-NEXT:    mv a1, a3
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_smin_1:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB5_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    min a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB5_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smin_1:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-SFB-ZBB-NEXT:    slt t0, a1, a6
+; RV32I-SFB-ZBB-NEXT:    bne a1, a6, .LBB5_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv t0, a7
+; RV32I-SFB-ZBB-NEXT:  .LBB5_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB5_4
+; RV32I-SFB-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a6
+; RV32I-SFB-ZBB-NEXT:  .LBB5_4: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB5_6
+; RV32I-SFB-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a5
+; RV32I-SFB-ZBB-NEXT:  .LBB5_6: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB5_8
+; RV32I-SFB-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a2
+; RV32I-SFB-ZBB-NEXT:  .LBB5_8: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB5_10
+; RV32I-SFB-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a3
+; RV32I-SFB-ZBB-NEXT:  .LBB5_10: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smin_1:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    min a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB5_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB5_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smin_1:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:    slt t0, a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:    bne a1, a6, .LBB5_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB5_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB5_4
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB5_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB5_6
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB5_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB5_8
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB5_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB5_10
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB5_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smin_1:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB5_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    min a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB5_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i64 @llvm.smin.i64(i64 %a, i64 %y)
+  %sel = select i1 %x, i64 %res, i64 %b
+  ret i64 %sel
+}
+
+define i64 @select_example_umax_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_umax_1:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beq a1, a6, .LBB6_2
+; RV32I-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-ZBB-NEXT:    sltu a7, a6, a1
+; RV32I-ZBB-NEXT:    beqz a7, .LBB6_3
+; RV32I-ZBB-NEXT:    j .LBB6_4
+; RV32I-ZBB-NEXT:  .LBB6_2:
+; RV32I-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-ZBB-NEXT:    bnez a7, .LBB6_4
+; RV32I-ZBB-NEXT:  .LBB6_3: # %entry
+; RV32I-ZBB-NEXT:    mv a1, a6
+; RV32I-ZBB-NEXT:    mv a0, a5
+; RV32I-ZBB-NEXT:  .LBB6_4: # %entry
+; RV32I-ZBB-NEXT:    beqz a4, .LBB6_6
+; RV32I-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-ZBB-NEXT:    ret
+; RV32I-ZBB-NEXT:  .LBB6_6: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a2
+; RV32I-ZBB-NEXT:    mv a1, a3
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_umax_1:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB6_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    maxu a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB6_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umax_1:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-SFB-ZBB-NEXT:    sltu t0, a6, a1
+; RV32I-SFB-ZBB-NEXT:    bne a1, a6, .LBB6_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv t0, a7
+; RV32I-SFB-ZBB-NEXT:  .LBB6_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB6_4
+; RV32I-SFB-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a6
+; RV32I-SFB-ZBB-NEXT:  .LBB6_4: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB6_6
+; RV32I-SFB-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a5
+; RV32I-SFB-ZBB-NEXT:  .LBB6_6: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB6_8
+; RV32I-SFB-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a2
+; RV32I-SFB-ZBB-NEXT:  .LBB6_8: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB6_10
+; RV32I-SFB-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a3
+; RV32I-SFB-ZBB-NEXT:  .LBB6_10: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umax_1:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    maxu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB6_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB6_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umax_1:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu a7, a5, a0
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu t0, a6, a1
+; RV32I-SFBIMinMax-ZBB-NEXT:    bne a1, a6, .LBB6_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB6_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB6_4
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB6_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB6_6
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB6_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB6_8
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB6_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB6_10
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB6_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umax_1:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB6_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    maxu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB6_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i64 @llvm.umax.i64(i64 %a, i64 %y)
+  %sel = select i1 %x, i64 %res, i64 %b
+  ret i64 %sel
+}
+
+define i64 @select_example_umin_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_umin_1:
+; RV32I-ZBB:       # %bb.0: # %entry
+; RV32I-ZBB-NEXT:    beq a1, a6, .LBB7_2
+; RV32I-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-ZBB-NEXT:    sltu a7, a1, a6
+; RV32I-ZBB-NEXT:    beqz a7, .LBB7_3
+; RV32I-ZBB-NEXT:    j .LBB7_4
+; RV32I-ZBB-NEXT:  .LBB7_2:
+; RV32I-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-ZBB-NEXT:    bnez a7, .LBB7_4
+; RV32I-ZBB-NEXT:  .LBB7_3: # %entry
+; RV32I-ZBB-NEXT:    mv a1, a6
+; RV32I-ZBB-NEXT:    mv a0, a5
+; RV32I-ZBB-NEXT:  .LBB7_4: # %entry
+; RV32I-ZBB-NEXT:    beqz a4, .LBB7_6
+; RV32I-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-ZBB-NEXT:    ret
+; RV32I-ZBB-NEXT:  .LBB7_6: # %entry
+; RV32I-ZBB-NEXT:    mv a0, a2
+; RV32I-ZBB-NEXT:    mv a1, a3
+; RV32I-ZBB-NEXT:    ret
+;
+; RV64I-ZBB-LABEL: select_example_umin_1:
+; RV64I-ZBB:       # %bb.0: # %entry
+; RV64I-ZBB-NEXT:    beqz a2, .LBB7_2
+; RV64I-ZBB-NEXT:  # %bb.1:
+; RV64I-ZBB-NEXT:    minu a1, a0, a3
+; RV64I-ZBB-NEXT:  .LBB7_2: # %entry
+; RV64I-ZBB-NEXT:    mv a0, a1
+; RV64I-ZBB-NEXT:    ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umin_1:
+; RV32I-SFB-ZBB:       # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-SFB-ZBB-NEXT:    sltu t0, a1, a6
+; RV32I-SFB-ZBB-NEXT:    bne a1, a6, .LBB7_2
+; RV32I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv t0, a7
+; RV32I-SFB-ZBB-NEXT:  .LBB7_2: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB7_4
+; RV32I-SFB-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a6
+; RV32I-SFB-ZBB-NEXT:  .LBB7_4: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez t0, .LBB7_6
+; RV32I-SFB-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a5
+; RV32I-SFB-ZBB-NEXT:  .LBB7_6: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB7_8
+; RV32I-SFB-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a0, a2
+; RV32I-SFB-ZBB-NEXT:  .LBB7_8: # %entry
+; RV32I-SFB-ZBB-NEXT:    bnez a4, .LBB7_10
+; RV32I-SFB-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT:    mv a1, a3
+; RV32I-SFB-ZBB-NEXT:  .LBB7_10: # %entry
+; RV32I-SFB-ZBB-NEXT:    ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umin_1:
+; RV64I-SFB-ZBB:       # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT:    minu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT:    bnez a2, .LBB7_2
+; RV64I-SFB-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT:    mv a0, a1
+; RV64I-SFB-ZBB-NEXT:  .LBB7_2: # %entry
+; RV64I-SFB-ZBB-NEXT:    ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umin_1:
+; RV32I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu a7, a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:    sltu t0, a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:    bne a1, a6, .LBB7_2
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB7_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB7_4
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB7_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez t0, .LBB7_6
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB7_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB7_8
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB7_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    bnez a4, .LBB7_10
+; RV32I-SFBIMinMax-ZBB-NEXT:  # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT:  .LBB7_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT:    ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umin_1:
+; RV64I-SFBIMinMax-ZBB:       # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    beqz a2, .LBB7_2
+; RV64I-SFBIMinMax-ZBB-NEXT:  # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    minu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT:  .LBB7_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT:    mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT:    ret
+entry:
+  %res = call i64 @llvm.umin.i64(i64 %a, i64 %y)
+  %sel = select i1 %x, i64 %res, i64 %b
+  ret i64 %sel
+}

More information about the llvm-commits mailing list