[llvm] 6e5d008 - [RISCV] Add coverage for select with minmax arm [nfc] (#157539)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 9 14:56:52 PDT 2025
Author: Philip Reames
Date: 2025-09-09T14:56:48-07:00
New Revision: 6e5d008af6b11fa1ccdebfd8003ed28c766b147e
URL: https://github.com/llvm/llvm-project/commit/6e5d008af6b11fa1ccdebfd8003ed28c766b147e
DIFF: https://github.com/llvm/llvm-project/commit/6e5d008af6b11fa1ccdebfd8003ed28c766b147e.diff
LOG: [RISCV] Add coverage for select with minmax arm [nfc] (#157539)
This is coverage for an upcoming change, but I thought the choice of
configurations to check was probably worth a moment of consideration as
well.
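For context, every test in the new file instantiates the same shape, varied across umin/umax/smin/smax, operand order, and constant arms, and checked over the Zbb/Zicond feature matrix: a min/max intrinsic feeding a select that reuses one of the min/max operands. A minimal sketch of that shape (illustrative only, not part of the commit; the concrete variants are in the diff below):

define i32 @sketch(i1 zeroext %cond, i32 %a, i32 %b) {
entry:
  ; min/max intrinsic whose result becomes one select arm
  %m = call i32 @llvm.umin(i32 %a, i32 %b)
  ; select shares the %b operand with the umin above
  %res = select i1 %cond, i32 %m, i32 %b
  ret i32 %res
}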
Added:
llvm/test/CodeGen/RISCV/select-zbb.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/select-zbb.ll b/llvm/test/CodeGen/RISCV/select-zbb.ll
new file mode 100644
index 0000000000000..0af699aae3288
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-zbb.ll
@@ -0,0 +1,1614 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IM %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZBB %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZBB %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMBOTH %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMBOTH %s
+
+
+define i32 @select_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_1:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: bgeu a1, a2, .LBB0_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB0_4
+; RV32IM-NEXT: .LBB0_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB0_3: # %entry
+; RV32IM-NEXT: mv a1, a2
+; RV32IM-NEXT: bnez a0, .LBB0_2
+; RV32IM-NEXT: .LBB0_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umin_1:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: bgeu a1, a3, .LBB0_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB0_4
+; RV64IM-NEXT: .LBB0_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB0_3: # %entry
+; RV64IM-NEXT: mv a1, a3
+; RV64IM-NEXT: bnez a0, .LBB0_2
+; RV64IM-NEXT: .LBB0_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umin_1:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: beqz a0, .LBB0_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: minu a2, a1, a2
+; RV32IMZBB-NEXT: .LBB0_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a2
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umin_1:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: beqz a0, .LBB0_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: minu a2, a1, a2
+; RV64IMZBB-NEXT: .LBB0_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a2
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umin_1:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltu a3, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT: or a1, a1, a4
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umin_1:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a3, a2
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: sltu a4, a1, a3
+; RV64IMZICOND-NEXT: czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT: or a1, a1, a3
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umin_1:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: minu a1, a1, a2
+; RV32IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT: or a0, a0, a2
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umin_1:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a3, a2
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: minu a1, a1, a3
+; RV64IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT: or a0, a0, a2
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umin(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %c, i32 %b
+ ret i32 %res
+}
+
+define i32 @select_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_2:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: mv a3, a1
+; RV32IM-NEXT: bgeu a1, a2, .LBB1_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB1_4
+; RV32IM-NEXT: .LBB1_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB1_3: # %entry
+; RV32IM-NEXT: mv a3, a2
+; RV32IM-NEXT: bnez a0, .LBB1_2
+; RV32IM-NEXT: .LBB1_4: # %entry
+; RV32IM-NEXT: mv a0, a3
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umin_2:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: bgeu a2, a3, .LBB1_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB1_4
+; RV64IM-NEXT: .LBB1_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB1_3: # %entry
+; RV64IM-NEXT: mv a2, a3
+; RV64IM-NEXT: bnez a0, .LBB1_2
+; RV64IM-NEXT: .LBB1_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umin_2:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB1_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: minu a1, a1, a2
+; RV32IMZBB-NEXT: .LBB1_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umin_2:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB1_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: minu a1, a1, a2
+; RV64IMZBB-NEXT: .LBB1_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umin_2:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltu a3, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a3, a1, a3
+; RV32IMZICOND-NEXT: or a2, a3, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT: or a0, a0, a2
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umin_2:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a2
+; RV64IMZICOND-NEXT: sext.w a3, a1
+; RV64IMZICOND-NEXT: sltu a4, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a4
+; RV64IMZICOND-NEXT: czero.eqz a3, a3, a4
+; RV64IMZICOND-NEXT: or a2, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT: or a0, a0, a2
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umin_2:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: minu a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umin_2:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a2
+; RV64IMBOTH-NEXT: sext.w a3, a1
+; RV64IMBOTH-NEXT: minu a2, a3, a2
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umin(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_umin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umin_3:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: li a3, 32
+; RV32IM-NEXT: mv a2, a1
+; RV32IM-NEXT: bgeu a1, a3, .LBB2_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB2_4
+; RV32IM-NEXT: .LBB2_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB2_3: # %entry
+; RV32IM-NEXT: li a2, 32
+; RV32IM-NEXT: bnez a0, .LBB2_2
+; RV32IM-NEXT: .LBB2_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umin_3:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a3, 32
+; RV64IM-NEXT: bgeu a2, a3, .LBB2_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB2_4
+; RV64IM-NEXT: .LBB2_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB2_3: # %entry
+; RV64IM-NEXT: li a2, 32
+; RV64IM-NEXT: bnez a0, .LBB2_2
+; RV64IM-NEXT: .LBB2_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umin_3:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB2_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: li a0, 32
+; RV32IMZBB-NEXT: minu a1, a1, a0
+; RV32IMZBB-NEXT: .LBB2_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umin_3:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB2_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: li a0, 32
+; RV64IMZBB-NEXT: minu a1, a1, a0
+; RV64IMZBB-NEXT: .LBB2_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umin_3:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltiu a2, a1, 32
+; RV32IMZICOND-NEXT: addi a3, a1, -32
+; RV32IMZICOND-NEXT: czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT: addi a2, a2, 32
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umin_3:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a1
+; RV64IMZICOND-NEXT: sltiu a3, a2, 32
+; RV64IMZICOND-NEXT: addi a2, a2, -32
+; RV64IMZICOND-NEXT: czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT: addi a2, a2, 32
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umin_3:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: li a2, 32
+; RV32IMBOTH-NEXT: minu a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umin_3:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a1
+; RV64IMBOTH-NEXT: li a3, 32
+; RV64IMBOTH-NEXT: minu a2, a2, a3
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umin(i32 %a, i32 32)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_umin_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_umin_4:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: li a2, 128
+; RV32IM-NEXT: bgeu a1, a2, .LBB3_3
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: beqz a0, .LBB3_4
+; RV32IM-NEXT: .LBB3_2:
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB3_3:
+; RV32IM-NEXT: li a1, 128
+; RV32IM-NEXT: bnez a0, .LBB3_2
+; RV32IM-NEXT: .LBB3_4:
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umin_4:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a1, 128
+; RV64IM-NEXT: bgeu a2, a1, .LBB3_3
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: beqz a0, .LBB3_4
+; RV64IM-NEXT: .LBB3_2:
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB3_3:
+; RV64IM-NEXT: li a2, 128
+; RV64IM-NEXT: bnez a0, .LBB3_2
+; RV64IM-NEXT: .LBB3_4:
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umin_4:
+; RV32IMZBB: # %bb.0:
+; RV32IMZBB-NEXT: mv a2, a0
+; RV32IMZBB-NEXT: li a0, 128
+; RV32IMZBB-NEXT: bnez a2, .LBB3_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: minu a0, a1, a0
+; RV32IMZBB-NEXT: .LBB3_2:
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umin_4:
+; RV64IMZBB: # %bb.0:
+; RV64IMZBB-NEXT: mv a2, a0
+; RV64IMZBB-NEXT: li a0, 128
+; RV64IMZBB-NEXT: bnez a2, .LBB3_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: minu a0, a1, a0
+; RV64IMZBB-NEXT: .LBB3_2:
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umin_4:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: sltiu a2, a1, 128
+; RV32IMZICOND-NEXT: addi a1, a1, -128
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 128
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umin_4:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: sltiu a2, a1, 128
+; RV64IMZICOND-NEXT: addi a1, a1, -128
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a2
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 128
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umin_4:
+; RV32IMBOTH: # %bb.0:
+; RV32IMBOTH-NEXT: li a2, 128
+; RV32IMBOTH-NEXT: minu a1, a1, a2
+; RV32IMBOTH-NEXT: addi a1, a1, -128
+; RV32IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT: addi a0, a0, 128
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umin_4:
+; RV64IMBOTH: # %bb.0:
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: li a2, 128
+; RV64IMBOTH-NEXT: minu a1, a1, a2
+; RV64IMBOTH-NEXT: addi a1, a1, -128
+; RV64IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT: addi a0, a0, 128
+; RV64IMBOTH-NEXT: ret
+ %minmax = call i32 @llvm.umin(i32 %x, i32 128)
+ %sel = select i1 %cond, i32 128, i32 %minmax
+ ret i32 %sel
+}
+
+define i32 @select_umax_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_1:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: bgeu a2, a1, .LBB4_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB4_4
+; RV32IM-NEXT: .LBB4_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB4_3: # %entry
+; RV32IM-NEXT: mv a1, a2
+; RV32IM-NEXT: bnez a0, .LBB4_2
+; RV32IM-NEXT: .LBB4_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umax_1:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: bgeu a3, a1, .LBB4_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB4_4
+; RV64IM-NEXT: .LBB4_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB4_3: # %entry
+; RV64IM-NEXT: mv a1, a3
+; RV64IM-NEXT: bnez a0, .LBB4_2
+; RV64IM-NEXT: .LBB4_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umax_1:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: beqz a0, .LBB4_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: maxu a2, a1, a2
+; RV32IMZBB-NEXT: .LBB4_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a2
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umax_1:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: beqz a0, .LBB4_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: maxu a2, a1, a2
+; RV64IMZBB-NEXT: .LBB4_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a2
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umax_1:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltu a3, a2, a1
+; RV32IMZICOND-NEXT: czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT: or a1, a1, a4
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umax_1:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: sext.w a3, a2
+; RV64IMZICOND-NEXT: sltu a4, a3, a1
+; RV64IMZICOND-NEXT: czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT: or a1, a1, a3
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umax_1:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: maxu a1, a1, a2
+; RV32IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT: or a0, a0, a2
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umax_1:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a3, a2
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: maxu a1, a1, a3
+; RV64IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT: or a0, a0, a2
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umax(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %c, i32 %b
+ ret i32 %res
+}
+
+define i32 @select_umax_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_2:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: mv a3, a1
+; RV32IM-NEXT: bgeu a2, a1, .LBB5_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB5_4
+; RV32IM-NEXT: .LBB5_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB5_3: # %entry
+; RV32IM-NEXT: mv a3, a2
+; RV32IM-NEXT: bnez a0, .LBB5_2
+; RV32IM-NEXT: .LBB5_4: # %entry
+; RV32IM-NEXT: mv a0, a3
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umax_2:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a1
+; RV64IM-NEXT: sext.w a2, a2
+; RV64IM-NEXT: bgeu a2, a3, .LBB5_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB5_4
+; RV64IM-NEXT: .LBB5_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB5_3: # %entry
+; RV64IM-NEXT: mv a3, a2
+; RV64IM-NEXT: bnez a0, .LBB5_2
+; RV64IM-NEXT: .LBB5_4: # %entry
+; RV64IM-NEXT: mv a0, a3
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umax_2:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB5_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: maxu a1, a1, a2
+; RV32IMZBB-NEXT: .LBB5_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umax_2:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB5_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: maxu a1, a1, a2
+; RV64IMZBB-NEXT: .LBB5_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umax_2:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltu a3, a2, a1
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a3, a1, a3
+; RV32IMZICOND-NEXT: or a2, a3, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT: or a0, a0, a2
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umax_2:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a3, a1
+; RV64IMZICOND-NEXT: sext.w a2, a2
+; RV64IMZICOND-NEXT: sltu a4, a2, a3
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a4
+; RV64IMZICOND-NEXT: czero.eqz a3, a3, a4
+; RV64IMZICOND-NEXT: or a2, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT: or a0, a0, a2
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umax_2:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: maxu a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umax_2:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a2
+; RV64IMBOTH-NEXT: sext.w a3, a1
+; RV64IMBOTH-NEXT: maxu a2, a3, a2
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umax(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_umax_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umax_3:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: li a3, 32
+; RV32IM-NEXT: mv a2, a1
+; RV32IM-NEXT: bgeu a3, a1, .LBB6_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB6_4
+; RV32IM-NEXT: .LBB6_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB6_3: # %entry
+; RV32IM-NEXT: li a2, 32
+; RV32IM-NEXT: bnez a0, .LBB6_2
+; RV32IM-NEXT: .LBB6_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umax_3:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a3, 32
+; RV64IM-NEXT: bgeu a3, a2, .LBB6_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB6_4
+; RV64IM-NEXT: .LBB6_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB6_3: # %entry
+; RV64IM-NEXT: li a2, 32
+; RV64IM-NEXT: bnez a0, .LBB6_2
+; RV64IM-NEXT: .LBB6_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umax_3:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB6_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: li a0, 32
+; RV32IMZBB-NEXT: maxu a1, a1, a0
+; RV32IMZBB-NEXT: .LBB6_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umax_3:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB6_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: li a0, 32
+; RV64IMZBB-NEXT: maxu a1, a1, a0
+; RV64IMZBB-NEXT: .LBB6_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umax_3:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: sltiu a2, a1, 33
+; RV32IMZICOND-NEXT: addi a3, a1, -32
+; RV32IMZICOND-NEXT: czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT: addi a2, a2, 32
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umax_3:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a1
+; RV64IMZICOND-NEXT: sltiu a3, a2, 33
+; RV64IMZICOND-NEXT: addi a2, a2, -32
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT: addi a2, a2, 32
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umax_3:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: li a2, 32
+; RV32IMBOTH-NEXT: maxu a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umax_3:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a1
+; RV64IMBOTH-NEXT: li a3, 32
+; RV64IMBOTH-NEXT: maxu a2, a2, a3
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.umax(i32 %a, i32 32)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_umax_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_umax_4:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: li a2, 128
+; RV32IM-NEXT: bgeu a2, a1, .LBB7_3
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: beqz a0, .LBB7_4
+; RV32IM-NEXT: .LBB7_2:
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB7_3:
+; RV32IM-NEXT: li a1, 128
+; RV32IM-NEXT: bnez a0, .LBB7_2
+; RV32IM-NEXT: .LBB7_4:
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_umax_4:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a1, 128
+; RV64IM-NEXT: bgeu a1, a2, .LBB7_3
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: beqz a0, .LBB7_4
+; RV64IM-NEXT: .LBB7_2:
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB7_3:
+; RV64IM-NEXT: li a2, 128
+; RV64IM-NEXT: bnez a0, .LBB7_2
+; RV64IM-NEXT: .LBB7_4:
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_umax_4:
+; RV32IMZBB: # %bb.0:
+; RV32IMZBB-NEXT: mv a2, a0
+; RV32IMZBB-NEXT: li a0, 128
+; RV32IMZBB-NEXT: bnez a2, .LBB7_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: maxu a0, a1, a0
+; RV32IMZBB-NEXT: .LBB7_2:
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_umax_4:
+; RV64IMZBB: # %bb.0:
+; RV64IMZBB-NEXT: mv a2, a0
+; RV64IMZBB-NEXT: li a0, 128
+; RV64IMZBB-NEXT: bnez a2, .LBB7_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: maxu a0, a1, a0
+; RV64IMZBB-NEXT: .LBB7_2:
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_umax_4:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: sltiu a2, a1, 129
+; RV32IMZICOND-NEXT: addi a1, a1, -128
+; RV32IMZICOND-NEXT: czero.nez a1, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 128
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_umax_4:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: sltiu a2, a1, 129
+; RV64IMZICOND-NEXT: addi a1, a1, -128
+; RV64IMZICOND-NEXT: czero.nez a1, a1, a2
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 128
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_umax_4:
+; RV32IMBOTH: # %bb.0:
+; RV32IMBOTH-NEXT: li a2, 128
+; RV32IMBOTH-NEXT: maxu a1, a1, a2
+; RV32IMBOTH-NEXT: addi a1, a1, -128
+; RV32IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT: addi a0, a0, 128
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_umax_4:
+; RV64IMBOTH: # %bb.0:
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: li a2, 128
+; RV64IMBOTH-NEXT: maxu a1, a1, a2
+; RV64IMBOTH-NEXT: addi a1, a1, -128
+; RV64IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT: addi a0, a0, 128
+; RV64IMBOTH-NEXT: ret
+ %minmax = call i32 @llvm.umax(i32 %x, i32 128)
+ %sel = select i1 %cond, i32 128, i32 %minmax
+ ret i32 %sel
+}
+
+define i32 @select_smin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smin_1:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: bge a1, a2, .LBB8_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB8_4
+; RV32IM-NEXT: .LBB8_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB8_3: # %entry
+; RV32IM-NEXT: mv a1, a2
+; RV32IM-NEXT: bnez a0, .LBB8_2
+; RV32IM-NEXT: .LBB8_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smin_1:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: bge a1, a3, .LBB8_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB8_4
+; RV64IM-NEXT: .LBB8_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB8_3: # %entry
+; RV64IM-NEXT: mv a1, a3
+; RV64IM-NEXT: bnez a0, .LBB8_2
+; RV64IM-NEXT: .LBB8_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smin_1:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: beqz a0, .LBB8_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: min a2, a1, a2
+; RV32IMZBB-NEXT: .LBB8_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a2
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smin_1:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: beqz a0, .LBB8_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: min a2, a1, a2
+; RV64IMZBB-NEXT: .LBB8_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a2
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smin_1:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slt a3, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT: or a1, a1, a4
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smin_1:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a3, a2
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: slt a4, a1, a3
+; RV64IMZICOND-NEXT: czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT: or a1, a1, a3
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smin_1:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: min a1, a1, a2
+; RV32IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT: or a0, a0, a2
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smin_1:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a3, a2
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: min a1, a1, a3
+; RV64IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT: or a0, a0, a2
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smin(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %c, i32 %b
+ ret i32 %res
+}
+
+define i32 @select_smin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smin_2:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: mv a3, a1
+; RV32IM-NEXT: bge a1, a2, .LBB9_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB9_4
+; RV32IM-NEXT: .LBB9_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB9_3: # %entry
+; RV32IM-NEXT: mv a3, a2
+; RV32IM-NEXT: bnez a0, .LBB9_2
+; RV32IM-NEXT: .LBB9_4: # %entry
+; RV32IM-NEXT: mv a0, a3
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smin_2:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: bge a2, a3, .LBB9_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB9_4
+; RV64IM-NEXT: .LBB9_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB9_3: # %entry
+; RV64IM-NEXT: mv a2, a3
+; RV64IM-NEXT: bnez a0, .LBB9_2
+; RV64IM-NEXT: .LBB9_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smin_2:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB9_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: min a1, a1, a2
+; RV32IMZBB-NEXT: .LBB9_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smin_2:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB9_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: min a1, a1, a2
+; RV64IMZBB-NEXT: .LBB9_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smin_2:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slt a3, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a3, a1, a3
+; RV32IMZICOND-NEXT: or a2, a3, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT: or a0, a0, a2
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smin_2:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a2
+; RV64IMZICOND-NEXT: sext.w a3, a1
+; RV64IMZICOND-NEXT: slt a4, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a4
+; RV64IMZICOND-NEXT: czero.eqz a3, a3, a4
+; RV64IMZICOND-NEXT: or a2, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT: or a0, a0, a2
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smin_2:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: min a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smin_2:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a2
+; RV64IMBOTH-NEXT: sext.w a3, a1
+; RV64IMBOTH-NEXT: min a2, a3, a2
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smin(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_smin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_smin_3:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: li a3, 32
+; RV32IM-NEXT: mv a2, a1
+; RV32IM-NEXT: bge a1, a3, .LBB10_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB10_4
+; RV32IM-NEXT: .LBB10_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB10_3: # %entry
+; RV32IM-NEXT: li a2, 32
+; RV32IM-NEXT: bnez a0, .LBB10_2
+; RV32IM-NEXT: .LBB10_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smin_3:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a3, 32
+; RV64IM-NEXT: bge a2, a3, .LBB10_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB10_4
+; RV64IM-NEXT: .LBB10_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB10_3: # %entry
+; RV64IM-NEXT: li a2, 32
+; RV64IM-NEXT: bnez a0, .LBB10_2
+; RV64IM-NEXT: .LBB10_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smin_3:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB10_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: li a0, 32
+; RV32IMZBB-NEXT: min a1, a1, a0
+; RV32IMZBB-NEXT: .LBB10_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smin_3:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB10_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: li a0, 32
+; RV64IMZBB-NEXT: min a1, a1, a0
+; RV64IMZBB-NEXT: .LBB10_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smin_3:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slti a2, a1, 32
+; RV32IMZICOND-NEXT: addi a3, a1, -32
+; RV32IMZICOND-NEXT: czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT: addi a2, a2, 32
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smin_3:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a1
+; RV64IMZICOND-NEXT: slti a3, a2, 32
+; RV64IMZICOND-NEXT: addi a2, a2, -32
+; RV64IMZICOND-NEXT: czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT: addi a2, a2, 32
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smin_3:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: li a2, 32
+; RV32IMBOTH-NEXT: min a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smin_3:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a1
+; RV64IMBOTH-NEXT: li a3, 32
+; RV64IMBOTH-NEXT: min a2, a2, a3
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smin(i32 %a, i32 32)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_smin_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_smin_4:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: li a2, 128
+; RV32IM-NEXT: bge a1, a2, .LBB11_3
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: beqz a0, .LBB11_4
+; RV32IM-NEXT: .LBB11_2:
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB11_3:
+; RV32IM-NEXT: li a1, 128
+; RV32IM-NEXT: bnez a0, .LBB11_2
+; RV32IM-NEXT: .LBB11_4:
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smin_4:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a1, 128
+; RV64IM-NEXT: bge a2, a1, .LBB11_3
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: beqz a0, .LBB11_4
+; RV64IM-NEXT: .LBB11_2:
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB11_3:
+; RV64IM-NEXT: li a2, 128
+; RV64IM-NEXT: bnez a0, .LBB11_2
+; RV64IM-NEXT: .LBB11_4:
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smin_4:
+; RV32IMZBB: # %bb.0:
+; RV32IMZBB-NEXT: mv a2, a0
+; RV32IMZBB-NEXT: li a0, 128
+; RV32IMZBB-NEXT: bnez a2, .LBB11_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: min a0, a1, a0
+; RV32IMZBB-NEXT: .LBB11_2:
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smin_4:
+; RV64IMZBB: # %bb.0:
+; RV64IMZBB-NEXT: mv a2, a0
+; RV64IMZBB-NEXT: li a0, 128
+; RV64IMZBB-NEXT: bnez a2, .LBB11_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: min a0, a1, a0
+; RV64IMZBB-NEXT: .LBB11_2:
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smin_4:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: slti a2, a1, 128
+; RV32IMZICOND-NEXT: addi a1, a1, -128
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 128
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smin_4:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: slti a2, a1, 128
+; RV64IMZICOND-NEXT: addi a1, a1, -128
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a2
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 128
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smin_4:
+; RV32IMBOTH: # %bb.0:
+; RV32IMBOTH-NEXT: li a2, 128
+; RV32IMBOTH-NEXT: min a1, a1, a2
+; RV32IMBOTH-NEXT: addi a1, a1, -128
+; RV32IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT: addi a0, a0, 128
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smin_4:
+; RV64IMBOTH: # %bb.0:
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: li a2, 128
+; RV64IMBOTH-NEXT: min a1, a1, a2
+; RV64IMBOTH-NEXT: addi a1, a1, -128
+; RV64IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT: addi a0, a0, 128
+; RV64IMBOTH-NEXT: ret
+ %minmax = call i32 @llvm.smin(i32 %x, i32 128)
+ %sel = select i1 %cond, i32 128, i32 %minmax
+ ret i32 %sel
+}
+
+define i32 @select_smax_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smax_1:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: bge a2, a1, .LBB12_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB12_4
+; RV32IM-NEXT: .LBB12_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB12_3: # %entry
+; RV32IM-NEXT: mv a1, a2
+; RV32IM-NEXT: bnez a0, .LBB12_2
+; RV32IM-NEXT: .LBB12_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smax_1:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: sext.w a3, a2
+; RV64IM-NEXT: bge a3, a1, .LBB12_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB12_4
+; RV64IM-NEXT: .LBB12_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB12_3: # %entry
+; RV64IM-NEXT: mv a1, a3
+; RV64IM-NEXT: bnez a0, .LBB12_2
+; RV64IM-NEXT: .LBB12_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smax_1:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: beqz a0, .LBB12_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: max a2, a1, a2
+; RV32IMZBB-NEXT: .LBB12_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a2
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smax_1:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: beqz a0, .LBB12_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: max a2, a1, a2
+; RV64IMZBB-NEXT: .LBB12_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a2
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smax_1:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slt a3, a2, a1
+; RV32IMZICOND-NEXT: czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT: or a1, a1, a4
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smax_1:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: sext.w a3, a2
+; RV64IMZICOND-NEXT: slt a4, a3, a1
+; RV64IMZICOND-NEXT: czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT: or a1, a1, a3
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smax_1:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: max a1, a1, a2
+; RV32IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT: or a0, a0, a2
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smax_1:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a3, a2
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: max a1, a1, a3
+; RV64IMBOTH-NEXT: czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT: czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT: or a0, a0, a2
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smax(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %c, i32 %b
+ ret i32 %res
+}
+
+define i32 @select_smax_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_smax_2:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: mv a3, a1
+; RV32IM-NEXT: bge a2, a1, .LBB13_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB13_4
+; RV32IM-NEXT: .LBB13_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB13_3: # %entry
+; RV32IM-NEXT: mv a3, a2
+; RV32IM-NEXT: bnez a0, .LBB13_2
+; RV32IM-NEXT: .LBB13_4: # %entry
+; RV32IM-NEXT: mv a0, a3
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smax_2:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a3, a1
+; RV64IM-NEXT: sext.w a2, a2
+; RV64IM-NEXT: bge a2, a3, .LBB13_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB13_4
+; RV64IM-NEXT: .LBB13_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB13_3: # %entry
+; RV64IM-NEXT: mv a3, a2
+; RV64IM-NEXT: bnez a0, .LBB13_2
+; RV64IM-NEXT: .LBB13_4: # %entry
+; RV64IM-NEXT: mv a0, a3
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smax_2:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB13_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: max a1, a1, a2
+; RV32IMZBB-NEXT: .LBB13_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smax_2:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB13_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a2, a2
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: max a1, a1, a2
+; RV64IMZBB-NEXT: .LBB13_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smax_2:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slt a3, a2, a1
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV32IMZICOND-NEXT: czero.eqz a3, a1, a3
+; RV32IMZICOND-NEXT: or a2, a3, a2
+; RV32IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT: or a0, a0, a2
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smax_2:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a3, a1
+; RV64IMZICOND-NEXT: sext.w a2, a2
+; RV64IMZICOND-NEXT: slt a4, a2, a3
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a4
+; RV64IMZICOND-NEXT: czero.eqz a3, a3, a4
+; RV64IMZICOND-NEXT: or a2, a3, a2
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT: czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT: or a0, a0, a2
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smax_2:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: max a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smax_2:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a2
+; RV64IMBOTH-NEXT: sext.w a3, a1
+; RV64IMBOTH-NEXT: max a2, a3, a2
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smax(i32 %a, i32 %b)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_smax_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_smax_3:
+; RV32IM: # %bb.0: # %entry
+; RV32IM-NEXT: li a3, 32
+; RV32IM-NEXT: mv a2, a1
+; RV32IM-NEXT: bge a3, a1, .LBB14_3
+; RV32IM-NEXT: # %bb.1: # %entry
+; RV32IM-NEXT: beqz a0, .LBB14_4
+; RV32IM-NEXT: .LBB14_2: # %entry
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB14_3: # %entry
+; RV32IM-NEXT: li a2, 32
+; RV32IM-NEXT: bnez a0, .LBB14_2
+; RV32IM-NEXT: .LBB14_4: # %entry
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smax_3:
+; RV64IM: # %bb.0: # %entry
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a3, 32
+; RV64IM-NEXT: bge a3, a2, .LBB14_3
+; RV64IM-NEXT: # %bb.1: # %entry
+; RV64IM-NEXT: beqz a0, .LBB14_4
+; RV64IM-NEXT: .LBB14_2: # %entry
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB14_3: # %entry
+; RV64IM-NEXT: li a2, 32
+; RV64IM-NEXT: bnez a0, .LBB14_2
+; RV64IM-NEXT: .LBB14_4: # %entry
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smax_3:
+; RV32IMZBB: # %bb.0: # %entry
+; RV32IMZBB-NEXT: bnez a0, .LBB14_2
+; RV32IMZBB-NEXT: # %bb.1: # %entry
+; RV32IMZBB-NEXT: li a0, 32
+; RV32IMZBB-NEXT: max a1, a1, a0
+; RV32IMZBB-NEXT: .LBB14_2: # %entry
+; RV32IMZBB-NEXT: mv a0, a1
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smax_3:
+; RV64IMZBB: # %bb.0: # %entry
+; RV64IMZBB-NEXT: bnez a0, .LBB14_2
+; RV64IMZBB-NEXT: # %bb.1: # %entry
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: li a0, 32
+; RV64IMZBB-NEXT: max a1, a1, a0
+; RV64IMZBB-NEXT: .LBB14_2: # %entry
+; RV64IMZBB-NEXT: mv a0, a1
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smax_3:
+; RV32IMZICOND: # %bb.0: # %entry
+; RV32IMZICOND-NEXT: slti a2, a1, 33
+; RV32IMZICOND-NEXT: addi a3, a1, -32
+; RV32IMZICOND-NEXT: czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT: addi a2, a2, 32
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smax_3:
+; RV64IMZICOND: # %bb.0: # %entry
+; RV64IMZICOND-NEXT: sext.w a2, a1
+; RV64IMZICOND-NEXT: slti a3, a2, 33
+; RV64IMZICOND-NEXT: addi a2, a2, -32
+; RV64IMZICOND-NEXT: czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT: addi a2, a2, 32
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smax_3:
+; RV32IMBOTH: # %bb.0: # %entry
+; RV32IMBOTH-NEXT: li a2, 32
+; RV32IMBOTH-NEXT: max a2, a1, a2
+; RV32IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT: or a0, a1, a0
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smax_3:
+; RV64IMBOTH: # %bb.0: # %entry
+; RV64IMBOTH-NEXT: sext.w a2, a1
+; RV64IMBOTH-NEXT: li a3, 32
+; RV64IMBOTH-NEXT: max a2, a2, a3
+; RV64IMBOTH-NEXT: czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT: czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT: or a0, a1, a0
+; RV64IMBOTH-NEXT: ret
+entry:
+ %c = call i32 @llvm.smax(i32 %a, i32 32)
+ %res = select i1 %cond, i32 %a, i32 %c
+ ret i32 %res
+}
+
+define i32 @select_smax_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_smax_4:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: li a2, 128
+; RV32IM-NEXT: bge a2, a1, .LBB15_3
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: beqz a0, .LBB15_4
+; RV32IM-NEXT: .LBB15_2:
+; RV32IM-NEXT: mv a0, a2
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB15_3:
+; RV32IM-NEXT: li a1, 128
+; RV32IM-NEXT: bnez a0, .LBB15_2
+; RV32IM-NEXT: .LBB15_4:
+; RV32IM-NEXT: mv a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_smax_4:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a2, a1
+; RV64IM-NEXT: li a1, 128
+; RV64IM-NEXT: bge a1, a2, .LBB15_3
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: beqz a0, .LBB15_4
+; RV64IM-NEXT: .LBB15_2:
+; RV64IM-NEXT: mv a0, a1
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB15_3:
+; RV64IM-NEXT: li a2, 128
+; RV64IM-NEXT: bnez a0, .LBB15_2
+; RV64IM-NEXT: .LBB15_4:
+; RV64IM-NEXT: mv a0, a2
+; RV64IM-NEXT: ret
+;
+; RV32IMZBB-LABEL: select_smax_4:
+; RV32IMZBB: # %bb.0:
+; RV32IMZBB-NEXT: mv a2, a0
+; RV32IMZBB-NEXT: li a0, 128
+; RV32IMZBB-NEXT: bnez a2, .LBB15_2
+; RV32IMZBB-NEXT: # %bb.1:
+; RV32IMZBB-NEXT: max a0, a1, a0
+; RV32IMZBB-NEXT: .LBB15_2:
+; RV32IMZBB-NEXT: ret
+;
+; RV64IMZBB-LABEL: select_smax_4:
+; RV64IMZBB: # %bb.0:
+; RV64IMZBB-NEXT: mv a2, a0
+; RV64IMZBB-NEXT: li a0, 128
+; RV64IMZBB-NEXT: bnez a2, .LBB15_2
+; RV64IMZBB-NEXT: # %bb.1:
+; RV64IMZBB-NEXT: sext.w a1, a1
+; RV64IMZBB-NEXT: max a0, a1, a0
+; RV64IMZBB-NEXT: .LBB15_2:
+; RV64IMZBB-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_smax_4:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: slti a2, a1, 129
+; RV32IMZICOND-NEXT: addi a1, a1, -128
+; RV32IMZICOND-NEXT: czero.nez a1, a1, a2
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 128
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_smax_4:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: sext.w a1, a1
+; RV64IMZICOND-NEXT: slti a2, a1, 129
+; RV64IMZICOND-NEXT: addi a1, a1, -128
+; RV64IMZICOND-NEXT: czero.nez a1, a1, a2
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 128
+; RV64IMZICOND-NEXT: ret
+;
+; RV32IMBOTH-LABEL: select_smax_4:
+; RV32IMBOTH: # %bb.0:
+; RV32IMBOTH-NEXT: li a2, 128
+; RV32IMBOTH-NEXT: max a1, a1, a2
+; RV32IMBOTH-NEXT: addi a1, a1, -128
+; RV32IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT: addi a0, a0, 128
+; RV32IMBOTH-NEXT: ret
+;
+; RV64IMBOTH-LABEL: select_smax_4:
+; RV64IMBOTH: # %bb.0:
+; RV64IMBOTH-NEXT: sext.w a1, a1
+; RV64IMBOTH-NEXT: li a2, 128
+; RV64IMBOTH-NEXT: max a1, a1, a2
+; RV64IMBOTH-NEXT: addi a1, a1, -128
+; RV64IMBOTH-NEXT: czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT: addi a0, a0, 128
+; RV64IMBOTH-NEXT: ret
+ %minmax = call i32 @llvm.smax(i32 %x, i32 128)
+ %sel = select i1 %cond, i32 128, i32 %minmax
+ ret i32 %sel
+}