[llvm] [RISCV] Add coverage for select with minmax arm [nfc] (PR #157539)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 9 06:48:38 PDT 2025


================
@@ -0,0 +1,1622 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IM %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZBB %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZBB %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMBOTH %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMBOTH %s
+
+
+define i32 @select_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bgeu a1, a2, .LBB0_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB0_4
+; RV32IM-NEXT:  .LBB0_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB0_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB0_2
+; RV32IM-NEXT:  .LBB0_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    bgeu a1, a3, .LBB0_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB0_4
+; RV64IM-NEXT:  .LBB0_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB0_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB0_2
+; RV64IM-NEXT:  .LBB0_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    minu a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB0_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    minu a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB0_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltu a3, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltu a4, a1, a3
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    minu a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    minu a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB1_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB1_4
+; RV32IM-NEXT:  .LBB1_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB1_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB1_2
+; RV32IM-NEXT:  .LBB1_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB1_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB1_4
+; RV64IM-NEXT:  .LBB1_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB1_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB1_2
+; RV64IM-NEXT:  .LBB1_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB1_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB1_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umin_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB2_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB2_4
+; RV32IM-NEXT:  .LBB2_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB2_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB2_2
+; RV32IM-NEXT:  .LBB2_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB2_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB2_4
+; RV64IM-NEXT:  .LBB2_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB2_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB2_2
+; RV64IM-NEXT:  .LBB2_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB2_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB2_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
----------------
preames wrote:

There isn't one. In the tests I copied from (select.ll), _2 is a commuted _1, so I switched this one to match that.

https://github.com/llvm/llvm-project/pull/157539
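
For reference, a minimal sketch of what the commuted pair looks like (illustrative only; the function names and exact operands below are assumptions, not the final test bodies):

; _1 form: the umin result sits in the true arm of the select
define i32 @sketch_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
  %c = call i32 @llvm.umin(i32 %a, i32 %b)
  %res = select i1 %cond, i32 %c, i32 %b
  ret i32 %res
}

; commuted _2 form: same computation, select arms swapped
define i32 @sketch_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
  %c = call i32 @llvm.umin(i32 %a, i32 %b)
  %res = select i1 %cond, i32 %b, i32 %c
  ret i32 %res
}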

