[llvm] 6d254a2 - [CSKY][test][NFC] Add tests of IR pattern icmp-select

Ben Shi via llvm-commits <llvm-commits@lists.llvm.org>
Fri Jun 30 07:55:35 PDT 2023


Author: Ben Shi
Date: 2023-06-30T22:55:24+08:00
New Revision: 6d254a25cbe6c3ff5fbcb543e244e3fd75c71c6e

URL: https://github.com/llvm/llvm-project/commit/6d254a25cbe6c3ff5fbcb543e244e3fd75c71c6e
DIFF: https://github.com/llvm/llvm-project/commit/6d254a25cbe6c3ff5fbcb543e244e3fd75c71c6e.diff

LOG: [CSKY][test][NFC] Add tests of IR pattern icmp-select

These tests cover icmp-select patterns that are expected to be optimized
with INCT32/INCF32/DECT32/DECF32 in the future.
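
For reference, one of the added patterns (taken from inct-incf.ll in the diff
below) looks like this:

    define i32 @select_by_icmp_ugt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
      %t4 = icmp ugt i32 %t0, %t1           ; condition
      %t5 = add i32 %t2, 10                 ; incremented candidate value
      %t6 = select i1 %t4, i32 %t5, i32 %t3 ; pick incremented or original value
      ret i32 %t6
    }

Today this lowers to a separate addi16 + cmphs16 + movf32 sequence (see the
CHECK lines below); the intent, per the note above, is that such
add/sub-by-immediate selects could eventually be folded into
INCT32/INCF32/DECT32/DECF32, assuming those are the CSKY conditional
increment/decrement forms.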

Reviewed By: zixuan-wu

Differential Revision: https://reviews.llvm.org/D153434

Added: 
    llvm/test/CodeGen/CSKY/dect-decf.ll
    llvm/test/CodeGen/CSKY/inct-incf.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/CSKY/dect-decf.ll b/llvm/test/CodeGen/CSKY/dect-decf.ll
new file mode 100644
index 0000000000000..fb3f1a52776f8
--- /dev/null
+++ b/llvm/test/CodeGen/CSKY/dect-decf.ll
@@ -0,0 +1,358 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -verify-machineinstrs -csky-no-aliases -mattr=+2e3 < %s -mtriple=csky | FileCheck %s
+
+define i32 @select_by_icmp_ugt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ugt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmphs16 a1, a0
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ugt i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sgt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmplt16 a1, a0
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sgt i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_uge(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_uge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmphs16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp uge i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sge(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmplt16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sge i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ult(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ult:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmphs16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ult i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_slt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_slt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmplt16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp slt i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ule(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ule:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmphs16 a1, a0
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ule i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sle(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmplt16 a1, a0
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sle i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ne(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ne:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmpne16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ne i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_eq(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_eq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a2, 10
+; CHECK-NEXT:    cmpne16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp eq i32 %t0, %t1
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ugt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ugt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 128
+; CHECK-NEXT:    cmphs16 a3, a0
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ugt i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sgt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sgt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 128
+; CHECK-NEXT:    cmplt16 a3, a0
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sgt i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_uge_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_uge_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 127
+; CHECK-NEXT:    cmphs16 a3, a0
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp uge i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sge_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sge_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 127
+; CHECK-NEXT:    cmplt16 a3, a0
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sge i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ult_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ult_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmphsi32 a0, 128
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ult i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_slt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_slt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmplti32 a0, 128
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp slt i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ule_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ule_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmphsi32 a0, 129
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ule i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sle_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sle_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmplti32 a0, 129
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sle i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ne_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ne_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmpnei32 a0, 128
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ne i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_eq_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_eq_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 a1, 10
+; CHECK-NEXT:    cmpnei32 a0, 128
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp eq i32 %t0, 128
+  %t5 = sub i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_call_t(i32 %t0, i32 %t1, i32 %t2) {
+; CHECK-LABEL: select_by_call_t:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 sp, sp, 12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CHECK-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
+; CHECK-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset l1, -4
+; CHECK-NEXT:    .cfi_offset l0, -8
+; CHECK-NEXT:    .cfi_offset lr, -12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    mov16 l0, a2
+; CHECK-NEXT:    mov16 l1, a1
+; CHECK-NEXT:    jsri32 [.LCPI20_0]
+; CHECK-NEXT:    subi32 a1, l1, 10
+; CHECK-NEXT:    btsti16 a0, 0
+; CHECK-NEXT:    movt32 l0, a1
+; CHECK-NEXT:    mov16 a0, l0
+; CHECK-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CHECK-NEXT:    addi16 sp, sp, 12
+; CHECK-NEXT:    rts16
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    .p2align 2, 0x0
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long check_val
+  %t3 = tail call i1 @check_val(i32 %t0)
+  %t4 = sub i32 %t1, 10
+  %t5 = select i1 %t3, i32 %t4, i32 %t2
+  ret i32 %t5
+}
+
+define i32 @select_by_call_f(i32 %t0, i32 %t1, i32 %t2) {
+; CHECK-LABEL: select_by_call_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 sp, sp, 12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CHECK-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
+; CHECK-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset l1, -4
+; CHECK-NEXT:    .cfi_offset l0, -8
+; CHECK-NEXT:    .cfi_offset lr, -12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    mov16 l0, a2
+; CHECK-NEXT:    mov16 l1, a1
+; CHECK-NEXT:    jsri32 [.LCPI21_0]
+; CHECK-NEXT:    subi32 a1, l1, 10
+; CHECK-NEXT:    btsti16 a0, 0
+; CHECK-NEXT:    movt32 a1, l0
+; CHECK-NEXT:    mov16 a0, a1
+; CHECK-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CHECK-NEXT:    addi16 sp, sp, 12
+; CHECK-NEXT:    rts16
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    .p2align 2, 0x0
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long check_val
+  %t3 = tail call i1 @check_val(i32 %t0)
+  %t4 = sub i32 %t1, 10
+  %t5 = select i1 %t3, i32 %t2, i32 %t4
+  ret i32 %t5
+}
+
+declare i1 @check_val(i32)

diff --git a/llvm/test/CodeGen/CSKY/inct-incf.ll b/llvm/test/CodeGen/CSKY/inct-incf.ll
new file mode 100644
index 0000000000000..ea1a76a709650
--- /dev/null
+++ b/llvm/test/CodeGen/CSKY/inct-incf.ll
@@ -0,0 +1,358 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -verify-machineinstrs -csky-no-aliases -mattr=+2e3 < %s -mtriple=csky | FileCheck %s
+
+define i32 @select_by_icmp_ugt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ugt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmphs16 a1, a0
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ugt i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sgt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmplt16 a1, a0
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sgt i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_uge(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_uge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmphs16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp uge i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sge(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmplt16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sge i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ult(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ult:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmphs16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ult i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_slt(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_slt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmplt16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp slt i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ule(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ule:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmphs16 a1, a0
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ule i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sle(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmplt16 a1, a0
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp sle i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ne(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ne:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmpne16 a0, a1
+; CHECK-NEXT:    movt32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp ne i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_eq(i32 %t0, i32 %t1, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_eq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a2, 10
+; CHECK-NEXT:    cmpne16 a0, a1
+; CHECK-NEXT:    movf32 a3, a2
+; CHECK-NEXT:    mov16 a0, a3
+; CHECK-NEXT:    rts16
+  %t4 = icmp eq i32 %t0, %t1
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ugt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ugt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 128
+; CHECK-NEXT:    cmphs16 a3, a0
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ugt i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sgt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sgt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 128
+; CHECK-NEXT:    cmplt16 a3, a0
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sgt i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_uge_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_uge_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 127
+; CHECK-NEXT:    cmphs16 a3, a0
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp uge i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sge_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sge_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    movi16 a3, 127
+; CHECK-NEXT:    cmplt16 a3, a0
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sge i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ult_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ult_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmphsi32 a0, 128
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ult i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_slt_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_slt_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmplti32 a0, 128
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp slt i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ule_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ule_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmphsi32 a0, 129
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ule i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_sle_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_sle_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmplti32 a0, 129
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp sle i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_ne_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_ne_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmpnei32 a0, 128
+; CHECK-NEXT:    movt32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp ne i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_icmp_eq_imm(i32 %t0, i32 %t2, i32 %t3) {
+; CHECK-LABEL: select_by_icmp_eq_imm:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi16 a1, 10
+; CHECK-NEXT:    cmpnei32 a0, 128
+; CHECK-NEXT:    movf32 a2, a1
+; CHECK-NEXT:    mov16 a0, a2
+; CHECK-NEXT:    rts16
+  %t4 = icmp eq i32 %t0, 128
+  %t5 = add i32 %t2, 10
+  %t6 = select i1 %t4, i32 %t5, i32 %t3
+  ret i32 %t6
+}
+
+define i32 @select_by_call_t(i32 %t0, i32 %t1, i32 %t2) {
+; CHECK-LABEL: select_by_call_t:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 sp, sp, 12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CHECK-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
+; CHECK-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset l1, -4
+; CHECK-NEXT:    .cfi_offset l0, -8
+; CHECK-NEXT:    .cfi_offset lr, -12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    mov16 l0, a2
+; CHECK-NEXT:    mov16 l1, a1
+; CHECK-NEXT:    jsri32 [.LCPI20_0]
+; CHECK-NEXT:    addi32 a1, l1, 10
+; CHECK-NEXT:    btsti16 a0, 0
+; CHECK-NEXT:    movt32 l0, a1
+; CHECK-NEXT:    mov16 a0, l0
+; CHECK-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CHECK-NEXT:    addi16 sp, sp, 12
+; CHECK-NEXT:    rts16
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    .p2align 2, 0x0
+; CHECK-NEXT:  .LCPI20_0:
+; CHECK-NEXT:    .long check_val
+  %t3 = tail call i1 @check_val(i32 %t0)
+  %t4 = add i32 %t1, 10
+  %t5 = select i1 %t3, i32 %t4, i32 %t2
+  ret i32 %t5
+}
+
+define i32 @select_by_call_f(i32 %t0, i32 %t1, i32 %t2) {
+; CHECK-LABEL: select_by_call_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subi16 sp, sp, 12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CHECK-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
+; CHECK-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset l1, -4
+; CHECK-NEXT:    .cfi_offset l0, -8
+; CHECK-NEXT:    .cfi_offset lr, -12
+; CHECK-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-NEXT:    mov16 l0, a2
+; CHECK-NEXT:    mov16 l1, a1
+; CHECK-NEXT:    jsri32 [.LCPI21_0]
+; CHECK-NEXT:    addi32 a1, l1, 10
+; CHECK-NEXT:    btsti16 a0, 0
+; CHECK-NEXT:    movt32 a1, l0
+; CHECK-NEXT:    mov16 a0, a1
+; CHECK-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CHECK-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CHECK-NEXT:    addi16 sp, sp, 12
+; CHECK-NEXT:    rts16
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    .p2align 2, 0x0
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .long check_val
+  %t3 = tail call i1 @check_val(i32 %t0)
+  %t4 = add i32 %t1, 10
+  %t5 = select i1 %t3, i32 %t2, i32 %t4
+  ret i32 %t5
+}
+
+declare i1 @check_val(i32)
