[llvm] 60216ad - [RISCV] Add test cases for D101485. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 29 09:46:58 PDT 2021
- Previous message: [llvm] 9239932 - [COST] Improve shuffle kind detection if shuffle mask is provided.
- Next message: [llvm] dcdda2b - [RISCV] Teach DAG combine to fold (and (select_cc lhs, rhs, cc, -1, c), x) -> (select_cc lhs, rhs, cc, x, (and, x, c))
Author: Craig Topper
Date: 2021-04-29T09:43:51-07:00
New Revision: 60216adef1c8599eaf6d183dad546c1dafd81964
URL: https://github.com/llvm/llvm-project/commit/60216adef1c8599eaf6d183dad546c1dafd81964
DIFF: https://github.com/llvm/llvm-project/commit/60216adef1c8599eaf6d183dad546c1dafd81964.diff
LOG: [RISCV] Add test cases for D101485. NFC
Added:
llvm/test/CodeGen/RISCV/select-binop-identity.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
new file mode 100644
index 000000000000..79a2da033da3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+
+; InstCombine canonicalizes (c ? x | y : x) to (x | (c ? y : 0)), and similarly
+; for other binary operations, using their identity value as the constant.
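+;
+; For example (an illustrative sketch, not part of this test's checks; the
+; value names are hypothetical), the canonicalization turns
+;   %or  = or i32 %x, %y
+;   %sel = select i1 %c, i32 %or, i32 %x
+; into
+;   %t   = select i1 %c, i32 %y, i32 0
+;   %sel = or i32 %x, %t
+; since or'ing %x with the identity value 0 leaves %x unchanged.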
+
+; TODO: We can reverse this for and/or/xor, allowing us to pull the binop into
+; the basic block we create when we expand select.
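+;
+; A sketch of the reversed form (hypothetical; this patch only adds the tests
+; and does not perform the fold): given the first test below,
+;   %a = select i1 %c, i32 %x, i32 -1
+;   %b = and i32 %a, %y
+; the reversal would select as if the IR were
+;   %t = and i32 %x, %y
+;   %b = select i1 %c, i32 %t, i32 %y
+; so the 'and' can live in the conditional block created when the select is
+; expanded, instead of executing unconditionally with a materialized -1.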
+
+define signext i32 @and_select_all_ones_i32(i1 zeroext %c, i32 signext %x, i32 %y) {
+; RV32I-LABEL: and_select_all_ones_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB0_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: addi a1, zero, -1
+; RV32I-NEXT: .LBB0_2:
+; RV32I-NEXT: and a0, a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and_select_all_ones_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB0_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: .LBB0_2:
+; RV64I-NEXT: and a0, a1, a2
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: ret
+ %a = select i1 %c, i32 %x, i32 -1
+ %b = and i32 %a, %y
+ ret i32 %b
+}
+
+define i64 @and_select_all_ones_i64(i1 zeroext %c, i64 %x, i64 %y) {
+; RV32I-LABEL: and_select_all_ones_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a6, zero, -1
+; RV32I-NEXT: addi a5, zero, -1
+; RV32I-NEXT: bnez a0, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a6, a2
+; RV32I-NEXT: mv a5, a1
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: and a0, a3, a5
+; RV32I-NEXT: and a1, a4, a6
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: and_select_all_ones_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a3, zero, -1
+; RV64I-NEXT: bnez a0, .LBB1_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a3, a1
+; RV64I-NEXT: .LBB1_2:
+; RV64I-NEXT: and a0, a2, a3
+; RV64I-NEXT: ret
+ %a = select i1 %c, i64 -1, i64 %x
+ %b = and i64 %y, %a
+ ret i64 %b
+}
+
+define signext i32 @or_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 signext %y) {
+; RV32I-LABEL: or_select_all_zeros_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB2_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a1, zero
+; RV32I-NEXT: .LBB2_2:
+; RV32I-NEXT: or a0, a2, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or_select_all_zeros_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB2_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: .LBB2_2:
+; RV64I-NEXT: or a0, a2, a1
+; RV64I-NEXT: ret
+ %a = select i1 %c, i32 %x, i32 0
+ %b = or i32 %y, %a
+ ret i32 %b
+}
+
+define i64 @or_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
+; RV32I-LABEL: or_select_all_zeros_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: mv a6, zero
+; RV32I-NEXT: mv a5, zero
+; RV32I-NEXT: bnez a0, .LBB3_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a6, a2
+; RV32I-NEXT: mv a5, a1
+; RV32I-NEXT: .LBB3_2:
+; RV32I-NEXT: or a0, a5, a3
+; RV32I-NEXT: or a1, a6, a4
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: or_select_all_zeros_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: mv a3, zero
+; RV64I-NEXT: bnez a0, .LBB3_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a3, a1
+; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: or a0, a3, a2
+; RV64I-NEXT: ret
+ %a = select i1 %c, i64 0, i64 %x
+ %b = or i64 %a, %y
+ ret i64 %b
+}
+
+define signext i32 @xor_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 signext %y) {
+; RV32I-LABEL: xor_select_all_zeros_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: mv a3, zero
+; RV32I-NEXT: bnez a0, .LBB4_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a3, a1
+; RV32I-NEXT: .LBB4_2:
+; RV32I-NEXT: xor a0, a2, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor_select_all_zeros_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: mv a3, zero
+; RV64I-NEXT: bnez a0, .LBB4_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a3, a1
+; RV64I-NEXT: .LBB4_2:
+; RV64I-NEXT: xor a0, a2, a3
+; RV64I-NEXT: ret
+ %a = select i1 %c, i32 0, i32 %x
+ %b = xor i32 %y, %a
+ ret i32 %b
+}
+
+define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
+; RV32I-LABEL: xor_select_all_zeros_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: bnez a0, .LBB5_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: mv a2, zero
+; RV32I-NEXT: mv a1, zero
+; RV32I-NEXT: .LBB5_2:
+; RV32I-NEXT: xor a0, a1, a3
+; RV32I-NEXT: xor a1, a2, a4
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: xor_select_all_zeros_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: bnez a0, .LBB5_2
+; RV64I-NEXT: # %bb.1:
+; RV64I-NEXT: mv a1, zero
+; RV64I-NEXT: .LBB5_2:
+; RV64I-NEXT: xor a0, a1, a2
+; RV64I-NEXT: ret
+ %a = select i1 %c, i64 %x, i64 0
+ %b = xor i64 %a, %y
+ ret i64 %b
+}