[llvm] e5489f7 - [LoongArch] Add baseline tests for translating the selection of constants into mathematical operations
via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 8 17:50:51 PST 2023
Author: gonglingqin
Date: 2023-02-09T09:45:58+08:00
New Revision: e5489f72640537fdc718a7e4f7c09d5ae7848072
URL: https://github.com/llvm/llvm-project/commit/e5489f72640537fdc718a7e4f7c09d5ae7848072
DIFF: https://github.com/llvm/llvm-project/commit/e5489f72640537fdc718a7e4f7c09d5ae7848072.diff
LOG: [LoongArch] Add baseline tests for translating the selection of constants into mathematical operations
Added:
llvm/test/CodeGen/LoongArch/select-const.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/select-const.ll b/llvm/test/CodeGen/LoongArch/select-const.ll
new file mode 100644
index 0000000000000..a37092a46a97c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/select-const.ll
@@ -0,0 +1,402 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; TODO: Avoid using masknez + maskeqz when selecting constants that
+;; can be computed easily.
+
+define signext i32 @select_const_int_one_away(i1 zeroext %a) nounwind {
+; LA32-LABEL: select_const_int_one_away:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a1, $zero, 4
+; LA32-NEXT: masknez $a1, $a1, $a0
+; LA32-NEXT: ori $a2, $zero, 3
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_const_int_one_away:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 4
+; LA64-NEXT: masknez $a1, $a1, $a0
+; LA64-NEXT: ori $a2, $zero, 3
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = select i1 %a, i32 3, i32 4
+ ret i32 %1
+}
+
+define signext i32 @select_const_int_pow2_zero(i1 zeroext %a) nounwind {
+; LA32-LABEL: select_const_int_pow2_zero:
+; LA32: # %bb.0:
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: ori $a2, $zero, 4
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_const_int_pow2_zero:
+; LA64: # %bb.0:
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: ori $a2, $zero, 4
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = select i1 %a, i32 4, i32 0
+ ret i32 %1
+}
+
+define signext i32 @select_eq_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_eq_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_eq_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp eq i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_ne_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_ne_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $zero, $a0
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ne_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp ne i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_sgt_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_sgt_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: slt $a0, $a1, $a0
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_sgt_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: slt $a0, $a1, $a0
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp sgt i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_slt_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_slt_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: slt $a0, $a0, $a1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_slt_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: slt $a0, $a0, $a1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp slt i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_sge_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_sge_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: slt $a0, $a0, $a1
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_sge_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: slt $a0, $a0, $a1
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp sge i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_sle_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_sle_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: slt $a0, $a1, $a0
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_sle_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: slt $a0, $a1, $a0
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp sle i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_ugt_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_ugt_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ugt_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp ugt i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_ult_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_ult_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ult_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp ult i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_uge_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_uge_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_uge_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp uge i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define signext i32 @select_ule_zero_negone(i32 signext %a, i32 signext %b) nounwind {
+; LA32-LABEL: select_ule_zero_negone:
+; LA32: # %bb.0:
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: masknez $a1, $zero, $a0
+; LA32-NEXT: addi.w $a2, $zero, -1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ule_zero_negone:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: masknez $a1, $zero, $a0
+; LA64-NEXT: addi.w $a2, $zero, -1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp ule i32 %a, %b
+ %2 = select i1 %1, i32 -1, i32 0
+ ret i32 %2
+}
+
+define i32 @select_eq_1_2(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: select_eq_1_2:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ori $a1, $zero, 2
+; LA32-NEXT: masknez $a1, $a1, $a0
+; LA32-NEXT: ori $a2, $zero, 1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_eq_1_2:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ori $a1, $zero, 2
+; LA64-NEXT: masknez $a1, $a1, $a0
+; LA64-NEXT: ori $a2, $zero, 1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp eq i32 %a, %b
+ %2 = select i1 %1, i32 1, i32 2
+ ret i32 %2
+}
+
+define i32 @select_ne_1_2(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: select_ne_1_2:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $zero, $a0
+; LA32-NEXT: ori $a1, $zero, 2
+; LA32-NEXT: masknez $a1, $a1, $a0
+; LA32-NEXT: ori $a2, $zero, 1
+; LA32-NEXT: maskeqz $a0, $a2, $a0
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ne_1_2:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: ori $a1, $zero, 2
+; LA64-NEXT: masknez $a1, $a1, $a0
+; LA64-NEXT: ori $a2, $zero, 1
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %1 = icmp ne i32 %a, %b
+ %2 = select i1 %1, i32 1, i32 2
+ ret i32 %2
+}
+
+define i32 @select_eq_10000_10001(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: select_eq_10000_10001:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: lu12i.w $a1, 2
+; LA32-NEXT: ori $a2, $a1, 1810
+; LA32-NEXT: masknez $a2, $a2, $a0
+; LA32-NEXT: ori $a1, $a1, 1809
+; LA32-NEXT: maskeqz $a0, $a1, $a0
+; LA32-NEXT: or $a0, $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_eq_10000_10001:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: lu12i.w $a1, 2
+; LA64-NEXT: ori $a2, $a1, 1810
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: ori $a1, $a1, 1809
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = icmp eq i32 %a, %b
+ %2 = select i1 %1, i32 10001, i32 10002
+ ret i32 %2
+}
+
+define i32 @select_ne_10001_10002(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: select_ne_10001_10002:
+; LA32: # %bb.0:
+; LA32-NEXT: xor $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $zero, $a0
+; LA32-NEXT: lu12i.w $a1, 2
+; LA32-NEXT: ori $a2, $a1, 1810
+; LA32-NEXT: masknez $a2, $a2, $a0
+; LA32-NEXT: ori $a1, $a1, 1809
+; LA32-NEXT: maskeqz $a0, $a1, $a0
+; LA32-NEXT: or $a0, $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_ne_10001_10002:
+; LA64: # %bb.0:
+; LA64-NEXT: xor $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: lu12i.w $a1, 2
+; LA64-NEXT: ori $a2, $a1, 1810
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: ori $a1, $a1, 1809
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = icmp ne i32 %a, %b
+ %2 = select i1 %1, i32 10001, i32 10002
+ ret i32 %2
+}
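
As context for the TODO note in the test file (";; TODO: Avoid using masknez + maskeqz ..."), the kind of rewrite these baseline tests set up for can be sketched directly in LLVM IR. This sketch is not part of the commit; the function names are hypothetical and the eventual optimization would happen during LoongArch instruction selection rather than at the IR level, but the arithmetic is the same.

; select i1 %a, i32 3, i32 4  ->  4 - zext(%a): constants one apart need no masks.
define i32 @select_const_int_one_away_math(i1 zeroext %a) {
  %ext = zext i1 %a to i32      ; 1 when %a is true, 0 otherwise
  %res = sub i32 4, %ext        ; true -> 3, false -> 4
  ret i32 %res
}

; select i1 %a, i32 4, i32 0  ->  zext(%a) << 2: a power of two versus zero is a shift.
define i32 @select_const_int_pow2_zero_math(i1 zeroext %a) {
  %ext = zext i1 %a to i32      ; 1 or 0
  %res = shl i32 %ext, 2        ; true -> 4, false -> 0
  ret i32 %res
}

; select i1 %cmp, i32 -1, i32 0  ->  sext(%cmp): -1 versus 0 is a sign extension.
define i32 @select_zero_negone_math(i1 %cmp) {
  %res = sext i1 %cmp to i32    ; true -> -1, false -> 0
  ret i32 %res
}

Each of these forms replaces the masknez/maskeqz/or triple in the checks above with one or two ordinary ALU instructions, which is what a follow-up change to the LoongArch backend would be measured against.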