[llvm] e316f19 - [LoongArch] Pre-commit tests for spurious mask removal. NFC
WANG Rui via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 16 22:30:00 PDT 2024
Author: WANG Rui
Date: 2024-07-17T13:30:14+08:00
New Revision: e316f1956992730fa601849799ccb12d17f507d7
URL: https://github.com/llvm/llvm-project/commit/e316f1956992730fa601849799ccb12d17f507d7
DIFF: https://github.com/llvm/llvm-project/commit/e316f1956992730fa601849799ccb12d17f507d7.diff
LOG: [LoongArch] Pre-commit tests for spurious mask removal. NFC
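
In brief, these tests pin down the current lowering of (a & b) <pred> b
for sub-word integer types, where a redundant zero-extension mask is
emitted before the unsigned compare. A minimal sketch of the pattern,
taken from the first test added below: with both i8 arguments already
sign-extended, LA32/LA64 still mask %b first:

    ; %and = and i8 %a, %b ; %cmp = icmp ult i8 %and, %b
    andi  $a1, $a1, 255   # spurious: sign extension from i8 already
    and   $a0, $a1, $a0   # preserves unsigned order, so the compare
    sltu  $a0, $a0, $a1   # would be correct without the mask

Since sext(a) & sext(b) == sext(a & b), and sign extension preserves
unsigned order between values of the same source width, the andi (and
the bstrpick.w/bstrpick.d in the i16 variants) is removable; the
removal itself is left to a follow-up change, keeping this commit NFC.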
Added:
Modified:
llvm/test/CodeGen/LoongArch/andn-icmp.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/andn-icmp.ll b/llvm/test/CodeGen/LoongArch/andn-icmp.ll
index c529c2e281214..4fc3c8df4664c 100644
--- a/llvm/test/CodeGen/LoongArch/andn-icmp.ll
+++ b/llvm/test/CodeGen/LoongArch/andn-icmp.ll
@@ -149,3 +149,455 @@ define i1 @andn_icmp_ne_i64(i64 %a, i64 %b) nounwind {
%cmpne = icmp ne i64 %and, %b
ret i1 %cmpne
}
+
+define i1 @andn_icmp_ult_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i16 %a, %b
+ %cmp = icmp ult i16 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_uge_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_uge_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_uge_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp uge i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_uge_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_uge_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_uge_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i16 %a, %b
+ %cmp = icmp uge i16 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ugt_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ugt_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ugt_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ugt i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ugt_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ugt_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ugt_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: ret
+ %and = and i16 %a, %b
+ %cmp = icmp ugt i16 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ule_i8(i8 signext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ule_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ule_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ule i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ule_i16(i16 signext %a, i16 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ule_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a1, $a0
+; LA32-NEXT: xori $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ule_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a1, $a0
+; LA64-NEXT: xori $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i16 %a, %b
+ %cmp = icmp ule i16 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_sz(i8 signext %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_sz:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_sz:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_zs(i8 zeroext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_zs:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: andi $a0, $a0, 255
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_zs:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 255
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_zz(i8 zeroext %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_zz:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_zz:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_sn(i8 signext %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_sn:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: andi $a0, $a0, 255
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_sn:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 255
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_zn(i8 zeroext %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_zn:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: andi $a0, $a0, 255
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_zn:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 255
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_ns(i8 %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_ns:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: andi $a0, $a0, 255
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_ns:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 255
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_nz(i8 %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_nz:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_nz:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_eq_i8_nn(i8 %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_eq_i8_nn:
+; LA32: # %bb.0:
+; LA32-NEXT: andn $a0, $a1, $a0
+; LA32-NEXT: andi $a0, $a0, 255
+; LA32-NEXT: sltui $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_eq_i8_nn:
+; LA64: # %bb.0:
+; LA64-NEXT: andn $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 255
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp eq i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_sz(i8 signext %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_sz:
+; LA32: # %bb.0:
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_sz:
+; LA64: # %bb.0:
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_zs(i8 zeroext %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_zs:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_zs:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_zz(i8 zeroext %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_zz:
+; LA32: # %bb.0:
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_zz:
+; LA64: # %bb.0:
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_sn(i8 signext %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_sn:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_sn:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_zn(i8 zeroext %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_zn:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_zn:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_ns(i8 %a, i8 signext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_ns:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_ns:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_nz(i8 %a, i8 zeroext %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_nz:
+; LA32: # %bb.0:
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_nz:
+; LA64: # %bb.0:
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
+
+define i1 @andn_icmp_ult_i8_nn(i8 %a, i8 %b) nounwind {
+; LA32-LABEL: andn_icmp_ult_i8_nn:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a1, $a1, 255
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: sltu $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: andn_icmp_ult_i8_nn:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: and $a0, $a1, $a0
+; LA64-NEXT: sltu $a0, $a0, $a1
+; LA64-NEXT: ret
+ %and = and i8 %a, %b
+ %cmp = icmp ult i8 %and, %b
+ ret i1 %cmp
+}
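
For reference, a hedged sketch (an assumption about the follow-up fold,
not output from this commit) of what andn_icmp_ult_i8 could lower to
once the spurious mask is removed, mirroring the already-unmasked
andn_icmp_ult_i8_zz variant above:

    ; assumed post-fold lowering, both inputs signext:
    and   $a0, $a1, $a0   # no andi $a1, $a1, 255 needed
    sltu  $a0, $a0, $a1
    ret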