[llvm] b5caa68 - [ARM] Tests for various NEON vector compares. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 1 08:01:01 PDT 2022


Author: David Green
Date: 2022-11-01T15:00:56Z
New Revision: b5caa68fb2444f0fc0e459be3b7a3da88a68b566

URL: https://github.com/llvm/llvm-project/commit/b5caa68fb2444f0fc0e459be3b7a3da88a68b566
DIFF: https://github.com/llvm/llvm-project/commit/b5caa68fb2444f0fc0e459be3b7a3da88a68b566.diff

LOG: [ARM] Tests for various NEON vector compares. NFC

Added: 
    llvm/test/CodeGen/ARM/vcmpz.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/vcmpz.ll b/llvm/test/CodeGen/ARM/vcmpz.ll
new file mode 100644
index 0000000000000..f800346a6b564
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vcmpz.ll
@@ -0,0 +1,301 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=armv8-eabi -mattr=+neon | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_eq(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_eq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %2 = icmp eq <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_ne(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_ne:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q8, q0, q1
+; CHECK-NEXT:    vmvn q0, q8
+; CHECK-NEXT:    bx lr
+  %2 = icmp ne <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_slt(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_slt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.s32 q0, q1, q0
+; CHECK-NEXT:    bx lr
+  %2 = icmp slt <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_sle(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_sle:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.s32 q0, q1, q0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sle <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_sgt(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_sgt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.s32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %2 = icmp sgt <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_sge(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_sge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.s32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %2 = icmp sge <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_ult(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_ult:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.u32 q0, q1, q0
+; CHECK-NEXT:    bx lr
+  %2 = icmp ult <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_ule(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_ule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.u32 q0, q1, q0
+; CHECK-NEXT:    bx lr
+  %2 = icmp ule <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_ugt(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_ugt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.u32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %2 = icmp ugt <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_uge(<4 x i32> %0, <4 x i32> %b) {
+; CHECK-LABEL: vcmpz_uge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.u32 q0, q0, q1
+; CHECK-NEXT:    bx lr
+  %2 = icmp uge <4 x i32> %0, %b
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_eq(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_eq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp eq <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ne(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_ne:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q8, q0, #0
+; CHECK-NEXT:    vmvn q0, q8
+; CHECK-NEXT:    bx lr
+  %2 = icmp ne <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_slt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_slt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vclt.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp slt <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sle(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_sle:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcle.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sle <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sgt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_sgt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sgt <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sge(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_sge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sge <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ult(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_ult:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    bx lr
+  %2 = icmp ult <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+;define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ule(<4 x i32> %0) {
+;  %2 = icmp ule <4 x i32> %0, zeroinitializer
+;  %3 = sext <4 x i1> %2 to <4 x i32>
+;  ret <4 x i32> %3
+;}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ugt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_ugt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q8, q0, #0
+; CHECK-NEXT:    vmvn q0, q8
+; CHECK-NEXT:    bx lr
+  %2 = icmp ugt <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_uge(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zr_uge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+  %2 = icmp uge <4 x i32> %0, zeroinitializer
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_eq(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_eq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp eq <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ne(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_ne:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q8, q0, #0
+; CHECK-NEXT:    vmvn q0, q8
+; CHECK-NEXT:    bx lr
+  %2 = icmp ne <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_slt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_slt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcgt.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp slt <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sle(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_sle:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcge.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sle <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sgt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_sgt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vclt.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sgt <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sge(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_sge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcle.s32 q0, q0, #0
+; CHECK-NEXT:    bx lr
+  %2 = icmp sge <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ult(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_ult:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vceq.i32 q8, q0, #0
+; CHECK-NEXT:    vmvn q0, q8
+; CHECK-NEXT:    bx lr
+  %2 = icmp ult <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ule(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_ule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+  %2 = icmp ule <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ugt(<4 x i32> %0) {
+; CHECK-LABEL: vcmpz_zl_ugt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    bx lr
+  %2 = icmp ugt <4 x i32> zeroinitializer, %0
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
+;define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_uge(<4 x i32> %0) {
+;  %2 = icmp uge <4 x i32> zeroinitializer, %0
+;  %3 = sext <4 x i1> %2 to <4 x i32>
+;  ret <4 x i32> %3
+;}


        


More information about the llvm-commits mailing list