[llvm] 3f18fc5 - [ARM] Extra tests for sign extended floating point compares. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 16 02:50:03 PDT 2021


Author: David Green
Date: 2021-06-16T10:49:50+01:00
New Revision: 3f18fc5ece720ae2b956ab77a9378bbcf33dc89f

URL: https://github.com/llvm/llvm-project/commit/3f18fc5ece720ae2b956ab77a9378bbcf33dc89f
DIFF: https://github.com/llvm/llvm-project/commit/3f18fc5ece720ae2b956ab77a9378bbcf33dc89f.diff

LOG: [ARM] Extra tests for sign extended floating point compares. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/mve-pred-ext.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll b/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll
index df468608e0eae..1ad44d5d56c3f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll
@@ -15,6 +15,46 @@ entry:
   ret <4 x i32> %0
 }
 
+define arm_aapcs_vfpcc <4 x i32> @sext_v4i1_v4f32(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: sext_v4i1_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    vcmp.f32 s3, s7
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vcmp.f32 s1, s5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %c = fcmp une <4 x float> %src1, %src2
+  %0 = sext <4 x i1> %c to <4 x i32>
+  ret <4 x i32> %0
+}
+
 define arm_aapcs_vfpcc <8 x i16> @sext_v8i1_v8i16(<8 x i16> %src) {
 ; CHECK-LABEL: sext_v8i1_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -29,6 +69,90 @@ entry:
   ret <8 x i16> %0
 }
 
+define arm_aapcs_vfpcc <8 x i16> @sext_v8i1_v8f32(<8 x half> %src1, <8 x half> %src2) {
+; CHECK-LABEL: sext_v8i1_v8f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    vmovx.f16 s8, s5
+; CHECK-NEXT:    vmovx.f16 s10, s1
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    vmovx.f16 s8, s6
+; CHECK-NEXT:    vmovx.f16 s10, s2
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    vcmp.f16 s1, s5
+; CHECK-NEXT:    vmovx.f16 s8, s7
+; CHECK-NEXT:    vmovx.f16 s10, s3
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    vcmp.f16 s2, s6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r5, #0
+; CHECK-NEXT:    vcmp.f16 s3, s7
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    vcmp.f16 s0, s4
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r12, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csetm lr, ne
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    csetm r4, ne
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vmovx.f16 s4, s4
+; CHECK-NEXT:    vmovx.f16 s0, s0
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    vcmp.f16 s0, s4
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    movs r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #1
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    vmov.16 q0[0], r3
+; CHECK-NEXT:    csetm r5, ne
+; CHECK-NEXT:    vmov.16 q0[1], r5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.16 q0[4], r4
+; CHECK-NEXT:    vmov.16 q0[5], r2
+; CHECK-NEXT:    vmov.16 q0[6], r12
+; CHECK-NEXT:    vmov.16 q0[7], lr
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %c = fcmp une <8 x half> %src1, %src2
+  %0 = sext <8 x i1> %c to <8 x i16>
+  ret <8 x i16> %0
+}
+
 define arm_aapcs_vfpcc <16 x i8> @sext_v16i1_v16i8(<16 x i8> %src) {
 ; CHECK-LABEL: sext_v16i1_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -71,6 +195,46 @@ entry:
   ret <2 x i64> %0
 }
 
+define arm_aapcs_vfpcc <2 x i64> @sext_v2i1_v2f64(<2 x double> %src) {
+; CHECK-LABEL: sext_v2i1_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI6_0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    vmov r4, r5, d0
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpeq
+; CHECK-NEXT:    vmov r2, r1, d8
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    lsrs r0, r0, #5
+; CHECK-NEXT:    csetm r6, ne
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpeq
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsrs r0, r0, #5
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r6
+; CHECK-NEXT:    vmov q0[3], q0[1], r0, r6
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+entry:
+  %c = fcmp une <2 x double> %src, zeroinitializer
+  %0 = sext <2 x i1> %c to <2 x i64>
+  ret <2 x i64> %0
+}
+
 
 define arm_aapcs_vfpcc <4 x i32> @zext_v4i1_v4i32(<4 x i32> %src) {
 ; CHECK-LABEL: zext_v4i1_v4i32:
@@ -86,6 +250,48 @@ entry:
   ret <4 x i32> %0
 }
 
+define arm_aapcs_vfpcc <4 x i32> @zext_v4i1_v4f32(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: zext_v4i1_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f32 s3, s7
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    vcmp.f32 s1, s5
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    vcmp.f32 s2, s6
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vmov.i32 q2, #0x1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r3
+; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = fcmp une <4 x float> %src1, %src2
+  %0 = zext <4 x i1> %c to <4 x i32>
+  ret <4 x i32> %0
+}
+
 define arm_aapcs_vfpcc <8 x i16> @zext_v8i1_v8i16(<8 x i16> %src) {
 ; CHECK-LABEL: zext_v8i1_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -100,6 +306,92 @@ entry:
   ret <8 x i16> %0
 }
 
+define arm_aapcs_vfpcc <8 x i16> @zext_v8i1_v8f32(<8 x half> %src1, <8 x half> %src2) {
+; CHECK-LABEL: zext_v8i1_v8f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    vmovx.f16 s8, s4
+; CHECK-NEXT:    vmovx.f16 s10, s0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    vmovx.f16 s8, s5
+; CHECK-NEXT:    vmovx.f16 s10, s1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r1, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vcmp.f16 s2, s6
+; CHECK-NEXT:    vmovx.f16 s8, s6
+; CHECK-NEXT:    vmovx.f16 s10, s2
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r2, #0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r2, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    vcmp.f16 s3, s7
+; CHECK-NEXT:    vmovx.f16 s8, s7
+; CHECK-NEXT:    vmovx.f16 s10, s3
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r4, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r5, #0
+; CHECK-NEXT:    vcmp.f16 s10, s8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r5, #1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    vcmp.f16 s1, s5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    csetm r12, ne
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csetm lr, ne
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    csetm r4, ne
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    vcmp.f16 s0, s4
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    csetm r3, ne
+; CHECK-NEXT:    movs r6, #0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    movne r6, #1
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    vmov.i16 q0, #0x1
+; CHECK-NEXT:    csetm r5, ne
+; CHECK-NEXT:    vmov.16 q1[0], r5
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.16 q1[2], r3
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov.16 q1[5], lr
+; CHECK-NEXT:    vmov.16 q1[6], r4
+; CHECK-NEXT:    vmov.16 q1[7], r12
+; CHECK-NEXT:    vand q0, q1, q0
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %c = fcmp une <8 x half> %src1, %src2
+  %0 = zext <8 x i1> %c to <8 x i16>
+  ret <8 x i16> %0
+}
+
 define arm_aapcs_vfpcc <16 x i8> @zext_v16i1_v16i8(<16 x i8> %src) {
 ; CHECK-LABEL: zext_v16i1_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -122,7 +414,7 @@ define arm_aapcs_vfpcc <2 x i64> @zext_v2i1_v2i64(<2 x i64> %src) {
 ; CHECK-NEXT:    vmov r0, r1, d1
 ; CHECK-NEXT:    movs r3, #0
 ; CHECK-NEXT:    vmov lr, r12, d0
-; CHECK-NEXT:    adr r2, .LCPI7_0
+; CHECK-NEXT:    adr r2, .LCPI12_0
 ; CHECK-NEXT:    vldrw.u32 q0, [r2]
 ; CHECK-NEXT:    rsbs r0, r0, #0
 ; CHECK-NEXT:    sbcs.w r0, r3, r1
@@ -142,7 +434,7 @@ define arm_aapcs_vfpcc <2 x i64> @zext_v2i1_v2i64(<2 x i64> %src) {
 ; CHECK-NEXT:    pop {r7, pc}
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:  .LCPI12_0:
 ; CHECK-NEXT:    .long 1 @ 0x1
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 1 @ 0x1
@@ -153,6 +445,53 @@ entry:
   ret <2 x i64> %0
 }
 
+define arm_aapcs_vfpcc <2 x i64> @zext_v2i1_v2f64(<2 x double> %src) {
+; CHECK-LABEL: zext_v2i1_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vldr d0, .LCPI13_0
+; CHECK-NEXT:    vmov r0, r1, d9
+; CHECK-NEXT:    vmov r4, r5, d0
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    bl __aeabi_dcmpeq
+; CHECK-NEXT:    vmov r2, r1, d8
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    adr r3, .LCPI13_1
+; CHECK-NEXT:    lsrs r0, r0, #5
+; CHECK-NEXT:    vldrw.u32 q4, [r3]
+; CHECK-NEXT:    mov r3, r5
+; CHECK-NEXT:    csetm r6, ne
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    mov r2, r4
+; CHECK-NEXT:    bl __aeabi_dcmpeq
+; CHECK-NEXT:    clz r0, r0
+; CHECK-NEXT:    lsrs r0, r0, #5
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov q0[2], q0[0], r0, r6
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI13_1:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:  .LCPI13_0:
+; CHECK-NEXT:    .long 0 @ double 0
+; CHECK-NEXT:    .long 0
+entry:
+  %c = fcmp une <2 x double> %src, zeroinitializer
+  %0 = zext <2 x i1> %c to <2 x i64>
+  ret <2 x i64> %0
+}
+
 
 define arm_aapcs_vfpcc <4 x i32> @trunc_v4i1_v4i32(<4 x i32> %src) {
 ; CHECK-LABEL: trunc_v4i1_v4i32:


        


More information about the llvm-commits mailing list