[llvm] a647040 - [ARM] Extra widening and narrowing combinations tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 10 14:08:43 PDT 2021


Author: David Green
Date: 2021-07-10T22:08:30+01:00
New Revision: a6470408cf3601391c6c85f8b3a743f2b5fbaad2

URL: https://github.com/llvm/llvm-project/commit/a6470408cf3601391c6c85f8b3a743f2b5fbaad2
DIFF: https://github.com/llvm/llvm-project/commit/a6470408cf3601391c6c85f8b3a743f2b5fbaad2.diff

LOG: [ARM] Extra widening and narrowing combinations tests. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/block-placement.mir
    llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/block-placement.mir b/llvm/test/CodeGen/Thumb2/block-placement.mir
index 8a15b4190541..3c827a57be6d 100644
--- a/llvm/test/CodeGen/Thumb2/block-placement.mir
+++ b/llvm/test/CodeGen/Thumb2/block-placement.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve -run-pass=arm-block-placement %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve -run-pass=arm-block-placement -verify-machineinstrs %s -o - | FileCheck %s
 --- |
 
   ; Checks that Predecessor gets moved (to before the LoopExit) if it contains a backward WLS.

diff --git a/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll b/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
index fe535c641ca7..d8a45ea1e8ef 100644
--- a/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-widen-narrow.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-BE
 
 define void @foo_int8_int32(<4 x i8>* %dest, <4 x i32>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int8_int32:
@@ -42,22 +42,50 @@ entry:
 }
 
 
-define void @foo_int8_int32_double(<16 x i8>* %dest, <16 x i32>* readonly %src, i32 %n) {
-; CHECK-LABEL: foo_int8_int32_double:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldrw.u32 q0, [r1]
-; CHECK-NEXT:    vldrw.u32 q1, [r1, #16]
-; CHECK-NEXT:    vldrw.u32 q2, [r1, #32]
-; CHECK-NEXT:    vldrw.u32 q3, [r1, #48]
-; CHECK-NEXT:    vstrb.32 q1, [r0, #4]
-; CHECK-NEXT:    vstrb.32 q0, [r0]
-; CHECK-NEXT:    vstrb.32 q3, [r0, #12]
-; CHECK-NEXT:    vstrb.32 q2, [r0, #8]
-; CHECK-NEXT:    bx lr
+define void @foo_int8_int32_double(<8 x i8>* %dest, <8 x i32>* readonly %src, i32 %n) {
+; CHECK-LE-LABEL: foo_int8_int32_double:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vldrh.u16 q1, [r1]
+; CHECK-LE-NEXT:    vmov r2, r3, d2
+; CHECK-LE-NEXT:    vmov.16 q0[0], r2
+; CHECK-LE-NEXT:    vmov.16 q0[1], r3
+; CHECK-LE-NEXT:    vmov r2, r3, d3
+; CHECK-LE-NEXT:    vldrh.u16 q1, [r1, #16]
+; CHECK-LE-NEXT:    vmov.16 q0[2], r2
+; CHECK-LE-NEXT:    vmov.16 q0[3], r3
+; CHECK-LE-NEXT:    vmov r1, r2, d2
+; CHECK-LE-NEXT:    vmov.16 q0[4], r1
+; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:    vmov r1, r2, d3
+; CHECK-LE-NEXT:    vmov.16 q0[6], r1
+; CHECK-LE-NEXT:    vmov.16 q0[7], r2
+; CHECK-LE-NEXT:    vstrb.16 q0, [r0]
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: foo_int8_int32_double:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-BE-NEXT:    vrev32.8 q1, q0
+; CHECK-BE-NEXT:    vmov r2, r3, d2
+; CHECK-BE-NEXT:    vmov.16 q0[0], r2
+; CHECK-BE-NEXT:    vmov.16 q0[1], r3
+; CHECK-BE-NEXT:    vmov r2, r3, d3
+; CHECK-BE-NEXT:    vldrb.u8 q1, [r1, #16]
+; CHECK-BE-NEXT:    vmov.16 q0[2], r2
+; CHECK-BE-NEXT:    vmov.16 q0[3], r3
+; CHECK-BE-NEXT:    vrev32.8 q1, q1
+; CHECK-BE-NEXT:    vmov r1, r2, d2
+; CHECK-BE-NEXT:    vmov.16 q0[4], r1
+; CHECK-BE-NEXT:    vmov.16 q0[5], r2
+; CHECK-BE-NEXT:    vmov r1, r2, d3
+; CHECK-BE-NEXT:    vmov.16 q0[6], r1
+; CHECK-BE-NEXT:    vmov.16 q0[7], r2
+; CHECK-BE-NEXT:    vstrb.16 q0, [r0]
+; CHECK-BE-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i32>, <16 x i32>* %src, align 4
-  %0 = trunc <16 x i32> %wide.load to <16 x i8>
-  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  %wide.load = load <8 x i32>, <8 x i32>* %src, align 2
+  %0 = trunc <8 x i32> %wide.load to <8 x i8>
+  store <8 x i8> %0, <8 x i8>* %dest, align 1
   ret void
 }
 
@@ -91,6 +119,25 @@ entry:
   ret void
 }
 
+define void @foo_int8_int32_quad(<16 x i8>* %dest, <16 x i32>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int8_int32_quad:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vldrw.u32 q1, [r1, #16]
+; CHECK-NEXT:    vldrw.u32 q2, [r1, #32]
+; CHECK-NEXT:    vldrw.u32 q3, [r1, #48]
+; CHECK-NEXT:    vstrb.32 q1, [r0, #4]
+; CHECK-NEXT:    vstrb.32 q0, [r0]
+; CHECK-NEXT:    vstrb.32 q3, [r0, #12]
+; CHECK-NEXT:    vstrb.32 q2, [r0, #8]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i32>, <16 x i32>* %src, align 4
+  %0 = trunc <16 x i32> %wide.load to <16 x i8>
+  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  ret void
+}
+
 
 define void @foo_int32_int8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8:
@@ -131,22 +178,18 @@ entry:
   ret void
 }
 
-define void @foo_int32_int8_double(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_int32_int8_double(<8 x i32>* %dest, <8 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.s32 q0, [r1]
 ; CHECK-NEXT:    vldrb.s32 q1, [r1, #4]
-; CHECK-NEXT:    vldrb.s32 q2, [r1, #8]
-; CHECK-NEXT:    vldrb.s32 q3, [r1, #12]
 ; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
-; CHECK-NEXT:    vstrw.32 q3, [r0, #48]
-; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
-  %0 = sext <16 x i8> %wide.load to <16 x i32>
-  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %0 = sext <8 x i8> %wide.load to <8 x i32>
+  store <8 x i32> %0, <8 x i32>* %dest, align 4
   ret void
 }
 
@@ -180,6 +223,25 @@ entry:
   ret void
 }
 
+define void @foo_int32_int8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int32_int8_quad:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.s32 q0, [r1]
+; CHECK-NEXT:    vldrb.s32 q1, [r1, #4]
+; CHECK-NEXT:    vldrb.s32 q2, [r1, #8]
+; CHECK-NEXT:    vldrb.s32 q3, [r1, #12]
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    vstrw.32 q3, [r0, #48]
+; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = sext <16 x i8> %wide.load to <16 x i32>
+  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  ret void
+}
+
 
 define void @foo_uint32_uint8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8:
@@ -221,22 +283,18 @@ entry:
 }
 
 
-define void @foo_uint32_uint8_double(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+define void @foo_uint32_uint8_double(<8 x i32>* %dest, <8 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8_double:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vldrb.u32 q0, [r1]
 ; CHECK-NEXT:    vldrb.u32 q1, [r1, #4]
-; CHECK-NEXT:    vldrb.u32 q2, [r1, #8]
-; CHECK-NEXT:    vldrb.u32 q3, [r1, #12]
 ; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
-; CHECK-NEXT:    vstrw.32 q3, [r0, #48]
-; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
 ; CHECK-NEXT:    bx lr
 entry:
-  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
-  %0 = zext <16 x i8> %wide.load to <16 x i32>
-  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  %wide.load = load <8 x i8>, <8 x i8>* %src, align 1
+  %0 = zext <8 x i8> %wide.load to <8 x i32>
+  store <8 x i32> %0, <8 x i32>* %dest, align 4
   ret void
 }
 
@@ -270,6 +328,25 @@ entry:
   ret void
 }
 
+define void @foo_uint32_uint8_quad(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint32_uint8_quad:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q0, [r1]
+; CHECK-NEXT:    vldrb.u32 q1, [r1, #4]
+; CHECK-NEXT:    vldrb.u32 q2, [r1, #8]
+; CHECK-NEXT:    vldrb.u32 q3, [r1, #12]
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    vstrw.32 q3, [r0, #48]
+; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i32>
+  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  ret void
+}
+
 
 define void @foo_int32_int8_both(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8_both:


        


More information about the llvm-commits mailing list