[llvm] r290196 - [ARM] Generate checks for shuffle tests using update_llc_test_checks.py.

Eli Friedman via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 20 11:33:24 PST 2016


Author: efriedma
Date: Tue Dec 20 13:33:24 2016
New Revision: 290196

URL: http://llvm.org/viewvc/llvm-project?rev=290196&view=rev
Log:
[ARM] Generate checks for shuffle tests using update_llc_test_checks.py.

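For anyone wanting to reproduce or refresh these assertions: the update
script is driven by the RUN lines in each test, so a minimal sketch of
the invocation, assuming it is run from the llvm source root with an
llc binary findable by the script (it looks on PATH by default and also
accepts an option pointing at a specific llc), is:

  $ utils/update_llc_test_checks.py test/CodeGen/ARM/vext.ll \
        test/CodeGen/ARM/vpadd.ll test/CodeGen/ARM/vuzp.ll

The script re-runs the llc command from each RUN line and rewrites the
CHECK/CHECK-NEXT blocks to match the current output, which is what
produced the expanded assertions in the diff below.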

Modified:
    llvm/trunk/test/CodeGen/ARM/vext.ll
    llvm/trunk/test/CodeGen/ARM/vpadd.ll
    llvm/trunk/test/CodeGen/ARM/vuzp.ll

Modified: llvm/trunk/test/CodeGen/ARM/vext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vext.ll?rev=290196&r1=290195&r2=290196&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vext.ll Tue Dec 20 13:33:24 2016
@@ -1,8 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - -lower-interleaved-accesses=false | FileCheck %s
 
 define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextd:
-;CHECK: vext
+; CHECK-LABEL: test_vextd:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vext.8 d16, d17, d16, #3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
@@ -10,8 +16,13 @@ define <8 x i8> @test_vextd(<8 x i8>* %A
 }
 
 define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextRd:
-;CHECK: vext
+; CHECK-LABEL: test_vextRd:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vext.8 d16, d17, d16, #5
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
@@ -19,8 +30,14 @@ define <8 x i8> @test_vextRd(<8 x i8>* %
 }
 
 define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextq:
-;CHECK: vext
+; CHECK-LABEL: test_vextq:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT:    vext.8 q8, q9, q8, #3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
@@ -28,8 +45,14 @@ define <16 x i8> @test_vextq(<16 x i8>*
 }
 
 define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextRq:
-;CHECK: vext
+; CHECK-LABEL: test_vextRq:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vext.8 q8, q9, q8, #7
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
@@ -37,8 +60,13 @@ define <16 x i8> @test_vextRq(<16 x i8>*
 }
 
 define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK-LABEL: test_vextd16:
-;CHECK: vext
+; CHECK-LABEL: test_vextd16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vext.16 d16, d17, d16, #3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -46,8 +74,14 @@ define <4 x i16> @test_vextd16(<4 x i16>
 }
 
 define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK-LABEL: test_vextq32:
-;CHECK: vext
+; CHECK-LABEL: test_vextq32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT:    vext.32 q8, q9, q8, #3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -57,8 +91,13 @@ define <4 x i32> @test_vextq32(<4 x i32>
 ; Undef shuffle indices should not prevent matching to VEXT:
 
 define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextd_undef:
-;CHECK: vext
+; CHECK-LABEL: test_vextd_undef:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vext.8 d16, d17, d16, #3
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
@@ -66,8 +105,14 @@ define <8 x i8> @test_vextd_undef(<8 x i
 }
 
 define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK-LABEL: test_vextRq_undef:
-;CHECK: vext
+; CHECK-LABEL: test_vextRq_undef:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vext.8 q8, q9, q8, #7
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
@@ -75,16 +120,26 @@ define <16 x i8> @test_vextRq_undef(<16
 }
 
 define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind {
-;CHECK-LABEL: test_vextq_undef_op2:
-;CHECK: vext
+; CHECK-LABEL: test_vextq_undef_op2:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vext.8 q8, q8, q8, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
   %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
   ret <16 x i8> %tmp1
 }
 
 define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind {
-;CHECK-LABEL: test_vextd_undef_op2:
-;CHECK: vext
+; CHECK-LABEL: test_vextd_undef_op2:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vext.8 d16, d16, d16, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 entry:
   %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
   ret <8 x i8> %tmp1
@@ -92,16 +147,26 @@ entry:
 
 
 define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind {
-;CHECK-LABEL: test_vextq_undef_op2_undef:
-;CHECK: vext
+; CHECK-LABEL: test_vextq_undef_op2_undef:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vext.8 q8, q8, q8, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
   %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
   ret <16 x i8> %tmp1
 }
 
 define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind {
-;CHECK-LABEL: test_vextd_undef_op2_undef:
-;CHECK: vext
+; CHECK-LABEL: test_vextd_undef_op2_undef:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vext.8 d16, d16, d16, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 entry:
   %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 1>
   ret <8 x i8> %tmp1
@@ -114,10 +179,16 @@ entry:
 ; Also checks interleaving of sources is handled correctly.
 ; Essence: a vext is used on %A and something saner than stack load/store for final result.
 define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK-LABEL: test_interleaved:
-;CHECK: vext.16
-;CHECK-NOT: vext.16
-;CHECK: vzip.16
+; CHECK-LABEL: test_interleaved:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vext.16 d16, d16, d17, #3
+; CHECK-NEXT:    vorr d17, d16, d16
+; CHECK-NEXT:    vuzp.16 d16, d17
+; CHECK-NEXT:    vzip.16 d16, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
         %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
@@ -126,8 +197,13 @@ define <4 x i16> @test_interleaved(<8 x
 
 ; An undef in the shuffle list should still be optimizable
 define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK-LABEL: test_undef:
-;CHECK: vzip.16
+; CHECK-LABEL: test_undef:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT:    vzip.16 d19, d16
+; CHECK-NEXT:    vmov r0, r1, d19
+; CHECK-NEXT:    mov pc, lr
         %tmp1 = load <8 x i16>, <8 x i16>* %A
         %tmp2 = load <8 x i16>, <8 x i16>* %B
         %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
@@ -138,11 +214,25 @@ define <4 x i16> @test_undef(<8 x i16>*
 ; Use illegal <32 x i16> type to produce such a shuffle after legalizing types.
 ; Try to look for fallback to by-element inserts.
 define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
-;CHECK-LABEL: test_multisource:
-;CHECK: vmov.16 [[REG:d[0-9]+]][0]
-;CHECK: vmov.16 [[REG]][1]
-;CHECK: vmov.16 [[REG]][2]
-;CHECK: vmov.16 [[REG]][3]
+; CHECK-LABEL: test_multisource:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    mov r1, r0
+; CHECK-NEXT:    add r2, r0, #48
+; CHECK-NEXT:    add r0, r0, #32
+; CHECK-NEXT:    vld1.16 {d16, d17}, [r1:128]!
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r2:128]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1:128]
+; CHECK-NEXT:    vmov.u16 r1, d16[0]
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT:    vmov.16 d22[0], r1
+; CHECK-NEXT:    vmov.u16 r0, d18[0]
+; CHECK-NEXT:    vmov.u16 r1, d16[0]
+; CHECK-NEXT:    vmov.16 d22[1], r0
+; CHECK-NEXT:    vmov.u16 r0, d20[0]
+; CHECK-NEXT:    vmov.16 d22[2], r1
+; CHECK-NEXT:    vmov.16 d22[3], r0
+; CHECK-NEXT:    vmov r0, r1, d22
+; CHECK-NEXT:    mov pc, lr
         %tmp1 = load <32 x i16>, <32 x i16>* %B
         %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
         ret <4 x i16> %tmp2
@@ -151,11 +241,19 @@ define <4 x i16> @test_multisource(<32 x
 ; We don't handle shuffles using more than half of a 128-bit vector.
 ; Again, test for fallback to by-element inserts.
 define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
-;CHECK-LABEL: test_largespan:
-;CHECK: vmov.16 [[REG:d[0-9]+]][0]
-;CHECK: vmov.16 [[REG]][1]
-;CHECK: vmov.16 [[REG]][2]
-;CHECK: vmov.16 [[REG]][3]
+; CHECK-LABEL: test_largespan:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov.u16 r1, d16[0]
+; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vmov.16 d18[0], r1
+; CHECK-NEXT:    vmov.u16 r1, d17[0]
+; CHECK-NEXT:    vmov.16 d18[1], r0
+; CHECK-NEXT:    vmov.u16 r0, d17[2]
+; CHECK-NEXT:    vmov.16 d18[2], r1
+; CHECK-NEXT:    vmov.16 d18[3], r0
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
         %tmp1 = load <8 x i16>, <8 x i16>* %B
         %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
         ret <4 x i16> %tmp2
@@ -167,22 +265,26 @@ define <4 x i16> @test_largespan(<8 x i1
 ; (There are probably better ways to lower this shuffle, but it's not
 ; really important.)
 define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK-LABEL: test_illegal:
-;CHECK:      vmov.u16
-;CHECK-NEXT: vmov.u16
-;CHECK-NEXT: vorr
-;CHECK-NEXT: vorr
-;CHECK-NEXT: vmov.16
-;CHECK-NEXT: vuzp.16
-;CHECK-NEXT: vmov.u16
-;CHECK-NEXT: vmov.16
-;CHECK-NEXT: vuzp.16
-;CHECK-NEXT: vmov.16
-;CHECK-NEXT: vmov.u16
-;CHECK-NEXT: vext.16
-;CHECK-NEXT: vmov.16
-;CHECK-NEXT: vmov r0, r1, d
-;CHECK-NEXT: vmov r2, r3, d
+; CHECK-LABEL: test_illegal:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vmov.u16 r1, d16[0]
+; CHECK-NEXT:    vmov.u16 r0, d17[3]
+; CHECK-NEXT:    vorr d22, d16, d16
+; CHECK-NEXT:    vorr d23, d16, d16
+; CHECK-NEXT:    vmov.16 d20[0], r1
+; CHECK-NEXT:    vuzp.16 d22, d23
+; CHECK-NEXT:    vmov.u16 r1, d17[1]
+; CHECK-NEXT:    vmov.16 d20[1], r0
+; CHECK-NEXT:    vuzp.16 d22, d18
+; CHECK-NEXT:    vmov.16 d20[2], r1
+; CHECK-NEXT:    vmov.u16 r0, d19[1]
+; CHECK-NEXT:    vext.16 d21, d16, d18, #3
+; CHECK-NEXT:    vmov.16 d20[3], r0
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
        %tmp1 = load <8 x i16>, <8 x i16>* %A
        %tmp2 = load <8 x i16>, <8 x i16>* %B
        %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
@@ -193,7 +295,14 @@ define <8 x i16> @test_illegal(<8 x i16>
 ; Make sure this doesn't crash
 define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
 ; CHECK-LABEL: test_elem_mismatch:
-; CHECK: vstr
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT:    vmov.32 r2, d16[0]
+; CHECK-NEXT:    vmov.32 r0, d17[0]
+; CHECK-NEXT:    vmov.16 d16[0], r2
+; CHECK-NEXT:    vmov.16 d16[1], r0
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov pc, lr
   %tmp0 = load <2 x i64>, <2 x i64>* %src, align 16
   %tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
   %tmp2 = extractelement <4 x i32> %tmp1, i32 0
@@ -207,32 +316,47 @@ define arm_aapcscc void @test_elem_misma
 }
 
 define <4 x i32> @test_reverse_and_extract(<2 x i32>* %A) {
+; CHECK-LABEL: test_reverse_and_extract:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.32 q9, q8
+; CHECK-NEXT:    vext.32 q8, q8, q9, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: test_reverse_and_extract
-  ; CHECK-NOT: vtrn
-  ; CHECK: vrev
-  ; CHECK: vext
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
   %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 0>
   ret <4 x i32> %0
 }
 
 define <4 x i32> @test_dup_and_extract(<2 x i32>* %A) {
+; CHECK-LABEL: test_dup_and_extract:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vdup.32 q9, d16[0]
+; CHECK-NEXT:    vext.32 q8, q9, q8, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: test_dup_and_extract
-  ; CHECK-NOT: vtrn
-  ; CHECK: vdup
-  ; CHECK: vext
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
   %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
   ret <4 x i32> %0
 }
 
 define <4 x i32> @test_zip_and_extract(<2 x i32>* %A) {
+; CHECK-LABEL: test_zip_and_extract:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vorr q9, q8, q8
+; CHECK-NEXT:    vorr q10, q8, q8
+; CHECK-NEXT:    vzip.32 q9, q10
+; CHECK-NEXT:    vext.32 q8, q9, q8, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: test_zip_and_extract
-  ; CHECK: vzip
-  ; CHECK: vext
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
   %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 1>
   ret <4 x i32> %0

Modified: llvm/trunk/test/CodeGen/ARM/vpadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vpadd.ll?rev=290196&r1=290195&r2=290196&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vpadd.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vpadd.ll Tue Dec 20 13:33:24 2016
@@ -1,8 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - -lower-interleaved-accesses=false | FileCheck %s
 
 define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK-LABEL: vpaddi8:
-;CHECK: vpadd.i8
+; CHECK-LABEL: vpaddi8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vpadd.i8 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -10,8 +16,13 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <
 }
 
 define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK-LABEL: vpaddi16:
-;CHECK: vpadd.i16
+; CHECK-LABEL: vpaddi16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vpadd.i16 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -19,8 +30,13 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A
 }
 
 define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK-LABEL: vpaddi32:
-;CHECK: vpadd.i32
+; CHECK-LABEL: vpaddi32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vpadd.i32 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -28,8 +44,13 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A
 }
 
 define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK-LABEL: vpaddf32:
-;CHECK: vpadd.f32
+; CHECK-LABEL: vpaddf32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r1]
+; CHECK-NEXT:    vldr d17, [r0]
+; CHECK-NEXT:    vpadd.f32 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x float>, <2 x float>* %A
 	%tmp2 = load <2 x float>, <2 x float>* %B
 	%tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
@@ -43,96 +64,150 @@ declare <2 x i32> @llvm.arm.neon.vpadd.v
 declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
 
 define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vpaddls8:
-;CHECK: vpaddl.s8
+; CHECK-LABEL: vpaddls8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.s8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
 	ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vpaddls16:
-;CHECK: vpaddl.s16
+; CHECK-LABEL: vpaddls16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
 	ret <2 x i32> %tmp2
 }
 
 define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vpaddls32:
-;CHECK: vpaddl.s32
+; CHECK-LABEL: vpaddls32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.s32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
 	ret <1 x i64> %tmp2
 }
 
 define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vpaddlu8:
-;CHECK: vpaddl.u8
+; CHECK-LABEL: vpaddlu8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.u8 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
 	ret <4 x i16> %tmp2
 }
 
 define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vpaddlu16:
-;CHECK: vpaddl.u16
+; CHECK-LABEL: vpaddlu16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.u16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
 	ret <2 x i32> %tmp2
 }
 
 define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vpaddlu32:
-;CHECK: vpaddl.u32
+; CHECK-LABEL: vpaddlu32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vpaddl.u32 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
 	ret <1 x i64> %tmp2
 }
 
 define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: vpaddlQs8:
-;CHECK: vpaddl.s8
+; CHECK-LABEL: vpaddlQs8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.s8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
 	ret <8 x i16> %tmp2
 }
 
 define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: vpaddlQs16:
-;CHECK: vpaddl.s16
+; CHECK-LABEL: vpaddlQs16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.s16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
-;CHECK-LABEL: vpaddlQs32:
-;CHECK: vpaddl.s32
+; CHECK-LABEL: vpaddlQs32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.s32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
 	ret <2 x i64> %tmp2
 }
 
 define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: vpaddlQu8:
-;CHECK: vpaddl.u8
+; CHECK-LABEL: vpaddlQu8:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u8 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
 	ret <8 x i16> %tmp2
 }
 
 define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: vpaddlQu16:
-;CHECK: vpaddl.u16
+; CHECK-LABEL: vpaddlQu16:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u16 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
 	ret <4 x i32> %tmp2
 }
 
 define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
-;CHECK-LABEL: vpaddlQu32:
-;CHECK: vpaddl.u32
+; CHECK-LABEL: vpaddlQu32:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vpaddl.u32 q8, q8
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
 	ret <2 x i64> %tmp2
@@ -140,7 +215,23 @@ define <2 x i64> @vpaddlQu32(<4 x i32>*
 
 ; Test AddCombine optimization that generates a vpaddl.s
 define void @addCombineToVPADDL() nounwind ssp {
-; CHECK: vpaddl.s8
+; CHECK-LABEL: addCombineToVPADDL:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r11}
+; CHECK-NEXT:    push {r11}
+; CHECK-NEXT:    .setfp r11, sp
+; CHECK-NEXT:    mov r11, sp
+; CHECK-NEXT:    .pad #44
+; CHECK-NEXT:    sub sp, sp, #44
+; CHECK-NEXT:    bic sp, sp, #15
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT:    vpaddl.s8 q8, q8
+; CHECK-NEXT:    vmovn.i16 d16, q8
+; CHECK-NEXT:    vstr d16, [sp, #8]
+; CHECK-NEXT:    mov sp, r11
+; CHECK-NEXT:    pop {r11}
+; CHECK-NEXT:    mov pc, lr
   %cbcr = alloca <16 x i8>, align 16
   %X = alloca <8 x i8>, align 8
   %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -155,8 +246,12 @@ define void @addCombineToVPADDL() nounwi
 ; Legalization produces a EXTRACT_VECTOR_ELT DAG node which performs an extend from
 ; i16 to i32. In this case the input for the formed VPADDL needs to be a vector of i16s.
 define <2 x i16> @fromExtendingExtractVectorElt(<4 x i16> %in) {
-;CHECK-LABEL: fromExtendingExtractVectorElt:
-;CHECK: vpaddl.s16
+; CHECK-LABEL: fromExtendingExtractVectorElt:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    vmov d16, r0, r1
+; CHECK-NEXT:    vpaddl.s16 d16, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
   %tmp1 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 0, i32 2>
   %tmp2 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 1, i32 3>
   %x = add <2 x i16> %tmp2, %tmp1

Modified: llvm/trunk/test/CodeGen/ARM/vuzp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vuzp.ll?rev=290196&r1=290195&r2=290196&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vuzp.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vuzp.ll Tue Dec 20 13:33:24 2016
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
 
 define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -20,11 +21,11 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8
 define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-LABEL: vuzpi8_Qres:
 ; CHECK:       @ BB#0:
-; CHECK-NEXT:    vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT:    vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT:    vuzp.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT:    vmov r0, r1, [[LDR0]]
-; CHECK-NEXT:    vmov r2, r3, [[LDR1]]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vuzp.8 d16, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
 ; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
@@ -52,11 +53,11 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A,
 define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ; CHECK-LABEL: vuzpi16_Qres:
 ; CHECK:       @ BB#0:
-; CHECK-NEXT:    vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT:    vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT:    vuzp.16 [[LDR0]], [[LDR1]]
-; CHECK-NEXT:    vmov r0, r1, [[LDR0]]
-; CHECK-NEXT:    vmov r2, r3, [[LDR1]]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vuzp.16 d16, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
 ; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
@@ -220,11 +221,11 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>*
 define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-LABEL: vuzpi8_undef_Qres:
 ; CHECK:       @ BB#0:
-; CHECK-NEXT:    vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT:    vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT:    vuzp.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT:    vmov r0, r1, [[LDR0]]
-; CHECK-NEXT:    vmov r2, r3, [[LDR1]]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vuzp.8 d16, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
 ; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
@@ -266,9 +267,16 @@ define <16 x i16> @vuzpQi16_undef_QQres(
 }
 
 define <8 x i16> @vuzp_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {
+; CHECK-LABEL: vuzp_lower_shufflemask_undef:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vorr q9, q8, q8
+; CHECK-NEXT:    vuzp.16 q8, q9
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: vuzp_lower_shufflemask_undef
-  ; CHECK: vuzp
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
   %0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 3, i32 5, i32 7>
@@ -276,10 +284,17 @@ entry:
 }
 
 define <4 x i32> @vuzp_lower_shufflemask_zeroed(<2 x i32>* %A, <2 x i32>* %B) {
+; CHECK-LABEL: vuzp_lower_shufflemask_zeroed:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vdup.32 q9, d16[0]
+; CHECK-NEXT:    vuzp.32 q8, q9
+; CHECK-NEXT:    vext.32 q8, q9, q9, #2
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: vuzp_lower_shufflemask_zeroed
-  ; CHECK-NOT: vtrn
-  ; CHECK: vuzp
   %tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = load <2 x i32>, <2 x i32>* %B
   %0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 0, i32 1, i32 3>
@@ -287,10 +302,15 @@ entry:
 }
 
 define void @vuzp_rev_shufflemask_vtrn(<2 x i32>* %A, <2 x i32>* %B, <4 x i32>* %C) {
+; CHECK-LABEL: vuzp_rev_shufflemask_vtrn:
+; CHECK:       @ BB#0: @ %entry
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d16, [r0]
+; CHECK-NEXT:    vrev64.32 q9, q8
+; CHECK-NEXT:    vuzp.32 q8, q9
+; CHECK-NEXT:    vst1.64 {d18, d19}, [r2]
+; CHECK-NEXT:    mov pc, lr
 entry:
-  ; CHECK-LABEL: vuzp_rev_shufflemask_vtrn
-  ; CHECK-NOT: vtrn
-  ; CHECK: vuzp
   %tmp1 = load <2 x i32>, <2 x i32>* %A
   %tmp2 = load <2 x i32>, <2 x i32>* %B
   %0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
@@ -302,11 +322,33 @@ define <8 x i8> @vuzp_trunc(<8 x i8> %in
 ; In order to create the select we need to truncate the vcgt result from a vector of i32 to a vector of i8.
 ; This results in a build_vector with mismatched types. We will generate two vmovn.i32 instructions to
 ; truncate from i32 to i16 and one vuzp to perform the final truncation for i8.
-; CHECK-LABEL: vuzp_trunc
-; CHECK: vmovn.i32
-; CHECK: vmovn.i32
-; CHECK: vuzp
-; CHECK: vbsl
+; CHECK-LABEL: vuzp_trunc:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    add r12, sp, #48
+; CHECK-NEXT:    add lr, sp, #16
+; CHECK-NEXT:    add r4, sp, #64
+; CHECK-NEXT:    add r5, sp, #32
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r5]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r4]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [lr]
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r12]
+; CHECK-NEXT:    vcgt.u32 q8, q9, q8
+; CHECK-NEXT:    vcgt.u32 q9, q11, q10
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmovn.i32 d17, q9
+; CHECK-NEXT:    vmov.i8 d18, #0x7
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vuzp.8 d17, d16
+; CHECK-NEXT:    vneg.s8 d16, d18
+; CHECK-NEXT:    vshl.i8 d17, d17, #7
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vshl.s8 d16, d17, d16
+; CHECK-NEXT:    vbsl d16, d19, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r4, r5, r11, lr}
+; CHECK-NEXT:    mov pc, lr
   %c = icmp ult <8 x i32> %cmp0, %cmp1
   %res = select <8 x i1> %c, <8 x i8> %in0, <8 x i8> %in1
   ret <8 x i8> %res
@@ -316,11 +358,31 @@ define <8 x i8> @vuzp_trunc(<8 x i8> %in
 ; We need to extend the loaded <4 x i8> to <4 x i16>. Otherwise we wouldn't be able
 ; to perform the vuzp and get the vbsl mask.
 define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
+; CHECK-LABEL: vuzp_trunc_and_shuffle:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr r12, [sp, #40]
+; CHECK-NEXT:    add lr, sp, #24
+; CHECK-NEXT:    add r4, sp, #8
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r4]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [lr]
+; CHECK-NEXT:    vld1.32 {d20[0]}, [r12:32]
+; CHECK-NEXT:    vcgt.u32 q8, q9, q8
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov.i8 d17, #0x7
+; CHECK-NEXT:    vneg.s8 d17, d17
+; CHECK-NEXT:    vmovl.u8 q9, d20
+; CHECK-NEXT:    vuzp.8 d16, d18
+; CHECK-NEXT:    vshl.i8 d16, d16, #7
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vshl.s8 d16, d16, d17
+; CHECK-NEXT:    vbsl d16, d19, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r4, lr}
+; CHECK-NEXT:    mov pc, lr
                          <4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
-; CHECK-LABEL: vuzp_trunc_and_shuffle
-; CHECK: vmovl
-; CHECK: vuzp
-; CHECK: vbsl
   %cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
   %cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
   %c0 = icmp ult <4 x i32> %cmp0, %cmp1
@@ -332,10 +394,28 @@ define <8 x i8> @vuzp_trunc_and_shuffle(
 ; Use an undef value for the <4 x i8> that is being shuffled with the compare result.
 ; This produces a build_vector with some of the operands undefs.
 define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1,
+; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    add lr, sp, #8
+; CHECK-NEXT:    vld1.64 {d16, d17}, [lr]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vcgt.u32 q8, q9, q8
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov.i8 d17, #0x7
+; CHECK-NEXT:    vuzp.8 d16, d18
+; CHECK-NEXT:    vneg.s8 d17, d17
+; CHECK-NEXT:    vshl.i8 d16, d16, #7
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vshl.s8 d16, d16, d17
+; CHECK-NEXT:    vbsl d16, d19, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
                          <4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
-; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right
-; CHECK: vuzp
-; CHECK: vbsl
   %cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
   %cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
   %c0 = icmp ult <4 x i32> %cmp0, %cmp1
@@ -345,10 +425,40 @@ define <8 x i8> @vuzp_trunc_and_shuffle_
 }
 
 define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
+; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    add r12, sp, #24
+; CHECK-NEXT:    add lr, sp, #8
+; CHECK-NEXT:    vldr d20, .LCPI22_0
+; CHECK-NEXT:    vld1.64 {d16, d17}, [lr]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT:    vcgt.u32 q8, q9, q8
+; CHECK-NEXT:    vmov d18, r2, r3
+; CHECK-NEXT:    vmov d19, r0, r1
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov.i8 d17, #0x7
+; CHECK-NEXT:    vtbl.8 d16, {d16}, d20
+; CHECK-NEXT:    vneg.s8 d17, d17
+; CHECK-NEXT:    vshl.i8 d16, d16, #7
+; CHECK-NEXT:    vshl.s8 d16, d16, d17
+; CHECK-NEXT:    vbsl d16, d19, d18
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ BB#1:
+; CHECK-NEXT:  .LCPI22_0:
+; CHECK-NEXT:    .byte 255 @ 0xff
+; CHECK-NEXT:    .byte 255 @ 0xff
+; CHECK-NEXT:    .byte 255 @ 0xff
+; CHECK-NEXT:    .byte 255 @ 0xff
+; CHECK-NEXT:    .byte 0 @ 0x0
+; CHECK-NEXT:    .byte 2 @ 0x2
+; CHECK-NEXT:    .byte 4 @ 0x4
+; CHECK-NEXT:    .byte 6 @ 0x6
                          <4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
-; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left
-; CHECK: vuzp
-; CHECK: vbsl
   %cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
   %cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
   %c0 = icmp ult <4 x i32> %cmp0, %cmp1
@@ -360,9 +470,79 @@ define <8 x i8> @vuzp_trunc_and_shuffle_
 ; We're using large data types here, and we have to fill with undef values until we
 ; get some vector size that we can represent.
 define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
+; CHECK-LABEL: vuzp_wide_type:
+; CHECK:       @ BB#0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    .setfp r11, sp, #16
+; CHECK-NEXT:    add r11, sp, #16
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    bic sp, sp, #15
+; CHECK-NEXT:    add r5, r11, #52
+; CHECK-NEXT:    add r7, r11, #32
+; CHECK-NEXT:    add r4, r11, #44
+; CHECK-NEXT:    add r6, r11, #24
+; CHECK-NEXT:    add r12, r11, #60
+; CHECK-NEXT:    add lr, r11, #40
+; CHECK-NEXT:    vld1.32 {d17[0]}, [r7:32]
+; CHECK-NEXT:    vld1.32 {d19[0]}, [r5:32]
+; CHECK-NEXT:    vld1.32 {d22[0]}, [r12:32]
+; CHECK-NEXT:    ldr r12, [r11, #64]
+; CHECK-NEXT:    vld1.32 {d20[0]}, [lr:32]
+; CHECK-NEXT:    add r7, r11, #48
+; CHECK-NEXT:    add r5, r11, #28
+; CHECK-NEXT:    vld1.32 {d16[0]}, [r6:32]
+; CHECK-NEXT:    vld1.32 {d18[0]}, [r4:32]
+; CHECK-NEXT:    add r6, r11, #56
+; CHECK-NEXT:    add r4, r11, #36
+; CHECK-NEXT:    vcgt.u32 q10, q11, q10
+; CHECK-NEXT:    vld1.32 {d19[1]}, [r6:32]
+; CHECK-NEXT:    vld1.32 {d17[1]}, [r4:32]
+; CHECK-NEXT:    add r6, r12, #4
+; CHECK-NEXT:    vld1.32 {d18[1]}, [r7:32]
+; CHECK-NEXT:    vld1.32 {d16[1]}, [r5:32]
+; CHECK-NEXT:    ldr r7, [r12]
+; CHECK-NEXT:    vcgt.u32 q8, q9, q8
+; CHECK-NEXT:    vmovn.i32 d18, q10
+; CHECK-NEXT:    vmov.32 d21[0], r7
+; CHECK-NEXT:    vmovn.i32 d16, q8
+; CHECK-NEXT:    vmov.u8 r7, d21[3]
+; CHECK-NEXT:    vmov.i8 d17, #0x7
+; CHECK-NEXT:    vuzp.8 d16, d18
+; CHECK-NEXT:    vmov.8 d23[0], r7
+; CHECK-NEXT:    vneg.s8 d17, d17
+; CHECK-NEXT:    add r7, r11, #8
+; CHECK-NEXT:    vldr d18, .LCPI23_0
+; CHECK-NEXT:    vld1.8 {d23[1]}, [r6]
+; CHECK-NEXT:    vshl.i8 d16, d16, #7
+; CHECK-NEXT:    vshl.s8 d20, d16, d17
+; CHECK-NEXT:    vmov.i8 q8, #0x7
+; CHECK-NEXT:    vneg.s8 q8, q8
+; CHECK-NEXT:    vtbl.8 d22, {d20, d21}, d18
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r7]
+; CHECK-NEXT:    vshl.i8 q10, q11, #7
+; CHECK-NEXT:    vmov d23, r2, r3
+; CHECK-NEXT:    vmov d22, r0, r1
+; CHECK-NEXT:    vshl.s8 q8, q10, q8
+; CHECK-NEXT:    vbsl q8, q11, q9
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    sub sp, r11, #16
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 3
+; CHECK-NEXT:  @ BB#1:
+; CHECK-NEXT:  .LCPI23_0:
+; CHECK-NEXT:    .byte 0 @ 0x0
+; CHECK-NEXT:    .byte 1 @ 0x1
+; CHECK-NEXT:    .byte 2 @ 0x2
+; CHECK-NEXT:    .byte 3 @ 0x3
+; CHECK-NEXT:    .byte 4 @ 0x4
+; CHECK-NEXT:    .byte 8 @ 0x8
+; CHECK-NEXT:    .byte 9 @ 0x9
+; CHECK-NEXT:    .byte 10 @ 0xa
                             <5 x i32> %cmp0, <5 x i32> %cmp1, <5 x i8> *%cmp2_ptr) {
-; CHECK-LABEL: vuzp_wide_type
-; CHECK: vbsl
   %cmp2_load = load <5 x i8>, <5 x i8> * %cmp2_ptr, align 4
   %cmp2 = trunc <5 x i8> %cmp2_load to <5 x i1>
   %c0 = icmp ult <5 x i32> %cmp0, %cmp1
