[llvm] 806b47a - [ARM] Regenerate a couple of tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 20 02:55:40 PDT 2021


Author: David Green
Date: 2021-04-20T10:54:41+01:00
New Revision: 806b47ade3f6be6ea1235536db0f7147ac6d6eec

URL: https://github.com/llvm/llvm-project/commit/806b47ade3f6be6ea1235536db0f7147ac6d6eec
DIFF: https://github.com/llvm/llvm-project/commit/806b47ade3f6be6ea1235536db0f7147ac6d6eec.diff

LOG: [ARM] Regenerate a couple of tests. NFC

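(For reference: the regenerated assertions come from utils/update_llc_test_checks.py, as the NOTE line in the diff states. A typical invocation, assuming a locally built llc at build/bin/llc, would be:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/ARM/big-endian-vector-callee.ll \
        llvm/test/CodeGen/ARM/combine-vmovdrr.ll

The script runs each RUN line and rewrites the CHECK lines in place, which is why the hand-written SOFT/HARD patterns below are replaced by full per-prefix check bodies.)
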
Added: 
    

Modified: 
    llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
    llvm/test/CodeGen/ARM/combine-vmovdrr.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll b/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
index 331ef9423736e..73632abcaa7cd 100644
--- a/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
+++ b/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
@@ -1,1172 +1,2443 @@
-; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi soft %s -o - | FileCheck %s -check-prefix CHECK -check-prefix SOFT
-; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi hard %s -o - | FileCheck %s -check-prefix CHECK -check-prefix HARD
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi soft %s -o - | FileCheck %s -check-prefix SOFT
+; RUN: llc -mtriple armeb-eabi -mattr v7,neon -float-abi hard %s -o - | FileCheck %s -check-prefix HARD
 
-; CHECK-LABEL: test_i64_f64:
 define i64 @test_i64_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_i64_v1i64:
 define i64 @test_i64_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_i64_v2f32:
 define i64 @test_i64_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_i64_v2i32:
 define i64 @test_i64_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_i64_v4i16:
 define i64 @test_i64_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_i64_v8i8:
 define i64 @test_i64_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_i64_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r0, r2, d16
+; SOFT-NEXT:    adds r1, r0, r0
+; SOFT-NEXT:    adc r0, r2, r2
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_i64_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vmov r0, r2, d16
+; HARD-NEXT:    adds r1, r0, r0
+; HARD-NEXT:    adc r0, r2, r2
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to i64
     %3 = add i64 %2, %2
     ret i64 %3
-; CHECK: adds r1
-; CHECK: adc r0
 }
 
-; CHECK-LABEL: test_f64_i64:
 define double @test_f64_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_f64_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_f64_v1i64:
 define double @test_f64_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f64_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_f64_v2f32:
 define double @test_f64_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f64_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_f64_v2i32:
 define double @test_f64_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f64_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_f64_v4i16:
 define double @test_f64_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f64_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_f64_v8i8:
 define double @test_f64_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f64_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_f64_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to double
     %3 = fadd double %2, %2
     ret double %3
-; SOFT: vadd.f64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v1i64_i64:
 define <1 x i64> @test_v1i64_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_v1i64_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v1i64_f64:
 define <1 x i64> @test_v1i64_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v1i64_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v1i64_v2f32:
 define <1 x i64> @test_v1i64_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v1i64_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v1i64_v2i32:
 define <1 x i64> @test_v1i64_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v1i64_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v1i64_v4i16:
 define <1 x i64> @test_v1i64_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v1i64_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v1i64_v8i8:
 define <1 x i64> @test_v1i64_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v1i64_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v1i64_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vadd.i64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to <1 x i64>
     %3 = add <1 x i64> %2, %2
     ret <1 x i64> %3
-; SOFT: vadd.i64 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vadd.i64 d0
 }
 
-; CHECK-LABEL: test_v2f32_i64:
 define <2 x float> @test_v2f32_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_v2f32_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2f32_f64:
 define <2 x float> @test_v2f32_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2f32_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2f32_v1i64:
 define <2 x float> @test_v2f32_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2f32_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2f32_v2i32:
 define <2 x float> @test_v2f32_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2f32_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2f32_v4i16:
 define <2 x float> @test_v2f32_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2f32_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev32.16 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev32.16 d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2f32_v8i8:
 define <2 x float> @test_v2f32_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2f32_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev32.8 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f32_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev32.8 d16, d16
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to <2 x float>
     %3 = fadd <2 x float> %2, %2
     ret <2 x float> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_i64:
 define <2 x i32> @test_v2i32_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_v2i32_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_f64:
 define <2 x i32> @test_v2i32_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i32_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_v1i64:
 define <2 x i32> @test_v2i32_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i32_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_v2f32:
 define <2 x i32> @test_v2i32_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i32_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_v4i16:
 define <2 x i32> @test_v2i32_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i32_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev32.16 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev32.16 d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v2i32_v8i8:
 define <2 x i32> @test_v2i32_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i32_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev32.8 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i32_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev32.8 d16, d16
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev64.32 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to <2 x i32>
     %3 = add <2 x i32> %2, %2
     ret <2 x i32> %3
-; SOFT: vrev64.32 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.32 d0
 }
 
-; CHECK-LABEL: test_v4i16_i64:
 define <4 x i16> @test_v4i16_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_v4i16_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v4i16_f64:
 define <4 x i16> @test_v4i16_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i16_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v4i16_v1i64:
 define <4 x i16> @test_v4i16_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i16_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vrev64.16 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v4i16_v2f32:
 define <4 x i16> @test_v4i16_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i16_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev32.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev32.16 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v4i16_v2i32:
 define <4 x i16> @test_v4i16_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i16_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev32.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev32.16 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v4i16_v8i8:
 define <4 x i16> @test_v4i16_v8i8(<8 x i8> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.8 [[REG]]
-; HARD: vrev64.8 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i16_v8i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev16.8 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i16_v8i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 d16, d0
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev16.8 d16, d16
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev64.16 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i8> %p, %p
     %2 = bitcast <8 x i8> %1 to <4 x i16>
     %3 = add <4 x i16> %2, %2
     ret <4 x i16> %3
-; SOFT: vrev64.16 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.16 d0
 }
 
-; CHECK-LABEL: test_v8i8_i64:
 define <8 x i8> @test_v8i8_i64(i64 %p) {
-; CHECK: adds r1
-; CHECK: adc r0
+; SOFT-LABEL: test_v8i8_i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    adds r1, r1, r1
+; SOFT-NEXT:    adc r0, r0, r0
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    adds r1, r1, r1
+; HARD-NEXT:    adc r0, r0, r0
+; HARD-NEXT:    vmov d16, r1, r0
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add i64 %p, %p
     %2 = bitcast i64 %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_v8i8_f64:
 define <8 x i8> @test_v8i8_f64(double %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 [[REG]]
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i8_f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.f64 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd double %p, %p
     %2 = bitcast double %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_v8i8_v1i64:
 define <8 x i8> @test_v8i8_v1i64(<1 x i64> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vadd.i64 [[REG]]
-; HARD: vadd.i64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i8_v1i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_v1i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 d16, d0, d0
+; HARD-NEXT:    vrev64.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <1 x i64> %p, %p
     %2 = bitcast <1 x i64> %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_v8i8_v2f32:
 define <8 x i8> @test_v8i8_v2f32(<2 x float> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i8_v2f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.f32 d16, d16, d16
+; SOFT-NEXT:    vrev32.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_v2f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.f32 d16, d16, d16
+; HARD-NEXT:    vrev32.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x float> %p, %p
     %2 = bitcast <2 x float> %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_v8i8_v2i32:
 define <8 x i8> @test_v8i8_v2i32(<2 x i32> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.32 [[REG]]
-; HARD: vrev64.32 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i8_v2i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 d16, d16
+; SOFT-NEXT:    vadd.i32 d16, d16, d16
+; SOFT-NEXT:    vrev32.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_v2i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 d16, d0
+; HARD-NEXT:    vadd.i32 d16, d16, d16
+; HARD-NEXT:    vrev32.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i32> %p, %p
     %2 = bitcast <2 x i32> %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_v8i8_v4i16:
 define <8 x i8> @test_v8i8_v4i16(<4 x i16> %p) {
-; SOFT: vmov [[REG:d[0-9]+]], r1, r0
-; SOFT: vrev64.16 [[REG]]
-; HARD: vrev64.16 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i8_v4i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 d16, d16
+; SOFT-NEXT:    vadd.i16 d16, d16, d16
+; SOFT-NEXT:    vrev16.8 d16, d16
+; SOFT-NEXT:    vadd.i8 d16, d16, d16
+; SOFT-NEXT:    vrev64.8 d16, d16
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i8_v4i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 d16, d0
+; HARD-NEXT:    vadd.i16 d16, d16, d16
+; HARD-NEXT:    vrev16.8 d16, d16
+; HARD-NEXT:    vadd.i8 d16, d16, d16
+; HARD-NEXT:    vrev64.8 d0, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i16> %p, %p
     %2 = bitcast <4 x i16> %1 to <8 x i8>
     %3 = add <8 x i8> %2, %2
     ret <8 x i8> %3
-; SOFT: vrev64.8 [[REG:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG]]
-; HARD: vrev64.8 d0
 }
 
-; CHECK-LABEL: test_f128_v2f64:
 define fp128 @test_f128_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64 d{{[0-9]+}}, d1
-; HARD: vadd.f64 d{{[0-9]+}}, d0
+; SOFT-LABEL: test_f128_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vrev64.32 q8, q9
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_f128_v2i64:
 define fp128 @test_f128_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_f128_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_f128_v4f32:
 define fp128 @test_f128_v4f32(<4 x float> %p) {
-; HARD: vrev64.32 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_f128_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_f128_v4i32:
 define fp128 @test_f128_v4i32(<4 x i32> %p) {
-; HARD: vrev64.32 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_f128_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_f128_v8i16:
 define fp128 @test_f128_v8i16(<8 x i16> %p) {
-; HARD: vrev64.16 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_f128_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_f128_v16i8:
 define fp128 @test_f128_v16i8(<16 x i8> %p) {
-; HARD: vrev64.8 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_f128_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    add r12, sp, #12
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vmov.32 r0, d16[0]
+; SOFT-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #8
+; SOFT-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; SOFT-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; SOFT-NEXT:    add r12, sp, #4
+; SOFT-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; SOFT-NEXT:    vmov.32 r1, d16[1]
+; SOFT-NEXT:    vmov.32 r2, d17[0]
+; SOFT-NEXT:    vmov.32 r3, d17[1]
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_f128_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    add r12, sp, #12
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vmov.32 r0, d16[0]
+; HARD-NEXT:    vst1.32 {d17[1]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #8
+; HARD-NEXT:    vst1.32 {d16[0]}, [sp:32]
+; HARD-NEXT:    vst1.32 {d17[0]}, [r12:32]
+; HARD-NEXT:    add r12, sp, #4
+; HARD-NEXT:    vst1.32 {d16[1]}, [r12:32]
+; HARD-NEXT:    vmov.32 r1, d16[1]
+; HARD-NEXT:    vmov.32 r2, d17[0]
+; HARD-NEXT:    vmov.32 r3, d17[1]
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to fp128
     %3 = fadd fp128 %2, %2
     ret fp128 %3
-; CHECK: vst1.32 {d{{[0-9]+}}[1]}, [{{[a-z0-9]+}}:32]
-; CHECK: vst1.32 {d{{[0-9]+}}[0]}, [{{[a-z0-9]+}}:32]
 }
 
-; CHECK-LABEL: test_v2f64_f128:
 define <2 x double> @test_v2f64_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v2f64_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v2f64_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2f64_v2i64:
 define <2 x double> @test_v2f64_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2f64_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f64_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2f64_v4f32:
 define <2 x double> @test_v2f64_v4f32(<4 x float> %p) {
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2f64_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f64_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2f64_v4i32:
 define <2 x double> @test_v2f64_v4i32(<4 x i32> %p) {
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2f64_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f64_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2f64_v8i16:
 define <2 x double> @test_v2f64_v8i16(<8 x i16> %p) {
-; HARD: vrev64.16  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2f64_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f64_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q8, q8
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2f64_v16i8:
 define <2 x double> @test_v2f64_v16i8(<16 x i8> %p) {
-; HARD: vrev64.8  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2f64_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.f64 d19, d17, d17
+; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vmov r1, r0, d18
+; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2f64_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q8, q8
+; HARD-NEXT:    vadd.f64 d1, d17, d17
+; HARD-NEXT:    vadd.f64 d0, d16, d16
+; HARD-NEXT:    bx lr
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to <2 x double>
     %3 = fadd <2 x double> %2, %2
     ret <2 x double> %3
-; SOFT: vadd.f64 [[REG1:d[0-9]+]]
-; SOFT: vadd.f64 [[REG2:d[0-9]+]]
-; SOFT: vmov r1, r0, [[REG2]]
-; SOFT: vmov r3, r2, [[REG1]]
-; HARD: vadd.f64 d1
-; HARD: vadd.f64 d0
 }
 
-; CHECK-LABEL: test_v2i64_f128:
 define <2 x i64> @test_v2i64_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v2i64_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v2i64_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v2i64_v2f64:
 define <2 x i64> @test_v2i64_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64  d{{[0-9]+}}, d1
-; HARD: vadd.f64  d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v2i64_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vadd.i64 q8, q9, q9
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i64_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v2i64_v4f32:
 define <2 x i64> @test_v2i64_v4f32(<4 x float> %p) {
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2i64_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i64_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v2i64_v4i32:
 define <2 x i64> @test_v2i64_v4i32(<4 x i32> %p) {
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2i64_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i64_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    bx lr
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v2i64_v8i16:
 define <2 x i64> @test_v2i64_v8i16(<8 x i16> %p) {
-; HARD: vrev64.16  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2i64_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i64_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q8, q8
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    bx lr
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v2i64_v16i8:
 define <2 x i64> @test_v2i64_v16i8(<16 x i8> %p) {
-; HARD: vrev64.8  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v2i64_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v2i64_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q8, q8
+; HARD-NEXT:    vadd.i64 q0, q8, q8
+; HARD-NEXT:    bx lr
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to <2 x i64>
     %3 = add <2 x i64> %2, %2
     ret <2 x i64> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vadd.i64 q0
 }
 
-; CHECK-LABEL: test_v4f32_f128:
 define <4 x float> @test_v4f32_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v4f32_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v4f32_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4f32_v2f64:
 define <4 x float> @test_v4f32_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64  d{{[0-9]+}}, d1
-; HARD: vadd.f64  d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4f32_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vrev64.32 q8, q9
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4f32_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4f32_v2i64:
 define <4 x float> @test_v4f32_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4f32_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4f32_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4f32_v4i32:
 define <4 x float> @test_v4f32_v4i32(<4 x i32> %p) {
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4f32_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4f32_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4f32_v8i16:
 define <4 x float> @test_v4f32_v8i16(<8 x i16> %p) {
-; HARD: vrev64.16  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4f32_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4f32_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4f32_v16i8:
 define <4 x float> @test_v4f32_v16i8(<16 x i8> %p) {
-; HARD: vrev64.8  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4f32_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4f32_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to <4 x float>
     %3 = fadd <4 x float> %2, %2
     ret <4 x float> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_f128:
 define <4 x i32> @test_v4i32_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v4i32_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v4i32_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_v2f64:
 define <4 x i32> @test_v4i32_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64  d{{[0-9]+}}, d1
-; HARD: vadd.f64  d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v4i32_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vrev64.32 q8, q9
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i32_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_v2i64:
 define <4 x i32> @test_v4i32_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4i32_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i32_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    vrev64.32 q8, q8
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_v4f32:
 define <4 x i32> @test_v4i32_v4f32(<4 x float> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4i32_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i32_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_v8i16:
 define <4 x i32> @test_v4i32_v8i16(<8 x i16> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.16  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4i32_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i32_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v4i32_v16i8:
 define <4 x i32> @test_v4i32_v16i8(<16 x i8> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.8  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v4i32_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v4i32_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev64.32 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to <4 x i32>
     %3 = add <4 x i32> %2, %2
     ret <4 x i32> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.32 q0
 }
 
-; CHECK-LABEL: test_v8i16_f128:
 define <8 x i16> @test_v8i16_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v8i16_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v8i16_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v8i16_v2f64:
 define <8 x i16> @test_v8i16_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64  d{{[0-9]+}}, d1
-; HARD: vadd.f64  d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v8i16_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vrev64.16 q8, q9
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i16_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.16 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v8i16_v2i64:
 define <8 x i16> @test_v8i16_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v8i16_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i16_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    vrev64.16 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v8i16_v4f32:
 define <8 x i16> @test_v8i16_v4f32(<4 x float> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v8i16_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i16_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v8i16_v4i32:
 define <8 x i16> @test_v8i16_v4i32(<4 x i32> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.32  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v8i16_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev32.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i16_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev32.16 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v8i16_v16i8:
 define <8 x i16> @test_v8i16_v16i8(<16 x i8> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.8 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v8i16_v16i8:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev16.8 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v8i16_v16i8:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.8 q8, q0
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev16.8 q8, q8
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev64.16 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <16 x i8> %p, %p
     %2 = bitcast <16 x i8> %1 to <8 x i16>
     %3 = add <8 x i16> %2, %2
     ret <8 x i16> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.16 q0
 }
 
-; CHECK-LABEL: test_v16i8_f128:
 define <16 x i8> @test_v16i8_f128(fp128 %p) {
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r2
-; CHECK: vmov.32 [[REG1:d[0-9]+]][0], r0
-; CHECK: vmov.32 [[REG2]][1], r3
-; CHECK: vmov.32 [[REG1]][1], r1
+; SOFT-LABEL: test_v16i8_f128:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    .save {r11, lr}
+; SOFT-NEXT:    push {r11, lr}
+; SOFT-NEXT:    .pad #16
+; SOFT-NEXT:    sub sp, sp, #16
+; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
+; SOFT-NEXT:    bl __addtf3
+; SOFT-NEXT:    vmov.32 d17[0], r2
+; SOFT-NEXT:    vmov.32 d16[0], r0
+; SOFT-NEXT:    vmov.32 d17[1], r3
+; SOFT-NEXT:    vmov.32 d16[1], r1
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    add sp, sp, #16
+; SOFT-NEXT:    pop {r11, pc}
+;
+; HARD-LABEL: test_v16i8_f128:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    .pad #16
+; HARD-NEXT:    sub sp, sp, #16
+; HARD-NEXT:    stm sp, {r0, r1, r2, r3}
+; HARD-NEXT:    bl __addtf3
+; HARD-NEXT:    vmov.32 d17[0], r2
+; HARD-NEXT:    vmov.32 d16[0], r0
+; HARD-NEXT:    vmov.32 d17[1], r3
+; HARD-NEXT:    vmov.32 d16[1], r1
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    add sp, sp, #16
+; HARD-NEXT:    pop {r11, pc}
     %1 = fadd fp128 %p, %p
     %2 = bitcast fp128 %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }
 
-; CHECK-LABEL: test_v16i8_v2f64:
 define <16 x i8> @test_v16i8_v2f64(<2 x double> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG1]]
-; SOFT: vadd.f64 d{{[0-9]+}}, [[REG2]]
-; HARD: vadd.f64  d{{[0-9]+}}, d1
-; HARD: vadd.f64  d{{[0-9]+}}, d0
+; SOFT-LABEL: test_v16i8_v2f64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d16, r3, r2
+; SOFT-NEXT:    vmov d17, r1, r0
+; SOFT-NEXT:    vadd.f64 d19, d16, d16
+; SOFT-NEXT:    vadd.f64 d18, d17, d17
+; SOFT-NEXT:    vrev64.8 q8, q9
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v16i8_v2f64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.f64 d17, d1, d1
+; HARD-NEXT:    vadd.f64 d16, d0, d0
+; HARD-NEXT:    vrev64.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <2 x double> %p, %p
     %2 = bitcast <2 x double> %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }
 
-; CHECK-LABEL: test_v16i8_v2i64:
 define <16 x i8> @test_v16i8_v2i64(<2 x i64> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vadd.i64  q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v16i8_v2i64:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vadd.i64 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v16i8_v2i64:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vadd.i64 q8, q0, q0
+; HARD-NEXT:    vrev64.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <2 x i64> %p, %p
     %2 = bitcast <2 x i64> %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }
 
-; CHECK-LABEL: test_v16i8_v4f32:
 define <16 x i8> @test_v16i8_v4f32(<4 x float> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.32 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v16i8_v4f32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.f32 q8, q8, q8
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v16i8_v4f32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.f32 q8, q8, q8
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    bx lr
     %1 = fadd <4 x float> %p, %p
     %2 = bitcast <4 x float> %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }
 
-; CHECK-LABEL: test_v16i8_v4i32:
 define <16 x i8> @test_v16i8_v4i32(<4 x i32> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.32 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v16i8_v4i32:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.32 q8, q8
+; SOFT-NEXT:    vadd.i32 q8, q8, q8
+; SOFT-NEXT:    vrev32.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v16i8_v4i32:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.32 q8, q0
+; HARD-NEXT:    vadd.i32 q8, q8, q8
+; HARD-NEXT:    vrev32.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <4 x i32> %p, %p
     %2 = bitcast <4 x i32> %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }
 
-; CHECK-LABEL: test_v16i8_v8i16:
 define <16 x i8> @test_v16i8_v8i16(<8 x i16> %p) {
-; SOFT: vmov [[REG1:d[0-9]+]], r3, r2
-; SOFT: vmov [[REG2:d[0-9]+]], r1, r0
-; HARD: vrev64.16 q{{[0-9]+}}, q0
+; SOFT-LABEL: test_v16i8_v8i16:
+; SOFT:       @ %bb.0:
+; SOFT-NEXT:    vmov d17, r3, r2
+; SOFT-NEXT:    vmov d16, r1, r0
+; SOFT-NEXT:    vrev64.16 q8, q8
+; SOFT-NEXT:    vadd.i16 q8, q8, q8
+; SOFT-NEXT:    vrev16.8 q8, q8
+; SOFT-NEXT:    vadd.i8 q8, q8, q8
+; SOFT-NEXT:    vrev64.8 q8, q8
+; SOFT-NEXT:    vmov r1, r0, d16
+; SOFT-NEXT:    vmov r3, r2, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test_v16i8_v8i16:
+; HARD:       @ %bb.0:
+; HARD-NEXT:    vrev64.16 q8, q0
+; HARD-NEXT:    vadd.i16 q8, q8, q8
+; HARD-NEXT:    vrev16.8 q8, q8
+; HARD-NEXT:    vadd.i8 q8, q8, q8
+; HARD-NEXT:    vrev64.8 q0, q8
+; HARD-NEXT:    bx lr
     %1 = add <8 x i16> %p, %p
     %2 = bitcast <8 x i16> %1 to <16 x i8>
     %3 = add <16 x i8> %2, %2
     ret <16 x i8> %3
-; SOFT: vmov r1, r0
-; SOFT: vmov r3, r2
-; HARD: vrev64.8 q0
 }

diff  --git a/llvm/test/CodeGen/ARM/combine-vmovdrr.ll b/llvm/test/CodeGen/ARM/combine-vmovdrr.ll
index 01526b371990e..5a9097a6c39e6 100644
--- a/llvm/test/CodeGen/ARM/combine-vmovdrr.ll
+++ b/llvm/test/CodeGen/ARM/combine-vmovdrr.ll
@@ -1,6 +1,5 @@
-; RUN: llc %s -o - | FileCheck %s
-
-target triple = "thumbv7s-apple-ios"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7s-none-eabi %s -o - | FileCheck %s
 
 declare <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %shuffle.i.i307, <8 x i8> %shuffle.i27.i308, <8 x i8> %vtbl2.i25.i)
 
@@ -8,13 +7,14 @@ declare <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %shuffle.i.i307, <8 x i8> %shuffl
 ; The bitcasts force the values to go through the GPRs, whereas
 ; they are defined on VPRs and used on VPRs.
 ;
-; CHECK-LABEL: motivatingExample:
-; CHECK: vld1.32 {[[ARG1_VALlo:d[0-9]+]], [[ARG1_VALhi:d[0-9]+]]}, [r0]
-; CHECK-NEXT: vldr [[ARG2_VAL:d[0-9]+]], [r1]
-; CHECK-NEXT: vtbl.8 [[RES:d[0-9]+]], {[[ARG1_VALlo]], [[ARG1_VALhi]]}, [[ARG2_VAL]]
-; CHECK-NEXT: vstr [[RES]], [r1]
-; CHECK-NEXT: bx lr
 define void @motivatingExample(<2 x i64>* %addr, <8 x i8>* %addr2) {
+; CHECK-LABEL: motivatingExample:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vldr d18, [r1]
+; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    bx lr
   %shuffle.i.bc.i309 = load <2 x i64>, <2 x i64>* %addr
   %vtbl2.i25.i = load <8 x i8>, <8 x i8>* %addr2
   %shuffle.i.extract.i310 = extractelement <2 x i64> %shuffle.i.bc.i309, i32 0
@@ -27,10 +27,37 @@ define void @motivatingExample(<2 x i64>* %addr, <8 x i8>* %addr2) {
 }
 
 ; Check that we do not perform the transformation for dynamic index.
-; CHECK-LABEL: dynamicIndex:
-; CHECK-NOT: mul
-; CHECK: pop
 define void @dynamicIndex(<2 x i64>* %addr, <8 x i8>* %addr2, i32 %index) {
+; CHECK-LABEL: dynamicIndex:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r6, r7, lr}
+; CHECK-NEXT:    .setfp r7, sp, #8
+; CHECK-NEXT:    add r7, sp, #8
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    mov r4, sp
+; CHECK-NEXT:    bfc r4, #0, #4
+; CHECK-NEXT:    mov sp, r4
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    adds r0, r2, r2
+; CHECK-NEXT:    and r2, r0, #3
+; CHECK-NEXT:    adds r0, #1
+; CHECK-NEXT:    mov r12, sp
+; CHECK-NEXT:    and r0, r0, #3
+; CHECK-NEXT:    lsls r2, r2, #2
+; CHECK-NEXT:    mov r3, r12
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r3:128], r2
+; CHECK-NEXT:    orr.w r0, r12, r0, lsl #2
+; CHECK-NEXT:    sub.w r4, r7, #8
+; CHECK-NEXT:    ldr r2, [r3]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    vldr d18, [r1]
+; CHECK-NEXT:    vmov d16, r2, r0
+; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov sp, r4
+; CHECK-NEXT:    pop {r4, r6, r7, pc}
   %shuffle.i.bc.i309 = load <2 x i64>, <2 x i64>* %addr
   %vtbl2.i25.i = load <8 x i8>, <8 x i8>* %addr2
   %shuffle.i.extract.i310 = extractelement <2 x i64> %shuffle.i.bc.i309, i32 %index
@@ -44,22 +71,18 @@ define void @dynamicIndex(<2 x i64>* %addr, <8 x i8>* %addr2, i32 %index) {
 
 ; Check that we do not perform the transformation when there are several uses
 ; of the result of the bitcast.
-; CHECK-LABEL: severalUses:
-; ARG1_VALlo is hard coded because we need to access the high part of d0,
-; i.e., s1, and we can't express that with filecheck.
-; CHECK: vld1.32 {[[ARG1_VALlo:d0]], [[ARG1_VALhi:d[0-9]+]]}, [r0]
-; CHECK-NEXT: vldr [[ARG2_VAL:d[0-9]+]], [r1]
-; s1 is actually 2 * ARG1_VALlo + 1, but we cannot express that with filecheck.
-; CHECK-NEXT: vmov [[REThi:r[0-9]+]], s1
-; We build the return value here. s0 is 2 * ARG1_VALlo.
-; CHECK-NEXT: vmov r0, s0
-; This copy is correct but actually useless. We should be able to clean it up.
-; CHECK-NEXT: vmov [[ARG1_VALloCPY:d[0-9]+]], r0, [[REThi]]
-; CHECK-NEXT: vtbl.8 [[RES:d[0-9]+]], {[[ARG1_VALloCPY]], [[ARG1_VALhi]]}, [[ARG2_VAL]]
-; CHECK-NEXT: vstr [[RES]], [r1]
-; CHECK-NEXT: mov r1, [[REThi]]
-; CHECK-NEXT: bx lr
 define i64 @severalUses(<2 x i64>* %addr, <8 x i8>* %addr2) {
+; CHECK-LABEL: severalUses:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT:    vmov.32 r2, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vldr d18, [r1]
+; CHECK-NEXT:    vmov d16, r0, r2
+; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    mov r1, r2
+; CHECK-NEXT:    bx lr
   %shuffle.i.bc.i309 = load <2 x i64>, <2 x i64>* %addr
   %vtbl2.i25.i = load <8 x i8>, <8 x i8>* %addr2
   %shuffle.i.extract.i310 = extractelement <2 x i64> %shuffle.i.bc.i309, i32 0


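For anyone reproducing this regeneration locally, a sketch of the invocation follows; it assumes a build tree at build/ with an llc binary, which is illustrative and not part of this commit:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/ARM/big-endian-vector-callee.ll \
        llvm/test/CodeGen/ARM/combine-vmovdrr.ll

The script re-runs each test's RUN lines through llc and rewrites the CHECK lines in place, one set per FileCheck prefix, which is why the SOFT and HARD prefixes above now carry the full instruction sequences rather than the old hand-written spot checks.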
        

