[llvm] 8f7d343 - [ARM][NFC] More detailed vbsl checks in ARM & Thumb2 tests.

Pavel Iliin via llvm-commits <llvm-commits@lists.llvm.org>
Mon Jul 13 09:00:57 PDT 2020


Author: Pavel Iliin
Date: 2020-07-13T17:00:43+01:00
New Revision: 8f7d3430b72e0458f0917b605cd94bcfb9396b37

URL: https://github.com/llvm/llvm-project/commit/8f7d3430b72e0458f0917b605cd94bcfb9396b37
DIFF: https://github.com/llvm/llvm-project/commit/8f7d3430b72e0458f0917b605cd94bcfb9396b37.diff

LOG: [ARM][NFC] More detailed vbsl checks in ARM & Thumb2 tests.
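
Background: vbsl is the NEON "bitwise select" instruction. With the select
mask held in the destination register, it computes (mask & n) | (~mask & m)
bit by bit, which is why a bare "CHECK: vbsl" line is weak: it cannot catch a
wrong operand order or a wrong mask register. Below is a minimal sketch of the
pattern these tests pin down, reusing the @llvm.arm.neon.vbsl.v8i8 intrinsic
already exercised in vbsl.ll (the function name is illustrative, not from the
commit):

    define <8 x i8> @bsl_sketch(<8 x i8> %mask, <8 x i8> %n, <8 x i8> %m) nounwind {
      ; (%mask & %n) | (~%mask & %m): takes %n bits where %mask is 1, %m bits elsewhere.
      %r = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %mask, <8 x i8> %n, <8 x i8> %m)
      ret <8 x i8> %r
    }
    declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)

Under llc -mtriple=arm-eabi -mattr=+neon this lowers to a single vbsl whose
destination register doubles as the mask input (e.g. "vbsl d18, d17, d16" in
the f1 check lines below), which the updated full-operand checks now verify.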

Added: 
    

Modified: 
    llvm/test/CodeGen/ARM/fcopysign.ll
    llvm/test/CodeGen/ARM/fp16-promote.ll
    llvm/test/CodeGen/ARM/vbsl-constant.ll
    llvm/test/CodeGen/ARM/vbsl.ll
    llvm/test/CodeGen/ARM/vselect_imax.ll
    llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
    llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/ARM/fcopysign.ll b/llvm/test/CodeGen/ARM/fcopysign.ll
index d013fbf8c15a..05dbb65a6deb 100644
--- a/llvm/test/CodeGen/ARM/fcopysign.ll
+++ b/llvm/test/CodeGen/ARM/fcopysign.ll
@@ -1,40 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -disable-post-ra -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=SOFT
 ; RUN: llc < %s -disable-post-ra -mtriple=armv7-gnueabi -float-abi=hard -mcpu=cortex-a8 | FileCheck %s -check-prefix=HARD
 
 ; rdar://8984306
 define float @test1(float %x, float %y) nounwind {
-entry:
 ; SOFT-LABEL: test1:
-; SOFT: lsr r1, r1, #31
-; SOFT: bfi r0, r1, #31, #1
-
+; SOFT:       @ %bb.0: @ %entry
+; SOFT-NEXT:    lsr r1, r1, #31
+; SOFT-NEXT:    bfi r0, r1, #31, #1
+; SOFT-NEXT:    bx lr
+;
 ; HARD-LABEL: test1:
-; HARD: vmov.i32 [[REG1:(d[0-9]+)]], #0x80000000
-; HARD: vbsl [[REG1]], d
+; HARD:       @ %bb.0: @ %entry
+; HARD-NEXT:    vmov.f32 s4, s1
+; HARD-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; HARD-NEXT:    vmov.i32 d1, #0x80000000
+; HARD-NEXT:    vbsl d1, d2, d0
+; HARD-NEXT:    vmov.f32 s0, s2
+; HARD-NEXT:    bx lr
+entry:
+
   %0 = tail call float @copysignf(float %x, float %y) nounwind readnone
   ret float %0
 }
 
 define double @test2(double %x, double %y) nounwind {
-entry:
 ; SOFT-LABEL: test2:
-; SOFT: lsr r2, r3, #31
-; SOFT: bfi r1, r2, #31, #1
-
+; SOFT:       @ %bb.0: @ %entry
+; SOFT-NEXT:    lsr r2, r3, #31
+; SOFT-NEXT:    bfi r1, r2, #31, #1
+; SOFT-NEXT:    bx lr
+;
 ; HARD-LABEL: test2:
-; HARD: vmov.i32 [[REG2:(d[0-9]+)]], #0x80000000
-; HARD: vshl.i64 [[REG2]], [[REG2]], #32
-; HARD: vbsl [[REG2]], d1, d0
+; HARD:       @ %bb.0: @ %entry
+; HARD-NEXT:    vmov.i32 d16, #0x80000000
+; HARD-NEXT:    vshl.i64 d16, d16, #32
+; HARD-NEXT:    vbsl d16, d1, d0
+; HARD-NEXT:    vorr d0, d16, d16
+; HARD-NEXT:    bx lr
+entry:
+
   %0 = tail call double @copysign(double %x, double %y) nounwind readnone
   ret double %0
 }
 
 define double @test3(double %x, double %y, double %z) nounwind {
-entry:
 ; SOFT-LABEL: test3:
-; SOFT: vmov.i32 [[REG3:(d[0-9]+)]], #0x80000000
-; SOFT: vshl.i64 [[REG3]], [[REG3]], #32
-; SOFT: vbsl [[REG3]],
+; SOFT:       @ %bb.0: @ %entry
+; SOFT-NEXT:    vmov d16, r2, r3
+; SOFT-NEXT:    vmov d17, r0, r1
+; SOFT-NEXT:    vmul.f64 d16, d17, d16
+; SOFT-NEXT:    vmov.i32 d17, #0x80000000
+; SOFT-NEXT:    vshl.i64 d17, d17, #32
+; SOFT-NEXT:    vldr d18, [sp]
+; SOFT-NEXT:    vbsl d17, d18, d16
+; SOFT-NEXT:    vmov r0, r1, d17
+; SOFT-NEXT:    bx lr
+;
+; HARD-LABEL: test3:
+; HARD:       @ %bb.0: @ %entry
+; HARD-NEXT:    vmul.f64 d16, d0, d1
+; HARD-NEXT:    vmov.i32 d17, #0x80000000
+; HARD-NEXT:    vshl.i64 d0, d17, #32
+; HARD-NEXT:    vbsl d0, d2, d16
+; HARD-NEXT:    bx lr
+entry:
   %0 = fmul double %x, %y
   %1 = tail call double @copysign(double %0, double %z) nounwind readnone
   ret double %1
@@ -42,12 +72,34 @@ entry:
 
 ; rdar://9287902
 define float @test4() nounwind {
-entry:
 ; SOFT-LABEL: test4:
-; SOFT: vmov [[REG7:(d[0-9]+)]], r0, r1
-; SOFT: vmov.i32 [[REG6:(d[0-9]+)]], #0x80000000
-; SOFT: vshr.u64 [[REG7]], [[REG7]], #32
-; SOFT: vbsl [[REG6]], [[REG7]], 
+; SOFT:       @ %bb.0: @ %entry
+; SOFT-NEXT:    push {lr}
+; SOFT-NEXT:    bl _bar
+; SOFT-NEXT:    vmov d16, r0, r1
+; SOFT-NEXT:    vcvt.f32.f64 s0, d16
+; SOFT-NEXT:    vmov.i32 d17, #0x80000000
+; SOFT-NEXT:    vshr.u64 d16, d16, #32
+; SOFT-NEXT:    vmov.f32 d18, #5.000000e-01
+; SOFT-NEXT:    vbsl d17, d16, d18
+; SOFT-NEXT:    vadd.f32 d0, d0, d17
+; SOFT-NEXT:    vmov r0, s0
+; SOFT-NEXT:    pop {lr}
+;
+; HARD-LABEL: test4:
+; HARD:       @ %bb.0: @ %entry
+; HARD-NEXT:    .save {r11, lr}
+; HARD-NEXT:    push {r11, lr}
+; HARD-NEXT:    bl bar
+; HARD-NEXT:    vmov d16, r0, r1
+; HARD-NEXT:    vcvt.f32.f64 s0, d16
+; HARD-NEXT:    vmov.i32 d1, #0x80000000
+; HARD-NEXT:    vshr.u64 d16, d16, #32
+; HARD-NEXT:    vmov.f32 s4, #5.000000e-01
+; HARD-NEXT:    vbsl d1, d16, d2
+; HARD-NEXT:    vadd.f32 s0, s0, s2
+; HARD-NEXT:    pop {r11, pc}
+entry:
   %0 = tail call double (...) @bar() nounwind
   %1 = fptrunc double %0 to float
   %2 = tail call float @copysignf(float 5.000000e-01, float %1) nounwind readnone

diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll
index 3cd07df671b9..11670d7b57ad 100644
--- a/llvm/test/CodeGen/ARM/fp16-promote.ll
+++ b/llvm/test/CodeGen/ARM/fp16-promote.ll
@@ -424,7 +424,7 @@ declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
 ; CHECK-FP16: vsqrt.f32
 ; CHECK-FP16: vcvtb.f16.f32
 ; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP-LIBCALL: vsqrt.f32
+; CHECK-LIBCALL-VFP: vsqrt.f32
 ; CHECK-NOVFP: bl sqrtf
 ; CHECK-LIBCALL: bl __aeabi_f2h
 define void @test_sqrt(half* %p) #0 {
@@ -700,18 +700,44 @@ define void @test_maximum(half* %p) #0 {
 }
 
 ; CHECK-FP16-LABEL: test_copysign:
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vcvtb.f32.f16
-; CHECK-FP16: vbsl
-; CHECK-FP16: vcvtb.f16.f32
+; CHECK-FP16:         ldrh r2, [r0]
+; CHECK-FP16-NEXT:    vmov.i32 d0, #0x80000000
+; CHECK-FP16-NEXT:    ldrh r1, [r1]
+; CHECK-FP16-NEXT:    vmov s2, r2
+; CHECK-FP16-NEXT:    vmov s4, r1
+; CHECK-FP16-NEXT:    vcvtb.f32.f16 s2, s2
+; CHECK-FP16-NEXT:    vcvtb.f32.f16 s4, s4
+; CHECK-FP16-NEXT:    vbsl d0, d2, d1
+; CHECK-FP16-NEXT:    vcvtb.f16.f32 s0, s0
+; CHECK-FP16-NEXT:    vmov r1, s0
+; CHECK-FP16-NEXT:    strh r1, [r0]
+; CHECK-FP16-NEXT:    bx lr
+
 ; CHECK-LIBCALL-LABEL: test_copysign:
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP-LIBCALL: vbsl
+; CHECK-LIBCALL-VFP:         .fnstart
+; CHECK-LIBCALL-VFP-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT:    push {r4, r5, r11, lr}
+; CHECK-LIBCALL-VFP-NEXT:    .vsave {d8, d9}
+; CHECK-LIBCALL-VFP-NEXT:    vpush {d8, d9}
+; CHECK-LIBCALL-VFP-NEXT:    mov r5, r0
+; CHECK-LIBCALL-VFP-NEXT:    ldrh r0, [r0]
+; CHECK-LIBCALL-VFP-NEXT:    mov r4, r1
+; CHECK-LIBCALL: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP:         ldrh r1, [r4]
+; CHECK-LIBCALL-VFP-NEXT:    vmov s18, r0
+; CHECK-LIBCALL-VFP-NEXT:    vmov.i32 d8, #0x80000000
+; CHECK-LIBCALL-VFP-NEXT:    mov r0, r1
+; CHECK-LIBCALL: bl __aeabi_h2f
+; CHECK-LIBCALL-VFP:         vmov s0, r0
+; CHECK-LIBCALL-VFP-NEXT:    vbsl d8, d0, d9
+; CHECK-LIBCALL-VFP-NEXT:    vmov r0, s16
+; CHECK-LIBCALL: bl __aeabi_f2h
+; CHECK-LIBCALL-VFP:         strh r0, [r5]
+; CHECK-LIBCALL-VFP-NEXT:    vpop {d8, d9}
+; CHECK-LIBCALL-VFP-NEXT:    pop {r4, r5, r11, pc}
 ; CHECK-NOVFP: and
 ; CHECK-NOVFP: bic
 ; CHECK-NOVFP: orr
-; CHECK-LIBCALL: bl __aeabi_f2h
 define void @test_copysign(half* %p, half* %q) #0 {
   %a = load half, half* %p, align 2
   %b = load half, half* %q, align 2
@@ -820,7 +846,7 @@ define void @test_round(half* %p) {
 ; CHECK-LIBCALL: bl __aeabi_h2f
 ; CHECK-LIBCALL: bl __aeabi_h2f
 ; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP-LIBCALL: vmla.f32
+; CHECK-LIBCALL-VFP: vmla.f32
 ; CHECK-NOVFP: bl __aeabi_fmul
 ; CHECK-LIBCALL: bl __aeabi_f2h
 define void @test_fmuladd(half* %p, half* %q, half* %r) #0 {

diff --git a/llvm/test/CodeGen/ARM/vbsl-constant.ll b/llvm/test/CodeGen/ARM/vbsl-constant.ll
index 6bcbbc8fa878..83b34a133dd1 100644
--- a/llvm/test/CodeGen/ARM/vbsl-constant.ll
+++ b/llvm/test/CodeGen/ARM/vbsl-constant.ll
@@ -1,10 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+neon | FileCheck %s
 
 define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK-LABEL: v_bsli8:
-;CHECK: vldr
-;CHECK: vldr
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i8 d16, #0x3
+; CHECK-NEXT:    vldr d17, [r2]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d16, d18, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = load <8 x i8>, <8 x i8>* %C
@@ -15,10 +20,14 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 }
 
 define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK-LABEL: v_bsli16:
-;CHECK: vldr
-;CHECK: vldr
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i16 d16, #0x3
+; CHECK-NEXT:    vldr d17, [r2]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d16, d18, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = load <4 x i16>, <4 x i16>* %C
@@ -29,10 +38,14 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
 }
 
 define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK-LABEL: v_bsli32:
-;CHECK: vldr
-;CHECK: vldr
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0x3
+; CHECK-NEXT:    vldr d17, [r2]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d16, d18, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = load <2 x i32>, <2 x i32>* %C
@@ -43,11 +56,14 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
 }
 
 define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
-;CHECK-LABEL: v_bsli64:
-;CHECK: vldr
-;CHECK: vldr
-;CHECK: vldr
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d17, [r2]
+; CHECK-NEXT:    vldr d16, LCPI3_0
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d16, d18, d17
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = load <1 x i64>, <1 x i64>* %C
@@ -58,10 +74,15 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind
 }
 
 define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK-LABEL: v_bslQi8:
-;CHECK: vld1.32
-;CHECK: vld1.32
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
+; CHECK-NEXT:    vmov.i8 q9, #0x3
+; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = load <16 x i8>, <16 x i8>* %C
@@ -72,10 +93,15 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
 }
 
 define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK-LABEL: v_bslQi16:
-;CHECK: vld1.32
-;CHECK: vld1.32
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
+; CHECK-NEXT:    vmov.i16 q9, #0x3
+; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = load <8 x i16>, <8 x i16>* %C
@@ -86,10 +112,15 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin
 }
 
 define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK-LABEL: v_bslQi32:
-;CHECK: vld1.32
-;CHECK: vld1.32
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
+; CHECK-NEXT:    vmov.i32 q9, #0x3
+; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = load <4 x i32>, <4 x i32>* %C
@@ -100,11 +131,16 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin
 }
 
 define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
-;CHECK-LABEL: v_bslQi64:
-;CHECK: vld1.32
-;CHECK: vld1.32
-;CHECK: vld1.64
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
+; CHECK-NEXT:    vld1.32 {d18, d19}, [r0]
+; CHECK-NEXT:    adr r0, LCPI7_0
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0:128]
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = load <2 x i64>, <2 x i64>* %C

diff --git a/llvm/test/CodeGen/ARM/vbsl.ll b/llvm/test/CodeGen/ARM/vbsl.ll
index 6812dd90a100..01e1ffb2e983 100644
--- a/llvm/test/CodeGen/ARM/vbsl.ll
+++ b/llvm/test/CodeGen/ARM/vbsl.ll
@@ -1,10 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
 
 ; rdar://12471808
 
 define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK-LABEL: v_bsli8:
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r2]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
 	%tmp3 = load <8 x i8>, <8 x i8>* %C
@@ -16,8 +23,14 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 }
 
 define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK-LABEL: v_bsli16:
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r2]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%tmp3 = load <4 x i16>, <4 x i16>* %C
@@ -29,8 +42,14 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
 }
 
 define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK-LABEL: v_bsli32:
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r2]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%tmp2 = load <2 x i32>, <2 x i32>* %B
 	%tmp3 = load <2 x i32>, <2 x i32>* %C
@@ -42,8 +61,14 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
 }
 
 define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
-;CHECK-LABEL: v_bsli64:
-;CHECK: vbsl
+; CHECK-LABEL: v_bsli64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [r2]
+; CHECK-NEXT:    vldr d17, [r1]
+; CHECK-NEXT:    vldr d18, [r0]
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <1 x i64>, <1 x i64>* %A
 	%tmp2 = load <1 x i64>, <1 x i64>* %B
 	%tmp3 = load <1 x i64>, <1 x i64>* %C
@@ -55,8 +80,15 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind
 }
 
 define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK-LABEL: v_bslQi8:
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <16 x i8>, <16 x i8>* %A
 	%tmp2 = load <16 x i8>, <16 x i8>* %B
 	%tmp3 = load <16 x i8>, <16 x i8>* %C
@@ -68,8 +100,15 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
 }
 
 define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK-LABEL: v_bslQi16:
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%tmp2 = load <8 x i16>, <8 x i16>* %B
 	%tmp3 = load <8 x i16>, <8 x i16>* %C
@@ -81,8 +120,15 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin
 }
 
 define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK-LABEL: v_bslQi32:
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <4 x i32>, <4 x i32>* %A
 	%tmp2 = load <4 x i32>, <4 x i32>* %B
 	%tmp3 = load <4 x i32>, <4 x i32>* %C
@@ -94,8 +140,15 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin
 }
 
 define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
-;CHECK-LABEL: v_bslQi64:
-;CHECK: vbsl
+; CHECK-LABEL: v_bslQi64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vmov r0, r1, d20
+; CHECK-NEXT:    vmov r2, r3, d21
+; CHECK-NEXT:    mov pc, lr
 	%tmp1 = load <2 x i64>, <2 x i64>* %A
 	%tmp2 = load <2 x i64>, <2 x i64>* %B
 	%tmp3 = load <2 x i64>, <2 x i64>* %C
@@ -108,84 +161,180 @@ define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwin
 
 define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: f1:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind
   ret <8 x i8> %vbsl.i
 }
 
 define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: f2:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind
   ret <4 x i16> %vbsl3.i
 }
 
 define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: f3:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind
   ret <2 x i32> %vbsl3.i
 }
 
 define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: f4:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl4.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
   ret <2 x float> %vbsl4.i
 }
 
 define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: g1:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind
   ret <16 x i8> %vbsl.i
 }
 
 define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: g2:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind
   ret <8 x i16> %vbsl3.i
 }
 
 define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: g3:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind
   ret <4 x i32> %vbsl3.i
 }
 
 define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: g4:
-; CHECK: vbsl
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl4.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind
   ret <4 x float> %vbsl4.i
 }
 
 define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: test_vbsl_s64:
-; CHECK: vbsl d
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
   ret <1 x i64> %vbsl3.i
 }
 
 define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: test_vbsl_u64:
-; CHECK: vbsl d
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr d16, [sp]
+; CHECK-NEXT:    vmov d17, r2, r3
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    vbsl d18, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
   ret <1 x i64> %vbsl3.i
 }
 
 define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: test_vbslq_s64:
-; CHECK: vbsl q
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
   ret <2 x i64> %vbsl3.i
 }
 
 define <2 x i64> @test_vbslq_u64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: test_vbslq_u64:
-; CHECK: vbsl q
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    add r12, sp, #16
+; CHECK-NEXT:    vmov d19, r2, r3
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT:    vmov d18, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
+; CHECK-NEXT:    vbsl q9, q10, q8
+; CHECK-NEXT:    vmov r0, r1, d18
+; CHECK-NEXT:    vmov r2, r3, d19
+; CHECK-NEXT:    mov pc, lr
   %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
   ret <2 x i64> %vbsl3.i
 }

diff --git a/llvm/test/CodeGen/ARM/vselect_imax.ll b/llvm/test/CodeGen/ARM/vselect_imax.ll
index e212b37fa1f5..f9d88cc4af98 100644
--- a/llvm/test/CodeGen/ARM/vselect_imax.ll
+++ b/llvm/test/CodeGen/ARM/vselect_imax.ll
@@ -63,11 +63,66 @@ define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2,
 ; lowering we also need to adjust the cost.
 %T0_18 = type <4 x i64>
 %T1_18 = type <4 x i1>
-; CHECK-LABEL: func_blend18:
 define void @func_blend18(%T0_18* %loadaddr, %T0_18* %loadaddr2,
                            %T1_18* %blend, %T0_18* %storeaddr) {
-; CHECK: vbsl
-; CHECK: vbsl
+; CHECK-LABEL: func_blend18:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r0:128]!
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r1:128]!
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r1:128]
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r0:128]
+; CHECK-NEXT:    vmov.32 r12, d16[0]
+; CHECK-NEXT:    vmov.32 r2, d20[0]
+; CHECK-NEXT:    vmov.32 lr, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d20[1]
+; CHECK-NEXT:    vmov.32 r7, d18[0]
+; CHECK-NEXT:    vmov.32 r5, d22[0]
+; CHECK-NEXT:    vmov.32 r4, d22[1]
+; CHECK-NEXT:    vmov.32 r6, d17[0]
+; CHECK-NEXT:    subs r2, r2, r12
+; CHECK-NEXT:    vmov.32 r2, d18[1]
+; CHECK-NEXT:    sbcs r0, r0, lr
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    subs r7, r5, r7
+; CHECK-NEXT:    vmov.32 r7, d21[0]
+; CHECK-NEXT:    vmov.32 r5, d17[1]
+; CHECK-NEXT:    sbcs r2, r4, r2
+; CHECK-NEXT:    vmov.32 r4, d21[1]
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    subs r7, r7, r6
+; CHECK-NEXT:    vmov.32 r6, d23[0]
+; CHECK-NEXT:    vmov.32 r7, d19[0]
+; CHECK-NEXT:    sbcs r5, r4, r5
+; CHECK-NEXT:    mov r4, #0
+; CHECK-NEXT:    movlt r4, #1
+; CHECK-NEXT:    vmov.32 r5, d19[1]
+; CHECK-NEXT:    subs r7, r6, r7
+; CHECK-NEXT:    vmov.32 r7, d23[1]
+; CHECK-NEXT:    sbcs r7, r7, r5
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    vdup.32 d25, r1
+; CHECK-NEXT:    mvnne r4, #0
+; CHECK-NEXT:    vdup.32 d24, r2
+; CHECK-NEXT:    vdup.32 d27, r4
+; CHECK-NEXT:    vbsl q12, q11, q9
+; CHECK-NEXT:    vdup.32 d26, r0
+; CHECK-NEXT:    vbsl q13, q10, q8
+; CHECK-NEXT:    vst1.64 {d24, d25}, [r3:128]!
+; CHECK-NEXT:    vst1.64 {d26, d27}, [r3:128]
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r11, lr}
+; CHECK-NEXT:    mov pc, lr
   %v0 = load %T0_18, %T0_18* %loadaddr
   %v1 = load %T0_18, %T0_18* %loadaddr2
   %c = icmp slt %T0_18 %v0, %v1
@@ -79,13 +134,126 @@ define void @func_blend18(%T0_18* %loadaddr, %T0_18* %loadaddr2,
 }
 %T0_19 = type <8 x i64>
 %T1_19 = type <8 x i1>
-; CHECK-LABEL: func_blend19:
 define void @func_blend19(%T0_19* %loadaddr, %T0_19* %loadaddr2,
                            %T1_19* %blend, %T0_19* %storeaddr) {
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
+; CHECK-LABEL: func_blend19:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    vld1.64 {d24, d25}, [r12:128]!
+; CHECK-NEXT:    mov r6, #0
+; CHECK-NEXT:    mov lr, #0
+; CHECK-NEXT:    vld1.64 {d28, d29}, [r2:128]!
+; CHECK-NEXT:    vld1.64 {d16, d17}, [r12:128]
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r2:128]
+; CHECK-NEXT:    add r2, r1, #32
+; CHECK-NEXT:    add r1, r1, #48
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r2:128]
+; CHECK-NEXT:    add r2, r0, #32
+; CHECK-NEXT:    add r0, r0, #48
+; CHECK-NEXT:    vld1.64 {d30, d31}, [r2:128]
+; CHECK-NEXT:    vmov.32 r4, d16[0]
+; CHECK-NEXT:    vmov.32 r2, d18[0]
+; CHECK-NEXT:    vmov.32 r12, d16[1]
+; CHECK-NEXT:    vmov.32 r5, d18[1]
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r1:128]
+; CHECK-NEXT:    vmov.32 r1, d21[0]
+; CHECK-NEXT:    vld1.64 {d26, d27}, [r0:128]
+; CHECK-NEXT:    vmov.32 r0, d21[1]
+; CHECK-NEXT:    subs r2, r2, r4
+; CHECK-NEXT:    vmov.32 r4, d31[1]
+; CHECK-NEXT:    vmov.32 r2, d31[0]
+; CHECK-NEXT:    sbcs r5, r5, r12
+; CHECK-NEXT:    mov r12, #0
+; CHECK-NEXT:    movlt r12, #1
+; CHECK-NEXT:    cmp r12, #0
+; CHECK-NEXT:    mvnne r12, #0
+; CHECK-NEXT:    vmov.32 r5, d25[0]
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    sbcs r0, r4, r0
+; CHECK-NEXT:    vmov.32 r1, d29[0]
+; CHECK-NEXT:    vmov.32 r0, d25[1]
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    vmov.32 r4, d29[1]
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vdup.32 d5, r2
+; CHECK-NEXT:    subs r1, r1, r5
+; CHECK-NEXT:    vmov.32 r5, d24[1]
+; CHECK-NEXT:    vmov.32 r1, d24[0]
+; CHECK-NEXT:    sbcs r0, r4, r0
+; CHECK-NEXT:    vmov.32 r4, d28[0]
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    vdup.32 d1, r0
+; CHECK-NEXT:    vmov.32 r0, d19[0]
+; CHECK-NEXT:    subs r1, r4, r1
+; CHECK-NEXT:    vmov.32 r4, d17[0]
+; CHECK-NEXT:    vmov.32 r1, d28[1]
+; CHECK-NEXT:    sbcs r1, r1, r5
+; CHECK-NEXT:    vmov.32 r5, d17[1]
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    subs r0, r0, r4
+; CHECK-NEXT:    vmov.32 r0, d19[1]
+; CHECK-NEXT:    vmov.32 r4, d22[0]
+; CHECK-NEXT:    vdup.32 d0, r1
+; CHECK-NEXT:    vmov.32 r1, d22[1]
+; CHECK-NEXT:    vbsl q0, q14, q12
+; CHECK-NEXT:    sbcs r0, r0, r5
+; CHECK-NEXT:    vmov.32 r5, d26[0]
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    subs r4, r5, r4
+; CHECK-NEXT:    vmov.32 r5, d20[0]
+; CHECK-NEXT:    vmov.32 r4, d26[1]
+; CHECK-NEXT:    sbcs r1, r4, r1
+; CHECK-NEXT:    vmov.32 r4, d30[0]
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    subs r4, r4, r5
+; CHECK-NEXT:    vmov.32 r5, d30[1]
+; CHECK-NEXT:    vmov.32 r4, d20[1]
+; CHECK-NEXT:    sbcs r4, r5, r4
+; CHECK-NEXT:    vmov.32 r5, d27[0]
+; CHECK-NEXT:    vmov.32 r4, d23[0]
+; CHECK-NEXT:    movlt r6, #1
+; CHECK-NEXT:    subs r4, r5, r4
+; CHECK-NEXT:    vmov.32 r5, d27[1]
+; CHECK-NEXT:    vmov.32 r4, d23[1]
+; CHECK-NEXT:    sbcs r4, r5, r4
+; CHECK-NEXT:    movlt lr, #1
+; CHECK-NEXT:    cmp lr, #0
+; CHECK-NEXT:    mvnne lr, #0
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    mvnne r6, #0
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    vdup.32 d4, r6
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    vdup.32 d3, lr
+; CHECK-NEXT:    vbsl q2, q15, q10
+; CHECK-NEXT:    vdup.32 d21, r0
+; CHECK-NEXT:    add r0, r3, #32
+; CHECK-NEXT:    vdup.32 d2, r1
+; CHECK-NEXT:    vdup.32 d20, r12
+; CHECK-NEXT:    vbsl q1, q13, q11
+; CHECK-NEXT:    vst1.64 {d4, d5}, [r0:128]
+; CHECK-NEXT:    add r0, r3, #48
+; CHECK-NEXT:    vbsl q10, q9, q8
+; CHECK-NEXT:    vst1.64 {d0, d1}, [r3:128]!
+; CHECK-NEXT:    vst1.64 {d2, d3}, [r0:128]
+; CHECK-NEXT:    vst1.64 {d20, d21}, [r3:128]
+; CHECK-NEXT:    pop {r4, r5, r6, lr}
+; CHECK-NEXT:    mov pc, lr
   %v0 = load %T0_19, %T0_19* %loadaddr
   %v1 = load %T0_19, %T0_19* %loadaddr2
   %c = icmp slt %T0_19 %v0, %v1
@@ -97,17 +265,249 @@ define void @func_blend19(%T0_19* %loadaddr, %T0_19* %loadaddr2,
 }
 %T0_20 = type <16 x i64>
 %T1_20 = type <16 x i1>
-; CHECK-LABEL: func_blend20:
 define void @func_blend20(%T0_20* %loadaddr, %T0_20* %loadaddr2,
                            %T1_20* %blend, %T0_20* %storeaddr) {
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
-; CHECK: vbsl
+; CHECK-LABEL: func_blend20:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, sp, #8
+; CHECK-NEXT:    add r9, r1, #64
+; CHECK-NEXT:    mov r2, #32
+; CHECK-NEXT:    add r8, r0, #64
+; CHECK-NEXT:    vld1.64 {d18, d19}, [r9:128], r2
+; CHECK-NEXT:    mov r10, #0
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r8:128], r2
+; CHECK-NEXT:    vmov.32 r2, d19[0]
+; CHECK-NEXT:    str r3, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    vmov.32 r7, d23[0]
+; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vmov.32 r5, d19[1]
+; CHECK-NEXT:    vmov.32 r6, d23[1]
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r9:128]!
+; CHECK-NEXT:    vmov.32 r12, d2[0]
+; CHECK-NEXT:    subs r2, r7, r2
+; CHECK-NEXT:    mov r7, r1
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r7:128]!
+; CHECK-NEXT:    sbcs r2, r6, r5
+; CHECK-NEXT:    vmov.32 r5, d18[0]
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    vmov.32 r6, d22[0]
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vld1.64 {d0, d1}, [r7:128]
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vdup.32 d17, r2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    subs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d22[1]
+; CHECK-NEXT:    vmov.32 r5, d18[1]
+; CHECK-NEXT:    sbcs r5, r6, r5
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    mvnne r5, #0
+; CHECK-NEXT:    vdup.32 d16, r5
+; CHECK-NEXT:    vbsl q8, q11, q9
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r2:128]!
+; CHECK-NEXT:    vmov.32 r5, d21[0]
+; CHECK-NEXT:    vmov.32 r6, d23[0]
+; CHECK-NEXT:    vld1.64 {d30, d31}, [r2:128]
+; CHECK-NEXT:    vmov.32 r2, d1[0]
+; CHECK-NEXT:    vmov.32 r7, d30[0]
+; CHECK-NEXT:    subs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d23[1]
+; CHECK-NEXT:    vmov.32 r5, d21[1]
+; CHECK-NEXT:    sbcs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d22[0]
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    mvnne r5, #0
+; CHECK-NEXT:    vdup.32 d19, r5
+; CHECK-NEXT:    vmov.32 r5, d20[0]
+; CHECK-NEXT:    subs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d22[1]
+; CHECK-NEXT:    vmov.32 r5, d20[1]
+; CHECK-NEXT:    sbcs r5, r6, r5
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    mvnne r5, #0
+; CHECK-NEXT:    vdup.32 d18, r5
+; CHECK-NEXT:    add r5, r0, #32
+; CHECK-NEXT:    vbsl q9, q11, q10
+; CHECK-NEXT:    vld1.64 {d22, d23}, [r5:128]
+; CHECK-NEXT:    add r5, r1, #32
+; CHECK-NEXT:    vld1.64 {d24, d25}, [r5:128]
+; CHECK-NEXT:    vmov.32 r5, d24[0]
+; CHECK-NEXT:    vmov.32 r6, d22[0]
+; CHECK-NEXT:    vmov.32 r4, d23[0]
+; CHECK-NEXT:    vld1.64 {d20, d21}, [r8:128]!
+; CHECK-NEXT:    vmov.32 r11, d21[0]
+; CHECK-NEXT:    subs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d22[1]
+; CHECK-NEXT:    vmov.32 r5, d24[1]
+; CHECK-NEXT:    sbcs r5, r6, r5
+; CHECK-NEXT:    vmov.32 r6, d25[0]
+; CHECK-NEXT:    movlt r10, #1
+; CHECK-NEXT:    cmp r10, #0
+; CHECK-NEXT:    mvnne r10, #0
+; CHECK-NEXT:    subs r4, r4, r6
+; CHECK-NEXT:    vmov.32 r6, d23[1]
+; CHECK-NEXT:    vmov.32 r4, d25[1]
+; CHECK-NEXT:    sbcs r4, r6, r4
+; CHECK-NEXT:    mov r6, #0
+; CHECK-NEXT:    vmov.32 r4, d31[0]
+; CHECK-NEXT:    movlt r6, #1
+; CHECK-NEXT:    cmp r6, #0
+; CHECK-NEXT:    mvnne r6, #0
+; CHECK-NEXT:    subs r2, r4, r2
+; CHECK-NEXT:    vmov.32 r4, d31[1]
+; CHECK-NEXT:    vmov.32 r2, d1[1]
+; CHECK-NEXT:    sbcs r2, r4, r2
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vdup.32 d27, r2
+; CHECK-NEXT:    add r2, r0, #48
+; CHECK-NEXT:    vld1.64 {d4, d5}, [r2:128]
+; CHECK-NEXT:    add r2, r1, #48
+; CHECK-NEXT:    add r0, r0, #80
+; CHECK-NEXT:    add r1, r1, #80
+; CHECK-NEXT:    vld1.64 {d6, d7}, [r2:128]
+; CHECK-NEXT:    vmov.32 r2, d7[0]
+; CHECK-NEXT:    vmov.32 r4, d5[0]
+; CHECK-NEXT:    vmov.32 r5, d4[0]
+; CHECK-NEXT:    vld1.64 {d8, d9}, [r0:128]
+; CHECK-NEXT:    subs r2, r4, r2
+; CHECK-NEXT:    vmov.32 r4, d5[1]
+; CHECK-NEXT:    vmov.32 r2, d7[1]
+; CHECK-NEXT:    sbcs r2, r4, r2
+; CHECK-NEXT:    vmov.32 r4, d0[0]
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vdup.32 d29, r2
+; CHECK-NEXT:    vmov.32 r2, d6[1]
+; CHECK-NEXT:    subs r4, r7, r4
+; CHECK-NEXT:    vmov.32 r7, d30[1]
+; CHECK-NEXT:    vmov.32 r4, d0[1]
+; CHECK-NEXT:    sbcs r4, r7, r4
+; CHECK-NEXT:    vmov.32 r7, d4[1]
+; CHECK-NEXT:    mov r4, #0
+; CHECK-NEXT:    movlt r4, #1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    mvnne r4, #0
+; CHECK-NEXT:    vdup.32 d26, r4
+; CHECK-NEXT:    vmov.32 r4, d6[0]
+; CHECK-NEXT:    vbsl q13, q15, q0
+; CHECK-NEXT:    vld1.64 {d0, d1}, [r9:128]
+; CHECK-NEXT:    vdup.32 d31, r6
+; CHECK-NEXT:    vmov.32 r9, d3[0]
+; CHECK-NEXT:    vdup.32 d30, r10
+; CHECK-NEXT:    vmov.32 r10, d21[1]
+; CHECK-NEXT:    vbsl q15, q11, q12
+; CHECK-NEXT:    subs r4, r5, r4
+; CHECK-NEXT:    sbcs r2, r7, r2
+; CHECK-NEXT:    vmov.32 r4, d0[1]
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    vdup.32 d28, r2
+; CHECK-NEXT:    vbsl q14, q2, q3
+; CHECK-NEXT:    vld1.64 {d4, d5}, [r8:128]
+; CHECK-NEXT:    vmov.32 r2, d0[0]
+; CHECK-NEXT:    vmov.32 r6, d4[0]
+; CHECK-NEXT:    vmov.32 r5, d4[1]
+; CHECK-NEXT:    vld1.64 {d6, d7}, [r1:128]
+; CHECK-NEXT:    vmov.32 r7, d7[0]
+; CHECK-NEXT:    vmov.32 r1, d7[1]
+; CHECK-NEXT:    vmov.32 lr, d5[0]
+; CHECK-NEXT:    vmov.32 r8, d3[1]
+; CHECK-NEXT:    subs r0, r6, r2
+; CHECK-NEXT:    vmov.32 r2, d9[1]
+; CHECK-NEXT:    sbcs r0, r5, r4
+; CHECK-NEXT:    vmov.32 r4, d9[0]
+; CHECK-NEXT:    movlt r3, #1
+; CHECK-NEXT:    cmp r3, #0
+; CHECK-NEXT:    mvnne r3, #0
+; CHECK-NEXT:    vmov.32 r6, d8[1]
+; CHECK-NEXT:    mov r5, #0
+; CHECK-NEXT:    vmov.32 r0, d5[1]
+; CHECK-NEXT:    subs r4, r4, r7
+; CHECK-NEXT:    vmov.32 r7, d2[1]
+; CHECK-NEXT:    sbcs r1, r2, r1
+; CHECK-NEXT:    vmov.32 r2, d8[0]
+; CHECK-NEXT:    vmov.32 r1, d6[0]
+; CHECK-NEXT:    movlt r5, #1
+; CHECK-NEXT:    vmov.32 r4, d6[1]
+; CHECK-NEXT:    cmp r5, #0
+; CHECK-NEXT:    mvnne r5, #0
+; CHECK-NEXT:    vdup.32 d11, r5
+; CHECK-NEXT:    vmov.32 r5, d20[0]
+; CHECK-NEXT:    subs r1, r2, r1
+; CHECK-NEXT:    vmov.32 r2, d1[0]
+; CHECK-NEXT:    sbcs r1, r6, r4
+; CHECK-NEXT:    vmov.32 r6, d1[1]
+; CHECK-NEXT:    vmov.32 r4, d20[1]
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    vdup.32 d10, r1
+; CHECK-NEXT:    mov r1, #0
+; CHECK-NEXT:    vbsl q5, q4, q3
+; CHECK-NEXT:    subs r2, lr, r2
+; CHECK-NEXT:    sbcs r0, r0, r6
+; CHECK-NEXT:    mov r0, #0
+; CHECK-NEXT:    movlt r0, #1
+; CHECK-NEXT:    subs r2, r5, r12
+; CHECK-NEXT:    sbcs r2, r4, r7
+; CHECK-NEXT:    mov r2, #0
+; CHECK-NEXT:    movlt r2, #1
+; CHECK-NEXT:    subs r7, r11, r9
+; CHECK-NEXT:    sbcs r7, r10, r8
+; CHECK-NEXT:    movlt r1, #1
+; CHECK-NEXT:    cmp r1, #0
+; CHECK-NEXT:    mvnne r1, #0
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    vdup.32 d23, r1
+; CHECK-NEXT:    mvnne r2, #0
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    mvnne r0, #0
+; CHECK-NEXT:    vdup.32 d22, r2
+; CHECK-NEXT:    vdup.32 d25, r0
+; CHECK-NEXT:    add r0, r1, #80
+; CHECK-NEXT:    vbsl q11, q10, q1
+; CHECK-NEXT:    vdup.32 d24, r3
+; CHECK-NEXT:    vst1.64 {d10, d11}, [r0:128]
+; CHECK-NEXT:    add r0, r1, #32
+; CHECK-NEXT:    vbsl q12, q2, q0
+; CHECK-NEXT:    vst1.64 {d30, d31}, [r0:128]
+; CHECK-NEXT:    add r0, r1, #48
+; CHECK-NEXT:    vst1.64 {d28, d29}, [r0:128]
+; CHECK-NEXT:    add r0, r1, #64
+; CHECK-NEXT:    vst1.64 {d18, d19}, [r1:128]!
+; CHECK-NEXT:    vst1.64 {d26, d27}, [r1:128]
+; CHECK-NEXT:    mov r1, #32
+; CHECK-NEXT:    vst1.64 {d16, d17}, [r0:128], r1
+; CHECK-NEXT:    vst1.64 {d22, d23}, [r0:128]!
+; CHECK-NEXT:    vst1.64 {d24, d25}, [r0:128]
+; CHECK-NEXT:    add sp, sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, sp, #4
+; CHECK-NEXT:    pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT:    mov pc, lr
   %v0 = load %T0_20, %T0_20* %loadaddr
   %v1 = load %T0_20, %T0_20* %loadaddr2
   %c = icmp slt %T0_20 %v0, %v1

diff --git a/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll b/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
index acafde53ac83..611a9c1500d8 100644
--- a/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
+++ b/llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll
@@ -127,9 +127,11 @@ define double @copysign_d(double %a, double %b) {
 ; SOFT: bfi r1, [[REG]], #31, #1
 ; VFP: lsrs [[REG:r[0-9]+]], r3, #31
 ; VFP: bfi r1, [[REG]], #31, #1
-; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
-; NEON: vshl.i64 [[REG]], [[REG]], #32
-; NEON: vbsl [[REG]], d
+; NEON:         vmov.i32 d16, #0x80000000
+; NEON-NEXT:    vshl.i64 d16, d16, #32
+; NEON-NEXT:    vbsl d16, d1, d0
+; NEON-NEXT:    vorr d0, d16, d16
+; NEON-NEXT:    bx lr
   %1 = call double @llvm.copysign.f64(double %a, double %b)
   ret double %1
 }

diff --git a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
index 1263ae15b466..5e8276f07115 100644
--- a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
+++ b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
@@ -3,8 +3,8 @@
 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m33                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=NO-VMLA
 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP  -check-prefix=FP-ARMv8  -check-prefix=VMLA
 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=FP-ARMv8 -check-prefix=VMLA
-; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4 -check-prefix=NO-VMLA
-; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8 -check-prefix=VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7                    | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON-A7 -check-prefix=VFP4 -check-prefix=NO-VMLA
+; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57                   | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON-A57 -check-prefix=FP-ARMv8 -check-prefix=VMLA
 
 declare float     @llvm.sqrt.f32(float %Val)
 define float @sqrt_f(float %a) {
@@ -123,8 +123,20 @@ define float @copysign_f(float %a, float %b) {
 ; SP: bfi r{{[0-9]+}}, [[REG]], #31, #1
 ; VFP: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
 ; VFP: bfi r{{[0-9]+}}, [[REG]], #31, #1
-; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
-; NEON: vbsl [[REG]], d
+; NEON-A7:       @ %bb.0:
+; NEON-A7-NEXT:    vmov.f32 s4, s1
+; NEON-A7-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; NEON-A7-NEXT:    vmov.i32 d1, #0x80000000
+; NEON-A7-NEXT:    vbsl d1, d2, d0
+; NEON-A7-NEXT:    vmov.f32 s0, s2
+; NEON-A7-NEXT:    bx lr
+; NEON-A57:       @ %bb.0:
+; NEON-A57-NEXT:    vmov.f32 s4, s1
+; NEON-A57-NEXT:    vmov.i32 d1, #0x80000000
+; NEON-A57-NEXT:    @ kill: def $s0 killed $s0 def $d0
+; NEON-A57-NEXT:    vbsl d1, d2, d0
+; NEON-A57-NEXT:    vmov.f32 s0, s2
+; NEON-A57-NEXT:    bx lr
   %1 = call float @llvm.copysign.f32(float %a, float %b)
   ret float %1
 }
