[llvm] 7d4ebc9 - [ARM] FP16 conversion tests. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jun 4 05:14:14 PDT 2020


Author: David Green
Date: 2020-06-04T13:13:56+01:00
New Revision: 7d4ebc98afac1e8f749644589eae6b26ddd68811

URL: https://github.com/llvm/llvm-project/commit/7d4ebc98afac1e8f749644589eae6b26ddd68811
DIFF: https://github.com/llvm/llvm-project/commit/7d4ebc98afac1e8f749644589eae6b26ddd68811.diff

LOG: [ARM] FP16 conversion tests. NFC
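
The tests cover MVE fpext (f16 to f32) and fptrunc (f32 to f16), both as plain
vector conversions and inside vectorized load/convert/multiply/store loops. As a
rough sketch of the loop shape being exercised (illustrative only; the function
name is made up and the multiply from the real tests is dropped for brevity):

  define void @trunc4(float* %x, half* %y) {
  entry:
    br label %loop

  loop:                                             ; converts 4 floats per iteration
    %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    %xp = getelementptr inbounds float, float* %x, i32 %i
    %xv = bitcast float* %xp to <4 x float>*
    %v = load <4 x float>, <4 x float>* %xv, align 4
    %h = fptrunc <4 x float> %v to <4 x half>       ; the conversion under test
    %yp = getelementptr inbounds half, half* %y, i32 %i
    %yv = bitcast half* %yp to <4 x half>*
    store <4 x half> %h, <4 x half>* %yv, align 2
    %i.next = add i32 %i, 4
    %done = icmp eq i32 %i.next, 1024
    br i1 %done, label %exit, label %loop

  exit:
    ret void
  }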

Added: 
    llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
    llvm/test/CodeGen/Thumb2/mve-vcvt16.ll

Modified: 
    llvm/test/CodeGen/Thumb2/mve-shuffleext.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll b/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
new file mode 100644
index 000000000000..4e102427cad6
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-fp16convertloops.ll
@@ -0,0 +1,936 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define void @to_4(float* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: to_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adr r2, .LCPI0_0
+; CHECK-NEXT:    mov.w lr, #256
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB0_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r0], #16
+; CHECK-NEXT:    vmul.f32 q1, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s5
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov.16 q2[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q2[1], r3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q2[3], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    str r2, [r1]
+; CHECK-NEXT:    str r3, [r1, #4]
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    le lr, .LBB0_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fmul <4 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <4 x float> %2 to <4 x half>
+  %4 = getelementptr inbounds half, half* %y, i32 %index
+  %5 = bitcast half* %4 to <4 x half>*
+  store <4 x half> %3, <4 x half>* %5, align 2
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @to_8(float* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: to_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adr r2, .LCPI1_0
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB1_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r0], #32
+; CHECK-NEXT:    vmul.f32 q2, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s8
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s9
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #-16]
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vmul.f32 q2, q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrb.8 q1, [r1], #16
+; CHECK-NEXT:    le lr, .LBB1_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <8 x float>*
+  %wide.load = load <8 x float>, <8 x float>* %1, align 4
+  %2 = fmul <8 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <8 x float> %2 to <8 x half>
+  %4 = getelementptr inbounds half, half* %y, i32 %index
+  %5 = bitcast half* %4 to <8 x half>*
+  store <8 x half> %3, <8 x half>* %5, align 2
+  %index.next = add i32 %index, 8
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @to_16(float* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: to_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adr r2, .LCPI2_0
+; CHECK-NEXT:    mov.w lr, #64
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB2_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
+; CHECK-NEXT:    vmul.f32 q2, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s8
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s9
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vmul.f32 q2, q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrh.16 q1, [r1, #16]
+; CHECK-NEXT:    vldrw.u32 q1, [r0], #64
+; CHECK-NEXT:    vmul.f32 q2, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s9
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s8
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[0], r3
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[1], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #-48]
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vmul.f32 q2, q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrh.16 q1, [r1], #32
+; CHECK-NEXT:    le lr, .LBB2_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI2_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <16 x float>*
+  %wide.load = load <16 x float>, <16 x float>* %1, align 4
+  %2 = fmul <16 x float> %wide.load, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %3 = fptrunc <16 x float> %2 to <16 x half>
+  %4 = getelementptr inbounds half, half* %y, i32 %index
+  %5 = bitcast half* %4 to <16 x half>*
+  store <16 x half> %3, <16 x half>* %5, align 2
+  %index.next = add i32 %index, 16
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @from_4(half* nocapture readonly %x, float* noalias nocapture %y) {
+; CHECK-LABEL: from_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adr r2, .LCPI3_0
+; CHECK-NEXT:    mov.w lr, #256
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB3_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r0]
+; CHECK-NEXT:    ldr r3, [r0, #4]
+; CHECK-NEXT:    adds r0, #8
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmovx.f16 s10, s5
+; CHECK-NEXT:    vmovx.f16 s8, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s4
+; CHECK-NEXT:    vmul.f32 q1, q3, q0
+; CHECK-NEXT:    vstrb.8 q1, [r1], #16
+; CHECK-NEXT:    le lr, .LBB3_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <4 x half>*
+  %wide.load = load <4 x half>, <4 x half>* %1, align 2
+  %2 = fpext <4 x half> %wide.load to <4 x float>
+  %3 = fmul <4 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = getelementptr inbounds float, float* %y, i32 %index
+  %5 = bitcast float* %4 to <4 x float>*
+  store <4 x float> %3, <4 x float>* %5, align 4
+  %index.next = add i32 %index, 4
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @from_8(half* nocapture readonly %x, float* noalias nocapture %y) {
+; CHECK-LABEL: from_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr r2, .LCPI4_0
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB4_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q1, [r0], #16
+; CHECK-NEXT:    vmovx.f16 s8, s5
+; CHECK-NEXT:    vmovx.f16 s13, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s11, s8
+; CHECK-NEXT:    vmovx.f16 s14, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s13
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s7
+; CHECK-NEXT:    vmovx.f16 s12, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s14
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s9, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s4
+; CHECK-NEXT:    vmul.f32 q1, q4, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-NEXT:    vmul.f32 q1, q2, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1], #32
+; CHECK-NEXT:    le lr, .LBB4_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %1, align 2
+  %2 = fpext <8 x half> %wide.load to <8 x float>
+  %3 = fmul <8 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = getelementptr inbounds float, float* %y, i32 %index
+  %5 = bitcast float* %4 to <8 x float>*
+  store <8 x float> %3, <8 x float>* %5, align 4
+  %index.next = add i32 %index, 8
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @from_16(half* nocapture readonly %x, float* noalias nocapture %y) {
+; CHECK-LABEL: from_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    adr r2, .LCPI5_0
+; CHECK-NEXT:    mov.w lr, #64
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB5_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q1, [r0], #32
+; CHECK-NEXT:    vmovx.f16 s12, s5
+; CHECK-NEXT:    vldrh.u16 q2, [r0, #-16]
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s12
+; CHECK-NEXT:    vmovx.f16 s18, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s5
+; CHECK-NEXT:    vmovx.f16 s16, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s18
+; CHECK-NEXT:    vmovx.f16 s20, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s16
+; CHECK-NEXT:    vmovx.f16 s22, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s22
+; CHECK-NEXT:    vcvtb.f32.f16 s23, s20
+; CHECK-NEXT:    vmovx.f16 s28, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s22, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s31, s28
+; CHECK-NEXT:    vmovx.f16 s26, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s30, s11
+; CHECK-NEXT:    vmovx.f16 s24, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s29, s26
+; CHECK-NEXT:    vcvtb.f32.f16 s28, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s21, s24
+; CHECK-NEXT:    vcvtb.f32.f16 s20, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s6
+; CHECK-NEXT:    vmul.f32 q1, q7, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #48]
+; CHECK-NEXT:    vmul.f32 q1, q5, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #32]
+; CHECK-NEXT:    vmul.f32 q1, q4, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-NEXT:    vmul.f32 q1, q3, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1], #64
+; CHECK-NEXT:    le lr, .LBB5_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <16 x half>*
+  %wide.load = load <16 x half>, <16 x half>* %1, align 2
+  %2 = fpext <16 x half> %wide.load to <16 x float>
+  %3 = fmul <16 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = getelementptr inbounds float, float* %y, i32 %index
+  %5 = bitcast float* %4 to <16 x float>*
+  store <16 x float> %3, <16 x float>* %5, align 4
+  %index.next = add i32 %index, 16
+  %6 = icmp eq i32 %index.next, 1024
+  br i1 %6, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @both_4(half* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: both_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    adr r2, .LCPI6_0
+; CHECK-NEXT:    mov.w lr, #256
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB6_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r0]
+; CHECK-NEXT:    ldr r3, [r0, #4]
+; CHECK-NEXT:    adds r0, #8
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmovx.f16 s10, s5
+; CHECK-NEXT:    vmovx.f16 s8, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s4
+; CHECK-NEXT:    vmul.f32 q1, q3, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s5
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov.16 q2[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q2[1], r3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q2[3], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    str r2, [r1]
+; CHECK-NEXT:    str r3, [r1, #4]
+; CHECK-NEXT:    adds r1, #8
+; CHECK-NEXT:    le lr, .LBB6_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <4 x half>*
+  %wide.load = load <4 x half>, <4 x half>* %1, align 2
+  %2 = fpext <4 x half> %wide.load to <4 x float>
+  %3 = fmul <4 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = fptrunc <4 x float> %3 to <4 x half>
+  %5 = getelementptr inbounds half, half* %y, i32 %index
+  %6 = bitcast half* %5 to <4 x half>*
+  store <4 x half> %4, <4 x half>* %6, align 2
+  %index.next = add i32 %index, 4
+  %7 = icmp eq i32 %index.next, 1024
+  br i1 %7, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @both_8(half* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: both_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr r2, .LCPI7_0
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB7_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q2, [r0], #16
+; CHECK-NEXT:    vmovx.f16 s6, s9
+; CHECK-NEXT:    vmovx.f16 s4, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s8
+; CHECK-NEXT:    vmul.f32 q3, q3, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s12
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s13
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmovx.f16 s14, s11
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s14
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s12
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s10
+; CHECK-NEXT:    vmul.f32 q2, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrb.8 q1, [r1], #16
+; CHECK-NEXT:    le lr, .LBB7_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %1, align 2
+  %2 = fpext <8 x half> %wide.load to <8 x float>
+  %3 = fmul <8 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = fptrunc <8 x float> %3 to <8 x half>
+  %5 = getelementptr inbounds half, half* %y, i32 %index
+  %6 = bitcast half* %5 to <8 x half>*
+  store <8 x half> %4, <8 x half>* %6, align 2
+  %index.next = add i32 %index, 8
+  %7 = icmp eq i32 %index.next, 1024
+  br i1 %7, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @both_16(half* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: both_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr r2, .LCPI8_0
+; CHECK-NEXT:    mov.w lr, #64
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB8_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q2, [r0, #16]
+; CHECK-NEXT:    vmovx.f16 s6, s9
+; CHECK-NEXT:    vmovx.f16 s4, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s8
+; CHECK-NEXT:    vmul.f32 q3, q3, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s12
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s13
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmovx.f16 s14, s11
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s14
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s12
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s10
+; CHECK-NEXT:    vmul.f32 q2, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vldrh.u16 q2, [r0], #32
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrh.16 q1, [r1, #16]
+; CHECK-NEXT:    vmovx.f16 s6, s9
+; CHECK-NEXT:    vmovx.f16 s4, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s8
+; CHECK-NEXT:    vmul.f32 q3, q3, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s12
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s13
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmovx.f16 s14, s11
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s14
+; CHECK-NEXT:    vmov.16 q1[1], r3
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s12
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s10
+; CHECK-NEXT:    vmul.f32 q2, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vstrh.16 q1, [r1], #32
+; CHECK-NEXT:    le lr, .LBB8_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <16 x half>*
+  %wide.load = load <16 x half>, <16 x half>* %1, align 2
+  %2 = fpext <16 x half> %wide.load to <16 x float>
+  %3 = fmul <16 x float> %2, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %4 = fptrunc <16 x float> %3 to <16 x half>
+  %5 = getelementptr inbounds half, half* %y, i32 %index
+  %6 = bitcast half* %5 to <16 x half>*
+  store <16 x half> %4, <16 x half>* %6, align 2
+  %index.next = add i32 %index, 16
+  %7 = icmp eq i32 %index.next, 1024
+  br i1 %7, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @both_8_I(half* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: both_8_I:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr r2, .LCPI9_0
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB9_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q2, [r0], #16
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s11
+; CHECK-NEXT:    vmovx.f16 s13, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s10
+; CHECK-NEXT:    vmovx.f16 s14, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s8
+; CHECK-NEXT:    vmovx.f16 s8, s10
+; CHECK-NEXT:    vmul.f32 q1, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s13
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s14
+; CHECK-NEXT:    vmul.f32 q2, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q3[0], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov.16 q3[1], r3
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s9
+; CHECK-NEXT:    vmov.16 q3[2], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q3[3], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s10
+; CHECK-NEXT:    vmov.16 q3[4], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q3[5], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s11
+; CHECK-NEXT:    vmov.16 q3[6], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q3[7], r2
+; CHECK-NEXT:    vstrb.8 q3, [r1], #16
+; CHECK-NEXT:    le lr, .LBB9_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <8 x half>*
+  %wide.load = load <8 x half>, <8 x half>* %1, align 2
+  %2 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %3 = shufflevector <8 x half> %wide.load, <8 x half> %wide.load, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %4 = fpext <4 x half> %2 to <4 x float>
+  %5 = fpext <4 x half> %3 to <4 x float>
+  %6 = fmul <4 x float> %4, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %7 = fmul <4 x float> %5, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %8 = shufflevector <4 x float> %6, <4 x float> %7, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %9 = fptrunc <8 x float> %8 to <8 x half>
+  %10 = getelementptr inbounds half, half* %y, i32 %index
+  %11 = bitcast half* %10 to <8 x half>*
+  store <8 x half> %9, <8 x half>* %11, align 2
+  %index.next = add i32 %index, 8
+  %12 = icmp eq i32 %index.next, 1024
+  br i1 %12, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @both_16_I(half* nocapture readonly %x, half* noalias nocapture %y) {
+; CHECK-LABEL: both_16_I:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    adr r2, .LCPI10_0
+; CHECK-NEXT:    mov.w lr, #128
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB10_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrh.u16 q2, [r0]
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s11
+; CHECK-NEXT:    vmovx.f16 s13, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s10
+; CHECK-NEXT:    vmovx.f16 s14, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s8
+; CHECK-NEXT:    vmovx.f16 s8, s10
+; CHECK-NEXT:    vmul.f32 q1, q1, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s13
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s14
+; CHECK-NEXT:    vmul.f32 q2, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov.16 q3[0], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov.16 q3[1], r3
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s9
+; CHECK-NEXT:    vmov.16 q3[2], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q3[3], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s10
+; CHECK-NEXT:    vmov.16 q3[4], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q3[5], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s11
+; CHECK-NEXT:    vmov.16 q3[6], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vldrh.u16 q2, [r0, #16]!
+; CHECK-NEXT:    vmov.16 q3[7], r2
+; CHECK-NEXT:    vstrh.16 q3, [r1]
+; CHECK-NEXT:    vmovx.f16 s12, s11
+; CHECK-NEXT:    vmovx.f16 s14, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s19, s12
+; CHECK-NEXT:    vmovx.f16 s4, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s18, s14
+; CHECK-NEXT:    vmovx.f16 s6, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s17, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s16, s6
+; CHECK-NEXT:    vmul.f32 q1, q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s4
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s9
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s8
+; CHECK-NEXT:    vmul.f32 q3, q3, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s13
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov.16 q2[0], r3
+; CHECK-NEXT:    vmov.16 q2[1], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vmov.16 q2[3], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q2[4], r2
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmov.16 q2[5], r2
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[6], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q2[7], r2
+; CHECK-NEXT:    vstrb.8 q2, [r1, #16]!
+; CHECK-NEXT:    le lr, .LBB10_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI10_0:
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+; CHECK-NEXT:    .long 0x40066666 @ float 2.0999999
+entry:
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds half, half* %x, i32 %index
+  %1 = bitcast half* %0 to <16 x half>*
+  %wide.load = load <16 x half>, <16 x half>* %1, align 2
+  %2 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %3 = shufflevector <16 x half> %wide.load, <16 x half> %wide.load, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %4 = fpext <8 x half> %2 to <8 x float>
+  %5 = fpext <8 x half> %3 to <8 x float>
+  %6 = fmul <8 x float> %4, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %7 = fmul <8 x float> %5, <float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000, float 0x4000CCCCC0000000>
+  %8 = shufflevector <8 x float> %6, <8 x float> %7, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %9 = fptrunc <16 x float> %8 to <16 x half>
+  %10 = getelementptr inbounds half, half* %y, i32 %index
+  %11 = bitcast half* %10 to <16 x half>*
+  store <16 x half> %9, <16 x half>* %11, align 2
+  %index.next = add i32 %index, 8
+  %12 = icmp eq i32 %index.next, 1024
+  br i1 %12, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}

diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffleext.ll b/llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
index c1a306a92718..ebfffad7af18 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffleext.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
-define arm_aapcs_vfpcc <4 x i32> @sext_0246(<8 x i16> %src) {
-; CHECK-LABEL: sext_0246:
+; i16 -> i32
+
+define arm_aapcs_vfpcc <4 x i32> @sext_i32_0246(<8 x i16> %src) {
+; CHECK-LABEL: sext_i32_0246:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s16 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -12,8 +14,8 @@ entry:
   ret <4 x i32> %out
 }
 
-define arm_aapcs_vfpcc <4 x i32> @sext_1357(<8 x i16> %src) {
-; CHECK-LABEL: sext_1357:
+define arm_aapcs_vfpcc <4 x i32> @sext_i32_1357(<8 x i16> %src) {
+; CHECK-LABEL: sext_i32_1357:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.s16 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -23,8 +25,32 @@ entry:
   ret <4 x i32> %out
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_0246(<8 x i16> %src) {
-; CHECK-LABEL: zext_0246:
+define arm_aapcs_vfpcc <8 x i32> @sext_i32_02468101214(<16 x i16> %src) {
+; CHECK-LABEL: sext_i32_02468101214:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %out = sext <8 x i16> %strided.vec to <8 x i32>
+  ret <8 x i32> %out
+}
+
+define arm_aapcs_vfpcc <8 x i32> @sext_i32_13579111315(<16 x i16> %src) {
+; CHECK-LABEL: sext_i32_13579111315:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s16 q0, q0
+; CHECK-NEXT:    vmovlt.s16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %out = sext <8 x i16> %strided.vec to <8 x i32>
+  ret <8 x i32> %out
+}
+
+define arm_aapcs_vfpcc <4 x i32> @zext_i32_0246(<8 x i16> %src) {
+; CHECK-LABEL: zext_i32_0246:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.u16 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -34,8 +60,8 @@ entry:
   ret <4 x i32> %out
 }
 
-define arm_aapcs_vfpcc <4 x i32> @zext_1357(<8 x i16> %src) {
-; CHECK-LABEL: zext_1357:
+define arm_aapcs_vfpcc <4 x i32> @zext_i32_1357(<8 x i16> %src) {
+; CHECK-LABEL: zext_i32_1357:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.u16 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -45,8 +71,35 @@ entry:
   ret <4 x i32> %out
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext_02468101214(<16 x i8> %src) {
-; CHECK-LABEL: sext_02468101214:
+define arm_aapcs_vfpcc <8 x i32> @zext_i32_02468101214(<16 x i16> %src) {
+; CHECK-LABEL: zext_i32_02468101214:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %out = zext <8 x i16> %strided.vec to <8 x i32>
+  ret <8 x i32> %out
+}
+
+define arm_aapcs_vfpcc <8 x i32> @zext_i32_13579111315(<16 x i16> %src) {
+; CHECK-LABEL: zext_i32_13579111315:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u16 q0, q0
+; CHECK-NEXT:    vmovlt.u16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x i16> %src, <16 x i16> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %out = zext <8 x i16> %strided.vec to <8 x i32>
+  ret <8 x i32> %out
+}
+
+
+; i8 -> i16
+
+define arm_aapcs_vfpcc <8 x i16> @sext_i16_02468101214(<16 x i8> %src) {
+; CHECK-LABEL: sext_i16_02468101214:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.s8 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -56,8 +109,8 @@ entry:
   ret <8 x i16> %out
 }
 
-define arm_aapcs_vfpcc <8 x i16> @sext_13579111315(<16 x i8> %src) {
-; CHECK-LABEL: sext_13579111315:
+define arm_aapcs_vfpcc <8 x i16> @sext_i16_13579111315(<16 x i8> %src) {
+; CHECK-LABEL: sext_i16_13579111315:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.s8 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -67,8 +120,32 @@ entry:
   ret <8 x i16> %out
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext_02468101214(<16 x i8> %src) {
-; CHECK-LABEL: zext_02468101214:
+define arm_aapcs_vfpcc <16 x i16> @sext_i16_024681012141618202224262830(<32 x i8> %src) {
+; CHECK-LABEL: sext_i16_024681012141618202224262830:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %out = sext <16 x i8> %strided.vec to <16 x i16>
+  ret <16 x i16> %out
+}
+
+define arm_aapcs_vfpcc <16 x i16> @sext_i16_135791113151719212325272931(<32 x i8> %src) {
+; CHECK-LABEL: sext_i16_135791113151719212325272931:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.s8 q0, q0
+; CHECK-NEXT:    vmovlt.s8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %out = sext <16 x i8> %strided.vec to <16 x i16>
+  ret <16 x i16> %out
+}
+
+define arm_aapcs_vfpcc <8 x i16> @zext_i16_02468101214(<16 x i8> %src) {
+; CHECK-LABEL: zext_i16_02468101214:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlb.u8 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -78,8 +155,8 @@ entry:
   ret <8 x i16> %out
 }
 
-define arm_aapcs_vfpcc <8 x i16> @zext_13579111315(<16 x i8> %src) {
-; CHECK-LABEL: zext_13579111315:
+define arm_aapcs_vfpcc <8 x i16> @zext_i16_13579111315(<16 x i8> %src) {
+; CHECK-LABEL: zext_i16_13579111315:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmovlt.u8 q0, q0
 ; CHECK-NEXT:    bx lr
@@ -88,3 +165,109 @@ entry:
   %out = zext <8 x i8> %strided.vec to <8 x i16>
   ret <8 x i16> %out
 }
+
+define arm_aapcs_vfpcc <16 x i16> @zext_i16_024681012141618202224262830(<32 x i8> %src) {
+; CHECK-LABEL: zext_i16_024681012141618202224262830:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  %out = zext <16 x i8> %strided.vec to <16 x i16>
+  ret <16 x i16> %out
+}
+
+define arm_aapcs_vfpcc <16 x i16> @zext_i16_135791113151719212325272931(<32 x i8> %src) {
+; CHECK-LABEL: zext_i16_135791113151719212325272931:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlt.u8 q0, q0
+; CHECK-NEXT:    vmovlt.u8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <32 x i8> %src, <32 x i8> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+  %out = zext <16 x i8> %strided.vec to <16 x i16>
+  ret <16 x i16> %out
+}
+
+
+; f16 -> f32
+
+define arm_aapcs_vfpcc <4 x float> @fpext_0246(<8 x half> %src) {
+; CHECK-LABEL: fpext_0246:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <8 x half> %src, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %out = fpext <4 x half> %strided.vec to <4 x float>
+  ret <4 x float> %out
+}
+
+define arm_aapcs_vfpcc <4 x float> @fpext_1357(<8 x half> %src) {
+; CHECK-LABEL: fpext_1357:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s8, s3
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmovx.f16 s6, s0
+; CHECK-NEXT:    vmovx.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s6
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <8 x half> %src, <8 x half> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %out = fpext <4 x half> %strided.vec to <4 x float>
+  ret <4 x float> %out
+}
+
+define arm_aapcs_vfpcc <8 x float> @fpext_02468101214(<16 x half> %src) {
+; CHECK-LABEL: fpext_02468101214:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f32.f16 s11, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s9, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s6
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s4
+; CHECK-NEXT:    vmov q1, q3
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x half> %src, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %out = fpext <8 x half> %strided.vec to <8 x float>
+  ret <8 x float> %out
+}
+
+define arm_aapcs_vfpcc <8 x float> @fpext_13579111315(<16 x half> %src) {
+; CHECK-LABEL: fpext_13579111315:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s14, s6
+; CHECK-NEXT:    vmovx.f16 s8, s5
+; CHECK-NEXT:    vmovx.f16 s5, s3
+; CHECK-NEXT:    vmovx.f16 s10, s4
+; CHECK-NEXT:    vmovx.f16 s12, s7
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmovx.f16 s6, s0
+; CHECK-NEXT:    vmovx.f16 s7, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s14
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s10
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <16 x half> %src, <16 x half> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %out = fpext <8 x half> %strided.vec to <8 x float>
+  ret <8 x float> %out
+}

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll b/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
new file mode 100644
index 000000000000..06c21a2115cd
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vcvt16.ll
@@ -0,0 +1,848 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x float> @fpext_4(<4 x half> %src1) {
+; CHECK-LABEL: fpext_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vmovx.f16 s8, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s1
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %out = fpext <4 x half> %src1 to <4 x float>
+  ret <4 x float> %out
+}
+
+define arm_aapcs_vfpcc <8 x float> @fpext_8(<8 x half> %src1) {
+; CHECK-LABEL: fpext_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s8, s1
+; CHECK-NEXT:    vmovx.f16 s6, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s11, s8
+; CHECK-NEXT:    vmovx.f16 s4, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s1
+; CHECK-NEXT:    vmovx.f16 s12, s2
+; CHECK-NEXT:    vcvtb.f32.f16 s9, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s3
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s2
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %out = fpext <8 x half> %src1 to <8 x float>
+  ret <8 x float> %out
+}
+
+
+define arm_aapcs_vfpcc <4 x half> @fptrunc_4(<4 x float> %src1) {
+; CHECK-LABEL: fptrunc_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s1
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s2
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %out = fptrunc <4 x float> %src1 to <4 x half>
+  ret <4 x half> %out
+}
+
+define arm_aapcs_vfpcc <8 x half> @fptrunc_8(<8 x float> %src1) {
+; CHECK-LABEL: fptrunc_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s9
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s5
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s6
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %out = fptrunc <8 x float> %src1 to <8 x half>
+  ret <8 x half> %out
+}
+
+
+define arm_aapcs_vfpcc <8 x half> @shuffle_trunc1(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s4
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <4 x float> %src1, <4 x float> %src2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %out = fptrunc <8 x float> %strided.vec to <8 x half>
+  ret <8 x half> %out
+}
+
+define arm_aapcs_vfpcc <8 x half> @shuffle_trunc2(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s5
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s1
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <4 x float> %src1, <4 x float> %src2, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
+  %out = fptrunc <8 x float> %strided.vec to <8 x half>
+  ret <8 x half> %out
+}
+
+define arm_aapcs_vfpcc <16 x half> @shuffle_trunc3(<8 x float> %src1, <8 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s16
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s17
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s9
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s18
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s10
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s19
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s13
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s15
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <8 x float> %src1, <8 x float> %src2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %out = fptrunc <16 x float> %strided.vec to <16 x half>
+  ret <16 x half> %out
+}
+
+define arm_aapcs_vfpcc <16 x half> @shuffle_trunc4(<8 x float> %src1, <8 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s16
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s9
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s17
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s10
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s18
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s19
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s13
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <8 x float> %src1, <8 x float> %src2, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
+  %out = fptrunc <16 x float> %strided.vec to <16 x half>
+  ret <16 x half> %out
+}
+
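+; Note (annotation, not autogenerated): shuffle_trunc5-8 express the same
+; interleavings with the operations reordered; each source is fptrunc'd first
+; and the <n x half> results are then shuffled. Since a full-width shuffle
+; commutes with a lane-wise fptrunc, the generated code matches shuffle_trunc1-4.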
+define arm_aapcs_vfpcc <8 x half> @shuffle_trunc5(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc5:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s4
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s5
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %out1 = fptrunc <4 x float> %src1 to <4 x half>
+  %out2 = fptrunc <4 x float> %src2 to <4 x half>
+  %s = shufflevector <4 x half> %out1, <4 x half> %out2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x half> %s
+}
+
+define arm_aapcs_vfpcc <8 x half> @shuffle_trunc6(<4 x float> %src1, <4 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc6:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s5
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s1
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %out1 = fptrunc <4 x float> %src1 to <4 x half>
+  %out2 = fptrunc <4 x float> %src2 to <4 x half>
+  %s = shufflevector <4 x half> %out1, <4 x half> %out2, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
+  ret <8 x half> %s
+}
+
+define arm_aapcs_vfpcc <16 x half> @shuffle_trunc7(<8 x float> %src1, <8 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc7:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s16
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s17
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s9
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s18
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s10
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s19
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s13
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s15
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %out1 = fptrunc <8 x float> %src1 to <8 x half>
+  %out2 = fptrunc <8 x float> %src2 to <8 x half>
+  %s = shufflevector <8 x half> %out1, <8 x half> %out2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  ret <16 x half> %s
+}
+
+define arm_aapcs_vfpcc <16 x half> @shuffle_trunc8(<8 x float> %src1, <8 x float> %src2) {
+; CHECK-LABEL: shuffle_trunc8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s8
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s16
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s9
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s17
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s10
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s18
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s19
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s13
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s5
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s14
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s6
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s15
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %out1 = fptrunc <8 x float> %src1 to <8 x half>
+  %out2 = fptrunc <8 x float> %src2 to <8 x half>
+  %s = shufflevector <8 x half> %out1, <8 x half> %out2, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
+  ret <16 x half> %s
+}
+
+
+
+
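+; Note (annotation, not autogenerated): the load_ext tests load <n x half> and
+; fpext to f32. The bottom f16 of each 32-bit lane is widened directly with
+; vcvtb.f32.f16, while vmovx.f16 first extracts the top f16 so it can be widened
+; the same way.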
+define arm_aapcs_vfpcc <4 x float> @load_ext_4(<4 x half>* %src) {
+; CHECK-LABEL: load_ext_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldrd r1, r0, [r0]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmovx.f16 s0, s5
+; CHECK-NEXT:    vmovx.f16 s8, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s0
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s4
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <4 x half>, <4 x half>* %src, align 4
+  %e = fpext <4 x half> %wide.load to <4 x float>
+  ret <4 x float> %e
+}
+
+define arm_aapcs_vfpcc <8 x float> @load_ext_8(<8 x half>* %src) {
+; CHECK-LABEL: load_ext_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vmovx.f16 s0, s9
+; CHECK-NEXT:    vmovx.f16 s6, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s0
+; CHECK-NEXT:    vmovx.f16 s4, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s9
+; CHECK-NEXT:    vmovx.f16 s12, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s10
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <8 x half>, <8 x half>* %src, align 4
+  %e = fpext <8 x half> %wide.load to <8 x float>
+  ret <8 x float> %e
+}
+
+define arm_aapcs_vfpcc <16 x float> @load_ext_16(<16 x half>* %src) {
+; CHECK-LABEL: load_ext_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
+; CHECK-NEXT:    vmovx.f16 s0, s9
+; CHECK-NEXT:    vmovx.f16 s6, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s0
+; CHECK-NEXT:    vmovx.f16 s4, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s9
+; CHECK-NEXT:    vmovx.f16 s15, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s6
+; CHECK-NEXT:    vmovx.f16 s13, s17
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s11
+; CHECK-NEXT:    vmovx.f16 s14, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s15
+; CHECK-NEXT:    vmovx.f16 s12, s19
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s11, s13
+; CHECK-NEXT:    vcvtb.f32.f16 s10, s17
+; CHECK-NEXT:    vmovx.f16 s20, s18
+; CHECK-NEXT:    vcvtb.f32.f16 s9, s14
+; CHECK-NEXT:    vcvtb.f32.f16 s8, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s15, s12
+; CHECK-NEXT:    vcvtb.f32.f16 s14, s19
+; CHECK-NEXT:    vcvtb.f32.f16 s13, s20
+; CHECK-NEXT:    vcvtb.f32.f16 s12, s18
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x half>, <16 x half>* %src, align 4
+  %e = fpext <16 x half> %wide.load to <16 x float>
+  ret <16 x float> %e
+}
+
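+; Note (annotation, not autogenerated): the load_shuffleext tests deinterleave
+; the even f16 lanes before extending. In the 8-element case the even lanes are
+; the bottom half of each 32-bit element, so plain vcvtb.f32.f16 suffices; the
+; 16-element case is matched to the MVE vld20/vld21 interleaving load pair.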
+define arm_aapcs_vfpcc <4 x float> @load_shuffleext_8(<8 x half>* %src) {
+; CHECK-LABEL: load_shuffleext_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s7
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s5
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s4
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <8 x half>, <8 x half>* %src, align 4
+  %sh = shufflevector <8 x half> %wide.load, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %e = fpext <4 x half> %sh to <4 x float>
+  ret <4 x float> %e
+}
+
+define arm_aapcs_vfpcc <8 x float> @load_shuffleext_16(<16 x half>* %src) {
+; CHECK-LABEL: load_shuffleext_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8}
+; CHECK-NEXT:    vpush {d8}
+; CHECK-NEXT:    vld20.16 {q2, q3}, [r0]
+; CHECK-NEXT:    vld21.16 {q2, q3}, [r0]
+; CHECK-NEXT:    vmovx.f16 s0, s9
+; CHECK-NEXT:    vmovx.f16 s6, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s3, s0
+; CHECK-NEXT:    vmovx.f16 s4, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s2, s9
+; CHECK-NEXT:    vmovx.f16 s16, s10
+; CHECK-NEXT:    vcvtb.f32.f16 s1, s6
+; CHECK-NEXT:    vcvtb.f32.f16 s0, s8
+; CHECK-NEXT:    vcvtb.f32.f16 s7, s4
+; CHECK-NEXT:    vcvtb.f32.f16 s6, s11
+; CHECK-NEXT:    vcvtb.f32.f16 s5, s16
+; CHECK-NEXT:    vcvtb.f32.f16 s4, s10
+; CHECK-NEXT:    vpop {d8}
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x half>, <16 x half>* %src, align 4
+  %sh = shufflevector <16 x half> %wide.load, <16 x half> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %e = fpext <8 x half> %sh to <8 x float>
+  ret <8 x float> %e
+}
+
+
+
+
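+; Note (annotation, not autogenerated): the store_trunc tests mirror load_ext:
+; a float vector is fptrunc'd and the <n x half> result stored. Lanes are again
+; converted one at a time with vcvtb.f16.f32, collected with vmov.16 inserts and
+; written out (strd for 4 halves, vstrw.32 for 8 or more).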
+define arm_aapcs_vfpcc void @store_trunc_4(<4 x half>* %src, <4 x float> %val) {
+; CHECK-LABEL: store_trunc_4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s0
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s1
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s2
+; CHECK-NEXT:    vmov.16 q1[1], r2
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    strd r1, r2, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %e = fptrunc <4 x float> %val to <4 x half>
+  store <4 x half> %e, <4 x half>* %src, align 4
+  ret void
+}
+
+define arm_aapcs_vfpcc void @store_trunc_8(<8 x half>* %src, <8 x float> %val) {
+; CHECK-LABEL: store_trunc_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s1
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q2[1], r2
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s4
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s5
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s6
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s7
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vstrw.32 q2, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %e = fptrunc <8 x float> %val to <8 x half>
+  store <8 x half> %e, <8 x half>* %src, align 4
+  ret void
+}
+
+define arm_aapcs_vfpcc void @store_trunc_16(<16 x half>* %src, <16 x float> %val) {
+; CHECK-LABEL: store_trunc_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s8
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s9
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s10
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s11
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s12
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s13
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s14
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s15
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vstrw.32 q4, [r0, #16]
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s1
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q2[1], r2
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s4
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s5
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s6
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s7
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vstrw.32 q2, [r0]
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %e = fptrunc <16 x float> %val to <16 x half>
+  store <16 x half> %e, <16 x half>* %src, align 4
+  ret void
+}
+
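+; Note (annotation, not autogenerated): store_shuffletrunc combines the
+; interleaving shuffle, the fptrunc and the store in one pattern; the interleaved
+; vector is still built lane by lane and stored with vstrw.32.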
+define arm_aapcs_vfpcc void @store_shuffletrunc_8(<8 x half>* %src, <4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: store_shuffletrunc_8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s0
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vcvtb.f16.f32 s8, s4
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s1
+; CHECK-NEXT:    vmov.16 q2[1], r2
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s5
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s6
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s7
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vstrw.32 q2, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <4 x float> %val1, <4 x float> %val2, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  %out = fptrunc <8 x float> %strided.vec to <8 x half>
+  store <8 x half> %out, <8 x half>* %src, align 4
+  ret void
+}
+
+define arm_aapcs_vfpcc void @store_shuffletrunc_16(<16 x half>* %src, <8 x float> %val1, <8 x float> %val2) {
+; CHECK-LABEL: store_shuffletrunc_16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10}
+; CHECK-NEXT:    vpush {d8, d9, d10}
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s4
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    vcvtb.f16.f32 s16, s12
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s5
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s13
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s6
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s20, s14
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s7
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s15
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vstrw.32 q4, [r0, #16]
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s8
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s4, s0
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s1
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s9
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s2
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s12, s10
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov r1, s12
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s3
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vcvtb.f16.f32 s0, s11
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vstrw.32 q1, [r0]
+; CHECK-NEXT:    vpop {d8, d9, d10}
+; CHECK-NEXT:    bx lr
+entry:
+  %strided.vec = shufflevector <8 x float> %val1, <8 x float> %val2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  %out = fptrunc <16 x float> %strided.vec to <16 x half>
+  store <16 x half> %out, <16 x half>* %src, align 4
+  ret void
+}