[llvm] r370325 - [ARM] Masked load and store and predicate tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 29 03:32:13 PDT 2019


Author: dmgreen
Date: Thu Aug 29 03:32:12 2019
New Revision: 370325

URL: http://llvm.org/viewvc/llvm-project?rev=370325&view=rev
Log:
[ARM] Masked load and store and predicate tests. NFC

Added:
    llvm/trunk/test/CodeGen/Thumb2/mve-masked-ldst.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-masked-load.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-masked-store.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block7.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block8.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-nots.mir
Modified:
    llvm/trunk/test/CodeGen/Thumb2/mve-pred-not.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block2.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block3.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block4.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block5.mir
    llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block6.mir

Added: llvm/trunk/test/CodeGen/Thumb2/mve-masked-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-masked-ldst.ll?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-masked-ldst.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-masked-ldst.ll Thu Aug 29 03:32:12 2019
@@ -0,0 +1,1143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+
+define void @foo_v4i32_v4i32(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+; CHECK-LABEL: foo_v4i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r1, [r2, #12]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strmi r1, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  call void @llvm.masked.store.v4i32(<4 x i32> %2, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_sext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
+; CHECK-LABEL: foo_sext_v4i32_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #1]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r1, [r2, #3]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strmi r1, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %3 = sext <4 x i8> %2 to <4 x i32>
+  call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_sext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
+; CHECK-LABEL: foo_sext_v4i32_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r1, [r2, #6]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strmi r1, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = sext <4 x i16> %2 to <4 x i32>
+  call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_zext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
+; CHECK-LABEL: foo_zext_v4i32_v4i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #1]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r1, [r2, #3]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strmi r1, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
+  %3 = zext <4 x i8> %2 to <4 x i32>
+  call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_zext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
+; CHECK-LABEL: foo_zext_v4i32_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r1, [r2, #6]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strmi r1, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = zext <4 x i16> %2 to <4 x i32>
+  call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_v8i16_v8i16(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
+; CHECK-LABEL: foo_v8i16_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #8
+; CHECK-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #8]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r2]
+; CHECK-NEXT:    vmovne.16 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #6]
+; CHECK-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #10]
+; CHECK-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #12]
+; CHECK-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r1, [r2, #14]
+; CHECK-NEXT:    vmovmi.16 q0[7], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-NEXT:    strhne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-NEXT:    strhmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-NEXT:    strhmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-NEXT:    strhmi r2, [r0, #6]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-NEXT:    strhmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-NEXT:    strhmi r2, [r0, #10]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-NEXT:    strhmi r2, [r0, #12]
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-NEXT:    strhmi r1, [r0, #14]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %1 = icmp sgt <8 x i16> %0, zeroinitializer
+  %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
+  call void @llvm.masked.store.v8i16(<8 x i16> %2, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  ret void
+}
+
+define void @foo_sext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
+; CHECK-LABEL: foo_sext_v8i16_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #8
+; CHECK-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #8]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r3, [r2]
+; CHECK-NEXT:    vmovne.16 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #1]
+; CHECK-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #3]
+; CHECK-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #5]
+; CHECK-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #6]
+; CHECK-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r1, [r2, #7]
+; CHECK-NEXT:    vmovmi.16 q0[7], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-NEXT:    strhne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-NEXT:    strhmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-NEXT:    strhmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-NEXT:    strhmi r2, [r0, #6]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-NEXT:    strhmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-NEXT:    strhmi r2, [r0, #10]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-NEXT:    strhmi r2, [r0, #12]
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-NEXT:    strhmi r1, [r0, #14]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %1 = icmp sgt <8 x i16> %0, zeroinitializer
+  %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = sext <8 x i8> %2 to <8 x i16>
+  call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  ret void
+}
+
+define void @foo_zext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
+; CHECK-LABEL: foo_zext_v8i16_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #8
+; CHECK-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #8]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r3, [r2]
+; CHECK-NEXT:    vmovne.16 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #1]
+; CHECK-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #3]
+; CHECK-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #5]
+; CHECK-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #6]
+; CHECK-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r1, [r2, #7]
+; CHECK-NEXT:    vmovmi.16 q0[7], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-NEXT:    strhne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-NEXT:    strhmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-NEXT:    strhmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-NEXT:    strhmi r2, [r0, #6]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-NEXT:    strhmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-NEXT:    strhmi r2, [r0, #10]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-NEXT:    strhmi r2, [r0, #12]
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-NEXT:    strhmi r1, [r0, #14]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %1 = icmp sgt <8 x i16> %0, zeroinitializer
+  %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = zext <8 x i8> %2 to <8 x i16>
+  call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
+  ret void
+}
+
+define void @foo_v16i8_v16i8(<16 x i8> *%dest, <16 x i8> *%mask, <16 x i8> *%src) {
+; CHECK-LABEL: foo_v16i8_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-NEXT:    push {r4, r6, r7, lr}
+; CHECK-NEXT:    .setfp r7, sp, #8
+; CHECK-NEXT:    add r7, sp, #8
+; CHECK-NEXT:    .pad #32
+; CHECK-NEXT:    sub sp, #32
+; CHECK-NEXT:    mov r4, sp
+; CHECK-NEXT:    bfc r4, #0, #4
+; CHECK-NEXT:    mov sp, r4
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #16
+; CHECK-NEXT:    sub.w r4, r7, #8
+; CHECK-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrh.w r1, [sp, #16]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrbne r3, [r2]
+; CHECK-NEXT:    vmovne.8 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #1]
+; CHECK-NEXT:    vmovmi.8 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.8 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #3]
+; CHECK-NEXT:    vmovmi.8 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.8 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #5]
+; CHECK-NEXT:    vmovmi.8 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #6]
+; CHECK-NEXT:    vmovmi.8 q0[6], r3
+; CHECK-NEXT:    lsls r3, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #7]
+; CHECK-NEXT:    vmovmi.8 q0[7], r3
+; CHECK-NEXT:    lsls r3, r1, #23
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.8 q0[8], r3
+; CHECK-NEXT:    lsls r3, r1, #22
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #9]
+; CHECK-NEXT:    vmovmi.8 q0[9], r3
+; CHECK-NEXT:    lsls r3, r1, #21
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #10]
+; CHECK-NEXT:    vmovmi.8 q0[10], r3
+; CHECK-NEXT:    lsls r3, r1, #20
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #11]
+; CHECK-NEXT:    vmovmi.8 q0[11], r3
+; CHECK-NEXT:    lsls r3, r1, #19
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #12]
+; CHECK-NEXT:    vmovmi.8 q0[12], r3
+; CHECK-NEXT:    lsls r3, r1, #18
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #13]
+; CHECK-NEXT:    vmovmi.8 q0[13], r3
+; CHECK-NEXT:    lsls r3, r1, #17
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r3, [r2, #14]
+; CHECK-NEXT:    vmovmi.8 q0[14], r3
+; CHECK-NEXT:    lsls r1, r1, #16
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrbmi r1, [r2, #15]
+; CHECK-NEXT:    vmovmi.8 q0[15], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrh.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne.u8 r2, q0[0]
+; CHECK-NEXT:    strbne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[1]
+; CHECK-NEXT:    strbmi r2, [r0, #1]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[2]
+; CHECK-NEXT:    strbmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[3]
+; CHECK-NEXT:    strbmi r2, [r0, #3]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[4]
+; CHECK-NEXT:    strbmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[5]
+; CHECK-NEXT:    strbmi r2, [r0, #5]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[6]
+; CHECK-NEXT:    strbmi r2, [r0, #6]
+; CHECK-NEXT:    lsls r2, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[7]
+; CHECK-NEXT:    strbmi r2, [r0, #7]
+; CHECK-NEXT:    lsls r2, r1, #23
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[8]
+; CHECK-NEXT:    strbmi r2, [r0, #8]
+; CHECK-NEXT:    lsls r2, r1, #22
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[9]
+; CHECK-NEXT:    strbmi r2, [r0, #9]
+; CHECK-NEXT:    lsls r2, r1, #21
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[10]
+; CHECK-NEXT:    strbmi r2, [r0, #10]
+; CHECK-NEXT:    lsls r2, r1, #20
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[11]
+; CHECK-NEXT:    strbmi r2, [r0, #11]
+; CHECK-NEXT:    lsls r2, r1, #19
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[12]
+; CHECK-NEXT:    strbmi r2, [r0, #12]
+; CHECK-NEXT:    lsls r2, r1, #18
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[13]
+; CHECK-NEXT:    strbmi r2, [r0, #13]
+; CHECK-NEXT:    lsls r2, r1, #17
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r2, q0[14]
+; CHECK-NEXT:    strbmi r2, [r0, #14]
+; CHECK-NEXT:    lsls r1, r1, #16
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u8 r1, q0[15]
+; CHECK-NEXT:    strbmi r1, [r0, #15]
+; CHECK-NEXT:    mov sp, r4
+; CHECK-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %mask, align 1
+  %1 = icmp sgt <16 x i8> %0, zeroinitializer
+  %2 = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %src, i32 1, <16 x i1> %1, <16 x i8> undef)
+  call void @llvm.masked.store.v16i8(<16 x i8> %2, <16 x i8>* %dest, i32 1, <16 x i1> %1)
+  ret void
+}
+
+define void @foo_trunc_v8i8_v8i16(<8 x i8> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
+; CHECK-LABEL: foo_trunc_v8i8_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #8
+; CHECK-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #8]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrhne r3, [r2]
+; CHECK-NEXT:    vmovne.16 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #2]
+; CHECK-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #6]
+; CHECK-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #10]
+; CHECK-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r3, [r2, #12]
+; CHECK-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrhmi r1, [r2, #14]
+; CHECK-NEXT:    vmovmi.16 q0[7], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-NEXT:    strbne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-NEXT:    strbmi r2, [r0, #1]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-NEXT:    strbmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-NEXT:    strbmi r2, [r0, #3]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-NEXT:    strbmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-NEXT:    strbmi r2, [r0, #5]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-NEXT:    strbmi r2, [r0, #6]
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-NEXT:    strbmi r1, [r0, #7]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %1 = icmp sgt <8 x i16> %0, zeroinitializer
+  %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %3 = trunc <8 x i16> %2 to <8 x i8>
+  call void @llvm.masked.store.v8i8(<8 x i8> %3, <8 x i8>* %dest, i32 1, <8 x i1> %1)
+  ret void
+}
+
+define void @foo_trunc_v4i8_v4i32(<4 x i8> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+; CHECK-LABEL: foo_trunc_v4i8_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r1, [r2, #12]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strbne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strbmi r2, [r0, #1]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strbmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strbmi r1, [r0, #3]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %3 = trunc <4 x i32> %2 to <4 x i8>
+  call void @llvm.masked.store.v4i8(<4 x i8> %3, <4 x i8>* %dest, i32 1, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_trunc_v4i16_v4i32(<4 x i16> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
+; CHECK-LABEL: foo_trunc_v4i16_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    ldrne r3, [r2]
+; CHECK-NEXT:    vmovne.32 q0[0], r3
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #4]
+; CHECK-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r3, [r2, #8]
+; CHECK-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    ldrmi r1, [r2, #12]
+; CHECK-NEXT:    vmovmi.32 q0[3], r1
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    itt ne
+; CHECK-NEXT:    vmovne r2, s0
+; CHECK-NEXT:    strhne r2, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s1
+; CHECK-NEXT:    strhmi r2, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r2, s2
+; CHECK-NEXT:    strhmi r2, [r0, #4]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    itt mi
+; CHECK-NEXT:    vmovmi r1, s3
+; CHECK-NEXT:    strhmi r1, [r0, #6]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %3 = trunc <4 x i32> %2 to <4 x i16>
+  call void @llvm.masked.store.v4i16(<4 x i16> %3, <4 x i16>* %dest, i32 2, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_v4f32_v4f32(<4 x float> *%dest, <4 x i32> *%mask, <4 x float> *%src) {
+; CHECK-LABEL: foo_v4f32_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #4
+; CHECK-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #4]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    vldrne s0, [r2]
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vldrmi s1, [r2, #4]
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vldrmi s2, [r2, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vldrmi s3, [r2, #12]
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    vstrne s0, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-NEXT:    lsls r1, r1, #28
+; CHECK-NEXT:    it mi
+; CHECK-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %mask, align 4
+  %1 = icmp sgt <4 x i32> %0, zeroinitializer
+  %2 = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %src, i32 4, <4 x i1> %1, <4 x float> undef)
+  call void @llvm.masked.store.v4f32(<4 x float> %2, <4 x float>* %dest, i32 4, <4 x i1> %1)
+  ret void
+}
+
+define void @foo_v8f16_v8f16(<8 x half> *%dest, <8 x i16> *%mask, <8 x half> *%src) {
+; CHECK-LABEL: foo_v8f16_v8f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    add r3, sp, #8
+; CHECK-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-NEXT:    @ implicit-def: $q0
+; CHECK-NEXT:    vstr p0, [r3]
+; CHECK-NEXT:    ldrb.w r1, [sp, #8]
+; CHECK-NEXT:    lsls r3, r1, #31
+; CHECK-NEXT:    bne .LBB13_18
+; CHECK-NEXT:  @ %bb.1: @ %else
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    bmi .LBB13_19
+; CHECK-NEXT:  .LBB13_2: @ %else2
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    bmi .LBB13_20
+; CHECK-NEXT:  .LBB13_3: @ %else5
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    bmi .LBB13_21
+; CHECK-NEXT:  .LBB13_4: @ %else8
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    bmi .LBB13_22
+; CHECK-NEXT:  .LBB13_5: @ %else11
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    bmi .LBB13_23
+; CHECK-NEXT:  .LBB13_6: @ %else14
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    bmi .LBB13_24
+; CHECK-NEXT:  .LBB13_7: @ %else17
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    bpl .LBB13_9
+; CHECK-NEXT:  .LBB13_8: @ %cond.load19
+; CHECK-NEXT:    vldr.16 s4, [r2, #14]
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov.16 q0[7], r1
+; CHECK-NEXT:  .LBB13_9: @ %else20
+; CHECK-NEXT:    mov r1, sp
+; CHECK-NEXT:    vstr p0, [r1]
+; CHECK-NEXT:    ldrb.w r1, [sp]
+; CHECK-NEXT:    lsls r2, r1, #31
+; CHECK-NEXT:    bne .LBB13_25
+; CHECK-NEXT:  @ %bb.10: @ %else23
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    bmi .LBB13_26
+; CHECK-NEXT:  .LBB13_11: @ %else25
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    bmi .LBB13_27
+; CHECK-NEXT:  .LBB13_12: @ %else27
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    bmi .LBB13_28
+; CHECK-NEXT:  .LBB13_13: @ %else29
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    bmi .LBB13_29
+; CHECK-NEXT:  .LBB13_14: @ %else31
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    bmi .LBB13_30
+; CHECK-NEXT:  .LBB13_15: @ %else33
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    bmi .LBB13_31
+; CHECK-NEXT:  .LBB13_16: @ %else35
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    bmi .LBB13_32
+; CHECK-NEXT:  .LBB13_17: @ %else37
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:  .LBB13_18: @ %cond.load
+; CHECK-NEXT:    vldr.16 s0, [r2]
+; CHECK-NEXT:    lsls r3, r1, #30
+; CHECK-NEXT:    bpl .LBB13_2
+; CHECK-NEXT:  .LBB13_19: @ %cond.load1
+; CHECK-NEXT:    vldr.16 s4, [r2, #2]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[1], r3
+; CHECK-NEXT:    lsls r3, r1, #29
+; CHECK-NEXT:    bpl .LBB13_3
+; CHECK-NEXT:  .LBB13_20: @ %cond.load4
+; CHECK-NEXT:    vldr.16 s4, [r2, #4]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[2], r3
+; CHECK-NEXT:    lsls r3, r1, #28
+; CHECK-NEXT:    bpl .LBB13_4
+; CHECK-NEXT:  .LBB13_21: @ %cond.load7
+; CHECK-NEXT:    vldr.16 s4, [r2, #6]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[3], r3
+; CHECK-NEXT:    lsls r3, r1, #27
+; CHECK-NEXT:    bpl .LBB13_5
+; CHECK-NEXT:  .LBB13_22: @ %cond.load10
+; CHECK-NEXT:    vldr.16 s4, [r2, #8]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[4], r3
+; CHECK-NEXT:    lsls r3, r1, #26
+; CHECK-NEXT:    bpl .LBB13_6
+; CHECK-NEXT:  .LBB13_23: @ %cond.load13
+; CHECK-NEXT:    vldr.16 s4, [r2, #10]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[5], r3
+; CHECK-NEXT:    lsls r3, r1, #25
+; CHECK-NEXT:    bpl .LBB13_7
+; CHECK-NEXT:  .LBB13_24: @ %cond.load16
+; CHECK-NEXT:    vldr.16 s4, [r2, #12]
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.16 q0[6], r3
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    bmi .LBB13_8
+; CHECK-NEXT:    b .LBB13_9
+; CHECK-NEXT:  .LBB13_25: @ %cond.store
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    lsls r2, r1, #30
+; CHECK-NEXT:    bpl .LBB13_11
+; CHECK-NEXT:  .LBB13_26: @ %cond.store24
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-NEXT:    lsls r2, r1, #29
+; CHECK-NEXT:    bpl .LBB13_12
+; CHECK-NEXT:  .LBB13_27: @ %cond.store26
+; CHECK-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-NEXT:    lsls r2, r1, #28
+; CHECK-NEXT:    bpl .LBB13_13
+; CHECK-NEXT:  .LBB13_28: @ %cond.store28
+; CHECK-NEXT:    vmovx.f16 s4, s1
+; CHECK-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-NEXT:    lsls r2, r1, #27
+; CHECK-NEXT:    bpl .LBB13_14
+; CHECK-NEXT:  .LBB13_29: @ %cond.store30
+; CHECK-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-NEXT:    lsls r2, r1, #26
+; CHECK-NEXT:    bpl .LBB13_15
+; CHECK-NEXT:  .LBB13_30: @ %cond.store32
+; CHECK-NEXT:    vmovx.f16 s4, s2
+; CHECK-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-NEXT:    lsls r2, r1, #25
+; CHECK-NEXT:    bpl .LBB13_16
+; CHECK-NEXT:  .LBB13_31: @ %cond.store34
+; CHECK-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-NEXT:    lsls r1, r1, #24
+; CHECK-NEXT:    bpl .LBB13_17
+; CHECK-NEXT:  .LBB13_32: @ %cond.store36
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %mask, align 2
+  %1 = icmp sgt <8 x i16> %0, zeroinitializer
+  %2 = call <8 x half> @llvm.masked.load.v8f16(<8 x half>* %src, i32 2, <8 x i1> %1, <8 x half> undef)
+  call void @llvm.masked.store.v8f16(<8 x half> %2, <8 x half>* %dest, i32 2, <8 x i1> %1)
+  ret void
+}
+
+declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
+declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
+
+declare void @llvm.masked.store.v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
+declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)

Added: llvm/trunk/test/CodeGen/Thumb2/mve-masked-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-masked-load.ll?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-masked-load.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-masked-load.ll Thu Aug 29 03:32:12 2019
@@ -0,0 +1,3597 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    beq .LBB0_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    ldr r3, [r0]
+; CHECK-LE-NEXT:    vdup.32 q0, r2
+; CHECK-LE-NEXT:    vmov.32 q0[0], r3
+; CHECK-LE-NEXT:    b .LBB0_3
+; CHECK-LE-NEXT:  .LBB0_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB0_3: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    beq .LBB0_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    ldr r3, [r0]
+; CHECK-BE-NEXT:    vdup.32 q1, r2
+; CHECK-BE-NEXT:    vmov.32 q1[0], r3
+; CHECK-BE-NEXT:    b .LBB0_3
+; CHECK-BE-NEXT:  .LBB0_2:
+; CHECK-BE-NEXT:    vmov.i32 q1, #0x0
+; CHECK-BE-NEXT:  .LBB0_3: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q1[2], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q1[3], r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer)
+  ret <4 x i32> %l
+}
+
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_align4_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.32 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_align4_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.32 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q1[2], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q1[3], r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> undef)
+  ret <4 x i32> %l
+}
+
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_align1_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.32 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_align1_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.32 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q1[2], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q1[3], r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 1, <4 x i1> %c, <4 x i32> undef)
+  ret <4 x i32> %l
+}
+
+define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_align4_other:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.32 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_align4_other:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.32 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q1[2], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q1[3], r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> %a)
+  ret <4 x i32> %l
+}
+
+define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_preinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    lsls r3, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r3, [r0]
+; CHECK-LE-NEXT:    vmovne.32 q0[0], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r3, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r3, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-LE-NEXT:    lsls r2, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r2
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_preinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    lsls r3, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r3, [r0]
+; CHECK-BE-NEXT:    vmovne.32 q0[0], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r3, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q0[1], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r3, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q0[2], r3
+; CHECK-BE-NEXT:    lsls r2, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q0[3], r2
+; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %z to <4 x i32>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
+  %2 = bitcast i8* %y to <4 x i32>*
+  store <4 x i32> %1, <4 x i32>* %2, align 4
+  ret i8* %z
+}
+
+define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_postinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    add.w r12, r0, #4
+; CHECK-LE-NEXT:    ldrb.w r3, [sp]
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.32 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov r0, r12
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_postinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    add.w r12, r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r3, [sp]
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.32 q0[0], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.32 q0[1], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.32 q0[2], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.32 q0[3], r0
+; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-BE-NEXT:    mov r0, r12
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %x to <4 x i32>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
+  %2 = bitcast i8* %y to <4 x i32>*
+  store <4 x i32> %1, <4 x i32>* %2, align 4
+  ret i8* %z
+}
+
+
+
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    beq .LBB6_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    ldrh r3, [r0]
+; CHECK-LE-NEXT:    vdup.16 q0, r2
+; CHECK-LE-NEXT:    vmov.16 q0[0], r3
+; CHECK-LE-NEXT:    b .LBB6_3
+; CHECK-LE-NEXT:  .LBB6_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB6_3: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    beq .LBB6_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    ldrh r3, [r0]
+; CHECK-BE-NEXT:    vdup.16 q1, r2
+; CHECK-BE-NEXT:    vmov.16 q1[0], r3
+; CHECK-BE-NEXT:    b .LBB6_3
+; CHECK-BE-NEXT:  .LBB6_2:
+; CHECK-BE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-BE-NEXT:    vrev32.16 q1, q0
+; CHECK-BE-NEXT:  .LBB6_3: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q1[7], r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer)
+  ret <8 x i16> %l
+}
+
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_undef(<8 x i16> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16_align4_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrhne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.16 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_align4_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrhne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.16 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q1[7], r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> undef)
+  ret <8 x i16> %l
+}
+
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
+; Masked load of <8 x i16> with alignment 1 and an undef passthru.  The checks
+; show it being scalarized: the predicate is spilt to the stack (vstr p0),
+; reloaded as a byte, then each lane is filled with a conditional ldrh/vmov
+; pair.  BE additionally wraps the value in vrev64.16 for the q-reg ABI lanes.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v8i16_align1_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrhne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.16 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_align1_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrhne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.16 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q1[7], r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 1, <8 x i1> %c, <8 x i16> undef)
+  ret <8 x i16> %l
+}
+
+define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8 x i16> %a) {
+; Masked load of <8 x i16> (align 2) whose passthru is the compared value %a
+; itself.  Unlike the undef-passthru variants there is no "@ implicit-def"
+; line in the checks: inactive lanes must keep %a's values, so q0/q1 stay
+; live while each active lane is overwritten by a conditional ldrh/vmov pair.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v8i16_align4_other:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrhne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.16 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_align4_other:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrhne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.16 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q1[7], r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> %a)
+  ret <8 x i16> %l
+}
+
+define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
+; Pre-increment-shaped masked load: the pointer is advanced by 4 (adds r0, #4)
+; before the scalarized conditional-load sequence, the result is stored to %y,
+; and the advanced pointer %z is returned.  Note this function deliberately
+; uses the base AAPCS (not arm_aapcs_vfpcc), so %a arrives in r2/r3 plus the
+; stack - visible in the vldr d1, [sp, #8] / vmov d0 lines of the checks.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v8i16_preinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    lsls r3, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrhne r3, [r0]
+; CHECK-LE-NEXT:    vmovne.16 q0[0], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r3, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-LE-NEXT:    lsls r2, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r2
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_preinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    lsls r3, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrhne r3, [r0]
+; CHECK-BE-NEXT:    vmovne.16 q0[0], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q0[1], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q0[2], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q0[3], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q0[4], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q0[5], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r3, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q0[6], r3
+; CHECK-BE-NEXT:    lsls r2, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q0[7], r2
+; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %z to <8 x i16>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
+  %2 = bitcast i8* %y to <8 x i16>*
+  store <8 x i16> %1, <8 x i16>* %2, align 4
+  ret i8* %z
+}
+
+define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
+; Post-increment-shaped masked load: the load reads from the original pointer
+; %x while the incremented pointer (r0 + 4, kept in r12 in the checks) is the
+; return value.  The loaded vector is stored to %y before returning.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v8i16_postinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    add.w r12, r0, #4
+; CHECK-LE-NEXT:    ldrb.w r3, [sp]
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrhne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.16 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov r0, r12
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_postinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    add.w r12, r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r3, [sp]
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrhne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.16 q0[0], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.16 q0[1], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.16 q0[2], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.16 q0[3], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.16 q0[4], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.16 q0[5], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.16 q0[6], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrhmi r0, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.16 q0[7], r0
+; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
+; CHECK-BE-NEXT:    mov r0, r12
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %x to <8 x i16>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
+  %2 = bitcast i8* %y to <8 x i16>*
+  store <8 x i16> %1, <8 x i16>* %2, align 4
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16 x i8> %a) {
+; Masked load of <16 x i8> with a zeroinitializer passthru.  The zero passthru
+; forces real control flow in the checks: lane 0 either branches to a
+; cond.load block that materializes the zero vector (vdup.8/vmov.i32) and
+; inserts the loaded byte, or falls through to a plain vmov.i32 q0, #0x0.
+; The 16-bit predicate needs a 16-byte-aligned spill slot, hence the
+; r7 frame setup and bfc-based stack realignment.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v16i8_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrh.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    beq .LBB12_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    ldrb r3, [r0]
+; CHECK-LE-NEXT:    vdup.8 q0, r2
+; CHECK-LE-NEXT:    vmov.8 q0[0], r3
+; CHECK-LE-NEXT:    b .LBB12_3
+; CHECK-LE-NEXT:  .LBB12_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB12_3: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-LE-NEXT:    vmovmi.8 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.8 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-LE-NEXT:    vmovmi.8 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.8 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-LE-NEXT:    vmovmi.8 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.8 q0[6], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-LE-NEXT:    vmovmi.8 q0[7], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.8 q0[8], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-LE-NEXT:    vmovmi.8 q0[9], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.8 q0[10], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-LE-NEXT:    vmovmi.8 q0[11], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.8 q0[12], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-LE-NEXT:    vmovmi.8 q0[13], r2
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    lsls r2, r1, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.8 q0[14], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-LE-NEXT:    vmovmi.8 q0[15], r0
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrh.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    beq .LBB12_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    ldrb r3, [r0]
+; CHECK-BE-NEXT:    vdup.8 q1, r2
+; CHECK-BE-NEXT:    vmov.8 q1[0], r3
+; CHECK-BE-NEXT:    b .LBB12_3
+; CHECK-BE-NEXT:  .LBB12_2:
+; CHECK-BE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-BE-NEXT:    vrev32.8 q1, q0
+; CHECK-BE-NEXT:  .LBB12_3: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-BE-NEXT:    vmovmi.8 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.8 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-BE-NEXT:    vmovmi.8 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.8 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-BE-NEXT:    vmovmi.8 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.8 q1[6], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-BE-NEXT:    vmovmi.8 q1[7], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.8 q1[8], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-BE-NEXT:    vmovmi.8 q1[9], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.8 q1[10], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-BE-NEXT:    vmovmi.8 q1[11], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.8 q1[12], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-BE-NEXT:    vmovmi.8 q1[13], r2
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    lsls r2, r1, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.8 q1[14], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-BE-NEXT:    vmovmi.8 q1[15], r0
+; CHECK-BE-NEXT:    vrev64.8 q0, q1
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer)
+  ret <16 x i8> %l
+}
+
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <16 x i8> %a) {
+; Masked load of <16 x i8> with undef passthru.  Compared with the _zero
+; variant there is no branch for lane 0: with undef inactive lanes the result
+; register starts as "@ implicit-def" and every lane uses the branch-free
+; itt/ldrb/vmov pattern.  The 16-bit predicate (one bit per byte lane) is
+; spilt with vstr p0 and reloaded via ldrh.w.
+; NOTE: assertions are autogenerated - regenerate with
+; utils/update_llc_test_checks.py instead of editing by hand.
+; CHECK-LE-LABEL: masked_v16i8_align4_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    ldrh.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrbne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.8 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-LE-NEXT:    vmovmi.8 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.8 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-LE-NEXT:    vmovmi.8 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.8 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-LE-NEXT:    vmovmi.8 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.8 q0[6], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-LE-NEXT:    vmovmi.8 q0[7], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.8 q0[8], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-LE-NEXT:    vmovmi.8 q0[9], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.8 q0[10], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-LE-NEXT:    vmovmi.8 q0[11], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.8 q0[12], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-LE-NEXT:    vmovmi.8 q0[13], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.8 q0[14], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-LE-NEXT:    vmovmi.8 q0[15], r0
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_align4_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrh.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrbne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.8 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-BE-NEXT:    vmovmi.8 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.8 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-BE-NEXT:    vmovmi.8 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.8 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-BE-NEXT:    vmovmi.8 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.8 q1[6], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-BE-NEXT:    vmovmi.8 q1[7], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.8 q1[8], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-BE-NEXT:    vmovmi.8 q1[9], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.8 q1[10], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-BE-NEXT:    vmovmi.8 q1[11], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.8 q1[12], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-BE-NEXT:    vmovmi.8 q1[13], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.8 q1[14], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-BE-NEXT:    vmovmi.8 q1[15], r0
+; CHECK-BE-NEXT:    vrev64.8 q0, q1
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> undef)
+  ret <16 x i8> %l
+}
+
+define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8_align4_other:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    ldrh.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrbne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.8 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-LE-NEXT:    vmovmi.8 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.8 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-LE-NEXT:    vmovmi.8 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.8 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-LE-NEXT:    vmovmi.8 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.8 q0[6], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-LE-NEXT:    vmovmi.8 q0[7], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.8 q0[8], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-LE-NEXT:    vmovmi.8 q0[9], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.8 q0[10], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-LE-NEXT:    vmovmi.8 q0[11], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.8 q0[12], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-LE-NEXT:    vmovmi.8 q0[13], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.8 q0[14], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-LE-NEXT:    vmovmi.8 q0[15], r0
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_align4_other:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrh.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrbne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.8 q1[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-BE-NEXT:    vmovmi.8 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.8 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-BE-NEXT:    vmovmi.8 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.8 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-BE-NEXT:    vmovmi.8 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.8 q1[6], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-BE-NEXT:    vmovmi.8 q1[7], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.8 q1[8], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-BE-NEXT:    vmovmi.8 q1[9], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.8 q1[10], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-BE-NEXT:    vmovmi.8 q1[11], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.8 q1[12], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-BE-NEXT:    vmovmi.8 q1[13], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.8 q1[14], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-BE-NEXT:    vmovmi.8 q1[15], r0
+; CHECK-BE-NEXT:    vrev64.8 q0, q1
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> %a)
+  ret <16 x i8> %l
+}
+
+define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8_preinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    ldrh.w r2, [sp]
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    lsls r3, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrbne r3, [r0]
+; CHECK-LE-NEXT:    vmovne.8 q0[0], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #1]
+; CHECK-LE-NEXT:    vmovmi.8 q0[1], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.8 q0[2], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #3]
+; CHECK-LE-NEXT:    vmovmi.8 q0[3], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.8 q0[4], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #5]
+; CHECK-LE-NEXT:    vmovmi.8 q0[5], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.8 q0[6], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #7]
+; CHECK-LE-NEXT:    vmovmi.8 q0[7], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.8 q0[8], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #9]
+; CHECK-LE-NEXT:    vmovmi.8 q0[9], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.8 q0[10], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #11]
+; CHECK-LE-NEXT:    vmovmi.8 q0[11], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.8 q0[12], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #13]
+; CHECK-LE-NEXT:    vmovmi.8 q0[13], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r3, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.8 q0[14], r3
+; CHECK-LE-NEXT:    lsls r2, r2, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #15]
+; CHECK-LE-NEXT:    vmovmi.8 q0[15], r2
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_preinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    ldrh.w r2, [sp]
+; CHECK-BE-NEXT:    lsls r3, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrbne r3, [r0]
+; CHECK-BE-NEXT:    vmovne.8 q0[0], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #1]
+; CHECK-BE-NEXT:    vmovmi.8 q0[1], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.8 q0[2], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #3]
+; CHECK-BE-NEXT:    vmovmi.8 q0[3], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.8 q0[4], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #5]
+; CHECK-BE-NEXT:    vmovmi.8 q0[5], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.8 q0[6], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #7]
+; CHECK-BE-NEXT:    vmovmi.8 q0[7], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.8 q0[8], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #9]
+; CHECK-BE-NEXT:    vmovmi.8 q0[9], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.8 q0[10], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #11]
+; CHECK-BE-NEXT:    vmovmi.8 q0[11], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.8 q0[12], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #13]
+; CHECK-BE-NEXT:    vmovmi.8 q0[13], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r3, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.8 q0[14], r3
+; CHECK-BE-NEXT:    lsls r2, r2, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #15]
+; CHECK-BE-NEXT:    vmovmi.8 q0[15], r2
+; CHECK-BE-NEXT:    vstrb.8 q0, [r1]
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+; Pre-increment addressing test: the masked load reads <16 x i8> from the
+; advanced pointer %z = %x + 4 (align 4, undef passthru), with the mask taken
+; from lanes where %a > 0. The loaded vector is stored to %y and %z is
+; returned, so codegen must keep the incremented pointer live.
+; NOTE: the CHECK lines above are autogenerated (update_llc_test_checks.py) —
+; regenerate them rather than editing by hand.
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %z to <16 x i8>*
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
+  %2 = bitcast i8* %y to <16 x i8>*
+  store <16 x i8> %1, <16 x i8>* %2, align 4
+  ret i8* %z
+}
+
+define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8_postinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    ldrh.w r3, [sp]
+; CHECK-LE-NEXT:    add.w r12, r0, #4
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrbne r2, [r0]
+; CHECK-LE-NEXT:    vmovne.8 q0[0], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-LE-NEXT:    vmovmi.8 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-LE-NEXT:    vmovmi.8 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-LE-NEXT:    vmovmi.8 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi.8 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-LE-NEXT:    vmovmi.8 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-LE-NEXT:    vmovmi.8 q0[6], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-LE-NEXT:    vmovmi.8 q0[7], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi.8 q0[8], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-LE-NEXT:    vmovmi.8 q0[9], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-LE-NEXT:    vmovmi.8 q0[10], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-LE-NEXT:    vmovmi.8 q0[11], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi.8 q0[12], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-LE-NEXT:    vmovmi.8 q0[13], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-LE-NEXT:    vmovmi.8 q0[14], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-LE-NEXT:    vmovmi.8 q0[15], r0
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov r0, r12
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_postinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    add.w r12, r0, #4
+; CHECK-BE-NEXT:    ldrh.w r3, [sp]
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrbne r2, [r0]
+; CHECK-BE-NEXT:    vmovne.8 q0[0], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #1]
+; CHECK-BE-NEXT:    vmovmi.8 q0[1], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #2]
+; CHECK-BE-NEXT:    vmovmi.8 q0[2], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #3]
+; CHECK-BE-NEXT:    vmovmi.8 q0[3], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi.8 q0[4], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #5]
+; CHECK-BE-NEXT:    vmovmi.8 q0[5], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #6]
+; CHECK-BE-NEXT:    vmovmi.8 q0[6], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #7]
+; CHECK-BE-NEXT:    vmovmi.8 q0[7], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi.8 q0[8], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #9]
+; CHECK-BE-NEXT:    vmovmi.8 q0[9], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #10]
+; CHECK-BE-NEXT:    vmovmi.8 q0[10], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #11]
+; CHECK-BE-NEXT:    vmovmi.8 q0[11], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi.8 q0[12], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #13]
+; CHECK-BE-NEXT:    vmovmi.8 q0[13], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r2, [r0, #14]
+; CHECK-BE-NEXT:    vmovmi.8 q0[14], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrbmi r0, [r0, #15]
+; CHECK-BE-NEXT:    vmovmi.8 q0[15], r0
+; CHECK-BE-NEXT:    vstrb.8 q0, [r1]
+; CHECK-BE-NEXT:    mov r0, r12
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+; Post-increment addressing test: unlike the preinc variant, the masked load
+; reads <16 x i8> from the ORIGINAL pointer %x (align 4, undef passthru, mask
+; from %a > 0), stores the result to %y, and returns %x + 4.
+; NOTE: the CHECK lines above are autogenerated (update_llc_test_checks.py) —
+; regenerate them rather than editing by hand.
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %x to <16 x i8>*
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
+  %2 = bitcast i8* %y to <16 x i8>*
+  store <16 x i8> %1, <16 x i8>* %2, align 4
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    beq .LBB17_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    vldr s0, .LCPI17_0
+; CHECK-LE-NEXT:    vldr s4, [r0]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vdup.32 q0, r2
+; CHECK-LE-NEXT:    vmov.f32 s0, s4
+; CHECK-LE-NEXT:    b .LBB17_3
+; CHECK-LE-NEXT:  .LBB17_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB17_3: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:    .p2align 2
+; CHECK-LE-NEXT:  @ %bb.4:
+; CHECK-LE-NEXT:  .LCPI17_0:
+; CHECK-LE-NEXT:    .long 0 @ float 0
+;
+; CHECK-BE-LABEL: masked_v4f32_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    beq .LBB17_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    vldr s0, .LCPI17_0
+; CHECK-BE-NEXT:    vldr s2, [r0]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vdup.32 q1, r2
+; CHECK-BE-NEXT:    vmov.f32 s4, s2
+; CHECK-BE-NEXT:    b .LBB17_3
+; CHECK-BE-NEXT:  .LBB17_2:
+; CHECK-BE-NEXT:    vmov.i32 q1, #0x0
+; CHECK-BE-NEXT:  .LBB17_3: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s5, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s6, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s7, [r0, #12]
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:    .p2align 2
+; CHECK-BE-NEXT:  @ %bb.4:
+; CHECK-BE-NEXT:  .LCPI17_0:
+; CHECK-BE-NEXT:    .long 0 @ float 0
+; Masked <4 x float> load, align 4, with a zeroinitializer passthru: inactive
+; lanes (where %a <= 0) must come out as 0.0. Mask is lanes where %a > 0.
+; CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer)
+  ret <4 x float> %l
+}
+
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_align4_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vldrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_align4_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vldrne s4, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s5, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s6, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s7, [r0, #12]
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; Masked <4 x float> load, align 4, with an undef passthru: inactive lanes may
+; hold anything, so no merge/blend code is needed. Mask is lanes where %a > 0.
+; CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> undef)
+  ret <4 x float> %l
+}
+
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_align1_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    ldrne r2, [r0]
+; CHECK-LE-NEXT:    vmovne s0, r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-LE-NEXT:    vmovmi s1, r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-LE-NEXT:    vmovmi s2, r2
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-LE-NEXT:    vmovmi s3, r0
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_align1_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    ldrne r2, [r0]
+; CHECK-BE-NEXT:    vmovne s4, r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #4]
+; CHECK-BE-NEXT:    vmovmi s5, r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r2, [r0, #8]
+; CHECK-BE-NEXT:    vmovmi s6, r2
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    ldrmi r0, [r0, #12]
+; CHECK-BE-NEXT:    vmovmi s7, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; Same as masked_v4f32_align4_undef but with alignment 1 on the masked load;
+; per-lane loads go through integer ldr + vmov rather than vldr.
+; CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 1, <4 x i1> %c, <4 x float> undef)
+  ret <4 x float> %l
+}
+
+define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest, <4 x i32> %a, <4 x float> %b) {
+; CHECK-LE-LABEL: masked_v4f32_align4_other:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vldrne s4, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s5, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s6, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s7, [r0, #12]
+; CHECK-LE-NEXT:    vmov q0, q1
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_align4_other:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q2, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q2, zr
+; CHECK-BE-NEXT:    vrev64.32 q2, q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vldrne s8, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s9, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s10, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s11, [r0, #12]
+; CHECK-BE-NEXT:    vrev64.32 q0, q2
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; Masked <4 x float> load, align 4, with a non-trivial passthru %b: inactive
+; lanes must keep the corresponding lane of %b. Mask is lanes where %a > 0.
+; CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> %b)
+  ret <4 x float> %l
+}
+
+define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_preinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    lsls r3, r2, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vldrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r2, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_preinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    lsls r3, r2, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vldrne s0, [r0]
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r2, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; Pre-increment addressing test: masked <4 x float> load from %z = %x + 4
+; (align 4, undef passthru, mask from %a > 0); result stored to %y, %z
+; returned. CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %z to <4 x float>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
+  %2 = bitcast i8* %y to <4 x float>*
+  store <4 x float> %1, <4 x float>* %2, align 4
+  ret i8* %z
+}
+
+define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_postinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    add.w r12, r0, #4
+; CHECK-LE-NEXT:    ldrb.w r3, [sp]
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vldrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov r0, r12
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_postinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    add.w r12, r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r3, [sp]
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vldrne s0, [r0]
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi s3, [r0, #12]
+; CHECK-BE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-BE-NEXT:    mov r0, r12
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; Post-increment addressing test: masked <4 x float> load from the ORIGINAL
+; pointer %x (align 4, undef passthru, mask from %a > 0); result stored to %y,
+; %x + 4 returned. CHECK lines are autogenerated by update_llc_test_checks.py.
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %x to <4 x float>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
+  %2 = bitcast i8* %y to <4 x float>*
+  store <4 x float> %1, <4 x float>* %2, align 4
+  ret i8* %z
+}
+
+
+; Masked load of <8 x half> with a zeroinitializer passthru: lanes whose mask
+; bit is clear must come out zero, so the expansion splats a zero constant
+; before inserting the active lanes. The masked load is lowered to a per-lane
+; shift/test/branch chain of scalar vldr.16 loads rather than a single
+; predicated vector load.
+; NOTE(review): the function name says align4 but the intrinsic is called with
+; alignment 2 below -- confirm the name is intentional.
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    beq .LBB23_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    vldr.16 s0, .LCPI23_0
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vldr.16 s0, [r0]
+; CHECK-LE-NEXT:    vmov r3, s0
+; CHECK-LE-NEXT:    vdup.16 q0, r2
+; CHECK-LE-NEXT:    vmov.16 q0[0], r3
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB23_3
+; CHECK-LE-NEXT:    b .LBB23_4
+; CHECK-LE-NEXT:  .LBB23_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB23_4
+; CHECK-LE-NEXT:  .LBB23_3: @ %cond.load1
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[1], r2
+; CHECK-LE-NEXT:  .LBB23_4: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB23_11
+; CHECK-LE-NEXT:  @ %bb.5: @ %else5
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB23_12
+; CHECK-LE-NEXT:  .LBB23_6: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB23_13
+; CHECK-LE-NEXT:  .LBB23_7: @ %else11
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB23_14
+; CHECK-LE-NEXT:  .LBB23_8: @ %else14
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB23_15
+; CHECK-LE-NEXT:  .LBB23_9: @ %else17
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB23_16
+; CHECK-LE-NEXT:  .LBB23_10: @ %else20
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB23_11: @ %cond.load4
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB23_6
+; CHECK-LE-NEXT:  .LBB23_12: @ %cond.load7
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB23_7
+; CHECK-LE-NEXT:  .LBB23_13: @ %cond.load10
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB23_8
+; CHECK-LE-NEXT:  .LBB23_14: @ %cond.load13
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB23_9
+; CHECK-LE-NEXT:  .LBB23_15: @ %cond.load16
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB23_10
+; CHECK-LE-NEXT:  .LBB23_16: @ %cond.load19
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-LE-NEXT:    vmov r0, s4
+; CHECK-LE-NEXT:    vmov.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:    .p2align 1
+; CHECK-LE-NEXT:  @ %bb.17:
+; CHECK-LE-NEXT:  .LCPI23_0:
+; CHECK-LE-NEXT:    .short 0 @ half 0
+;
+; CHECK-BE-LABEL: masked_v8f16_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    beq .LBB23_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    vldr.16 s0, .LCPI23_0
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vldr.16 s0, [r0]
+; CHECK-BE-NEXT:    vdup.16 q1, r2
+; CHECK-BE-NEXT:    vmov r3, s0
+; CHECK-BE-NEXT:    vmov.16 q1[0], r3
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB23_3
+; CHECK-BE-NEXT:    b .LBB23_4
+; CHECK-BE-NEXT:  .LBB23_2:
+; CHECK-BE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-BE-NEXT:    vrev32.16 q1, q0
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB23_4
+; CHECK-BE-NEXT:  .LBB23_3: @ %cond.load1
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #2]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[1], r2
+; CHECK-BE-NEXT:  .LBB23_4: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB23_12
+; CHECK-BE-NEXT:  @ %bb.5: @ %else5
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB23_13
+; CHECK-BE-NEXT:  .LBB23_6: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB23_14
+; CHECK-BE-NEXT:  .LBB23_7: @ %else11
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB23_15
+; CHECK-BE-NEXT:  .LBB23_8: @ %else14
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB23_16
+; CHECK-BE-NEXT:  .LBB23_9: @ %else17
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB23_11
+; CHECK-BE-NEXT:  .LBB23_10: @ %cond.load19
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vmov.16 q1[7], r0
+; CHECK-BE-NEXT:  .LBB23_11: @ %else20
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB23_12: @ %cond.load4
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #4]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB23_6
+; CHECK-BE-NEXT:  .LBB23_13: @ %cond.load7
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #6]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB23_7
+; CHECK-BE-NEXT:  .LBB23_14: @ %cond.load10
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #8]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB23_8
+; CHECK-BE-NEXT:  .LBB23_15: @ %cond.load13
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #10]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB23_9
+; CHECK-BE-NEXT:  .LBB23_16: @ %cond.load16
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #12]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB23_10
+; CHECK-BE-NEXT:    b .LBB23_11
+; CHECK-BE-NEXT:    .p2align 1
+; CHECK-BE-NEXT:  @ %bb.17:
+; CHECK-BE-NEXT:  .LCPI23_0:
+; CHECK-BE-NEXT:    .short 0 @ half 0
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer)
+  ret <8 x half> %l
+}
+
+; Masked load of <8 x half> with an undef passthru: no merge value is needed,
+; so the result register starts as implicit-def and only active lanes are
+; filled in by the per-lane branchy expansion.
+; NOTE(review): the function name says align4 but the intrinsic is called with
+; alignment 2 below -- confirm the name is intentional.
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_align4_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB24_9
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB24_10
+; CHECK-LE-NEXT:  .LBB24_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB24_11
+; CHECK-LE-NEXT:  .LBB24_3: @ %else5
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB24_12
+; CHECK-LE-NEXT:  .LBB24_4: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB24_13
+; CHECK-LE-NEXT:  .LBB24_5: @ %else11
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB24_14
+; CHECK-LE-NEXT:  .LBB24_6: @ %else14
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB24_15
+; CHECK-LE-NEXT:  .LBB24_7: @ %else17
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB24_16
+; CHECK-LE-NEXT:  .LBB24_8: @ %else20
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB24_9: @ %cond.load
+; CHECK-LE-NEXT:    vldr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB24_2
+; CHECK-LE-NEXT:  .LBB24_10: @ %cond.load1
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB24_3
+; CHECK-LE-NEXT:  .LBB24_11: @ %cond.load4
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB24_4
+; CHECK-LE-NEXT:  .LBB24_12: @ %cond.load7
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB24_5
+; CHECK-LE-NEXT:  .LBB24_13: @ %cond.load10
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB24_6
+; CHECK-LE-NEXT:  .LBB24_14: @ %cond.load13
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB24_7
+; CHECK-LE-NEXT:  .LBB24_15: @ %cond.load16
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB24_8
+; CHECK-LE-NEXT:  .LBB24_16: @ %cond.load19
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-LE-NEXT:    vmov r0, s4
+; CHECK-LE-NEXT:    vmov.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8f16_align4_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB24_10
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB24_11
+; CHECK-BE-NEXT:  .LBB24_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB24_12
+; CHECK-BE-NEXT:  .LBB24_3: @ %else5
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB24_13
+; CHECK-BE-NEXT:  .LBB24_4: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB24_14
+; CHECK-BE-NEXT:  .LBB24_5: @ %else11
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB24_15
+; CHECK-BE-NEXT:  .LBB24_6: @ %else14
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB24_16
+; CHECK-BE-NEXT:  .LBB24_7: @ %else17
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB24_9
+; CHECK-BE-NEXT:  .LBB24_8: @ %cond.load19
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vmov.16 q1[7], r0
+; CHECK-BE-NEXT:  .LBB24_9: @ %else20
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB24_10: @ %cond.load
+; CHECK-BE-NEXT:    vldr.16 s4, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB24_2
+; CHECK-BE-NEXT:  .LBB24_11: @ %cond.load1
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #2]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB24_3
+; CHECK-BE-NEXT:  .LBB24_12: @ %cond.load4
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #4]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB24_4
+; CHECK-BE-NEXT:  .LBB24_13: @ %cond.load7
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #6]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB24_5
+; CHECK-BE-NEXT:  .LBB24_14: @ %cond.load10
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #8]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB24_6
+; CHECK-BE-NEXT:  .LBB24_15: @ %cond.load13
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #10]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB24_7
+; CHECK-BE-NEXT:  .LBB24_16: @ %cond.load16
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #12]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB24_8
+; CHECK-BE-NEXT:    b .LBB24_9
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> undef)
+  ret <8 x half> %l
+}
+
+; Masked load of <8 x half> with alignment 1 and an undef passthru. Each
+; active lane is loaded with an integer ldrh, stored to a stack slot, and read
+; back with vldr.16 -- presumably because the fp16 load cannot be used
+; directly on the unaligned source; confirm against the lowering code.
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_align1_undef:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #40
+; CHECK-LE-NEXT:    sub sp, #40
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    add r1, sp, #32
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r1, [sp, #32]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB25_9
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB25_10
+; CHECK-LE-NEXT:  .LBB25_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB25_11
+; CHECK-LE-NEXT:  .LBB25_3: @ %else5
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB25_12
+; CHECK-LE-NEXT:  .LBB25_4: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB25_13
+; CHECK-LE-NEXT:  .LBB25_5: @ %else11
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB25_14
+; CHECK-LE-NEXT:  .LBB25_6: @ %else14
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB25_15
+; CHECK-LE-NEXT:  .LBB25_7: @ %else17
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB25_16
+; CHECK-LE-NEXT:  .LBB25_8: @ %else20
+; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB25_9: @ %cond.load
+; CHECK-LE-NEXT:    ldrh r2, [r0]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #28]
+; CHECK-LE-NEXT:    vldr.16 s0, [sp, #28]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB25_2
+; CHECK-LE-NEXT:  .LBB25_10: @ %cond.load1
+; CHECK-LE-NEXT:    ldrh r2, [r0, #2]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #24]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #24]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB25_3
+; CHECK-LE-NEXT:  .LBB25_11: @ %cond.load4
+; CHECK-LE-NEXT:    ldrh r2, [r0, #4]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #20]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #20]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB25_4
+; CHECK-LE-NEXT:  .LBB25_12: @ %cond.load7
+; CHECK-LE-NEXT:    ldrh r2, [r0, #6]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #16]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #16]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB25_5
+; CHECK-LE-NEXT:  .LBB25_13: @ %cond.load10
+; CHECK-LE-NEXT:    ldrh r2, [r0, #8]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #12]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #12]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB25_6
+; CHECK-LE-NEXT:  .LBB25_14: @ %cond.load13
+; CHECK-LE-NEXT:    ldrh r2, [r0, #10]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #8]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #8]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB25_7
+; CHECK-LE-NEXT:  .LBB25_15: @ %cond.load16
+; CHECK-LE-NEXT:    ldrh r2, [r0, #12]
+; CHECK-LE-NEXT:    strh.w r2, [sp, #4]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp, #4]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB25_8
+; CHECK-LE-NEXT:  .LBB25_16: @ %cond.load19
+; CHECK-LE-NEXT:    ldrh r0, [r0, #14]
+; CHECK-LE-NEXT:    strh.w r0, [sp]
+; CHECK-LE-NEXT:    vldr.16 s4, [sp]
+; CHECK-LE-NEXT:    vmov r0, s4
+; CHECK-LE-NEXT:    vmov.16 q0[7], r0
+; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8f16_align1_undef:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #40
+; CHECK-BE-NEXT:    sub sp, #40
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    add r1, sp, #32
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp, #32]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB25_10
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB25_11
+; CHECK-BE-NEXT:  .LBB25_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB25_12
+; CHECK-BE-NEXT:  .LBB25_3: @ %else5
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB25_13
+; CHECK-BE-NEXT:  .LBB25_4: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB25_14
+; CHECK-BE-NEXT:  .LBB25_5: @ %else11
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB25_15
+; CHECK-BE-NEXT:  .LBB25_6: @ %else14
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB25_16
+; CHECK-BE-NEXT:  .LBB25_7: @ %else17
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB25_9
+; CHECK-BE-NEXT:  .LBB25_8: @ %cond.load19
+; CHECK-BE-NEXT:    ldrh r0, [r0, #14]
+; CHECK-BE-NEXT:    strh.w r0, [sp]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp]
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vmov.16 q1[7], r0
+; CHECK-BE-NEXT:  .LBB25_9: @ %else20
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB25_10: @ %cond.load
+; CHECK-BE-NEXT:    ldrh r2, [r0]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #28]
+; CHECK-BE-NEXT:    vldr.16 s4, [sp, #28]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB25_2
+; CHECK-BE-NEXT:  .LBB25_11: @ %cond.load1
+; CHECK-BE-NEXT:    ldrh r2, [r0, #2]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #24]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #24]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB25_3
+; CHECK-BE-NEXT:  .LBB25_12: @ %cond.load4
+; CHECK-BE-NEXT:    ldrh r2, [r0, #4]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #20]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #20]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB25_4
+; CHECK-BE-NEXT:  .LBB25_13: @ %cond.load7
+; CHECK-BE-NEXT:    ldrh r2, [r0, #6]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #16]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #16]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB25_5
+; CHECK-BE-NEXT:  .LBB25_14: @ %cond.load10
+; CHECK-BE-NEXT:    ldrh r2, [r0, #8]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #12]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #12]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB25_6
+; CHECK-BE-NEXT:  .LBB25_15: @ %cond.load13
+; CHECK-BE-NEXT:    ldrh r2, [r0, #10]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #8]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #8]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB25_7
+; CHECK-BE-NEXT:  .LBB25_16: @ %cond.load16
+; CHECK-BE-NEXT:    ldrh r2, [r0, #12]
+; CHECK-BE-NEXT:    strh.w r2, [sp, #4]
+; CHECK-BE-NEXT:    vldr.16 s0, [sp, #4]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q1[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB25_8
+; CHECK-BE-NEXT:    b .LBB25_9
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 1, <8 x i1> %c, <8 x half> undef)
+  ret <8 x half> %l
+}
+
+; Masked load of <8 x half> merging into an existing vector %b: inactive lanes
+; keep the corresponding lane of %b, so the per-lane expansion inserts loaded
+; lanes into the register holding %b (q1 in LE, q2 after the vrev in BE).
+; NOTE(review): the function name says align4 but the intrinsic is called with
+; alignment 2 below -- confirm the name is intentional.
+define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest, <8 x i16> %a, <8 x half> %b) {
+; CHECK-LE-LABEL: masked_v8f16_align4_other:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB26_10
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB26_11
+; CHECK-LE-NEXT:  .LBB26_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB26_12
+; CHECK-LE-NEXT:  .LBB26_3: @ %else5
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB26_13
+; CHECK-LE-NEXT:  .LBB26_4: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB26_14
+; CHECK-LE-NEXT:  .LBB26_5: @ %else11
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB26_15
+; CHECK-LE-NEXT:  .LBB26_6: @ %else14
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB26_16
+; CHECK-LE-NEXT:  .LBB26_7: @ %else17
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB26_9
+; CHECK-LE-NEXT:  .LBB26_8: @ %cond.load19
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #14]
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vmov.16 q1[7], r0
+; CHECK-LE-NEXT:  .LBB26_9: @ %else20
+; CHECK-LE-NEXT:    vmov q0, q1
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB26_10: @ %cond.load
+; CHECK-LE-NEXT:    vldr.16 s0, [r0]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[0], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB26_2
+; CHECK-LE-NEXT:  .LBB26_11: @ %cond.load1
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #2]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[1], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB26_3
+; CHECK-LE-NEXT:  .LBB26_12: @ %cond.load4
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #4]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[2], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB26_4
+; CHECK-LE-NEXT:  .LBB26_13: @ %cond.load7
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #6]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[3], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB26_5
+; CHECK-LE-NEXT:  .LBB26_14: @ %cond.load10
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #8]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[4], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB26_6
+; CHECK-LE-NEXT:  .LBB26_15: @ %cond.load13
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #10]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[5], r2
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB26_7
+; CHECK-LE-NEXT:  .LBB26_16: @ %cond.load16
+; CHECK-LE-NEXT:    vldr.16 s0, [r0, #12]
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    vmov.16 q1[6], r2
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB26_8
+; CHECK-LE-NEXT:    b .LBB26_9
+;
+; CHECK-BE-LABEL: masked_v8f16_align4_other:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q2, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q2, zr
+; CHECK-BE-NEXT:    vrev64.16 q2, q1
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB26_10
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB26_11
+; CHECK-BE-NEXT:  .LBB26_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB26_12
+; CHECK-BE-NEXT:  .LBB26_3: @ %else5
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB26_13
+; CHECK-BE-NEXT:  .LBB26_4: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB26_14
+; CHECK-BE-NEXT:  .LBB26_5: @ %else11
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB26_15
+; CHECK-BE-NEXT:  .LBB26_6: @ %else14
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB26_16
+; CHECK-BE-NEXT:  .LBB26_7: @ %else17
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB26_9
+; CHECK-BE-NEXT:  .LBB26_8: @ %cond.load19
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vmov.16 q2[7], r0
+; CHECK-BE-NEXT:  .LBB26_9: @ %else20
+; CHECK-BE-NEXT:    vrev64.16 q0, q2
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB26_10: @ %cond.load
+; CHECK-BE-NEXT:    vldr.16 s0, [r0]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[0], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB26_2
+; CHECK-BE-NEXT:  .LBB26_11: @ %cond.load1
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #2]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[1], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB26_3
+; CHECK-BE-NEXT:  .LBB26_12: @ %cond.load4
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #4]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[2], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB26_4
+; CHECK-BE-NEXT:  .LBB26_13: @ %cond.load7
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #6]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[3], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB26_5
+; CHECK-BE-NEXT:  .LBB26_14: @ %cond.load10
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #8]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[4], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB26_6
+; CHECK-BE-NEXT:  .LBB26_15: @ %cond.load13
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #10]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[5], r2
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB26_7
+; CHECK-BE-NEXT:  .LBB26_16: @ %cond.load16
+; CHECK-BE-NEXT:    vldr.16 s0, [r0, #12]
+; CHECK-BE-NEXT:    vmov r2, s0
+; CHECK-BE-NEXT:    vmov.16 q2[6], r2
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB26_8
+; CHECK-BE-NEXT:    b .LBB26_9
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> %b)
+  ret <8 x half> %l
+}
+
+; Pre-increment addressing pattern: the pointer is advanced by 4 bytes first,
+; the masked load (undef passthru, alignment 4) reads from the incremented
+; address, the result is stored unconditionally to %y, and the incremented
+; pointer is returned.
+define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_preinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    lsls r3, r2, #31
+; CHECK-LE-NEXT:    bne .LBB27_10
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    bmi .LBB27_11
+; CHECK-LE-NEXT:  .LBB27_2: @ %else2
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    bmi .LBB27_12
+; CHECK-LE-NEXT:  .LBB27_3: @ %else5
+; CHECK-LE-NEXT:    lsls r3, r2, #28
+; CHECK-LE-NEXT:    bmi .LBB27_13
+; CHECK-LE-NEXT:  .LBB27_4: @ %else8
+; CHECK-LE-NEXT:    lsls r3, r2, #27
+; CHECK-LE-NEXT:    bmi .LBB27_14
+; CHECK-LE-NEXT:  .LBB27_5: @ %else11
+; CHECK-LE-NEXT:    lsls r3, r2, #26
+; CHECK-LE-NEXT:    bmi .LBB27_15
+; CHECK-LE-NEXT:  .LBB27_6: @ %else14
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    bmi .LBB27_16
+; CHECK-LE-NEXT:  .LBB27_7: @ %else17
+; CHECK-LE-NEXT:    lsls r2, r2, #24
+; CHECK-LE-NEXT:    bpl .LBB27_9
+; CHECK-LE-NEXT:  .LBB27_8: @ %cond.load19
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[7], r2
+; CHECK-LE-NEXT:  .LBB27_9: @ %else20
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB27_10: @ %cond.load
+; CHECK-LE-NEXT:    vldr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r3, r2, #30
+; CHECK-LE-NEXT:    bpl .LBB27_2
+; CHECK-LE-NEXT:  .LBB27_11: @ %cond.load1
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[1], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    bpl .LBB27_3
+; CHECK-LE-NEXT:  .LBB27_12: @ %cond.load4
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[2], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #28
+; CHECK-LE-NEXT:    bpl .LBB27_4
+; CHECK-LE-NEXT:  .LBB27_13: @ %cond.load7
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[3], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #27
+; CHECK-LE-NEXT:    bpl .LBB27_5
+; CHECK-LE-NEXT:  .LBB27_14: @ %cond.load10
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[4], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #26
+; CHECK-LE-NEXT:    bpl .LBB27_6
+; CHECK-LE-NEXT:  .LBB27_15: @ %cond.load13
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[5], r3
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    bpl .LBB27_7
+; CHECK-LE-NEXT:  .LBB27_16: @ %cond.load16
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    vmov.16 q0[6], r3
+; CHECK-LE-NEXT:    lsls r2, r2, #24
+; CHECK-LE-NEXT:    bmi .LBB27_8
+; CHECK-LE-NEXT:    b .LBB27_9
+;
+; CHECK-BE-LABEL: masked_v8f16_preinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    lsls r3, r2, #31
+; CHECK-BE-NEXT:    bne .LBB27_10
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    bmi .LBB27_11
+; CHECK-BE-NEXT:  .LBB27_2: @ %else2
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    bmi .LBB27_12
+; CHECK-BE-NEXT:  .LBB27_3: @ %else5
+; CHECK-BE-NEXT:    lsls r3, r2, #28
+; CHECK-BE-NEXT:    bmi .LBB27_13
+; CHECK-BE-NEXT:  .LBB27_4: @ %else8
+; CHECK-BE-NEXT:    lsls r3, r2, #27
+; CHECK-BE-NEXT:    bmi .LBB27_14
+; CHECK-BE-NEXT:  .LBB27_5: @ %else11
+; CHECK-BE-NEXT:    lsls r3, r2, #26
+; CHECK-BE-NEXT:    bmi .LBB27_15
+; CHECK-BE-NEXT:  .LBB27_6: @ %else14
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    bmi .LBB27_16
+; CHECK-BE-NEXT:  .LBB27_7: @ %else17
+; CHECK-BE-NEXT:    lsls r2, r2, #24
+; CHECK-BE-NEXT:    bpl .LBB27_9
+; CHECK-BE-NEXT:  .LBB27_8: @ %cond.load19
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[7], r2
+; CHECK-BE-NEXT:  .LBB27_9: @ %else20
+; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB27_10: @ %cond.load
+; CHECK-BE-NEXT:    vldr.16 s0, [r0]
+; CHECK-BE-NEXT:    lsls r3, r2, #30
+; CHECK-BE-NEXT:    bpl .LBB27_2
+; CHECK-BE-NEXT:  .LBB27_11: @ %cond.load1
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[1], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    bpl .LBB27_3
+; CHECK-BE-NEXT:  .LBB27_12: @ %cond.load4
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[2], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #28
+; CHECK-BE-NEXT:    bpl .LBB27_4
+; CHECK-BE-NEXT:  .LBB27_13: @ %cond.load7
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[3], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #27
+; CHECK-BE-NEXT:    bpl .LBB27_5
+; CHECK-BE-NEXT:  .LBB27_14: @ %cond.load10
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[4], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #26
+; CHECK-BE-NEXT:    bpl .LBB27_6
+; CHECK-BE-NEXT:  .LBB27_15: @ %cond.load13
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[5], r3
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    bpl .LBB27_7
+; CHECK-BE-NEXT:  .LBB27_16: @ %cond.load16
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    vmov.16 q0[6], r3
+; CHECK-BE-NEXT:    lsls r2, r2, #24
+; CHECK-BE-NEXT:    bmi .LBB27_8
+; CHECK-BE-NEXT:    b .LBB27_9
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %z to <8 x half>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
+  %2 = bitcast i8* %y to <8 x half>*
+  store <8 x half> %1, <8 x half>* %2, align 4
+  ret i8* %z
+}
+
+define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_postinc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    @ implicit-def: $q0
+; CHECK-LE-NEXT:    ldrb.w r3, [sp]
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    bne .LBB28_12
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    bmi .LBB28_13
+; CHECK-LE-NEXT:  .LBB28_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    bmi .LBB28_14
+; CHECK-LE-NEXT:  .LBB28_3: @ %else5
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    bmi .LBB28_15
+; CHECK-LE-NEXT:  .LBB28_4: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r3, #27
+; CHECK-LE-NEXT:    bmi .LBB28_16
+; CHECK-LE-NEXT:  .LBB28_5: @ %else11
+; CHECK-LE-NEXT:    lsls r2, r3, #26
+; CHECK-LE-NEXT:    bpl .LBB28_7
+; CHECK-LE-NEXT:  .LBB28_6: @ %cond.load13
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:  .LBB28_7: @ %else14
+; CHECK-LE-NEXT:    add.w r12, r0, #4
+; CHECK-LE-NEXT:    lsls r2, r3, #25
+; CHECK-LE-NEXT:    bpl .LBB28_9
+; CHECK-LE-NEXT:  @ %bb.8: @ %cond.load16
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[6], r2
+; CHECK-LE-NEXT:  .LBB28_9: @ %else17
+; CHECK-LE-NEXT:    lsls r2, r3, #24
+; CHECK-LE-NEXT:    bpl .LBB28_11
+; CHECK-LE-NEXT:  @ %bb.10: @ %cond.load19
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-LE-NEXT:    vmov r0, s4
+; CHECK-LE-NEXT:    vmov.16 q0[7], r0
+; CHECK-LE-NEXT:  .LBB28_11: @ %else20
+; CHECK-LE-NEXT:    vstrw.32 q0, [r1]
+; CHECK-LE-NEXT:    mov r0, r12
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB28_12: @ %cond.load
+; CHECK-LE-NEXT:    vldr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r3, #30
+; CHECK-LE-NEXT:    bpl .LBB28_2
+; CHECK-LE-NEXT:  .LBB28_13: @ %cond.load1
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[1], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #29
+; CHECK-LE-NEXT:    bpl .LBB28_3
+; CHECK-LE-NEXT:  .LBB28_14: @ %cond.load4
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[2], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #28
+; CHECK-LE-NEXT:    bpl .LBB28_4
+; CHECK-LE-NEXT:  .LBB28_15: @ %cond.load7
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[3], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #27
+; CHECK-LE-NEXT:    bpl .LBB28_5
+; CHECK-LE-NEXT:  .LBB28_16: @ %cond.load10
+; CHECK-LE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    vmov.16 q0[4], r2
+; CHECK-LE-NEXT:    lsls r2, r3, #26
+; CHECK-LE-NEXT:    bmi .LBB28_6
+; CHECK-LE-NEXT:    b .LBB28_7
+;
+; CHECK-BE-LABEL: masked_v8f16_postinc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    @ implicit-def: $q0
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r3, [sp]
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    bne .LBB28_12
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    bmi .LBB28_13
+; CHECK-BE-NEXT:  .LBB28_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    bmi .LBB28_14
+; CHECK-BE-NEXT:  .LBB28_3: @ %else5
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    bmi .LBB28_15
+; CHECK-BE-NEXT:  .LBB28_4: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r3, #27
+; CHECK-BE-NEXT:    bmi .LBB28_16
+; CHECK-BE-NEXT:  .LBB28_5: @ %else11
+; CHECK-BE-NEXT:    lsls r2, r3, #26
+; CHECK-BE-NEXT:    bpl .LBB28_7
+; CHECK-BE-NEXT:  .LBB28_6: @ %cond.load13
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #10]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[5], r2
+; CHECK-BE-NEXT:  .LBB28_7: @ %else14
+; CHECK-BE-NEXT:    add.w r12, r0, #4
+; CHECK-BE-NEXT:    lsls r2, r3, #25
+; CHECK-BE-NEXT:    bpl .LBB28_9
+; CHECK-BE-NEXT:  @ %bb.8: @ %cond.load16
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #12]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[6], r2
+; CHECK-BE-NEXT:  .LBB28_9: @ %else17
+; CHECK-BE-NEXT:    lsls r2, r3, #24
+; CHECK-BE-NEXT:    bpl .LBB28_11
+; CHECK-BE-NEXT:  @ %bb.10: @ %cond.load19
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #14]
+; CHECK-BE-NEXT:    vmov r0, s4
+; CHECK-BE-NEXT:    vmov.16 q0[7], r0
+; CHECK-BE-NEXT:  .LBB28_11: @ %else20
+; CHECK-BE-NEXT:    vstrh.16 q0, [r1]
+; CHECK-BE-NEXT:    mov r0, r12
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB28_12: @ %cond.load
+; CHECK-BE-NEXT:    vldr.16 s0, [r0]
+; CHECK-BE-NEXT:    lsls r2, r3, #30
+; CHECK-BE-NEXT:    bpl .LBB28_2
+; CHECK-BE-NEXT:  .LBB28_13: @ %cond.load1
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #2]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[1], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #29
+; CHECK-BE-NEXT:    bpl .LBB28_3
+; CHECK-BE-NEXT:  .LBB28_14: @ %cond.load4
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #4]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[2], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #28
+; CHECK-BE-NEXT:    bpl .LBB28_4
+; CHECK-BE-NEXT:  .LBB28_15: @ %cond.load7
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #6]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[3], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #27
+; CHECK-BE-NEXT:    bpl .LBB28_5
+; CHECK-BE-NEXT:  .LBB28_16: @ %cond.load10
+; CHECK-BE-NEXT:    vldr.16 s4, [r0, #8]
+; CHECK-BE-NEXT:    vmov r2, s4
+; CHECK-BE-NEXT:    vmov.16 q0[4], r2
+; CHECK-BE-NEXT:    lsls r2, r3, #26
+; CHECK-BE-NEXT:    bmi .LBB28_6
+; CHECK-BE-NEXT:    b .LBB28_7
+entry:
+  %z = getelementptr inbounds i8, i8* %x, i32 4
+  %0 = bitcast i8* %x to <8 x half>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
+  %2 = bitcast i8* %y to <8 x half>*
+  store <8 x half> %1, <8 x half>* %2, align 4
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2 x i64> %a) {
+; CHECK-LE-LABEL: masked_v2i64_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vmov r3, s0
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    vmov r1, s1
+; CHECK-LE-NEXT:    vmov r12, s3
+; CHECK-LE-NEXT:    rsbs r3, r3, #0
+; CHECK-LE-NEXT:    vmov r3, s2
+; CHECK-LE-NEXT:    sbcs.w r1, r2, r1
+; CHECK-LE-NEXT:    mov.w r1, #0
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r1, #1
+; CHECK-LE-NEXT:    rsbs r3, r3, #0
+; CHECK-LE-NEXT:    sbcs.w r3, r2, r12
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r2, #1
+; CHECK-LE-NEXT:    cmp r2, #0
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    mvnne r2, #1
+; CHECK-LE-NEXT:    bfi r2, r1, #0, #1
+; CHECK-LE-NEXT:    and r1, r2, #3
+; CHECK-LE-NEXT:    lsls r2, r2, #31
+; CHECK-LE-NEXT:    beq .LBB29_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    vldr d1, .LCPI29_0
+; CHECK-LE-NEXT:    vldr d0, [r0]
+; CHECK-LE-NEXT:    b .LBB29_3
+; CHECK-LE-NEXT:  .LBB29_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB29_3: @ %else
+; CHECK-LE-NEXT:    lsls r1, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi d1, [r0, #8]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:    .p2align 3
+; CHECK-LE-NEXT:  @ %bb.4:
+; CHECK-LE-NEXT:  .LCPI29_0:
+; CHECK-LE-NEXT:    .long 0 @ double 0
+; CHECK-LE-NEXT:    .long 0
+;
+; CHECK-BE-LABEL: masked_v2i64_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    vmov r3, s7
+; CHECK-BE-NEXT:    vmov r1, s6
+; CHECK-BE-NEXT:    vmov r12, s4
+; CHECK-BE-NEXT:    rsbs r3, r3, #0
+; CHECK-BE-NEXT:    vmov r3, s5
+; CHECK-BE-NEXT:    sbcs.w r1, r2, r1
+; CHECK-BE-NEXT:    mov.w r1, #0
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r1, #1
+; CHECK-BE-NEXT:    rsbs r3, r3, #0
+; CHECK-BE-NEXT:    sbcs.w r3, r2, r12
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r2, #1
+; CHECK-BE-NEXT:    cmp r2, #0
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    mvnne r2, #1
+; CHECK-BE-NEXT:    bfi r2, r1, #0, #1
+; CHECK-BE-NEXT:    and r1, r2, #3
+; CHECK-BE-NEXT:    lsls r2, r2, #31
+; CHECK-BE-NEXT:    beq .LBB29_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    vldr d1, .LCPI29_0
+; CHECK-BE-NEXT:    vldr d0, [r0]
+; CHECK-BE-NEXT:    b .LBB29_3
+; CHECK-BE-NEXT:  .LBB29_2:
+; CHECK-BE-NEXT:    vmov.i32 q1, #0x0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:  .LBB29_3: @ %else
+; CHECK-BE-NEXT:    lsls r1, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi d1, [r0, #8]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:    .p2align 3
+; CHECK-BE-NEXT:  @ %bb.4:
+; CHECK-BE-NEXT:  .LCPI29_0:
+; CHECK-BE-NEXT:    .long 0 @ double 0
+; CHECK-BE-NEXT:    .long 0
+entry:
+  %c = icmp sgt <2 x i64> %a, zeroinitializer
+  %l = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer)
+  ret <2 x i64> %l
+}
+
+define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
+; CHECK-LE-LABEL: masked_v2f64_align4_zero:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vmov r3, s4
+; CHECK-LE-NEXT:    movs r2, #0
+; CHECK-LE-NEXT:    vmov r1, s5
+; CHECK-LE-NEXT:    vmov r12, s7
+; CHECK-LE-NEXT:    rsbs r3, r3, #0
+; CHECK-LE-NEXT:    vmov r3, s6
+; CHECK-LE-NEXT:    sbcs.w r1, r2, r1
+; CHECK-LE-NEXT:    mov.w r1, #0
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r1, #1
+; CHECK-LE-NEXT:    rsbs r3, r3, #0
+; CHECK-LE-NEXT:    sbcs.w r3, r2, r12
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r2, #1
+; CHECK-LE-NEXT:    cmp r2, #0
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    mvnne r2, #1
+; CHECK-LE-NEXT:    bfi r2, r1, #0, #1
+; CHECK-LE-NEXT:    and r1, r2, #3
+; CHECK-LE-NEXT:    lsls r2, r2, #31
+; CHECK-LE-NEXT:    beq .LBB30_2
+; CHECK-LE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-LE-NEXT:    vldr d1, .LCPI30_0
+; CHECK-LE-NEXT:    vldr d0, [r0]
+; CHECK-LE-NEXT:    b .LBB30_3
+; CHECK-LE-NEXT:  .LBB30_2:
+; CHECK-LE-NEXT:    vmov.i32 q0, #0x0
+; CHECK-LE-NEXT:  .LBB30_3: @ %else
+; CHECK-LE-NEXT:    lsls r1, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vldrmi d1, [r0, #8]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:    .p2align 3
+; CHECK-LE-NEXT:  @ %bb.4:
+; CHECK-LE-NEXT:  .LCPI30_0:
+; CHECK-LE-NEXT:    .long 0 @ double 0
+; CHECK-LE-NEXT:    .long 0
+;
+; CHECK-BE-LABEL: masked_v2f64_align4_zero:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    movs r2, #0
+; CHECK-BE-NEXT:    vmov r3, s3
+; CHECK-BE-NEXT:    vmov r1, s2
+; CHECK-BE-NEXT:    vmov r12, s0
+; CHECK-BE-NEXT:    rsbs r3, r3, #0
+; CHECK-BE-NEXT:    vmov r3, s1
+; CHECK-BE-NEXT:    sbcs.w r1, r2, r1
+; CHECK-BE-NEXT:    mov.w r1, #0
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r1, #1
+; CHECK-BE-NEXT:    rsbs r3, r3, #0
+; CHECK-BE-NEXT:    sbcs.w r3, r2, r12
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r2, #1
+; CHECK-BE-NEXT:    cmp r2, #0
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    mvnne r2, #1
+; CHECK-BE-NEXT:    bfi r2, r1, #0, #1
+; CHECK-BE-NEXT:    and r1, r2, #3
+; CHECK-BE-NEXT:    lsls r2, r2, #31
+; CHECK-BE-NEXT:    beq .LBB30_2
+; CHECK-BE-NEXT:  @ %bb.1: @ %cond.load
+; CHECK-BE-NEXT:    vldr d1, .LCPI30_0
+; CHECK-BE-NEXT:    vldr d0, [r0]
+; CHECK-BE-NEXT:    b .LBB30_3
+; CHECK-BE-NEXT:  .LBB30_2:
+; CHECK-BE-NEXT:    vmov.i32 q1, #0x0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:  .LBB30_3: @ %else
+; CHECK-BE-NEXT:    lsls r1, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vldrmi d1, [r0, #8]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:    .p2align 3
+; CHECK-BE-NEXT:  @ %bb.4:
+; CHECK-BE-NEXT:  .LCPI30_0:
+; CHECK-BE-NEXT:    .long 0 @ double 0
+; CHECK-BE-NEXT:    .long 0
+entry:
+  %c = icmp sgt <2 x i64> %b, zeroinitializer
+  %l = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer)
+  ret <2 x double> %l
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
+declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
+declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
+declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)

Added: llvm/trunk/test/CodeGen/Thumb2/mve-masked-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-masked-store.ll?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-masked-store.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-masked-store.ll Thu Aug 29 03:32:12 2019
@@ -0,0 +1,2292 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
+
+define arm_aapcs_vfpcc void @masked_v4i32(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne r2, s0
+; CHECK-LE-NEXT:    strne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r2, s1
+; CHECK-LE-NEXT:    strmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r2, s2
+; CHECK-LE-NEXT:    strmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s3
+; CHECK-LE-NEXT:    strmi r1, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne r2, s4
+; CHECK-BE-NEXT:    strne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r2, s5
+; CHECK-BE-NEXT:    strmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r2, s6
+; CHECK-BE-NEXT:    strmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s7
+; CHECK-BE-NEXT:    strmi r1, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 4, <4 x i1> %c)
+  ret void
+}
+
+define arm_aapcs_vfpcc void @masked_v4i32_align1(<4 x i32> *%dest, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_align1:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne r2, s0
+; CHECK-LE-NEXT:    strne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r2, s1
+; CHECK-LE-NEXT:    strmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r2, s2
+; CHECK-LE-NEXT:    strmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s3
+; CHECK-LE-NEXT:    strmi r1, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_align1:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne r2, s4
+; CHECK-BE-NEXT:    strne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r2, s5
+; CHECK-BE-NEXT:    strmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r2, s6
+; CHECK-BE-NEXT:    strmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s7
+; CHECK-BE-NEXT:    strmi r1, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %a, <4 x i32>* %dest, i32 1, <4 x i1> %c)
+  ret void
+}
+
+define i8* @masked_v4i32_pre(i8* %y, i8* %x, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_pre:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    add r2, sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne r1, s0
+; CHECK-LE-NEXT:    strne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s1
+; CHECK-LE-NEXT:    strmi r1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s2
+; CHECK-LE-NEXT:    strmi r1, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s3
+; CHECK-LE-NEXT:    strmi r1, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_pre:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    add r2, sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-BE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne r1, s0
+; CHECK-BE-NEXT:    strne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s1
+; CHECK-BE-NEXT:    strmi r1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s2
+; CHECK-BE-NEXT:    strmi r1, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s3
+; CHECK-BE-NEXT:    strmi r1, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <4 x i32>*
+  %1 = load <4 x i32>, <4 x i32>* %0, align 4
+  %2 = bitcast i8* %z to <4 x i32>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
+  ret i8* %z
+}
+
+define i8* @masked_v4i32_post(i8* %y, i8* %x, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4i32_post:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    add r2, sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne r1, s0
+; CHECK-LE-NEXT:    strne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r1, s1
+; CHECK-LE-NEXT:    strmi r1, [r0, #4]
+; CHECK-LE-NEXT:    adds r1, r0, #4
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r3, s2
+; CHECK-LE-NEXT:    strmi r3, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi r2, s3
+; CHECK-LE-NEXT:    strmi r2, [r0, #12]
+; CHECK-LE-NEXT:    mov r0, r1
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4i32_post:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    add r2, sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-BE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne r1, s0
+; CHECK-BE-NEXT:    strne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r1, s1
+; CHECK-BE-NEXT:    strmi r1, [r0, #4]
+; CHECK-BE-NEXT:    adds r1, r0, #4
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r3, s2
+; CHECK-BE-NEXT:    strmi r3, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi r2, s3
+; CHECK-BE-NEXT:    strmi r2, [r0, #12]
+; CHECK-BE-NEXT:    mov r0, r1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <4 x i32>*
+  %1 = load <4 x i32>, <4 x i32>* %0, align 4
+  %2 = bitcast i8* %y to <4 x i32>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %1, <4 x i32>* %2, i32 4, <4 x i1> %c)
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc void @masked_v8i16(<8 x i16> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-LE-NEXT:    strhne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u16 r2, q1[0]
+; CHECK-BE-NEXT:    strhne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[1]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[2]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[3]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[4]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[5]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[6]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q1[7]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 2, <8 x i1> %c)
+  ret void
+}
+
+define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16_align1:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u16 r2, q0[0]
+; CHECK-LE-NEXT:    strhne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[1]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[2]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[3]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[4]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[5]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[6]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_align1:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u16 r2, q1[0]
+; CHECK-BE-NEXT:    strhne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[1]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[2]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[3]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[4]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[5]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q1[6]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q1[7]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %a, <8 x i16>* %dest, i32 1, <8 x i1> %c)
+  ret void
+}
+
+; masked_v8i16_pre: pre-increment form of a masked <8 x i16> store. The base
+; pointer %y is advanced by 4 bytes first, a vector is loaded from %x, then
+; stored through the advanced pointer under the lane mask (%a > 0) with
+; alignment 2, and the advanced pointer is returned.
+define i8* @masked_v8i16_pre(i8* %y, i8* %x, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16_pre:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u16 r1, q0[0]
+; CHECK-LE-NEXT:    strhne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[1]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #2]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[2]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[3]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #6]
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[4]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[5]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #10]
+; CHECK-LE-NEXT:    lsls r1, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[6]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_pre:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u16 r1, q0[0]
+; CHECK-BE-NEXT:    strhne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[1]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #2]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[2]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[3]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #6]
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[4]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[5]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #10]
+; CHECK-BE-NEXT:    lsls r1, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[6]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[7]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <8 x i16>*
+  %1 = load <8 x i16>, <8 x i16>* %0, align 4
+  %2 = bitcast i8* %z to <8 x i16>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
+  ret i8* %z
+}
+
+; masked_v8i16_post: post-increment form. The vector loaded from %x is stored
+; through the original pointer %y (alignment 2) under the lane mask (%a > 0),
+; and %y advanced by 4 bytes is returned.
+define i8* @masked_v8i16_post(i8* %y, i8* %x, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8i16_post:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u16 r1, q0[0]
+; CHECK-LE-NEXT:    strhne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[1]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #2]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[2]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[3]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #6]
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[4]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r1, q0[5]
+; CHECK-LE-NEXT:    strhmi r1, [r0, #10]
+; CHECK-LE-NEXT:    adds r1, r0, #4
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r3, q0[6]
+; CHECK-LE-NEXT:    strhmi r3, [r0, #12]
+; CHECK-LE-NEXT:    lsls r2, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u16 r2, q0[7]
+; CHECK-LE-NEXT:    strhmi r2, [r0, #14]
+; CHECK-LE-NEXT:    mov r0, r1
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8i16_post:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u16 r1, q0[0]
+; CHECK-BE-NEXT:    strhne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[1]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #2]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[2]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[3]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #6]
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[4]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r1, q0[5]
+; CHECK-BE-NEXT:    strhmi r1, [r0, #10]
+; CHECK-BE-NEXT:    adds r1, r0, #4
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r3, q0[6]
+; CHECK-BE-NEXT:    strhmi r3, [r0, #12]
+; CHECK-BE-NEXT:    lsls r2, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u16 r2, q0[7]
+; CHECK-BE-NEXT:    strhmi r2, [r0, #14]
+; CHECK-BE-NEXT:    mov r0, r1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <8 x i16>*
+  %1 = load <8 x i16>, <8 x i16>* %0, align 4
+  %2 = bitcast i8* %y to <8 x i16>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %1, <8 x i16>* %2, i32 2, <8 x i1> %c)
+  ret i8* %z
+}
+
+
+; masked_v16i8: masked store of the <16 x i8> argument to %dest with byte
+; alignment, lane mask (%a > 0). With 16 predicate lanes the expected output
+; reads the spilled predicate back with a halfword load (ldrh) and tests one
+; bit per byte lane; the prologue realigns sp (bfc) for the predicate spill.
+define arm_aapcs_vfpcc void @masked_v16i8(<16 x i8> *%dest, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    ldrh.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u8 r2, q0[0]
+; CHECK-LE-NEXT:    strbne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[1]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #1]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[2]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[3]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #3]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[4]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[5]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #5]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[6]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[7]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #7]
+; CHECK-LE-NEXT:    lsls r2, r1, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[8]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[9]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #9]
+; CHECK-LE-NEXT:    lsls r2, r1, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[10]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[11]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #11]
+; CHECK-LE-NEXT:    lsls r2, r1, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[12]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #12]
+; CHECK-LE-NEXT:    lsls r2, r1, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[13]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #13]
+; CHECK-LE-NEXT:    lsls r2, r1, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[14]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #14]
+; CHECK-LE-NEXT:    lsls r1, r1, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[15]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #15]
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrh.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u8 r2, q1[0]
+; CHECK-BE-NEXT:    strbne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[1]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #1]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[2]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[3]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #3]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[4]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[5]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #5]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[6]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[7]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #7]
+; CHECK-BE-NEXT:    lsls r2, r1, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[8]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[9]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #9]
+; CHECK-BE-NEXT:    lsls r2, r1, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[10]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[11]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #11]
+; CHECK-BE-NEXT:    lsls r2, r1, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[12]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #12]
+; CHECK-BE-NEXT:    lsls r2, r1, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[13]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #13]
+; CHECK-BE-NEXT:    lsls r2, r1, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q1[14]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #14]
+; CHECK-BE-NEXT:    lsls r1, r1, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q1[15]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #15]
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %a, <16 x i8>* %dest, i32 1, <16 x i1> %c)
+  ret void
+}
+
+; masked_v16i8_pre: pre-increment form of the <16 x i8> masked store. %y is
+; advanced by 4 bytes, the vector loaded from %x is stored byte-aligned
+; through the advanced pointer under the lane mask (%a > 0), and the advanced
+; pointer is returned.
+define i8* @masked_v16i8_pre(i8* %y, i8* %x, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8_pre:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    vldr d1, [r7, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u8 r1, q0[0]
+; CHECK-LE-NEXT:    strbne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[1]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #1]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[2]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #2]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[3]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #3]
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[4]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[5]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #5]
+; CHECK-LE-NEXT:    lsls r1, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[6]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #6]
+; CHECK-LE-NEXT:    lsls r1, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[7]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #7]
+; CHECK-LE-NEXT:    lsls r1, r2, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[8]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[9]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #9]
+; CHECK-LE-NEXT:    lsls r1, r2, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[10]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #10]
+; CHECK-LE-NEXT:    lsls r1, r2, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[11]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #11]
+; CHECK-LE-NEXT:    lsls r1, r2, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[12]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r2, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[13]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #13]
+; CHECK-LE-NEXT:    lsls r1, r2, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[14]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #14]
+; CHECK-LE-NEXT:    lsls r1, r2, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[15]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #15]
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_pre:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vldr d1, [r7, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp]
+; CHECK-BE-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u8 r1, q0[0]
+; CHECK-BE-NEXT:    strbne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[1]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #1]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[2]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #2]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[3]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #3]
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[4]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[5]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #5]
+; CHECK-BE-NEXT:    lsls r1, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[6]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #6]
+; CHECK-BE-NEXT:    lsls r1, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[7]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #7]
+; CHECK-BE-NEXT:    lsls r1, r2, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[8]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[9]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #9]
+; CHECK-BE-NEXT:    lsls r1, r2, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[10]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #10]
+; CHECK-BE-NEXT:    lsls r1, r2, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[11]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #11]
+; CHECK-BE-NEXT:    lsls r1, r2, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[12]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r2, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[13]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #13]
+; CHECK-BE-NEXT:    lsls r1, r2, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[14]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #14]
+; CHECK-BE-NEXT:    lsls r1, r2, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[15]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #15]
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0, align 4
+  %2 = bitcast i8* %z to <16 x i8>*
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
+  ret i8* %z
+}
+
+; masked_v16i8_post: post-increment form. The vector loaded from %x is stored
+; byte-aligned through the original pointer %y under the lane mask (%a > 0),
+; and %y advanced by 4 bytes is returned.
+define i8* @masked_v16i8_post(i8* %y, i8* %x, <16 x i8> %a) {
+; CHECK-LE-LABEL: masked_v16i8_post:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-LE-NEXT:    .setfp r7, sp, #8
+; CHECK-LE-NEXT:    add r7, sp, #8
+; CHECK-LE-NEXT:    .pad #16
+; CHECK-LE-NEXT:    sub sp, #16
+; CHECK-LE-NEXT:    mov r4, sp
+; CHECK-LE-NEXT:    bfc r4, #0, #4
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    vldr d1, [r7, #8]
+; CHECK-LE-NEXT:    sub.w r4, r7, #8
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s8 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    itt ne
+; CHECK-LE-NEXT:    vmovne.u8 r1, q0[0]
+; CHECK-LE-NEXT:    strbne r1, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[1]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #1]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[2]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #2]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[3]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #3]
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[4]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[5]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #5]
+; CHECK-LE-NEXT:    lsls r1, r2, #25
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[6]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #6]
+; CHECK-LE-NEXT:    lsls r1, r2, #24
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[7]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #7]
+; CHECK-LE-NEXT:    lsls r1, r2, #23
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[8]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #22
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[9]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #9]
+; CHECK-LE-NEXT:    lsls r1, r2, #21
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[10]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #10]
+; CHECK-LE-NEXT:    lsls r1, r2, #20
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[11]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #11]
+; CHECK-LE-NEXT:    lsls r1, r2, #19
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[12]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r2, #18
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r1, q0[13]
+; CHECK-LE-NEXT:    strbmi r1, [r0, #13]
+; CHECK-LE-NEXT:    adds r1, r0, #4
+; CHECK-LE-NEXT:    lsls r3, r2, #17
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r3, q0[14]
+; CHECK-LE-NEXT:    strbmi r3, [r0, #14]
+; CHECK-LE-NEXT:    lsls r2, r2, #16
+; CHECK-LE-NEXT:    itt mi
+; CHECK-LE-NEXT:    vmovmi.u8 r2, q0[15]
+; CHECK-LE-NEXT:    strbmi r2, [r0, #15]
+; CHECK-LE-NEXT:    mov r0, r1
+; CHECK-LE-NEXT:    mov sp, r4
+; CHECK-LE-NEXT:    pop {r4, r6, r7, pc}
+;
+; CHECK-BE-LABEL: masked_v16i8_post:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .save {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    push {r4, r6, r7, lr}
+; CHECK-BE-NEXT:    .setfp r7, sp, #8
+; CHECK-BE-NEXT:    add r7, sp, #8
+; CHECK-BE-NEXT:    .pad #16
+; CHECK-BE-NEXT:    sub sp, #16
+; CHECK-BE-NEXT:    mov r4, sp
+; CHECK-BE-NEXT:    bfc r4, #0, #4
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    vldr d1, [r7, #8]
+; CHECK-BE-NEXT:    sub.w r4, r7, #8
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    vcmp.s8 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp]
+; CHECK-BE-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    itt ne
+; CHECK-BE-NEXT:    vmovne.u8 r1, q0[0]
+; CHECK-BE-NEXT:    strbne r1, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[1]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #1]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[2]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #2]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[3]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #3]
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[4]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[5]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #5]
+; CHECK-BE-NEXT:    lsls r1, r2, #25
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[6]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #6]
+; CHECK-BE-NEXT:    lsls r1, r2, #24
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[7]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #7]
+; CHECK-BE-NEXT:    lsls r1, r2, #23
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[8]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #22
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[9]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #9]
+; CHECK-BE-NEXT:    lsls r1, r2, #21
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[10]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #10]
+; CHECK-BE-NEXT:    lsls r1, r2, #20
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[11]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #11]
+; CHECK-BE-NEXT:    lsls r1, r2, #19
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[12]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r2, #18
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r1, q0[13]
+; CHECK-BE-NEXT:    strbmi r1, [r0, #13]
+; CHECK-BE-NEXT:    adds r1, r0, #4
+; CHECK-BE-NEXT:    lsls r3, r2, #17
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r3, q0[14]
+; CHECK-BE-NEXT:    strbmi r3, [r0, #14]
+; CHECK-BE-NEXT:    lsls r2, r2, #16
+; CHECK-BE-NEXT:    itt mi
+; CHECK-BE-NEXT:    vmovmi.u8 r2, q0[15]
+; CHECK-BE-NEXT:    strbmi r2, [r0, #15]
+; CHECK-BE-NEXT:    mov r0, r1
+; CHECK-BE-NEXT:    mov sp, r4
+; CHECK-BE-NEXT:    pop {r4, r6, r7, pc}
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <16 x i8>*
+  %1 = load <16 x i8>, <16 x i8>* %0, align 4
+  %2 = bitcast i8* %y to <16 x i8>*
+  %c = icmp sgt <16 x i8> %a, zeroinitializer
+  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %1, <16 x i8>* %2, i32 1, <16 x i1> %c)
+  ret i8* %z
+}
+
+
+; masked_v4f32: masked store of <4 x float> %a to %dest with alignment 4. The
+; mask is (%b ugt 0), which is equivalent to (%b != 0) — hence the
+; "vcmp.i32 ne" in the expected output. Each selected lane is stored with a
+; conditional vstr.
+define arm_aapcs_vfpcc void @masked_v4f32(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) {
+; CHECK-LE-LABEL: masked_v4f32:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vstrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q2, q1
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vstrne s4, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s5, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s6, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s7, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp ugt <4 x i32> %b, zeroinitializer
+  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 4, <4 x i1> %c)
+  ret void
+}
+
+; masked_v4f32_align1: as masked_v4f32 but with alignment 1. In the expected
+; output each selected lane is first spilled with vstr to an aligned stack
+; slot, then copied to the destination via integer ldr/str — presumably
+; because a direct fp store cannot be used on the unaligned destination.
+define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) {
+; CHECK-LE-LABEL: masked_v4f32_align1:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #20
+; CHECK-LE-NEXT:    sub sp, #20
+; CHECK-LE-NEXT:    add r1, sp, #16
+; CHECK-LE-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp, #16]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    ittt ne
+; CHECK-LE-NEXT:    vstrne s0, [sp, #12]
+; CHECK-LE-NEXT:    ldrne r2, [sp, #12]
+; CHECK-LE-NEXT:    strne r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    ittt mi
+; CHECK-LE-NEXT:    vstrmi s1, [sp, #8]
+; CHECK-LE-NEXT:    ldrmi r2, [sp, #8]
+; CHECK-LE-NEXT:    strmi r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    ittt mi
+; CHECK-LE-NEXT:    vstrmi s2, [sp, #4]
+; CHECK-LE-NEXT:    ldrmi r2, [sp, #4]
+; CHECK-LE-NEXT:    strmi r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r1, #28
+; CHECK-LE-NEXT:    ittt mi
+; CHECK-LE-NEXT:    vstrmi s3, [sp]
+; CHECK-LE-NEXT:    ldrmi r1, [sp]
+; CHECK-LE-NEXT:    strmi r1, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #20
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_align1:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #20
+; CHECK-BE-NEXT:    sub sp, #20
+; CHECK-BE-NEXT:    vrev64.32 q2, q1
+; CHECK-BE-NEXT:    add r1, sp, #16
+; CHECK-BE-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp, #16]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    ittt ne
+; CHECK-BE-NEXT:    vstrne s4, [sp, #12]
+; CHECK-BE-NEXT:    ldrne r2, [sp, #12]
+; CHECK-BE-NEXT:    strne r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    ittt mi
+; CHECK-BE-NEXT:    vstrmi s5, [sp, #8]
+; CHECK-BE-NEXT:    ldrmi r2, [sp, #8]
+; CHECK-BE-NEXT:    strmi r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    ittt mi
+; CHECK-BE-NEXT:    vstrmi s6, [sp, #4]
+; CHECK-BE-NEXT:    ldrmi r2, [sp, #4]
+; CHECK-BE-NEXT:    strmi r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r1, #28
+; CHECK-BE-NEXT:    ittt mi
+; CHECK-BE-NEXT:    vstrmi s7, [sp]
+; CHECK-BE-NEXT:    ldrmi r1, [sp]
+; CHECK-BE-NEXT:    strmi r1, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #20
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp ugt <4 x i32> %b, zeroinitializer
+  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %a, <4 x float>* %dest, i32 1, <4 x i1> %c)
+  ret void
+}
+
+define i8* @masked_v4f32_pre(i8* %y, i8* %x, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_pre:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    add r2, sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vstrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_pre:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    add r2, sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-BE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vstrne s0, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <4 x float>*
+  %1 = load <4 x float>, <4 x float>* %0, align 4
+  %2 = bitcast i8* %z to <4 x float>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
+  ret i8* %z
+}
+
+define i8* @masked_v4f32_post(i8* %y, i8* %x, <4 x i32> %a) {
+; CHECK-LE-LABEL: masked_v4f32_post:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    add r2, sp, #4
+; CHECK-LE-NEXT:    vcmp.s32 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vstrne s0, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-LE-NEXT:    adds r1, r0, #4
+; CHECK-LE-NEXT:    lsls r3, r2, #29
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r2, #28
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-LE-NEXT:    mov r0, r1
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v4f32_post:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    add r2, sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vcmp.s32 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp, #4]
+; CHECK-BE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vstrne s0, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s1, [r0, #4]
+; CHECK-BE-NEXT:    adds r1, r0, #4
+; CHECK-BE-NEXT:    lsls r3, r2, #29
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r2, #28
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi s3, [r0, #12]
+; CHECK-BE-NEXT:    mov r0, r1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <4 x float>*
+  %1 = load <4 x float>, <4 x float>* %0, align 4
+  %2 = bitcast i8* %y to <4 x float>*
+  %c = icmp sgt <4 x i32> %a, zeroinitializer
+  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %1, <4 x float>* %2, i32 4, <4 x i1> %c)
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc void @masked_v8f16(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
+; CHECK-LE-LABEL: masked_v8f16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    mov r1, sp
+; CHECK-LE-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB15_9
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB15_10
+; CHECK-LE-NEXT:  .LBB15_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB15_11
+; CHECK-LE-NEXT:  .LBB15_3: @ %else4
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB15_12
+; CHECK-LE-NEXT:  .LBB15_4: @ %else6
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB15_13
+; CHECK-LE-NEXT:  .LBB15_5: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB15_14
+; CHECK-LE-NEXT:  .LBB15_6: @ %else10
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB15_15
+; CHECK-LE-NEXT:  .LBB15_7: @ %else12
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB15_16
+; CHECK-LE-NEXT:  .LBB15_8: @ %else14
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB15_9: @ %cond.store
+; CHECK-LE-NEXT:    vstr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB15_2
+; CHECK-LE-NEXT:  .LBB15_10: @ %cond.store1
+; CHECK-LE-NEXT:    vmovx.f16 s4, s0
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB15_3
+; CHECK-LE-NEXT:  .LBB15_11: @ %cond.store3
+; CHECK-LE-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB15_4
+; CHECK-LE-NEXT:  .LBB15_12: @ %cond.store5
+; CHECK-LE-NEXT:    vmovx.f16 s4, s1
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB15_5
+; CHECK-LE-NEXT:  .LBB15_13: @ %cond.store7
+; CHECK-LE-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB15_6
+; CHECK-LE-NEXT:  .LBB15_14: @ %cond.store9
+; CHECK-LE-NEXT:    vmovx.f16 s4, s2
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB15_7
+; CHECK-LE-NEXT:  .LBB15_15: @ %cond.store11
+; CHECK-LE-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB15_8
+; CHECK-LE-NEXT:  .LBB15_16: @ %cond.store13
+; CHECK-LE-NEXT:    vmovx.f16 s0, s3
+; CHECK-LE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8f16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vrev64.16 q2, q1
+; CHECK-BE-NEXT:    mov r1, sp
+; CHECK-BE-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB15_9
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB15_10
+; CHECK-BE-NEXT:  .LBB15_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB15_11
+; CHECK-BE-NEXT:  .LBB15_3: @ %else4
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB15_12
+; CHECK-BE-NEXT:  .LBB15_4: @ %else6
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB15_13
+; CHECK-BE-NEXT:  .LBB15_5: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB15_14
+; CHECK-BE-NEXT:  .LBB15_6: @ %else10
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB15_15
+; CHECK-BE-NEXT:  .LBB15_7: @ %else12
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB15_16
+; CHECK-BE-NEXT:  .LBB15_8: @ %else14
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB15_9: @ %cond.store
+; CHECK-BE-NEXT:    vstr.16 s4, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB15_2
+; CHECK-BE-NEXT:  .LBB15_10: @ %cond.store1
+; CHECK-BE-NEXT:    vmovx.f16 s0, s4
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB15_3
+; CHECK-BE-NEXT:  .LBB15_11: @ %cond.store3
+; CHECK-BE-NEXT:    vstr.16 s5, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB15_4
+; CHECK-BE-NEXT:  .LBB15_12: @ %cond.store5
+; CHECK-BE-NEXT:    vmovx.f16 s0, s5
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB15_5
+; CHECK-BE-NEXT:  .LBB15_13: @ %cond.store7
+; CHECK-BE-NEXT:    vstr.16 s6, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB15_6
+; CHECK-BE-NEXT:  .LBB15_14: @ %cond.store9
+; CHECK-BE-NEXT:    vmovx.f16 s0, s6
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB15_7
+; CHECK-BE-NEXT:  .LBB15_15: @ %cond.store11
+; CHECK-BE-NEXT:    vstr.16 s7, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB15_8
+; CHECK-BE-NEXT:  .LBB15_16: @ %cond.store13
+; CHECK-BE-NEXT:    vmovx.f16 s0, s7
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp ugt <8 x i16> %b, zeroinitializer
+  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 2, <8 x i1> %c)
+  ret void
+}
+
+define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
+; CHECK-LE-LABEL: masked_v8f16_align1:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #40
+; CHECK-LE-NEXT:    sub sp, #40
+; CHECK-LE-NEXT:    add r1, sp, #32
+; CHECK-LE-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-LE-NEXT:    vstr p0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp, #32]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB16_9
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB16_10
+; CHECK-LE-NEXT:  .LBB16_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB16_11
+; CHECK-LE-NEXT:  .LBB16_3: @ %else4
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB16_12
+; CHECK-LE-NEXT:  .LBB16_4: @ %else6
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB16_13
+; CHECK-LE-NEXT:  .LBB16_5: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB16_14
+; CHECK-LE-NEXT:  .LBB16_6: @ %else10
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB16_15
+; CHECK-LE-NEXT:  .LBB16_7: @ %else12
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB16_16
+; CHECK-LE-NEXT:  .LBB16_8: @ %else14
+; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB16_9: @ %cond.store
+; CHECK-LE-NEXT:    vstr.16 s0, [sp, #28]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #28]
+; CHECK-LE-NEXT:    strh r2, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB16_2
+; CHECK-LE-NEXT:  .LBB16_10: @ %cond.store1
+; CHECK-LE-NEXT:    vmovx.f16 s4, s0
+; CHECK-LE-NEXT:    vstr.16 s4, [sp, #24]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #24]
+; CHECK-LE-NEXT:    strh r2, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB16_3
+; CHECK-LE-NEXT:  .LBB16_11: @ %cond.store3
+; CHECK-LE-NEXT:    vstr.16 s1, [sp, #20]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #20]
+; CHECK-LE-NEXT:    strh r2, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB16_4
+; CHECK-LE-NEXT:  .LBB16_12: @ %cond.store5
+; CHECK-LE-NEXT:    vmovx.f16 s4, s1
+; CHECK-LE-NEXT:    vstr.16 s4, [sp, #16]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #16]
+; CHECK-LE-NEXT:    strh r2, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB16_5
+; CHECK-LE-NEXT:  .LBB16_13: @ %cond.store7
+; CHECK-LE-NEXT:    vstr.16 s2, [sp, #12]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #12]
+; CHECK-LE-NEXT:    strh r2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB16_6
+; CHECK-LE-NEXT:  .LBB16_14: @ %cond.store9
+; CHECK-LE-NEXT:    vmovx.f16 s4, s2
+; CHECK-LE-NEXT:    vstr.16 s4, [sp, #8]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #8]
+; CHECK-LE-NEXT:    strh r2, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB16_7
+; CHECK-LE-NEXT:  .LBB16_15: @ %cond.store11
+; CHECK-LE-NEXT:    vstr.16 s3, [sp, #4]
+; CHECK-LE-NEXT:    ldrh.w r2, [sp, #4]
+; CHECK-LE-NEXT:    strh r2, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB16_8
+; CHECK-LE-NEXT:  .LBB16_16: @ %cond.store13
+; CHECK-LE-NEXT:    vmovx.f16 s0, s3
+; CHECK-LE-NEXT:    vstr.16 s0, [sp]
+; CHECK-LE-NEXT:    ldrh.w r1, [sp]
+; CHECK-LE-NEXT:    strh r1, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #40
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8f16_align1:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #40
+; CHECK-BE-NEXT:    sub sp, #40
+; CHECK-BE-NEXT:    vrev64.16 q2, q1
+; CHECK-BE-NEXT:    add r1, sp, #32
+; CHECK-BE-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vstr p0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp, #32]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB16_9
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB16_10
+; CHECK-BE-NEXT:  .LBB16_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB16_11
+; CHECK-BE-NEXT:  .LBB16_3: @ %else4
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB16_12
+; CHECK-BE-NEXT:  .LBB16_4: @ %else6
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB16_13
+; CHECK-BE-NEXT:  .LBB16_5: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB16_14
+; CHECK-BE-NEXT:  .LBB16_6: @ %else10
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB16_15
+; CHECK-BE-NEXT:  .LBB16_7: @ %else12
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB16_16
+; CHECK-BE-NEXT:  .LBB16_8: @ %else14
+; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB16_9: @ %cond.store
+; CHECK-BE-NEXT:    vstr.16 s4, [sp, #28]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #28]
+; CHECK-BE-NEXT:    strh r2, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB16_2
+; CHECK-BE-NEXT:  .LBB16_10: @ %cond.store1
+; CHECK-BE-NEXT:    vmovx.f16 s0, s4
+; CHECK-BE-NEXT:    vstr.16 s0, [sp, #24]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #24]
+; CHECK-BE-NEXT:    strh r2, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB16_3
+; CHECK-BE-NEXT:  .LBB16_11: @ %cond.store3
+; CHECK-BE-NEXT:    vstr.16 s5, [sp, #20]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #20]
+; CHECK-BE-NEXT:    strh r2, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB16_4
+; CHECK-BE-NEXT:  .LBB16_12: @ %cond.store5
+; CHECK-BE-NEXT:    vmovx.f16 s0, s5
+; CHECK-BE-NEXT:    vstr.16 s0, [sp, #16]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #16]
+; CHECK-BE-NEXT:    strh r2, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB16_5
+; CHECK-BE-NEXT:  .LBB16_13: @ %cond.store7
+; CHECK-BE-NEXT:    vstr.16 s6, [sp, #12]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #12]
+; CHECK-BE-NEXT:    strh r2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB16_6
+; CHECK-BE-NEXT:  .LBB16_14: @ %cond.store9
+; CHECK-BE-NEXT:    vmovx.f16 s0, s6
+; CHECK-BE-NEXT:    vstr.16 s0, [sp, #8]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #8]
+; CHECK-BE-NEXT:    strh r2, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB16_7
+; CHECK-BE-NEXT:  .LBB16_15: @ %cond.store11
+; CHECK-BE-NEXT:    vstr.16 s7, [sp, #4]
+; CHECK-BE-NEXT:    ldrh.w r2, [sp, #4]
+; CHECK-BE-NEXT:    strh r2, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB16_8
+; CHECK-BE-NEXT:  .LBB16_16: @ %cond.store13
+; CHECK-BE-NEXT:    vmovx.f16 s0, s7
+; CHECK-BE-NEXT:    vstr.16 s0, [sp]
+; CHECK-BE-NEXT:    ldrh.w r1, [sp]
+; CHECK-BE-NEXT:    strh r1, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #40
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp ugt <8 x i16> %b, zeroinitializer
+  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %a, <8 x half>* %dest, i32 1, <8 x i1> %c)
+  ret void
+}
+
+define i8* @masked_v8f16_pre(i8* %y, i8* %x, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_pre:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    adds r0, #4
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    ldrb.w r1, [sp]
+; CHECK-LE-NEXT:    lsls r2, r1, #31
+; CHECK-LE-NEXT:    bne .LBB17_9
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bmi .LBB17_10
+; CHECK-LE-NEXT:  .LBB17_2: @ %else2
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bmi .LBB17_11
+; CHECK-LE-NEXT:  .LBB17_3: @ %else4
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bmi .LBB17_12
+; CHECK-LE-NEXT:  .LBB17_4: @ %else6
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bmi .LBB17_13
+; CHECK-LE-NEXT:  .LBB17_5: @ %else8
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bmi .LBB17_14
+; CHECK-LE-NEXT:  .LBB17_6: @ %else10
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bmi .LBB17_15
+; CHECK-LE-NEXT:  .LBB17_7: @ %else12
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bmi .LBB17_16
+; CHECK-LE-NEXT:  .LBB17_8: @ %else14
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB17_9: @ %cond.store
+; CHECK-LE-NEXT:    vstr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r2, r1, #30
+; CHECK-LE-NEXT:    bpl .LBB17_2
+; CHECK-LE-NEXT:  .LBB17_10: @ %cond.store1
+; CHECK-LE-NEXT:    vmovx.f16 s4, s0
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    lsls r2, r1, #29
+; CHECK-LE-NEXT:    bpl .LBB17_3
+; CHECK-LE-NEXT:  .LBB17_11: @ %cond.store3
+; CHECK-LE-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r2, r1, #28
+; CHECK-LE-NEXT:    bpl .LBB17_4
+; CHECK-LE-NEXT:  .LBB17_12: @ %cond.store5
+; CHECK-LE-NEXT:    vmovx.f16 s4, s1
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    lsls r2, r1, #27
+; CHECK-LE-NEXT:    bpl .LBB17_5
+; CHECK-LE-NEXT:  .LBB17_13: @ %cond.store7
+; CHECK-LE-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r2, r1, #26
+; CHECK-LE-NEXT:    bpl .LBB17_6
+; CHECK-LE-NEXT:  .LBB17_14: @ %cond.store9
+; CHECK-LE-NEXT:    vmovx.f16 s4, s2
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:    lsls r2, r1, #25
+; CHECK-LE-NEXT:    bpl .LBB17_7
+; CHECK-LE-NEXT:  .LBB17_15: @ %cond.store11
+; CHECK-LE-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-LE-NEXT:    lsls r1, r1, #24
+; CHECK-LE-NEXT:    bpl .LBB17_8
+; CHECK-LE-NEXT:  .LBB17_16: @ %cond.store13
+; CHECK-LE-NEXT:    vmovx.f16 s0, s3
+; CHECK-LE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v8f16_pre:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    adds r0, #4
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-BE-NEXT:    ldrb.w r1, [sp]
+; CHECK-BE-NEXT:    lsls r2, r1, #31
+; CHECK-BE-NEXT:    bne .LBB17_9
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB17_10
+; CHECK-BE-NEXT:  .LBB17_2: @ %else2
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bmi .LBB17_11
+; CHECK-BE-NEXT:  .LBB17_3: @ %else4
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bmi .LBB17_12
+; CHECK-BE-NEXT:  .LBB17_4: @ %else6
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bmi .LBB17_13
+; CHECK-BE-NEXT:  .LBB17_5: @ %else8
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bmi .LBB17_14
+; CHECK-BE-NEXT:  .LBB17_6: @ %else10
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bmi .LBB17_15
+; CHECK-BE-NEXT:  .LBB17_7: @ %else12
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bmi .LBB17_16
+; CHECK-BE-NEXT:  .LBB17_8: @ %else14
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB17_9: @ %cond.store
+; CHECK-BE-NEXT:    vstr.16 s0, [r0]
+; CHECK-BE-NEXT:    lsls r2, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB17_2
+; CHECK-BE-NEXT:  .LBB17_10: @ %cond.store1
+; CHECK-BE-NEXT:    vmovx.f16 s4, s0
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-BE-NEXT:    lsls r2, r1, #29
+; CHECK-BE-NEXT:    bpl .LBB17_3
+; CHECK-BE-NEXT:  .LBB17_11: @ %cond.store3
+; CHECK-BE-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r2, r1, #28
+; CHECK-BE-NEXT:    bpl .LBB17_4
+; CHECK-BE-NEXT:  .LBB17_12: @ %cond.store5
+; CHECK-BE-NEXT:    vmovx.f16 s4, s1
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-BE-NEXT:    lsls r2, r1, #27
+; CHECK-BE-NEXT:    bpl .LBB17_5
+; CHECK-BE-NEXT:  .LBB17_13: @ %cond.store7
+; CHECK-BE-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r2, r1, #26
+; CHECK-BE-NEXT:    bpl .LBB17_6
+; CHECK-BE-NEXT:  .LBB17_14: @ %cond.store9
+; CHECK-BE-NEXT:    vmovx.f16 s4, s2
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-BE-NEXT:    lsls r2, r1, #25
+; CHECK-BE-NEXT:    bpl .LBB17_7
+; CHECK-BE-NEXT:  .LBB17_15: @ %cond.store11
+; CHECK-BE-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-BE-NEXT:    lsls r1, r1, #24
+; CHECK-BE-NEXT:    bpl .LBB17_8
+; CHECK-BE-NEXT:  .LBB17_16: @ %cond.store13
+; CHECK-BE-NEXT:    vmovx.f16 s0, s3
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <8 x half>*
+  %1 = load <8 x half>, <8 x half>* %0, align 4
+  %2 = bitcast i8* %z to <8 x half>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
+  ret i8* %z
+}
+
+define i8* @masked_v8f16_post(i8* %y, i8* %x, <8 x i16> %a) {
+; CHECK-LE-LABEL: masked_v8f16_post:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    vldr d1, [sp, #8]
+; CHECK-LE-NEXT:    vmov d0, r2, r3
+; CHECK-LE-NEXT:    mov r2, sp
+; CHECK-LE-NEXT:    vcmp.s16 gt, q0, zr
+; CHECK-LE-NEXT:    vstr p0, [r2]
+; CHECK-LE-NEXT:    ldrb.w r2, [sp]
+; CHECK-LE-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-LE-NEXT:    lsls r1, r2, #31
+; CHECK-LE-NEXT:    bne .LBB18_12
+; CHECK-LE-NEXT:  @ %bb.1: @ %else
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    bmi .LBB18_13
+; CHECK-LE-NEXT:  .LBB18_2: @ %else2
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    bmi .LBB18_14
+; CHECK-LE-NEXT:  .LBB18_3: @ %else4
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    bmi .LBB18_15
+; CHECK-LE-NEXT:  .LBB18_4: @ %else6
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    bmi .LBB18_16
+; CHECK-LE-NEXT:  .LBB18_5: @ %else8
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    bpl .LBB18_7
+; CHECK-LE-NEXT:  .LBB18_6: @ %cond.store9
+; CHECK-LE-NEXT:    vmovx.f16 s4, s2
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-LE-NEXT:  .LBB18_7: @ %else10
+; CHECK-LE-NEXT:    adds r1, r0, #4
+; CHECK-LE-NEXT:    lsls r3, r2, #25
+; CHECK-LE-NEXT:    bpl .LBB18_9
+; CHECK-LE-NEXT:  @ %bb.8: @ %cond.store11
+; CHECK-LE-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-LE-NEXT:  .LBB18_9: @ %else12
+; CHECK-LE-NEXT:    lsls r2, r2, #24
+; CHECK-LE-NEXT:    bpl .LBB18_11
+; CHECK-LE-NEXT:  @ %bb.10: @ %cond.store13
+; CHECK-LE-NEXT:    vmovx.f16 s0, s3
+; CHECK-LE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-LE-NEXT:  .LBB18_11: @ %else14
+; CHECK-LE-NEXT:    mov r0, r1
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+; CHECK-LE-NEXT:  .LBB18_12: @ %cond.store
+; CHECK-LE-NEXT:    vstr.16 s0, [r0]
+; CHECK-LE-NEXT:    lsls r1, r2, #30
+; CHECK-LE-NEXT:    bpl .LBB18_2
+; CHECK-LE-NEXT:  .LBB18_13: @ %cond.store1
+; CHECK-LE-NEXT:    vmovx.f16 s4, s0
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-LE-NEXT:    lsls r1, r2, #29
+; CHECK-LE-NEXT:    bpl .LBB18_3
+; CHECK-LE-NEXT:  .LBB18_14: @ %cond.store3
+; CHECK-LE-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-LE-NEXT:    lsls r1, r2, #28
+; CHECK-LE-NEXT:    bpl .LBB18_4
+; CHECK-LE-NEXT:  .LBB18_15: @ %cond.store5
+; CHECK-LE-NEXT:    vmovx.f16 s4, s1
+; CHECK-LE-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-LE-NEXT:    lsls r1, r2, #27
+; CHECK-LE-NEXT:    bpl .LBB18_5
+; CHECK-LE-NEXT:  .LBB18_16: @ %cond.store7
+; CHECK-LE-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-LE-NEXT:    lsls r1, r2, #26
+; CHECK-LE-NEXT:    bmi .LBB18_6
+; CHECK-LE-NEXT:    b .LBB18_7
+;
+; CHECK-BE-LABEL: masked_v8f16_post:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    vldr d1, [sp, #8]
+; CHECK-BE-NEXT:    vmov d0, r3, r2
+; CHECK-BE-NEXT:    mov r2, sp
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vcmp.s16 gt, q1, zr
+; CHECK-BE-NEXT:    vstr p0, [r2]
+; CHECK-BE-NEXT:    ldrb.w r2, [sp]
+; CHECK-BE-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-BE-NEXT:    lsls r1, r2, #31
+; CHECK-BE-NEXT:    bne .LBB18_12
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    bmi .LBB18_13
+; CHECK-BE-NEXT:  .LBB18_2: @ %else2
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    bmi .LBB18_14
+; CHECK-BE-NEXT:  .LBB18_3: @ %else4
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    bmi .LBB18_15
+; CHECK-BE-NEXT:  .LBB18_4: @ %else6
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    bmi .LBB18_16
+; CHECK-BE-NEXT:  .LBB18_5: @ %else8
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    bpl .LBB18_7
+; CHECK-BE-NEXT:  .LBB18_6: @ %cond.store9
+; CHECK-BE-NEXT:    vmovx.f16 s4, s2
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #10]
+; CHECK-BE-NEXT:  .LBB18_7: @ %else10
+; CHECK-BE-NEXT:    adds r1, r0, #4
+; CHECK-BE-NEXT:    lsls r3, r2, #25
+; CHECK-BE-NEXT:    bpl .LBB18_9
+; CHECK-BE-NEXT:  @ %bb.8: @ %cond.store11
+; CHECK-BE-NEXT:    vstr.16 s3, [r0, #12]
+; CHECK-BE-NEXT:  .LBB18_9: @ %else12
+; CHECK-BE-NEXT:    lsls r2, r2, #24
+; CHECK-BE-NEXT:    bpl .LBB18_11
+; CHECK-BE-NEXT:  @ %bb.10: @ %cond.store13
+; CHECK-BE-NEXT:    vmovx.f16 s0, s3
+; CHECK-BE-NEXT:    vstr.16 s0, [r0, #14]
+; CHECK-BE-NEXT:  .LBB18_11: @ %else14
+; CHECK-BE-NEXT:    mov r0, r1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB18_12: @ %cond.store
+; CHECK-BE-NEXT:    vstr.16 s0, [r0]
+; CHECK-BE-NEXT:    lsls r1, r2, #30
+; CHECK-BE-NEXT:    bpl .LBB18_2
+; CHECK-BE-NEXT:  .LBB18_13: @ %cond.store1
+; CHECK-BE-NEXT:    vmovx.f16 s4, s0
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #2]
+; CHECK-BE-NEXT:    lsls r1, r2, #29
+; CHECK-BE-NEXT:    bpl .LBB18_3
+; CHECK-BE-NEXT:  .LBB18_14: @ %cond.store3
+; CHECK-BE-NEXT:    vstr.16 s1, [r0, #4]
+; CHECK-BE-NEXT:    lsls r1, r2, #28
+; CHECK-BE-NEXT:    bpl .LBB18_4
+; CHECK-BE-NEXT:  .LBB18_15: @ %cond.store5
+; CHECK-BE-NEXT:    vmovx.f16 s4, s1
+; CHECK-BE-NEXT:    vstr.16 s4, [r0, #6]
+; CHECK-BE-NEXT:    lsls r1, r2, #27
+; CHECK-BE-NEXT:    bpl .LBB18_5
+; CHECK-BE-NEXT:  .LBB18_16: @ %cond.store7
+; CHECK-BE-NEXT:    vstr.16 s2, [r0, #8]
+; CHECK-BE-NEXT:    lsls r1, r2, #26
+; CHECK-BE-NEXT:    bmi .LBB18_6
+; CHECK-BE-NEXT:    b .LBB18_7
+entry:
+  %z = getelementptr inbounds i8, i8* %y, i32 4
+  %0 = bitcast i8* %x to <8 x half>*
+  %1 = load <8 x half>, <8 x half>* %0, align 4
+  %2 = bitcast i8* %y to <8 x half>*
+  %c = icmp sgt <8 x i16> %a, zeroinitializer
+  call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %1, <8 x half>* %2, i32 2, <8 x i1> %c)
+  ret i8* %z
+}
+
+
+define arm_aapcs_vfpcc void @masked_v2i64(<2 x i64> *%dest, <2 x i64> %a) {
+; CHECK-LE-LABEL: masked_v2i64:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vmov r2, s0
+; CHECK-LE-NEXT:    movs r3, #0
+; CHECK-LE-NEXT:    vmov r1, s1
+; CHECK-LE-NEXT:    vmov r12, s3
+; CHECK-LE-NEXT:    rsbs r2, r2, #0
+; CHECK-LE-NEXT:    vmov r2, s2
+; CHECK-LE-NEXT:    sbcs.w r1, r3, r1
+; CHECK-LE-NEXT:    mov.w r1, #0
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r1, #1
+; CHECK-LE-NEXT:    rsbs r2, r2, #0
+; CHECK-LE-NEXT:    sbcs.w r2, r3, r12
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r3, #1
+; CHECK-LE-NEXT:    cmp r3, #0
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    mvnne r3, #1
+; CHECK-LE-NEXT:    bfi r3, r1, #0, #1
+; CHECK-LE-NEXT:    and r1, r3, #3
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    ittt ne
+; CHECK-LE-NEXT:    vmovne r2, s1
+; CHECK-LE-NEXT:    vmovne r3, s0
+; CHECK-LE-NEXT:    strdne r3, r2, [r0]
+; CHECK-LE-NEXT:    lsls r1, r1, #30
+; CHECK-LE-NEXT:    ittt mi
+; CHECK-LE-NEXT:    vmovmi r1, s3
+; CHECK-LE-NEXT:    vmovmi r2, s2
+; CHECK-LE-NEXT:    strdmi r2, r1, [r0, #8]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v2i64:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    movs r3, #0
+; CHECK-BE-NEXT:    vmov r2, s7
+; CHECK-BE-NEXT:    vmov r1, s6
+; CHECK-BE-NEXT:    vmov r12, s4
+; CHECK-BE-NEXT:    rsbs r2, r2, #0
+; CHECK-BE-NEXT:    vmov r2, s5
+; CHECK-BE-NEXT:    sbcs.w r1, r3, r1
+; CHECK-BE-NEXT:    mov.w r1, #0
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r1, #1
+; CHECK-BE-NEXT:    rsbs r2, r2, #0
+; CHECK-BE-NEXT:    sbcs.w r2, r3, r12
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r3, #1
+; CHECK-BE-NEXT:    cmp r3, #0
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    mvnne r3, #1
+; CHECK-BE-NEXT:    bfi r3, r1, #0, #1
+; CHECK-BE-NEXT:    and r1, r3, #3
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    bne .LBB19_3
+; CHECK-BE-NEXT:  @ %bb.1: @ %else
+; CHECK-BE-NEXT:    lsls r1, r1, #30
+; CHECK-BE-NEXT:    bmi .LBB19_4
+; CHECK-BE-NEXT:  .LBB19_2: @ %else2
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+; CHECK-BE-NEXT:  .LBB19_3: @ %cond.store
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vmov r2, s5
+; CHECK-BE-NEXT:    vmov r3, s4
+; CHECK-BE-NEXT:    strd r3, r2, [r0]
+; CHECK-BE-NEXT:    lsls r1, r1, #30
+; CHECK-BE-NEXT:    bpl .LBB19_2
+; CHECK-BE-NEXT:  .LBB19_4: @ %cond.store1
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vmov r1, s7
+; CHECK-BE-NEXT:    vmov r2, s6
+; CHECK-BE-NEXT:    strd r2, r1, [r0, #8]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <2 x i64> %a, zeroinitializer
+  call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> %a, <2 x i64>* %dest, i32 8, <2 x i1> %c)
+  ret void
+}
+
+define arm_aapcs_vfpcc void @masked_v2f64(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
+; CHECK-LE-LABEL: masked_v2f64:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    .pad #4
+; CHECK-LE-NEXT:    sub sp, #4
+; CHECK-LE-NEXT:    vmov r2, s4
+; CHECK-LE-NEXT:    movs r3, #0
+; CHECK-LE-NEXT:    vmov r1, s5
+; CHECK-LE-NEXT:    vmov r12, s7
+; CHECK-LE-NEXT:    rsbs r2, r2, #0
+; CHECK-LE-NEXT:    vmov r2, s6
+; CHECK-LE-NEXT:    sbcs.w r1, r3, r1
+; CHECK-LE-NEXT:    mov.w r1, #0
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r1, #1
+; CHECK-LE-NEXT:    rsbs r2, r2, #0
+; CHECK-LE-NEXT:    sbcs.w r2, r3, r12
+; CHECK-LE-NEXT:    it lt
+; CHECK-LE-NEXT:    movlt r3, #1
+; CHECK-LE-NEXT:    cmp r3, #0
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    mvnne r3, #1
+; CHECK-LE-NEXT:    bfi r3, r1, #0, #1
+; CHECK-LE-NEXT:    and r1, r3, #3
+; CHECK-LE-NEXT:    lsls r2, r3, #31
+; CHECK-LE-NEXT:    it ne
+; CHECK-LE-NEXT:    vstrne d0, [r0]
+; CHECK-LE-NEXT:    lsls r1, r1, #30
+; CHECK-LE-NEXT:    it mi
+; CHECK-LE-NEXT:    vstrmi d1, [r0, #8]
+; CHECK-LE-NEXT:    add sp, #4
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: masked_v2f64:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    .pad #4
+; CHECK-BE-NEXT:    sub sp, #4
+; CHECK-BE-NEXT:    vrev64.32 q2, q1
+; CHECK-BE-NEXT:    movs r3, #0
+; CHECK-BE-NEXT:    vmov r2, s11
+; CHECK-BE-NEXT:    vmov r1, s10
+; CHECK-BE-NEXT:    vmov r12, s8
+; CHECK-BE-NEXT:    rsbs r2, r2, #0
+; CHECK-BE-NEXT:    vmov r2, s9
+; CHECK-BE-NEXT:    sbcs.w r1, r3, r1
+; CHECK-BE-NEXT:    mov.w r1, #0
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r1, #1
+; CHECK-BE-NEXT:    rsbs r2, r2, #0
+; CHECK-BE-NEXT:    sbcs.w r2, r3, r12
+; CHECK-BE-NEXT:    it lt
+; CHECK-BE-NEXT:    movlt r3, #1
+; CHECK-BE-NEXT:    cmp r3, #0
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    mvnne r3, #1
+; CHECK-BE-NEXT:    bfi r3, r1, #0, #1
+; CHECK-BE-NEXT:    and r1, r3, #3
+; CHECK-BE-NEXT:    lsls r2, r3, #31
+; CHECK-BE-NEXT:    it ne
+; CHECK-BE-NEXT:    vstrne d0, [r0]
+; CHECK-BE-NEXT:    lsls r1, r1, #30
+; CHECK-BE-NEXT:    it mi
+; CHECK-BE-NEXT:    vstrmi d1, [r0, #8]
+; CHECK-BE-NEXT:    add sp, #4
+; CHECK-BE-NEXT:    bx lr
+entry:
+  %c = icmp sgt <2 x i64> %b, zeroinitializer
+  call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %a, <2 x double>* %dest, i32 8, <2 x i1> %c)
+  ret void
+}
+
+
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-pred-not.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-pred-not.ll?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-pred-not.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-pred-not.ll Thu Aug 29 03:32:12 2019
@@ -384,4 +384,24 @@ entry:
   ret <2 x i64> %s
 }
 
-
+define arm_aapcs_vfpcc <4 x i32> @vpnot_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: vpnot_v4i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.s32 lt, q0, zr
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.s32 gt, q1, zr
+; CHECK-NEXT:    vpnot
+; CHECK-NEXT:    vpst
+; CHECK-NEXT:    vcmpt.i32 eq, q2, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp slt <4 x i32> %a, zeroinitializer
+  %c2 = icmp sgt <4 x i32> %b, zeroinitializer
+  %c3 = icmp eq <4 x i32> %c, zeroinitializer
+  %o1 = and <4 x i1> %c1, %c2
+  %o2 = xor <4 x i1> %o1, <i1 -1, i1 -1, i1 -1, i1 -1>
+  %o = and <4 x i1> %c3, %o2
+  %s = select <4 x i1> %o, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %s
+}

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -61,9 +62,14 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $r0
 
-    ; CHECK:       MVE_VPST 8, implicit-def $p0
-    ; CHECK-NEXT:  $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q1, implicit killed $q2, implicit killed $vpr, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    ; CHECK:   tBX_RET 14, $noreg, implicit internal $q0
+    ; CHECK: }
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
     tBX_RET 14, $noreg, implicit $q0

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block2.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block2.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block2.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -64,10 +65,16 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $q3, $r0
 
-    ; CHECK:       MVE_VPST 4, implicit-def $p0
-    ; CHECK-NEXT:  renamable $q0 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q1 = nnan ninf nsz MVE_VMINNMf32
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $q3, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit-def dead $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit killed $q2, implicit killed $q3, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 4, implicit-def $p0
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, killed renamable $q0
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q0, killed renamable $q3, 1, killed renamable $vpr, killed renamable $q1
+    ; CHECK:   $q0 = MVE_VORR internal killed $q1, internal killed $q1, 0, $noreg, internal undef $q0
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, killed renamable $q0
     renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q0, killed renamable $q3, 1, killed renamable $vpr, killed renamable $q1

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block3.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block3.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block3.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block3.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -65,12 +66,18 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $q3, $r0
 
-    ; CHECK:       MVE_VPST 1, implicit-def $p0
-    ; CHECK-NEXT:  renamable $q2 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q2 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q0 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q1 = nnan ninf nsz MVE_VMINNMf32
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $q3, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $q2, implicit-def $d4, implicit-def $s8, implicit-def $s9, implicit-def $d5, implicit-def $s10, implicit-def $s11, implicit-def dead $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit killed $q2, implicit killed $q3, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 1, implicit-def $p0
+    ; CHECK:   renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, undef renamable $q2
+    ; CHECK:   renamable $q2 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, internal renamable $q2, 1, renamable $vpr, internal undef renamable $q2
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, renamable $q3, 1, renamable $vpr, killed renamable $q0
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q0, killed renamable $q3, 1, killed renamable $vpr, killed renamable $q1
+    ; CHECK: }
+    ; CHECK: $q0 = MVE_VORR killed $q1, killed $q1, 0, $noreg, undef $q0
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, undef renamable $q2
     renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q2, 1, renamable $vpr, undef renamable $q2

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block4.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block4.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block4.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block4.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -66,18 +67,22 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $q3, $r0
 
-    ; CHECK:       MVE_VPST 1, implicit-def $p0
-    ; CHECK-NEXT:  renamable $q2 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q2 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q0 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  renamable $q0 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:  }
-    ; CHECK-NEXT:  BUNDLE {{.*}} {
-    ; CHECK-NEXT:    MVE_VPST 8, implicit-def $p0
-    ; CHECK-NEXT:    renamable $q1 = nnan ninf nsz MVE_VMINNMf32
-    ; CHECK-NEXT:    $q0 = MVE_VORR
-    ; CHECK-NEXT:  }
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $q3, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $q2, implicit-def $d4, implicit-def $s8, implicit-def $s9, implicit-def $d5, implicit-def $s10, implicit-def $s11, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q2, implicit $q3, implicit $vpr, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 1, implicit-def $p0
+    ; CHECK:   renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, undef renamable $q2
+    ; CHECK:   renamable $q2 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, internal renamable $q2, 1, renamable $vpr, internal undef renamable $q2
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q2, renamable $q3, 1, renamable $vpr, killed renamable $q0
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q0, renamable $q3, 1, renamable $vpr, internal undef renamable $q0
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q0, implicit killed $q3, implicit killed $vpr, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q0, killed renamable $q3, 1, killed renamable $vpr, killed renamable $q1
+    ; CHECK:   $q0 = MVE_VORR internal killed $q1, internal killed $q1, 0, $noreg, undef $q0
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q3, 1, renamable $vpr, undef renamable $q2
     renamable $q2 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q2, renamable $q2, 1, renamable $vpr, undef renamable $q2

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block5.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block5.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block5.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block5.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -63,19 +64,22 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $r0
 
-    ; CHECK:       BUNDLE {{.*}} {
-    ; CHECK-NEXT:    MVE_VPST 4, implicit-def $p0
-    ; CHECK-NEXT:    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
-    ; CHECK-NEXT:    renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal renamable $q3, 1, renamable $vpr, undef renamable $q1
-    ; CHECK-NEXT:    $q3 = MVE_VORR $q0, $q0, 0, $noreg, internal undef $q3
-    ; CHECK-NEXT:  }
-    ; CHECK-NEXT:  BUNDLE {{.*}} {
-    ; CHECK-NEXT:    MVE_VPST 4, implicit-def $p0
-    ; CHECK-NEXT:    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
-    ; CHECK-NEXT:    renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
-    ; CHECK-NEXT:    tBX_RET 14, $noreg, implicit internal $q0
-    ; CHECK-NEXT:  }
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit killed $q1, implicit $q2, implicit $vpr, implicit killed $q3, implicit $q0 {
+    ; CHECK:   MVE_VPST 4, implicit-def $p0
+    ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal renamable $q3, 1, renamable $vpr, undef renamable $q1
+    ; CHECK:   $q3 = MVE_VORR $q0, $q0, 0, $noreg, internal undef $q3
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q1, implicit killed $q2, implicit killed $vpr, implicit killed $q3, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 4, implicit-def $p0
+    ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    ; CHECK:   tBX_RET 14, $noreg, implicit internal $q0
+    ; CHECK: }
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
     renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block6.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block6.mir?rev=370325&r1=370324&r2=370325&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block6.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block6.mir Thu Aug 29 03:32:12 2019
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
 
 --- |
@@ -63,17 +64,20 @@ body:             |
   bb.0.entry:
     liveins: $q0, $q1, $q2, $r0, $r1
 
-    ; CHECK:       BUNDLE {{.*}} {
-    ; CHECK-NEXT:    MVE_VPST 8, implicit-def $p0
-    ; CHECK-NEXT:    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, killed renamable $vpr, killed renamable $q3
-    ; CHECK-NEXT:    $vpr = VMSR_P0 killed $r1, 14, $noreg
-    ; CHECK-NEXT:  }
-    ; CHECK-NEXT:  BUNDLE {{.*}} {
-    ; CHECK-NEXT:    MVE_VPST 8, implicit-def $p0
-    ; CHECK-NEXT:    renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
-    ; CHECK-NEXT:    tBX_RET 14, $noreg, implicit internal $q0
-    ; CHECK-NEXT:  }
-
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $r0, $r1
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $vpr, implicit killed $q1, implicit $q2, implicit killed $vpr, implicit killed $q3, implicit killed $r1 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, killed renamable $vpr, killed renamable $q3
+    ; CHECK:   $vpr = VMSR_P0 killed $r1, 14, $noreg
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q3, implicit killed $q2, implicit killed $vpr, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    ; CHECK:   tBX_RET 14, $noreg, implicit internal $q0
+    ; CHECK: }
     $vpr = VMSR_P0 killed $r0, 14, $noreg
     $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
     renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, killed renamable $vpr, killed renamable $q3

Added: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block7.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block7.mir?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block7.mir (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block7.mir Thu Aug 29 03:32:12 2019
@@ -0,0 +1,102 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "thumbv8.1m.main-arm-none-eabi"
+
+  define hidden arm_aapcs_vfpcc <4 x float> @test_vminnmq_m_f32_v2(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 {
+  entry:
+    %conv.i = zext i16 %p to i32
+    %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2
+    %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2
+    br label %bb2
+  bb2:
+    %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2
+    %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2
+    ret <4 x float> %3
+  }
+
+  declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
+
+  attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+  attributes #1 = { nounwind readnone }
+  attributes #2 = { nounwind }
+
+...
+---
+name:            test_vminnmq_m_f32_v2
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+  - { reg: '$r0', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    0
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:           []
+constants:       []
+body:             |
+  ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $q0, $q1, $q2, $r0
+  ; CHECK:   $vpr = VMSR_P0 killed $r0, 14, $noreg
+  ; CHECK:   $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+  ; CHECK:   BUNDLE implicit-def $p0, implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit-def $vpr, implicit killed $q1, implicit $q2, implicit killed $vpr, implicit killed $q3 {
+  ; CHECK:     MVE_VPST 4, implicit-def $p0
+  ; CHECK:     renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+  ; CHECK:     renamable $q1 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, internal renamable $q3, 1, renamable $vpr, undef renamable $q1
+  ; CHECK:     renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+  ; CHECK:   }
+  ; CHECK: bb.1.bb2:
+  ; CHECK:   liveins: $q0, $q1, $q2, $q3, $vpr
+  ; CHECK:   BUNDLE implicit-def $p0, implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q1, implicit killed $q2, implicit killed $vpr, implicit killed $q3, implicit killed $q0 {
+  ; CHECK:     MVE_VPST 4, implicit-def $p0
+  ; CHECK:     renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+  ; CHECK:     renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+  ; CHECK:     tBX_RET 14, $noreg, implicit internal $q0
+  ; CHECK:   }
+  bb.0.entry:
+    liveins: $q0, $q1, $q2, $r0
+
+    $vpr = VMSR_P0 killed $r0, 14, $noreg
+    $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr, undef renamable $q1
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+
+  bb.1.bb2:
+    liveins: $q0, $q1, $q2, $q3, $vpr
+
+    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    tBX_RET 14, $noreg, implicit $q0
+
+...

Added: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block8.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block8.mir?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block8.mir (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-block8.mir Thu Aug 29 03:32:12 2019
@@ -0,0 +1,97 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass arm-mve-vpt %s -o - | FileCheck %s
+
+--- |
+  target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "thumbv8.1m.main-arm-none-eabi"
+
+  define hidden arm_aapcs_vfpcc <4 x float> @test_vminnmq_m_f32_v2(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i16 zeroext %p) local_unnamed_addr #0 {
+  entry:
+    %conv.i = zext i16 %p to i32
+    %0 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %a, <4 x float> %b, i32 %conv.i) #2
+    %1 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> undef, <4 x float> %0, <4 x float> %0, i32 %conv.i) #2
+    %2 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %1, <4 x float> %b, i32 %conv.i) #2
+    %3 = tail call nnan ninf nsz <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float> %inactive1, <4 x float> %2, <4 x float> %b, i32 %conv.i) #2
+    ret <4 x float> %3
+  }
+
+  declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
+
+  attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+  attributes #1 = { nounwind readnone }
+  attributes #2 = { nounwind }
+
+...
+---
+name:            test_vminnmq_m_f32_v2
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+  - { reg: '$r0', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    0
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:           []
+constants:       []
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2, $r0
+
+    ; CHECK-LABEL: name: test_vminnmq_m_f32_v2
+    ; CHECK: liveins: $q0, $q1, $q2, $r0
+    ; CHECK: $vpr = VMSR_P0 killed $r0, 14, $noreg
+    ; CHECK: $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $vpr, implicit killed $q1, implicit $q2, implicit killed $vpr, implicit killed $q3 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    ; CHECK:   renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $q1, implicit-def $d2, implicit-def $s4, implicit-def $s5, implicit-def $d3, implicit-def $s6, implicit-def $s7, implicit-def $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit killed $q3, implicit $vpr, implicit undef $q1, implicit $q0 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr, undef renamable $q1
+    ; CHECK:   $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $q3, implicit-def $d6, implicit-def $s12, implicit-def $s13, implicit-def $d7, implicit-def $s14, implicit-def $s15, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q1, implicit killed $q2, implicit killed $vpr, implicit killed $q3, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 4, implicit-def $p0
+    ; CHECK:   renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    ; CHECK:   renamable $q0 = nnan ninf nsz MVE_VMINNMf32 internal killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    ; CHECK:   tBX_RET 14, $noreg, implicit internal $q0
+    ; CHECK: }
+    $vpr = VMSR_P0 killed $r0, 14, $noreg
+    $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $q1 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, renamable $q3, 1, renamable $vpr, undef renamable $q1
+    $q3 = MVE_VORR $q0, $q0, 0, $noreg, undef $q3
+    renamable $q3 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q1, renamable $q2, 1, renamable $vpr, killed renamable $q3
+    renamable $q0 = nnan ninf nsz MVE_VMINNMf32 killed renamable $q3, killed renamable $q2, 1, killed renamable $vpr, killed renamable $q0
+    tBX_RET 14, $noreg, implicit $q0
+
+...

Added: llvm/trunk/test/CodeGen/Thumb2/mve-vpt-nots.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vpt-nots.mir?rev=370325&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vpt-nots.mir (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vpt-nots.mir Thu Aug 29 03:32:12 2019
@@ -0,0 +1,272 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1-m.main-none-eabi -mattr=+mve -run-pass arm-mve-vpt %s -o - | FileCheck %s
+
+--- |
+
+  define arm_aapcs_vfpcc <4 x i32> @vpnot(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    %0 = icmp slt <4 x i32> %a, zeroinitializer
+    %c2 = icmp sgt <4 x i32> %b, zeroinitializer
+    %c3 = icmp eq <4 x i32> %c, zeroinitializer
+    %o1 = and <4 x i1> %0, %c2
+    %o2 = xor <4 x i1> %o1, <i1 true, i1 true, i1 true, i1 true>
+    %o = and <4 x i1> %c3, %o2
+    %s = select <4 x i1> %o, <4 x i32> %a, <4 x i32> %b
+    ret <4 x i32> %s
+  }
+
+  define arm_aapcs_vfpcc <4 x i32> @vpnot_end(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    %0 = icmp slt <4 x i32> %a, zeroinitializer
+    %c2 = icmp sgt <4 x i32> %b, zeroinitializer
+    %c3 = icmp eq <4 x i32> %c, zeroinitializer
+    %o1 = and <4 x i1> %0, %c2
+    %o2 = xor <4 x i1> %o1, <i1 true, i1 true, i1 true, i1 true>
+    br label %bb2
+  bb2:
+    %o = and <4 x i1> %c3, %o2
+    %s = select <4 x i1> %o, <4 x i32> %a, <4 x i32> %b
+    ret <4 x i32> %s
+  }
+
+  define arm_aapcs_vfpcc <4 x i32> @vpnot_two(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    unreachable
+  }
+  define arm_aapcs_vfpcc <4 x i32> @vpnot_lots(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    unreachable
+  }
+  define arm_aapcs_vfpcc <4 x i32> @vpnot_first(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    unreachable
+  }
+  define arm_aapcs_vfpcc <4 x i32> @vpnot_many(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+  entry:
+    unreachable
+  }
+
+...
+---
+name:            vpnot
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: vpnot
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $vpr, implicit $q1, implicit $zr, implicit killed $vpr {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $vpr, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q2, implicit $zr, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, internal killed renamable $vpr
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...
+---
+name:            vpnot_end
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  ; CHECK-LABEL: name: vpnot_end
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $q0, $q1, $q2
+  ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+  ; CHECK:   BUNDLE implicit-def $p0, implicit-def $vpr, implicit $q1, implicit $zr, implicit killed $vpr {
+  ; CHECK:     MVE_VPST 8, implicit-def $p0
+  ; CHECK:     renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+  ; CHECK:     renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+  ; CHECK:   }
+  ; CHECK: bb.1.bb2:
+  ; CHECK:   liveins: $q0, $q1, $q2, $vpr
+  ; CHECK:   BUNDLE implicit-def $p0, implicit-def dead $vpr, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q2, implicit $zr, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+  ; CHECK:     MVE_VPST 8, implicit-def $p0
+  ; CHECK:     renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+  ; CHECK:     renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, internal killed renamable $vpr
+  ; CHECK:   }
+  ; CHECK:   tBX_RET 14, $noreg, implicit $q0
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+
+  bb.1.bb2:
+    liveins: $q0, $q1, $q2, $vpr
+
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...
+---
+name:            vpnot_two
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: vpnot_two
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $vpr, implicit $q1, implicit $zr, implicit killed $vpr {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $vpr, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q2, implicit $zr, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, internal killed renamable $vpr
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...
+---
+name:            vpnot_lots
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: vpnot_lots
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $vpr, implicit $q1, implicit $zr, implicit killed $vpr {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $vpr, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q2, implicit $zr, implicit killed $vpr, implicit killed $q0, implicit killed $q1 {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, internal killed renamable $vpr
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...
+---
+name:            vpnot_first
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: vpnot_first
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def dead $vpr, implicit-def $q0, implicit-def $d0, implicit-def $s0, implicit-def $s1, implicit-def $d1, implicit-def $s2, implicit-def $s3, implicit killed $q1, implicit $zr, implicit killed $vpr, implicit killed $q2, implicit killed $q0 {
+    ; CHECK:   MVE_VPST 4, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, internal killed renamable $vpr
+    ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, internal killed renamable $vpr
+    ; CHECK: }
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...
+---
+name:            vpnot_many
+alignment:       2
+tracksRegLiveness: true
+liveins:
+  - { reg: '$q0', virtual-reg: '' }
+  - { reg: '$q1', virtual-reg: '' }
+  - { reg: '$q2', virtual-reg: '' }
+body:             |
+  bb.0.entry:
+    liveins: $q0, $q1, $q2
+
+    ; CHECK-LABEL: name: vpnot_many
+    ; CHECK: liveins: $q0, $q1, $q2
+    ; CHECK: renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    ; CHECK: renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $vpr, implicit $q1, implicit $zr, implicit killed $vpr {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: BUNDLE implicit-def $p0, implicit-def $vpr, implicit killed $q2, implicit $zr, implicit killed $vpr {
+    ; CHECK:   MVE_VPST 8, implicit-def $p0
+    ; CHECK:   renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    ; CHECK:   renamable $vpr = MVE_VPNOT internal killed renamable $vpr, 0, $noreg
+    ; CHECK: }
+    ; CHECK: renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    ; CHECK: tBX_RET 14, $noreg, implicit $q0
+    renamable $vpr = MVE_VCMPs32r renamable $q0, $zr, 11, 0, $noreg
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPs32r renamable $q1, $zr, 12, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $vpr = MVE_VCMPi32r killed renamable $q2, $zr, 0, 1, killed renamable $vpr
+    renamable $vpr = MVE_VPNOT killed renamable $vpr, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    tBX_RET 14, $noreg, implicit $q0
+
+...




More information about the llvm-commits mailing list