[llvm] ab0096d - [ARM] Add some opaque pointer gather/scatter tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 7 14:04:05 PDT 2021


Author: David Green
Date: 2021-07-07T22:03:53+01:00
New Revision: ab0096de05709dd36b69155e08d156d073c69501

URL: https://github.com/llvm/llvm-project/commit/ab0096de05709dd36b69155e08d156d073c69501
DIFF: https://github.com/llvm/llvm-project/commit/ab0096de05709dd36b69155e08d156d073c69501.diff

LOG: [ARM] Add some opaque pointer gather/scatter tests. NFC

They seem to work OK. Some other test cleanups at the same time.

Added: 
    

Modified: 
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
    llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
    llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
index 1cea4a0929f23..38664370e6e85 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-scaled.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o 2>/dev/null - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
 define arm_aapcs_vfpcc <4 x i32> @zext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
 ; CHECK-LABEL: zext_scaled_i16_i32:
@@ -15,6 +15,20 @@ entry:
   ret <4 x i32> %gather.zext
 }
 
+define arm_aapcs_vfpcc <4 x i32> @zext_scaled_i16_i32_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: zext_scaled_i16_i32_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrh.u32 q0, [r0, q1, uxtw #1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i16, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %gather.zext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %gather.zext
+}
+
 define arm_aapcs_vfpcc <4 x i32> @sext_scaled_i16_i32(i16* %base, <4 x i32>* %offptr) {
 ; CHECK-LABEL: sext_scaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
@@ -58,6 +72,20 @@ entry:
   ret <4 x float> %gather
 }
 
+define arm_aapcs_vfpcc <4 x float> @scaled_f32_i32_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: scaled_f32_i32_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %i32_ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %ptrs = bitcast <4 x ptr> %i32_ptrs to <4 x ptr>
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
 define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i16(i32* %base, <4 x i16>* %offptr) {
 ; CHECK-LABEL: unsigned_scaled_b_i32_i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -86,6 +114,34 @@ entry:
   ret <4 x i32> %gather
 }
 
+define arm_aapcs_vfpcc <4 x i32> @unsigned_scaled_b_i32_i16_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: unsigned_scaled_b_i32_i16_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, ptr %offptr, align 2
+  %offs.zext = zext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @signed_scaled_i32_i16_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: signed_scaled_i32_i16_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.s32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i16>, ptr %offptr, align 2
+  %offs.sext = sext <4 x i16> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs.sext
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
 define arm_aapcs_vfpcc <4 x float> @a_unsigned_scaled_f32_i16(i32* %base, <4 x i16>* %offptr) {
 ; CHECK-LABEL: a_unsigned_scaled_f32_i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -312,16 +368,16 @@ entry:
   ret <4 x i32> %gather
 }
 
-define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2(i32* %base, <4 x i32>* %offptr) {
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2(i32* %base) {
 ; CHECK-LABEL: scaled_i32_i32_2gep2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    adr r1, .LCPI21_0
+; CHECK-NEXT:    adr r1, .LCPI25_0
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:  .LCPI25_0:
 ; CHECK-NEXT:    .long 5 @ 0x5
 ; CHECK-NEXT:    .long 8 @ 0x8
 ; CHECK-NEXT:    .long 11 @ 0xb
@@ -333,8 +389,50 @@ entry:
   ret <4 x i32> %gather
 }
 
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: scaled_i32_i32_2gep_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vmov.i32 q0, #0x14
+; CHECK-NEXT:    vshl.i32 q1, q1, #2
+; CHECK-NEXT:    vadd.i32 q1, q1, r0
+; CHECK-NEXT:    vadd.i32 q1, q1, q0
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> %offs
+  %ptrs2 = getelementptr inbounds i32, <4 x ptr> %ptrs, i32 5
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @scaled_i32_i32_2gep2_opaque(ptr %base) {
+; CHECK-LABEL: scaled_i32_i32_2gep2_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r1, .LCPI27_0
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, q1, uxtw #2]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI27_0:
+; CHECK-NEXT:    .long 5 @ 0x5
+; CHECK-NEXT:    .long 8 @ 0x8
+; CHECK-NEXT:    .long 11 @ 0xb
+; CHECK-NEXT:    .long 14 @ 0xe
+entry:
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %ptrs2 = getelementptr inbounds i32, <4 x ptr> %ptrs, i32 5
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
 declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
 declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
 declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
 declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
 declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
index 442ecc6267c52..27e31244875cd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ind32-unscaled.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o 2>/dev/null - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
 define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr) {
 ; CHECK-LABEL: zext_unscaled_i8_i32:
@@ -29,6 +29,20 @@ entry:
   ret <4 x i32> %gather.sext
 }
 
+define arm_aapcs_vfpcc <4 x i32> @sext_unscaled_i8_i32_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: sext_unscaled_i8_i32_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r1]
+; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i32>, ptr %offptr, align 4
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
 define arm_aapcs_vfpcc <4 x i32> @zext_unscaled_i16_i32(i8* %base, <4 x i32>* %offptr) {
 ; CHECK-LABEL: zext_unscaled_i16_i32:
 ; CHECK:       @ %bb.0: @ %entry
@@ -455,11 +469,39 @@ entry:
   ret <4 x i32> %gather.sext
 }
 
+define arm_aapcs_vfpcc <4 x i32> @sext_unsigned_unscaled_i8_i8_opaque(ptr %base, ptr %offptr) {
+; CHECK-LABEL: sext_unsigned_unscaled_i8_i8_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u32 q1, [r1]
+; CHECK-NEXT:    vldrb.s32 q0, [r0, q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x i8>, ptr %offptr, align 1
+  %offs.zext = zext <4 x i8> %offs to <4 x i32>
+  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> %offs.zext
+  %gather = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
+  %gather.sext = sext <4 x i8> %gather to <4 x i32>
+  ret <4 x i32> %gather.sext
+}
+
 ; VLDRW.u32 Qd, [P, 4]
 define arm_aapcs_vfpcc <4 x i32> @qi4(<4 x i32*> %p) {
 ; CHECK-LABEL: qi4:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov.i32 q1, #0x10
+; CHECK-NEXT:    vadd.i32 q1, q0, q1
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %g = getelementptr inbounds i32, <4 x i32*> %p, i32 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %g, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
+define arm_aapcs_vfpcc <4 x i32> @qi4_unaligned(<4 x i32*> %p) {
+; CHECK-LABEL: qi4_unaligned:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q1, #0x10
 ; CHECK-NEXT:    vadd.i32 q0, q0, q1
 ; CHECK-NEXT:    vmov r0, r1, d1
 ; CHECK-NEXT:    vmov r2, r3, d0
@@ -481,3 +523,5 @@ declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <
 declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
 declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
 declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
+
+declare <4 x i8>  @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
index 2958636fb57c0..2d66b4bc2ab6b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-ptrs.ll
@@ -29,6 +29,18 @@ entry:
   ret <4 x i32> %gather
 }
 
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i32_opaque(<4 x ptr>* %offptr) {
+; CHECK-LABEL: ptr_v4i32_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x ptr>, <4 x ptr>* %offptr, align 4
+  %gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  ret <4 x i32> %gather
+}
+
 define arm_aapcs_vfpcc <8 x i32> @ptr_v8i32(<8 x i32*>* %offptr) {
 ; CHECK-LABEL: ptr_v8i32:
 ; CHECK:       @ %bb.0: @ %entry
@@ -134,6 +146,18 @@ entry:
   ret <4 x float> %gather
 }
 
+define arm_aapcs_vfpcc <4 x float> @ptr_v4f32_opaque(<4 x ptr>* %offptr) {
+; CHECK-LABEL: ptr_v4f32_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x ptr>, <4 x ptr>* %offptr, align 4
+  %gather = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %offs, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> undef)
+  ret <4 x float> %gather
+}
+
 define arm_aapcs_vfpcc <8 x float> @ptr_v8f32(<8 x float*>* %offptr) {
 ; CHECK-LABEL: ptr_v8f32:
 ; CHECK:       @ %bb.0: @ %entry
@@ -259,6 +283,34 @@ entry:
   ret <4 x i32> %ext
 }
 
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_sext_opaque(<4 x ptr>* %offptr) {
+; CHECK-LABEL: ptr_v4i16_sext_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vldrh.s32 q0, [r1, q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x ptr>, <4 x ptr>* %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ext = sext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
+define arm_aapcs_vfpcc <4 x i32> @ptr_v4i16_zext_opaque(<4 x ptr>* %offptr) {
+; CHECK-LABEL: ptr_v4i16_zext_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vldrh.u32 q0, [r1, q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x ptr>, <4 x ptr>* %offptr, align 4
+  %gather = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i16> undef)
+  %ext = zext <4 x i16> %gather to <4 x i32>
+  ret <4 x i32> %ext
+}
+
 define arm_aapcs_vfpcc <4 x i16> @ptr_v4i16(<4 x i16*>* %offptr) {
 ; CHECK-LABEL: ptr_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
@@ -684,17 +736,17 @@ define void @foo_ptr_p_int32_t(i32* %dest, i32** %src, i32 %n) {
 ; CHECK-NEXT:    cmp r2, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB26_1: @ %vector.body.preheader
+; CHECK-NEXT:  .LBB30_1: @ %vector.body.preheader
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    movs r3, #1
 ; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
-; CHECK-NEXT:  .LBB26_2: @ %vector.body
+; CHECK-NEXT:  .LBB30_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    vptt.i32 ne, q0, zr
 ; CHECK-NEXT:    vldrwt.u32 q1, [q0]
 ; CHECK-NEXT:    vstrwt.32 q1, [r0], #16
-; CHECK-NEXT:    le lr, .LBB26_2
+; CHECK-NEXT:    le lr, .LBB30_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.end
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -720,6 +772,51 @@ for.end:                                          ; preds = %vector.body, %entry
   ret void
 }
 
+define void @foo_ptr_p_int32_t_opaque(ptr %dest, ptr %src, i32 %n) {
+; CHECK-LABEL: foo_ptr_p_int32_t_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    bic r2, r2, #15
+; CHECK-NEXT:    cmp r2, #1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    poplt {r7, pc}
+; CHECK-NEXT:  .LBB31_1: @ %vector.body.preheader
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
+; CHECK-NEXT:  .LBB31_2: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
+; CHECK-NEXT:    vptt.i32 ne, q0, zr
+; CHECK-NEXT:    vldrwt.u32 q1, [q0]
+; CHECK-NEXT:    vstrwt.32 q1, [r0], #16
+; CHECK-NEXT:    le lr, .LBB31_2
+; CHECK-NEXT:  @ %bb.3: @ %for.end
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %and = and i32 %n, -16
+  %cmp11 = icmp sgt i32 %and, 0
+  br i1 %cmp11, label %vector.body, label %for.end
+
+vector.body:                                      ; preds = %entry, %vector.body
+  %index = phi i32 [ %index.next, %vector.body ], [ 0, %entry ]
+  %0 = getelementptr inbounds ptr, ptr %src, i32 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x ptr>, ptr %1, align 4
+  %2 = icmp ne <4 x ptr> %wide.load, zeroinitializer
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %wide.load, i32 4, <4 x i1> %2, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, ptr %dest, i32 %index
+  %4 = bitcast ptr %3 to ptr
+  call void @llvm.masked.store.v4i32.p0(<4 x i32> %wide.masked.gather, ptr %4, i32 4, <4 x i1> %2)
+  %index.next = add i32 %index, 4
+  %5 = icmp eq i32 %index.next, %and
+  br i1 %5, label %for.end, label %vector.body
+
+for.end:                                          ; preds = %vector.body, %entry
+  ret void
+}
+
 define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
 ; CHECK-LABEL: foo_ptr_p_float:
 ; CHECK:       @ %bb.0: @ %entry
@@ -729,17 +826,17 @@ define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
 ; CHECK-NEXT:    cmp r2, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    poplt {r7, pc}
-; CHECK-NEXT:  .LBB27_1: @ %vector.body.preheader
+; CHECK-NEXT:  .LBB32_1: @ %vector.body.preheader
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    movs r3, #1
 ; CHECK-NEXT:    add.w lr, r3, r2, lsr #2
-; CHECK-NEXT:  .LBB27_2: @ %vector.body
+; CHECK-NEXT:  .LBB32_2: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    vptt.i32 ne, q0, zr
 ; CHECK-NEXT:    vldrwt.u32 q1, [q0]
 ; CHECK-NEXT:    vstrwt.32 q1, [r0], #16
-; CHECK-NEXT:    le lr, .LBB27_2
+; CHECK-NEXT:    le lr, .LBB32_2
 ; CHECK-NEXT:  @ %bb.3: @ %for.end
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -837,3 +934,8 @@ declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x
 declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
 declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
 declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+
+declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>)
+declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
index 71de7ba335f1f..7e72edd360993 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-opt.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat=false %s -o 2>/dev/null - | FileCheck --check-prefix NOGATSCAT %s
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=-mve %s -o 2>/dev/null - | FileCheck --check-prefix NOMVE %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat=false %s -o - | FileCheck --check-prefix NOGATSCAT %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=-mve %s -o - | FileCheck --check-prefix NOMVE %s
 
 define arm_aapcs_vfpcc <4 x i32> @unscaled_i32_i32_gather(i8* %base, <4 x i32>* %offptr) {
 ; NOGATSCAT-LABEL: unscaled_i32_i32_gather:

diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index 56421bde7b605..ceece1ad14f38 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp --arm-memtransfer-tploop=allow %s -o 2>/dev/null - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp --arm-memtransfer-tploop=allow %s -o - | FileCheck %s
 
 !0 = !{i32 1, !"wchar_size", i32 4}
 !1 = !{i32 1, !"min_enum_size", i32 4}
@@ -146,22 +146,66 @@ end:
   ret void;
 }
 
+define arm_aapcs_vfpcc void @push_out_mul_add_gather_opaque(ptr noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
+; CHECK-LABEL: push_out_mul_add_gather_opaque:
+; CHECK:       @ %bb.0: @ %vector.ph
+; CHECK-NEXT:    adr r3, .LCPI3_0
+; CHECK-NEXT:    vldrw.u32 q0, [r3]
+; CHECK-NEXT:    vadd.i32 q0, q0, r0
+; CHECK-NEXT:  .LBB3_1: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [q0, #96]!
+; CHECK-NEXT:    subs r2, #4
+; CHECK-NEXT:    vstrb.8 q1, [r1], #16
+; CHECK-NEXT:    bne .LBB3_1
+; CHECK-NEXT:  @ %bb.2: @ %end
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.3:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .long 4294967224 @ 0xffffffb8
+; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
+; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
+; CHECK-NEXT:    .long 0 @ 0x0
+
+vector.ph:                                        ; preds = %for.body.preheader
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+  %0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
+  %1 = add <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
+  %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1
+  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, i32* %dst, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
+  %index.next = add i32 %index, 4
+  %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
+  %5 = icmp eq i32 %index.next, %n.vec
+  br i1 %5, label %end, label %vector.body
+
+end:
+  ret void;
+}
+
 define arm_aapcs_vfpcc void @push_out_mul_scatter(i32* noalias nocapture readonly %data,
 ; CHECK-LABEL: push_out_mul_scatter:
 ; CHECK:       @ %bb.0: @ %vector.ph
-; CHECK-NEXT:    adr r1, .LCPI3_0
+; CHECK-NEXT:    adr r1, .LCPI4_0
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vadd.i32 q1, q1, r0
-; CHECK-NEXT:  .LBB3_1: @ %vector.body
+; CHECK-NEXT:  .LBB4_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vstrw.32 q0, [q1, #96]!
-; CHECK-NEXT:    bne .LBB3_1
+; CHECK-NEXT:    bne .LBB4_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:  .LCPI4_0:
 ; CHECK-NEXT:    .long 4294967200 @ 0xffffffa0
 ; CHECK-NEXT:    .long 4294967224 @ 0xffffffb8
 ; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
@@ -190,19 +234,19 @@ end:
 define arm_aapcs_vfpcc void @push_out_add_scatter(i32* noalias nocapture readonly %data,
 ; CHECK-LABEL: push_out_add_scatter:
 ; CHECK:       @ %bb.0: @ %vector.ph
-; CHECK-NEXT:    adr r1, .LCPI4_0
+; CHECK-NEXT:    adr r1, .LCPI5_0
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
 ; CHECK-NEXT:    vadd.i32 q1, q1, r0
-; CHECK-NEXT:  .LBB4_1: @ %vector.body
+; CHECK-NEXT:  .LBB5_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vstrw.32 q0, [q1, #32]!
-; CHECK-NEXT:    bne .LBB4_1
+; CHECK-NEXT:    bne .LBB5_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:  .LCPI5_0:
 ; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 8 @ 0x8
@@ -231,22 +275,22 @@ end:
 define arm_aapcs_vfpcc void @push_out_mul_gather_scatter(i32* noalias nocapture readonly %data,
 ; CHECK-LABEL: push_out_mul_gather_scatter:
 ; CHECK:       @ %bb.0: @ %vector.ph
-; CHECK-NEXT:    adr r1, .LCPI5_0
+; CHECK-NEXT:    adr r1, .LCPI6_0
 ; CHECK-NEXT:    vmov.i32 q0, #0x18
 ; CHECK-NEXT:    vldrw.u32 q1, [r1]
-; CHECK-NEXT:  .LBB5_1: @ %vector.body
+; CHECK-NEXT:  .LBB6_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    vadd.i32 q3, q1, q0
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vstrw.32 q2, [r0, q1, uxtw #2]
 ; CHECK-NEXT:    vmov q1, q3
-; CHECK-NEXT:    bne .LBB5_1
+; CHECK-NEXT:    bne .LBB6_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:  .LCPI6_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 6 @ 0x6
 ; CHECK-NEXT:    .long 12 @ 0xc
@@ -275,20 +319,20 @@ end:
 define arm_aapcs_vfpcc void @push_out_add_sub_block(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
 ; CHECK-LABEL: push_out_add_sub_block:
 ; CHECK:       @ %bb.0: @ %vector.ph
-; CHECK-NEXT:    adr r3, .LCPI6_0
+; CHECK-NEXT:    adr r3, .LCPI7_0
 ; CHECK-NEXT:    vldrw.u32 q0, [r3]
 ; CHECK-NEXT:    vadd.i32 q0, q0, r0
-; CHECK-NEXT:  .LBB6_1: @ %vector.body
+; CHECK-NEXT:  .LBB7_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q1, [q0, #32]!
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vstrb.8 q1, [r1], #16
-; CHECK-NEXT:    bne .LBB6_1
+; CHECK-NEXT:    bne .LBB7_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:  .LCPI7_0:
 ; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 8 @ 0x8
@@ -326,12 +370,12 @@ define arm_aapcs_vfpcc void @non_gatscat_use1(i32* noalias nocapture readonly %d
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT:    adr.w r12, .LCPI7_0
+; CHECK-NEXT:    adr.w r12, .LCPI8_0
 ; CHECK-NEXT:    vmov.i32 q0, #0x9
 ; CHECK-NEXT:    vldrw.u32 q3, [r12]
 ; CHECK-NEXT:    vmov.i32 q1, #0xc
 ; CHECK-NEXT:    vmov.i32 q2, #0x8
-; CHECK-NEXT:  .LBB7_1: @ %vector.body
+; CHECK-NEXT:  .LBB8_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vadd.i32 q4, q3, q2
 ; CHECK-NEXT:    vmul.i32 q5, q3, q0
@@ -341,13 +385,13 @@ define arm_aapcs_vfpcc void @non_gatscat_use1(i32* noalias nocapture readonly %d
 ; CHECK-NEXT:    vmov q3, q4
 ; CHECK-NEXT:    vstrw.32 q5, [r3]
 ; CHECK-NEXT:    vstrb.8 q6, [r1], #16
-; CHECK-NEXT:    bne .LBB7_1
+; CHECK-NEXT:    bne .LBB8_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:  .LCPI8_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 2 @ 0x2
 ; CHECK-NEXT:    .long 4 @ 0x4
@@ -382,13 +426,13 @@ define arm_aapcs_vfpcc void @non_gatscat_use2(i32* noalias nocapture readonly %d
 ; CHECK:       @ %bb.0: @ %vector.ph
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    adr.w r12, .LCPI8_0
+; CHECK-NEXT:    adr.w r12, .LCPI9_0
 ; CHECK-NEXT:    vmov.i32 q0, #0x12
 ; CHECK-NEXT:    vldrw.u32 q4, [r12]
 ; CHECK-NEXT:    vmov.i32 q1, #0x9
 ; CHECK-NEXT:    vmov.i32 q2, #0x8
 ; CHECK-NEXT:    vmov.i32 q3, #0xc
-; CHECK-NEXT:  .LBB8_1: @ %vector.body
+; CHECK-NEXT:  .LBB9_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vadd.i32 q5, q4, q2
 ; CHECK-NEXT:    vmul.i32 q6, q4, q1
@@ -399,13 +443,13 @@ define arm_aapcs_vfpcc void @non_gatscat_use2(i32* noalias nocapture readonly %d
 ; CHECK-NEXT:    vstrw.32 q4, [r3]
 ; CHECK-NEXT:    vmov q4, q5
 ; CHECK-NEXT:    vstrb.8 q7, [r1], #16
-; CHECK-NEXT:    bne .LBB8_1
+; CHECK-NEXT:    bne .LBB9_1
 ; CHECK-NEXT:  @ %bb.2: @ %end
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.3:
-; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:  .LCPI9_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 2 @ 0x2
 ; CHECK-NEXT:    .long 4 @ 0x4
@@ -456,22 +500,22 @@ define dso_local void @arm_mat_mult_q31(i32* noalias nocapture readonly %A, i32*
 ; CHECK-NEXT:    vshl.i32 q3, q1, #3
 ; CHECK-NEXT:    subs r7, #4
 ; CHECK-NEXT:    add.w r10, r6, r7, lsr #2
-; CHECK-NEXT:    adr r7, .LCPI9_0
-; CHECK-NEXT:    adr r6, .LCPI9_1
+; CHECK-NEXT:    adr r7, .LCPI10_0
+; CHECK-NEXT:    adr r6, .LCPI10_1
 ; CHECK-NEXT:    vldrw.u32 q2, [r7]
 ; CHECK-NEXT:    vldrw.u32 q0, [r6]
 ; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
-; CHECK-NEXT:  .LBB9_1: @ %for.cond8.preheader.us.us.preheader
+; CHECK-NEXT:  .LBB10_1: @ %for.cond8.preheader.us.us.preheader
 ; CHECK-NEXT:    @ =>This Loop Header: Depth=1
-; CHECK-NEXT:    @ Child Loop BB9_2 Depth 2
-; CHECK-NEXT:    @ Child Loop BB9_3 Depth 3
+; CHECK-NEXT:    @ Child Loop BB10_2 Depth 2
+; CHECK-NEXT:    @ Child Loop BB10_3 Depth 3
 ; CHECK-NEXT:    mul r11, r8, r9
 ; CHECK-NEXT:    movs r5, #0
 ; CHECK-NEXT:    mul r7, r8, r12
-; CHECK-NEXT:  .LBB9_2: @ %vector.ph
-; CHECK-NEXT:    @ Parent Loop BB9_1 Depth=1
+; CHECK-NEXT:  .LBB10_2: @ %vector.ph
+; CHECK-NEXT:    @ Parent Loop BB10_1 Depth=1
 ; CHECK-NEXT:    @ => This Loop Header: Depth=2
-; CHECK-NEXT:    @ Child Loop BB9_3 Depth 3
+; CHECK-NEXT:    @ Child Loop BB10_3 Depth 3
 ; CHECK-NEXT:    vdup.32 q5, r7
 ; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
 ; CHECK-NEXT:    vshl.i32 q5, q5, #2
@@ -481,9 +525,9 @@ define dso_local void @arm_mat_mult_q31(i32* noalias nocapture readonly %A, i32*
 ; CHECK-NEXT:    vmov.i32 q4, #0x0
 ; CHECK-NEXT:    vadd.i32 q5, q5, q0
 ; CHECK-NEXT:    vmlas.u32 q6, q2, r5
-; CHECK-NEXT:  .LBB9_3: @ %vector.body
-; CHECK-NEXT:    @ Parent Loop BB9_1 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB9_2 Depth=2
+; CHECK-NEXT:  .LBB10_3: @ %vector.body
+; CHECK-NEXT:    @ Parent Loop BB10_1 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB10_2 Depth=2
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=3
 ; CHECK-NEXT:    vadd.i32 q7, q6, q3
 ; CHECK-NEXT:    vldrw.u32 q0, [r1, q6, uxtw #2]
@@ -491,20 +535,20 @@ define dso_local void @arm_mat_mult_q31(i32* noalias nocapture readonly %A, i32*
 ; CHECK-NEXT:    vmul.i32 q0, q0, q6
 ; CHECK-NEXT:    vmov q6, q7
 ; CHECK-NEXT:    vadd.i32 q4, q0, q4
-; CHECK-NEXT:    le lr, .LBB9_3
+; CHECK-NEXT:    le lr, .LBB10_3
 ; CHECK-NEXT:  @ %bb.4: @ %middle.block
-; CHECK-NEXT:    @ in Loop: Header=BB9_2 Depth=2
+; CHECK-NEXT:    @ in Loop: Header=BB10_2 Depth=2
 ; CHECK-NEXT:    add.w r4, r5, r11
 ; CHECK-NEXT:    adds r5, #1
 ; CHECK-NEXT:    vaddv.u32 r6, q4
 ; CHECK-NEXT:    cmp r5, r9
 ; CHECK-NEXT:    str.w r6, [r2, r4, lsl #2]
-; CHECK-NEXT:    bne .LBB9_2
+; CHECK-NEXT:    bne .LBB10_2
 ; CHECK-NEXT:  @ %bb.5: @ %for.cond4.for.cond.cleanup6_crit_edge.us
-; CHECK-NEXT:    @ in Loop: Header=BB9_1 Depth=1
+; CHECK-NEXT:    @ in Loop: Header=BB10_1 Depth=1
 ; CHECK-NEXT:    add.w r8, r8, #1
 ; CHECK-NEXT:    cmp r8, r3
-; CHECK-NEXT:    bne .LBB9_1
+; CHECK-NEXT:    bne .LBB10_1
 ; CHECK-NEXT:  @ %bb.6: @ %for.end25
 ; CHECK-NEXT:    add sp, #16
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -512,12 +556,12 @@ define dso_local void @arm_mat_mult_q31(i32* noalias nocapture readonly %A, i32*
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.7:
-; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:  .LCPI10_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 2 @ 0x2
 ; CHECK-NEXT:    .long 4 @ 0x4
 ; CHECK-NEXT:    .long 6 @ 0x6
-; CHECK-NEXT:  .LCPI9_1:
+; CHECK-NEXT:  .LCPI10_1:
 ; CHECK-NEXT:    .long 4294967264 @ 0xffffffe0
 ; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
 ; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
@@ -603,13 +647,13 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    itt ne
 ; CHECK-NEXT:    ldrne r0, [sp, #136]
 ; CHECK-NEXT:    cmpne r0, #0
-; CHECK-NEXT:    bne .LBB10_2
-; CHECK-NEXT:  .LBB10_1: @ %for.cond.cleanup
+; CHECK-NEXT:    bne .LBB11_2
+; CHECK-NEXT:  .LBB11_1: @ %for.cond.cleanup
 ; CHECK-NEXT:    add sp, #32
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    add sp, #4
 ; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-; CHECK-NEXT:  .LBB10_2: @ %for.cond1.preheader.us.preheader
+; CHECK-NEXT:  .LBB11_2: @ %for.cond1.preheader.us.preheader
 ; CHECK-NEXT:    ldr.w r12, [sp, #140]
 ; CHECK-NEXT:    movs r7, #1
 ; CHECK-NEXT:    mov.w r11, #0
@@ -618,7 +662,7 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    subs r3, r2, #4
 ; CHECK-NEXT:    add.w r0, r7, r3, lsr #2
 ; CHECK-NEXT:    ldr r7, [sp, #136]
-; CHECK-NEXT:    adr r3, .LCPI10_0
+; CHECK-NEXT:    adr r3, .LCPI11_0
 ; CHECK-NEXT:    str r0, [sp, #16] @ 4-byte Spill
 ; CHECK-NEXT:    lsl.w r0, r12, #1
 ; CHECK-NEXT:    vdup.32 q1, r7
@@ -629,15 +673,15 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    vshl.i32 q3, q1, #2
 ; CHECK-NEXT:    movs r3, #0
 ; CHECK-NEXT:    str r0, [sp, #20] @ 4-byte Spill
-; CHECK-NEXT:    b .LBB10_5
-; CHECK-NEXT:  .LBB10_3: @ %for.cond5.preheader.us73.preheader
-; CHECK-NEXT:    @ in Loop: Header=BB10_5 Depth=1
+; CHECK-NEXT:    b .LBB11_5
+; CHECK-NEXT:  .LBB11_3: @ %for.cond5.preheader.us73.preheader
+; CHECK-NEXT:    @ in Loop: Header=BB11_5 Depth=1
 ; CHECK-NEXT:    ldr r0, [sp, #28] @ 4-byte Reload
 ; CHECK-NEXT:    add.w r3, r0, r5, lsl #1
-; CHECK-NEXT:    wlstp.8 lr, r6, .LBB10_4
-; CHECK-NEXT:    b .LBB10_15
-; CHECK-NEXT:  .LBB10_4: @ %for.cond1.for.cond.cleanup3_crit_edge.us
-; CHECK-NEXT:    @ in Loop: Header=BB10_5 Depth=1
+; CHECK-NEXT:    wlstp.8 lr, r6, .LBB11_4
+; CHECK-NEXT:    b .LBB11_15
+; CHECK-NEXT:  .LBB11_4: @ %for.cond1.for.cond.cleanup3_crit_edge.us
+; CHECK-NEXT:    @ in Loop: Header=BB11_5 Depth=1
 ; CHECK-NEXT:    ldr r0, [sp, #4] @ 4-byte Reload
 ; CHECK-NEXT:    add r11, r12
 ; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
@@ -647,51 +691,51 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    ldr r0, [sp, #8] @ 4-byte Reload
 ; CHECK-NEXT:    adds r3, #1
 ; CHECK-NEXT:    cmp r3, r0
-; CHECK-NEXT:    beq .LBB10_1
-; CHECK-NEXT:  .LBB10_5: @ %for.cond1.preheader.us
+; CHECK-NEXT:    beq .LBB11_1
+; CHECK-NEXT:  .LBB11_5: @ %for.cond1.preheader.us
 ; CHECK-NEXT:    @ =>This Loop Header: Depth=1
-; CHECK-NEXT:    @ Child Loop BB10_8 Depth 2
-; CHECK-NEXT:    @ Child Loop BB10_11 Depth 3
-; CHECK-NEXT:    @ Child Loop BB10_14 Depth 3
-; CHECK-NEXT:    @ Child Loop BB10_15 Depth 2
+; CHECK-NEXT:    @ Child Loop BB11_8 Depth 2
+; CHECK-NEXT:    @ Child Loop BB11_11 Depth 3
+; CHECK-NEXT:    @ Child Loop BB11_14 Depth 3
+; CHECK-NEXT:    @ Child Loop BB11_15 Depth 2
 ; CHECK-NEXT:    mul r5, r3, r7
 ; CHECK-NEXT:    cmp.w r12, #0
 ; CHECK-NEXT:    str r3, [sp, #12] @ 4-byte Spill
-; CHECK-NEXT:    beq .LBB10_3
+; CHECK-NEXT:    beq .LBB11_3
 ; CHECK-NEXT:  @ %bb.6: @ %for.cond5.preheader.us.us.preheader
-; CHECK-NEXT:    @ in Loop: Header=BB10_5 Depth=1
+; CHECK-NEXT:    @ in Loop: Header=BB11_5 Depth=1
 ; CHECK-NEXT:    mov.w r8, #0
-; CHECK-NEXT:    b .LBB10_8
-; CHECK-NEXT:  .LBB10_7: @ %for.cond5.for.cond.cleanup7_crit_edge.us.us
-; CHECK-NEXT:    @ in Loop: Header=BB10_8 Depth=2
+; CHECK-NEXT:    b .LBB11_8
+; CHECK-NEXT:  .LBB11_7: @ %for.cond5.for.cond.cleanup7_crit_edge.us.us
+; CHECK-NEXT:    @ in Loop: Header=BB11_8 Depth=2
 ; CHECK-NEXT:    ldr r3, [sp, #28] @ 4-byte Reload
 ; CHECK-NEXT:    add.w r0, r8, r5
 ; CHECK-NEXT:    add.w r8, r8, #1
 ; CHECK-NEXT:    cmp r8, r7
 ; CHECK-NEXT:    strh.w r10, [r3, r0, lsl #1]
-; CHECK-NEXT:    beq .LBB10_4
-; CHECK-NEXT:  .LBB10_8: @ %for.cond5.preheader.us.us
-; CHECK-NEXT:    @ Parent Loop BB10_5 Depth=1
+; CHECK-NEXT:    beq .LBB11_4
+; CHECK-NEXT:  .LBB11_8: @ %for.cond5.preheader.us.us
+; CHECK-NEXT:    @ Parent Loop BB11_5 Depth=1
 ; CHECK-NEXT:    @ => This Loop Header: Depth=2
-; CHECK-NEXT:    @ Child Loop BB10_11 Depth 3
-; CHECK-NEXT:    @ Child Loop BB10_14 Depth 3
+; CHECK-NEXT:    @ Child Loop BB11_11 Depth 3
+; CHECK-NEXT:    @ Child Loop BB11_14 Depth 3
 ; CHECK-NEXT:    cmp.w r12, #3
-; CHECK-NEXT:    bhi .LBB10_10
-; CHECK-NEXT:  @ %bb.9: @ in Loop: Header=BB10_8 Depth=2
+; CHECK-NEXT:    bhi .LBB11_10
+; CHECK-NEXT:  @ %bb.9: @ in Loop: Header=BB11_8 Depth=2
 ; CHECK-NEXT:    movs r4, #0
 ; CHECK-NEXT:    mov.w r10, #0
-; CHECK-NEXT:    b .LBB10_13
-; CHECK-NEXT:  .LBB10_10: @ %vector.ph
-; CHECK-NEXT:    @ in Loop: Header=BB10_8 Depth=2
+; CHECK-NEXT:    b .LBB11_13
+; CHECK-NEXT:  .LBB11_10: @ %vector.ph
+; CHECK-NEXT:    @ in Loop: Header=BB11_8 Depth=2
 ; CHECK-NEXT:    ldr r0, [sp, #16] @ 4-byte Reload
 ; CHECK-NEXT:    vmov q5, q1
 ; CHECK-NEXT:    vmov.i32 q4, #0x0
 ; CHECK-NEXT:    vmlas.u32 q5, q2, r8
 ; CHECK-NEXT:    dls lr, r0
 ; CHECK-NEXT:    ldr r3, [sp, #20] @ 4-byte Reload
-; CHECK-NEXT:  .LBB10_11: @ %vector.body
-; CHECK-NEXT:    @ Parent Loop BB10_5 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB10_8 Depth=2
+; CHECK-NEXT:  .LBB11_11: @ %vector.body
+; CHECK-NEXT:    @ Parent Loop BB11_5 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB11_8 Depth=2
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=3
 ; CHECK-NEXT:    vadd.i32 q6, q5, q3
 ; CHECK-NEXT:    vldrh.s32 q7, [r1, q5, uxtw #1]
@@ -699,15 +743,15 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    vmul.i32 q5, q7, q5
 ; CHECK-NEXT:    vadd.i32 q4, q5, q4
 ; CHECK-NEXT:    vmov q5, q6
-; CHECK-NEXT:    le lr, .LBB10_11
+; CHECK-NEXT:    le lr, .LBB11_11
 ; CHECK-NEXT:  @ %bb.12: @ %middle.block
-; CHECK-NEXT:    @ in Loop: Header=BB10_8 Depth=2
+; CHECK-NEXT:    @ in Loop: Header=BB11_8 Depth=2
 ; CHECK-NEXT:    vaddv.u32 r10, q4
 ; CHECK-NEXT:    cmp r2, r12
 ; CHECK-NEXT:    mov r4, r2
-; CHECK-NEXT:    beq .LBB10_7
-; CHECK-NEXT:  .LBB10_13: @ %for.body8.us.us.preheader
-; CHECK-NEXT:    @ in Loop: Header=BB10_8 Depth=2
+; CHECK-NEXT:    beq .LBB11_7
+; CHECK-NEXT:  .LBB11_13: @ %for.body8.us.us.preheader
+; CHECK-NEXT:    @ in Loop: Header=BB11_8 Depth=2
 ; CHECK-NEXT:    mla r3, r7, r4, r8
 ; CHECK-NEXT:    add.w r0, r11, r4
 ; CHECK-NEXT:    ldr r7, [sp, #24] @ 4-byte Reload
@@ -715,24 +759,24 @@ define dso_local void @arm_mat_mult_q15(i16* noalias nocapture readonly %A, i16*
 ; CHECK-NEXT:    add.w r9, r7, r0, lsl #1
 ; CHECK-NEXT:    ldr r7, [sp, #136]
 ; CHECK-NEXT:    add.w r3, r1, r3, lsl #1
-; CHECK-NEXT:  .LBB10_14: @ %for.body8.us.us
-; CHECK-NEXT:    @ Parent Loop BB10_5 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB10_8 Depth=2
+; CHECK-NEXT:  .LBB11_14: @ %for.body8.us.us
+; CHECK-NEXT:    @ Parent Loop BB11_5 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB11_8 Depth=2
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=3
 ; CHECK-NEXT:    ldrsh.w r4, [r3]
 ; CHECK-NEXT:    add r3, r6
 ; CHECK-NEXT:    ldrsh r0, [r9], #2
 ; CHECK-NEXT:    smlabb r10, r4, r0, r10
-; CHECK-NEXT:    le lr, .LBB10_14
-; CHECK-NEXT:    b .LBB10_7
-; CHECK-NEXT:  .LBB10_15: @ Parent Loop BB10_5 Depth=1
+; CHECK-NEXT:    le lr, .LBB11_14
+; CHECK-NEXT:    b .LBB11_7
+; CHECK-NEXT:  .LBB11_15: @ Parent Loop BB11_5 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    vstrb.8 q0, [r3], #16
-; CHECK-NEXT:    letp lr, .LBB10_15
-; CHECK-NEXT:    b .LBB10_4
+; CHECK-NEXT:    letp lr, .LBB11_15
+; CHECK-NEXT:    b .LBB11_4
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.16:
-; CHECK-NEXT:  .LCPI10_0:
+; CHECK-NEXT:  .LCPI11_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 1 @ 0x1
 ; CHECK-NEXT:    .long 2 @ 0x2
@@ -856,7 +900,7 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(i8* nocapture readonly
 ; CHECK-NEXT:    sub sp, #8
 ; CHECK-NEXT:    ldrd r2, r7, [sp, #104]
 ; CHECK-NEXT:    add.w r8, r7, #10
-; CHECK-NEXT:    adr r7, .LCPI11_0
+; CHECK-NEXT:    adr r7, .LCPI12_0
 ; CHECK-NEXT:    ldr r1, [sp, #96]
 ; CHECK-NEXT:    vdup.32 q0, r2
 ; CHECK-NEXT:    vldrw.u32 q1, [r7]
@@ -865,36 +909,36 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(i8* nocapture readonly
 ; CHECK-NEXT:    movs r6, #11
 ; CHECK-NEXT:    vshl.i32 q0, q0, #2
 ; CHECK-NEXT:    movs r5, #0
-; CHECK-NEXT:  .LBB11_1: @ %for.body10.i
+; CHECK-NEXT:  .LBB12_1: @ %for.body10.i
 ; CHECK-NEXT:    @ =>This Loop Header: Depth=1
-; CHECK-NEXT:    @ Child Loop BB11_2 Depth 2
-; CHECK-NEXT:    @ Child Loop BB11_3 Depth 3
-; CHECK-NEXT:    @ Child Loop BB11_4 Depth 4
-; CHECK-NEXT:    @ Child Loop BB11_5 Depth 5
+; CHECK-NEXT:    @ Child Loop BB12_2 Depth 2
+; CHECK-NEXT:    @ Child Loop BB12_3 Depth 3
+; CHECK-NEXT:    @ Child Loop BB12_4 Depth 4
+; CHECK-NEXT:    @ Child Loop BB12_5 Depth 5
 ; CHECK-NEXT:    movs r7, #0
 ; CHECK-NEXT:    str r5, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT:  .LBB11_2: @ %for.cond22.preheader.i
-; CHECK-NEXT:    @ Parent Loop BB11_1 Depth=1
+; CHECK-NEXT:  .LBB12_2: @ %for.cond22.preheader.i
+; CHECK-NEXT:    @ Parent Loop BB12_1 Depth=1
 ; CHECK-NEXT:    @ => This Loop Header: Depth=2
-; CHECK-NEXT:    @ Child Loop BB11_3 Depth 3
-; CHECK-NEXT:    @ Child Loop BB11_4 Depth 4
-; CHECK-NEXT:    @ Child Loop BB11_5 Depth 5
+; CHECK-NEXT:    @ Child Loop BB12_3 Depth 3
+; CHECK-NEXT:    @ Child Loop BB12_4 Depth 4
+; CHECK-NEXT:    @ Child Loop BB12_5 Depth 5
 ; CHECK-NEXT:    movs r5, #0
-; CHECK-NEXT:  .LBB11_3: @ %for.body27.i
-; CHECK-NEXT:    @ Parent Loop BB11_1 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB11_2 Depth=2
+; CHECK-NEXT:  .LBB12_3: @ %for.body27.i
+; CHECK-NEXT:    @ Parent Loop BB12_1 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB12_2 Depth=2
 ; CHECK-NEXT:    @ => This Loop Header: Depth=3
-; CHECK-NEXT:    @ Child Loop BB11_4 Depth 4
-; CHECK-NEXT:    @ Child Loop BB11_5 Depth 5
+; CHECK-NEXT:    @ Child Loop BB12_4 Depth 4
+; CHECK-NEXT:    @ Child Loop BB12_5 Depth 5
 ; CHECK-NEXT:    dls lr, r9
 ; CHECK-NEXT:    mov.w r12, #0
 ; CHECK-NEXT:    mov.w r11, #4
-; CHECK-NEXT:  .LBB11_4: @ %for.body78.us.i
-; CHECK-NEXT:    @ Parent Loop BB11_1 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB11_2 Depth=2
-; CHECK-NEXT:    @ Parent Loop BB11_3 Depth=3
+; CHECK-NEXT:  .LBB12_4: @ %for.body78.us.i
+; CHECK-NEXT:    @ Parent Loop BB12_1 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB12_2 Depth=2
+; CHECK-NEXT:    @ Parent Loop BB12_3 Depth=3
 ; CHECK-NEXT:    @ => This Loop Header: Depth=4
-; CHECK-NEXT:    @ Child Loop BB11_5 Depth 5
+; CHECK-NEXT:    @ Child Loop BB12_5 Depth 5
 ; CHECK-NEXT:    mul r4, r11, r6
 ; CHECK-NEXT:    vdup.32 q3, r5
 ; CHECK-NEXT:    vdup.32 q2, r7
@@ -904,11 +948,11 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(i8* nocapture readonly
 ; CHECK-NEXT:    vadd.i32 q4, q1, r4
 ; CHECK-NEXT:    mov r4, r8
 ; CHECK-NEXT:    vmla.u32 q2, q4, r2
-; CHECK-NEXT:  .LBB11_5: @ %vector.body
-; CHECK-NEXT:    @ Parent Loop BB11_1 Depth=1
-; CHECK-NEXT:    @ Parent Loop BB11_2 Depth=2
-; CHECK-NEXT:    @ Parent Loop BB11_3 Depth=3
-; CHECK-NEXT:    @ Parent Loop BB11_4 Depth=4
+; CHECK-NEXT:  .LBB12_5: @ %vector.body
+; CHECK-NEXT:    @ Parent Loop BB12_1 Depth=1
+; CHECK-NEXT:    @ Parent Loop BB12_2 Depth=2
+; CHECK-NEXT:    @ Parent Loop BB12_3 Depth=3
+; CHECK-NEXT:    @ Parent Loop BB12_4 Depth=4
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=5
 ; CHECK-NEXT:    vldrb.s32 q6, [r0, q2]
 ; CHECK-NEXT:    vadd.i32 q5, q2, q0
@@ -919,34 +963,34 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(i8* nocapture readonly
 ; CHECK-NEXT:    vmov q3, q4
 ; CHECK-NEXT:    vmlava.u32 r12, q2, q6
 ; CHECK-NEXT:    vmov q2, q5
-; CHECK-NEXT:    bne .LBB11_5
+; CHECK-NEXT:    bne .LBB12_5
 ; CHECK-NEXT:  @ %bb.6: @ %middle.block
-; CHECK-NEXT:    @ in Loop: Header=BB11_4 Depth=4
+; CHECK-NEXT:    @ in Loop: Header=BB12_4 Depth=4
 ; CHECK-NEXT:    add.w r11, r11, #1
-; CHECK-NEXT:    le lr, .LBB11_4
+; CHECK-NEXT:    le lr, .LBB12_4
 ; CHECK-NEXT:  @ %bb.7: @ %for.cond.cleanup77.i
-; CHECK-NEXT:    @ in Loop: Header=BB11_3 Depth=3
+; CHECK-NEXT:    @ in Loop: Header=BB12_3 Depth=3
 ; CHECK-NEXT:    adds r5, #1
 ; CHECK-NEXT:    add.w r10, r10, #1
 ; CHECK-NEXT:    cmp r5, r2
-; CHECK-NEXT:    bne .LBB11_3
+; CHECK-NEXT:    bne .LBB12_3
 ; CHECK-NEXT:  @ %bb.8: @ %for.cond.cleanup26.i
-; CHECK-NEXT:    @ in Loop: Header=BB11_2 Depth=2
+; CHECK-NEXT:    @ in Loop: Header=BB12_2 Depth=2
 ; CHECK-NEXT:    adds r7, #1
 ; CHECK-NEXT:    cmp r7, r3
-; CHECK-NEXT:    bne .LBB11_2
+; CHECK-NEXT:    bne .LBB12_2
 ; CHECK-NEXT:  @ %bb.9: @ %for.cond.cleanup20.i
-; CHECK-NEXT:    @ in Loop: Header=BB11_1 Depth=1
+; CHECK-NEXT:    @ in Loop: Header=BB12_1 Depth=1
 ; CHECK-NEXT:    ldr r5, [sp, #4] @ 4-byte Reload
 ; CHECK-NEXT:    ldr r7, [sp, #148]
 ; CHECK-NEXT:    adds r5, #1
 ; CHECK-NEXT:    cmp r5, r7
 ; CHECK-NEXT:    it eq
 ; CHECK-NEXT:    moveq r5, #0
-; CHECK-NEXT:    b .LBB11_1
+; CHECK-NEXT:    b .LBB12_1
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.10:
-; CHECK-NEXT:  .LCPI11_0:
+; CHECK-NEXT:  .LCPI12_0:
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 1 @ 0x1
 ; CHECK-NEXT:    .long 2 @ 0x2
@@ -1092,6 +1136,7 @@ if.end:                                           ; preds = %for.cond.cleanup9.i
 }
 
 declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)
 declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
 declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32 immarg, <4 x i1>, <4 x i8>) #3
 

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
index 1410e9f85e505..fc2a696940add 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-ptr-address.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o 2>/dev/null - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
 define void @ptr_iv_v4i32(i32* noalias nocapture readonly %A, i32* noalias nocapture %B, i32 %y) {
 ; CHECK-LABEL: ptr_iv_v4i32:

diff  --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
index e885a7af5d86d..c3c94a598de22 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
@@ -8,25 +8,19 @@ define dso_local void @mve_gather_qi_wb(i32* noalias nocapture readonly %A, i32*
 ; CHECK-NEXT:    push {r4, lr}
 ; CHECK-NEXT:    add.w r4, r0, r3, lsl #2
 ; CHECK-NEXT:    adr r0, .LCPI0_0
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vmov.i32 q1, #0x0
-; CHECK-NEXT:    movw r12, #1250
-; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    vadd.i32 q1, q1, r1
 ; CHECK-NEXT:    adds r1, r3, #4
-; CHECK-NEXT:    dls lr, r12
+; CHECK-NEXT:    dlstp.32 lr, r3
 ; CHECK-NEXT:  .LBB0_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vctp.32 r3
-; CHECK-NEXT:    vmov q2, q1
-; CHECK-NEXT:    vpstt
-; CHECK-NEXT:    vldrwt.u32 q1, [r4], #16
-; CHECK-NEXT:    vldrwt.u32 q3, [q0, #80]!
-; CHECK-NEXT:    subs r3, #4
-; CHECK-NEXT:    vmul.i32 q1, q3, q1
-; CHECK-NEXT:    vadd.i32 q1, q2, q1
-; CHECK-NEXT:    le lr, .LBB0_1
+; CHECK-NEXT:    vldrw.u32 q2, [r4], #16
+; CHECK-NEXT:    vldrw.u32 q3, [q1, #80]!
+; CHECK-NEXT:    vmul.i32 q2, q3, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q2
+; CHECK-NEXT:    letp lr, .LBB0_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
-; CHECK-NEXT:    vpsel q0, q1, q2
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
 ; CHECK-NEXT:    pop {r4, pc}
@@ -62,7 +56,7 @@ vector.body:                                      ; preds = %vector.body, %entry
   br i1 %8, label %middle.block, label %vector.body
 middle.block:                                     ; preds = %vector.body
   %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
-  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
+  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
   store i32 %10, i32* %arrayidx.us.us, align 4
   %inc21.us.us = add nuw i32 4, 1
   %exitcond81.not = icmp eq i32 %inc21.us.us, %n
@@ -80,27 +74,21 @@ define dso_local void @mve_gatherscatter_offset(i32* noalias nocapture readonly
 ; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    add.w r4, r0, r3, lsl #2
 ; CHECK-NEXT:    adr r0, .LCPI1_0
-; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
 ; CHECK-NEXT:    add.w r12, r3, #4
-; CHECK-NEXT:    vmov.i32 q2, #0x0
-; CHECK-NEXT:    vmov.i32 q0, #0x14
-; CHECK-NEXT:    movw lr, #1250
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    vmov.i32 q1, #0x14
+; CHECK-NEXT:    dlstp.32 lr, r3
 ; CHECK-NEXT:  .LBB1_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vctp.32 r3
-; CHECK-NEXT:    vmov q3, q2
-; CHECK-NEXT:    vpstt
-; CHECK-NEXT:    vldrwt.u32 q2, [r1, q1, uxtw #2]
-; CHECK-NEXT:    vldrwt.u32 q4, [r4], #16
-; CHECK-NEXT:    subs r3, #4
-; CHECK-NEXT:    vmul.i32 q2, q2, q4
-; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q2, [r1, q1, uxtw #2]
-; CHECK-NEXT:    vadd.i32 q1, q1, q0
-; CHECK-NEXT:    vadd.i32 q2, q3, q2
-; CHECK-NEXT:    le lr, .LBB1_1
+; CHECK-NEXT:    vldrw.u32 q3, [r1, q2, uxtw #2]
+; CHECK-NEXT:    vldrw.u32 q4, [r4], #16
+; CHECK-NEXT:    vmul.i32 q3, q3, q4
+; CHECK-NEXT:    vstrw.32 q3, [r1, q2, uxtw #2]
+; CHECK-NEXT:    vadd.i32 q2, q2, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
+; CHECK-NEXT:    letp lr, .LBB1_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
-; CHECK-NEXT:    vpsel q0, q2, q3
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    str.w r0, [r2, r12, lsl #2]
 ; CHECK-NEXT:    vpop {d8, d9}
@@ -138,7 +126,7 @@ vector.body:                                      ; preds = %vector.body, %entry
   br i1 %8, label %middle.block, label %vector.body
 middle.block:                                     ; preds = %vector.body
   %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
-  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
+  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
   store i32 %10, i32* %arrayidx.us.us, align 4
   %inc21.us.us = add nuw i32 4, 1
   %exitcond81.not = icmp eq i32 %inc21.us.us, %n
@@ -146,6 +134,7 @@ middle.block:                                     ; preds = %vector.body
 end:                                 ; preds = %middle.block
   ret void
 }
+
 define dso_local void @mve_scatter_qi(i32* noalias nocapture readonly %A, i32* noalias nocapture readonly %B, i32* noalias nocapture %C, i32 %n, i32 %m, i32 %l) {
 ; CHECK-LABEL: mve_scatter_qi:
 ; CHECK:       @ %bb.0: @ %entry
@@ -153,27 +142,25 @@ define dso_local void @mve_scatter_qi(i32* noalias nocapture readonly %A, i32* n
 ; CHECK-NEXT:    push {r4, lr}
 ; CHECK-NEXT:    add.w r4, r0, r3, lsl #2
 ; CHECK-NEXT:    adr r0, .LCPI2_0
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    movw r12, #1250
 ; CHECK-NEXT:    vmov.i32 q2, #0x3
-; CHECK-NEXT:    vadd.i32 q0, q0, r1
+; CHECK-NEXT:    vadd.i32 q1, q1, r1
 ; CHECK-NEXT:    adds r1, r3, #4
 ; CHECK-NEXT:    dls lr, r12
 ; CHECK-NEXT:  .LBB2_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vctp.32 r3
-; CHECK-NEXT:    vmov q3, q1
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vldrwt.u32 q1, [r4], #16
+; CHECK-NEXT:    vldrwt.u32 q3, [r4], #16
+; CHECK-NEXT:    vmul.i32 q3, q3, q2
 ; CHECK-NEXT:    subs r3, #4
-; CHECK-NEXT:    vmul.i32 q1, q1, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
 ; CHECK-NEXT:    vpst
-; CHECK-NEXT:    vstrwt.32 q1, [q0, #80]!
-; CHECK-NEXT:    vadd.i32 q1, q3, q1
+; CHECK-NEXT:    vstrwt.32 q3, [q1, #80]!
 ; CHECK-NEXT:    le lr, .LBB2_1
 ; CHECK-NEXT:  @ %bb.2: @ %middle.block
-; CHECK-NEXT:    vpsel q0, q1, q3
 ; CHECK-NEXT:    vaddv.u32 r0, q0
 ; CHECK-NEXT:    str.w r0, [r2, r1, lsl #2]
 ; CHECK-NEXT:    pop {r4, pc}
@@ -209,7 +196,7 @@ vector.body:                                      ; preds = %vector.body, %entry
   br i1 %8, label %middle.block, label %vector.body
 middle.block:                                     ; preds = %vector.body
   %9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
-  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
+  %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
   store i32 %10, i32* %arrayidx.us.us, align 4
   %inc21.us.us = add nuw i32 4, 1
   %exitcond81.not = icmp eq i32 %inc21.us.us, %n

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
index 47805d60e6129..312d853ef192a 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-increment.ll
@@ -224,6 +224,103 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
   ret void
 }
 
+define arm_aapcs_vfpcc void @scatter_inc_v4i32_complex_opaque(<4 x i32> %data1, <4 x i32> %data2, <4 x i32> %data3, ptr %dst, i32 %n) {
+; CHECK-LABEL: scatter_inc_v4i32_complex_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB4_5
+; CHECK-NEXT:  @ %bb.1: @ %vector.ph.preheader
+; CHECK-NEXT:    adr r4, .LCPI4_2
+; CHECK-NEXT:    bic r2, r1, #3
+; CHECK-NEXT:    vldrw.u32 q3, [r4]
+; CHECK-NEXT:    sub.w r12, r2, #4
+; CHECK-NEXT:    adr.w lr, .LCPI4_1
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    vadd.i32 q3, q3, r0
+; CHECK-NEXT:    add.w r3, r3, r12, lsr #2
+; CHECK-NEXT:    vstrw.32 q3, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q3, [lr]
+; CHECK-NEXT:    adr.w r12, .LCPI4_0
+; CHECK-NEXT:    vadd.i32 q4, q3, r0
+; CHECK-NEXT:    vldrw.u32 q3, [r12]
+; CHECK-NEXT:    vadd.i32 q3, q3, r0
+; CHECK-NEXT:  .LBB4_2: @ %vector.ph
+; CHECK-NEXT:    @ =>This Loop Header: Depth=1
+; CHECK-NEXT:    @ Child Loop BB4_3 Depth 2
+; CHECK-NEXT:    dls lr, r3
+; CHECK-NEXT:    vmov q6, q4
+; CHECK-NEXT:    vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov q5, q3
+; CHECK-NEXT:  .LBB4_3: @ %vector.body
+; CHECK-NEXT:    @ Parent Loop BB4_2 Depth=1
+; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    vstrw.32 q0, [q5, #48]!
+; CHECK-NEXT:    vstrw.32 q1, [q6, #48]!
+; CHECK-NEXT:    vstrw.32 q2, [q7, #48]!
+; CHECK-NEXT:    le lr, .LBB4_3
+; CHECK-NEXT:  @ %bb.4: @ %middle.block
+; CHECK-NEXT:    @ in Loop: Header=BB4_2 Depth=1
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    bne .LBB4_2
+; CHECK-NEXT:  .LBB4_5: @ %for.cond.cleanup
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.6:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 4294967248 @ 0xffffffd0
+; CHECK-NEXT:    .long 4294967260 @ 0xffffffdc
+; CHECK-NEXT:    .long 4294967272 @ 0xffffffe8
+; CHECK-NEXT:    .long 4294967284 @ 0xfffffff4
+; CHECK-NEXT:  .LCPI4_1:
+; CHECK-NEXT:    .long 4294967252 @ 0xffffffd4
+; CHECK-NEXT:    .long 4294967264 @ 0xffffffe0
+; CHECK-NEXT:    .long 4294967276 @ 0xffffffec
+; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
+; CHECK-NEXT:  .LCPI4_2:
+; CHECK-NEXT:    .long 4294967256 @ 0xffffffd8
+; CHECK-NEXT:    .long 4294967268 @ 0xffffffe4
+; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
+; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
+entry:
+  %cmp22 = icmp sgt i32 %n, 0
+  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+  %0 = mul nuw nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
+  %1 = getelementptr inbounds i32, ptr %dst, <4 x i32> %0
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data1, <4 x ptr> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %2 = add nuw nsw <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
+  %3 = getelementptr inbounds i32, ptr %dst, <4 x i32> %2
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data2, <4 x ptr> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %4 = add nuw nsw <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
+  %5 = getelementptr inbounds i32, ptr %dst, <4 x i32> %4
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %data3, <4 x ptr> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  %index.next = add i32 %index, 4
+  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
+  %6 = icmp eq i32 %index.next, %n.vec
+  br i1 %6, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  ret void
+}
 
 declare void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>)
 declare void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>)
@@ -233,4 +330,5 @@ declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>
 declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
index 61b85e0247e6b..affb361febd68 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ind32-unscaled.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o 2>/dev/null - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
 
 ; VLDRB.u32 Qd, [base, offs]
 define arm_aapcs_vfpcc void @ext_unscaled_i8_i32(i8* %base, <4 x i32>* %offptr, <4 x i32> %input) {

diff  --git a/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll b/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
index 661b5834637a3..e8daac426b4cf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-scatter-ptrs.ll
@@ -242,6 +242,20 @@ entry:
   ret void
 }
 
+define arm_aapcs_vfpcc void @ptr_v4i16_trunc_opaque(<4 x i32> %v, ptr %offptr) {
+; CHECK-LABEL: ptr_v4i16_trunc_opaque:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vstrh.32 q0, [r0, q1]
+; CHECK-NEXT:    bx lr
+entry:
+  %offs = load <4 x ptr>, ptr %offptr, align 4
+  %ext = trunc <4 x i32> %v to <4 x i16>
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %ext, <4 x ptr> %offs, i32 2, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+  ret void
+}
+
 define arm_aapcs_vfpcc void @ptr_v4i16_dup(i32 %v, <4 x i16*> %offs) {
 ; CHECK-LABEL: ptr_v4i16_dup:
 ; CHECK:       @ %bb.0: @ %entry
@@ -507,14 +521,14 @@ define void @foo_ptr_p_int32_t(i32* %dest, i32** %src, i32 %n) {
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:  .LBB19_1: @ %vector.body
+; CHECK-NEXT:  .LBB20_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vptt.i32 ne, q0, zr
 ; CHECK-NEXT:    vldrwt.u32 q1, [r0], #16
 ; CHECK-NEXT:    vstrwt.32 q1, [q0]
-; CHECK-NEXT:    bne .LBB19_1
+; CHECK-NEXT:    bne .LBB20_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.end
 ; CHECK-NEXT:    bx lr
 entry:
@@ -547,14 +561,14 @@ define void @foo_ptr_p_float(float* %dest, float** %src, i32 %n) {
 ; CHECK-NEXT:    cmp r3, #1
 ; CHECK-NEXT:    it lt
 ; CHECK-NEXT:    bxlt lr
-; CHECK-NEXT:  .LBB20_1: @ %vector.body
+; CHECK-NEXT:  .LBB21_1: @ %vector.body
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vldrw.u32 q0, [r1], #16
 ; CHECK-NEXT:    subs r2, #4
 ; CHECK-NEXT:    vptt.i32 ne, q0, zr
 ; CHECK-NEXT:    vldrwt.u32 q1, [r0], #16
 ; CHECK-NEXT:    vstrwt.32 q1, [q0]
-; CHECK-NEXT:    bne .LBB20_1
+; CHECK-NEXT:    bne .LBB21_1
 ; CHECK-NEXT:  @ %bb.2: @ %for.end
 ; CHECK-NEXT:    bx lr
 entry:
@@ -600,6 +614,7 @@ declare void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32>, <2 x i32*>, i32, <2 x
 declare void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float>, <2 x float*>, i32, <2 x i1>)
 declare void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>)
+declare void @llvm.masked.scatter.v4i16.v4p0(<4 x i16>, <4 x ptr>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half>, <4 x half*>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
 declare void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float>, <4 x float*>, i32, <4 x i1>)


        


More information about the llvm-commits mailing list