[llvm] 52272f2 - [AArch64] Update some sve-fixed-length test checks.

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 23 11:30:52 PDT 2021


Author: David Green
Date: 2021-09-23T19:30:45+01:00
New Revision: 52272f294ffc28691ec3d77582c3678273d25411

URL: https://github.com/llvm/llvm-project/commit/52272f294ffc28691ec3d77582c3678273d25411
DIFF: https://github.com/llvm/llvm-project/commit/52272f294ffc28691ec3d77582c3678273d25411.diff

LOG: [AArch64] Update some sve-fixed-length test checks.

Some of these tests show very poor code generation. Update the tests
to make them more maintainable and to prevent problems from being
hidden behind badly written test checks. In some of them the check
lines were also using incorrect prefixes.

These are not quite auto-generated. They are generated with the normal
update scripts and then uninteresting checks are removed, which at
least makes the tests _more_ maintainable without materially changing
what they are testing.
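
For reference, the regeneration step is roughly the following, run
from the top of an llvm-project checkout; the build/bin path here is
an assumed build location, not something taken from this commit:

  $ python llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll

The uninteresting check lines are then deleted by hand.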

I have otherwise attempted not to alter what is tested.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll
index 724690f8814d4..4899d313f6d1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-float-compares.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -27,8 +27,9 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Don't use SVE for 64-bit vectors.
 define <4 x i16> @fcmp_oeq_v4f16(<4 x half> %op1, <4 x half> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v4f16:
-; CHECK: fcmeq v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <4 x half> %op1, %op2
   %sext = sext <4 x i1> %cmp to <4 x i16>
   ret <4 x i16> %sext
@@ -37,8 +38,9 @@ define <4 x i16> @fcmp_oeq_v4f16(<4 x half> %op1, <4 x half> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <8 x i16> @fcmp_oeq_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v8f16:
-; CHECK: fcmeq v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <8 x half> %op1, %op2
   %sext = sext <8 x i1> %cmp to <8 x i16>
   ret <8 x i16> %sext
@@ -46,13 +48,14 @@ define <8 x i16> @fcmp_oeq_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
 
 define void @fcmp_oeq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_oeq_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp oeq <16 x half> %op1, %op2
@@ -62,29 +65,32 @@ define void @fcmp_oeq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 }
 
 define void @fcmp_oeq_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i16>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v32f16:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; VBITS_GE_512-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #16
-; VBITS_EQ_256-DAG: ld1h { [[OP1_LO:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1h { [[OP1_HI:z[0-9]+]].h }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_LO:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_HI:z[0-9]+]].h }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: fcmeq [[CMP_HI:p[0-9]+]].h, [[PG]]/z, [[OP1_HI]].h, [[OP2_HI]].h
-; VBITS_EQ_256-DAG: mov [[SEXT_HI:z[0-9]+]].h, [[CMP_HI]]/z, #-1
-; VBITS_EQ_256-DAG: fcmeq [[CMP_LO:p[0-9]+]].h, [[PG]]/z, [[OP1_LO]].h, [[OP2_LO]].h
-; VBITS_EQ_256-DAG: mov [[SEXT_LO:z[0-9]+]].h, [[CMP_LO]]/z, #-1
-; VBITS_EQ_256-DAG: st1h { [[SEXT_LO]].h }, [[PG]], [x2]
-; VBITS_EQ_256-DAG: st1h { [[SEXT_HI]].h }, [[PG]], [x2, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: fcmp_oeq_v32f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    fcmeq p1.h, p0/z, z0.h, z2.h
+; VBITS_EQ_256-NEXT:    fcmeq p2.h, p0/z, z1.h, z3.h
+; VBITS_EQ_256-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    mov z1.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x2, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x2]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcmp_oeq_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_512-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x half>, <32 x half>* %a
   %op2 = load <32 x half>, <32 x half>* %b
   %cmp = fcmp oeq <32 x half> %op1, %op2
@@ -94,14 +100,15 @@ define void @fcmp_oeq_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i16>* %c) #
 }
 
 define void @fcmp_oeq_v64f16(<64 x half>* %a, <64 x half>* %b, <64 x i16>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v64f16:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1024-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; VBITS_GE_1024-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcmp_oeq_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_1024-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <64 x half>, <64 x half>* %a
   %op2 = load <64 x half>, <64 x half>* %b
   %cmp = fcmp oeq <64 x half> %op1, %op2
@@ -111,14 +118,15 @@ define void @fcmp_oeq_v64f16(<64 x half>* %a, <64 x half>* %b, <64 x i16>* %c) #
 }
 
 define void @fcmp_oeq_v128f16(<128 x half>* %a, <128 x half>* %b, <128 x i16>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v128f16:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcmp_oeq_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x half>, <128 x half>* %a
   %op2 = load <128 x half>, <128 x half>* %b
   %cmp = fcmp oeq <128 x half> %op1, %op2
@@ -130,8 +138,9 @@ define void @fcmp_oeq_v128f16(<128 x half>* %a, <128 x half>* %b, <128 x i16>* %
 ; Don't use SVE for 64-bit vectors.
 define <2 x i32> @fcmp_oeq_v2f32(<2 x float> %op1, <2 x float> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v2f32:
-; CHECK: fcmeq v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <2 x float> %op1, %op2
   %sext = sext <2 x i1> %cmp to <2 x i32>
   ret <2 x i32> %sext
@@ -140,8 +149,9 @@ define <2 x i32> @fcmp_oeq_v2f32(<2 x float> %op1, <2 x float> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x i32> @fcmp_oeq_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v4f32:
-; CHECK: fcmeq v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <4 x float> %op1, %op2
   %sext = sext <4 x i1> %cmp to <4 x i32>
   ret <4 x i32> %sext
@@ -149,13 +159,14 @@ define <4 x i32> @fcmp_oeq_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
 
 define void @fcmp_oeq_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i32>* %c) #0 {
 ; CHECK-LABEL: fcmp_oeq_v8f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG]]/z, [[OP1]].s, [[OP2]].s
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].s, [[CMP]]/z, #-1
-; CHECK-NEXT: st1w { [[SEXT]].s }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1w { z0.s }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %op2 = load <8 x float>, <8 x float>* %b
   %cmp = fcmp oeq <8 x float> %op1, %op2
@@ -165,29 +176,32 @@ define void @fcmp_oeq_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i32>* %c) #0
 }
 
 define void @fcmp_oeq_v16f32(<16 x float>* %a, <16 x float>* %b, <16 x i32>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v16f32:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG]]/z, [[OP1]].s, [[OP2]].s
-; VBITS_GE_512-NEXT: mov [[SEXT:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: st1w { [[SEXT]].s }, [[PG]], [x2]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[OP1_LO:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[OP1_HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_LO:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_HI:z[0-9]+]].s }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: fcmeq [[CMP_HI:p[0-9]+]].s, [[PG]]/z, [[OP1_HI]].s, [[OP2_HI]].s
-; VBITS_EQ_256-DAG: mov [[SEXT_HI:z[0-9]+]].s, [[CMP_HI]]/z, #-1
-; VBITS_EQ_256-DAG: fcmeq [[CMP_LO:p[0-9]+]].s, [[PG]]/z, [[OP1_LO]].s, [[OP2_LO]].s
-; VBITS_EQ_256-DAG: mov [[SEXT_LO:z[0-9]+]].s, [[CMP_LO]]/z, #-1
-; VBITS_EQ_256-DAG: st1w { [[SEXT_LO]].s }, [[PG]], [x2]
-; VBITS_EQ_256-DAG: st1w { [[SEXT_HI]].s }, [[PG]], [x2, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: fcmp_oeq_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
+; VBITS_EQ_256-NEXT:    fcmeq p2.s, p0/z, z1.s, z3.s
+; VBITS_EQ_256-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x2, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x2]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcmp_oeq_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %op2 = load <16 x float>, <16 x float>* %b
   %cmp = fcmp oeq <16 x float> %op1, %op2
@@ -197,14 +211,15 @@ define void @fcmp_oeq_v16f32(<16 x float>* %a, <16 x float>* %b, <16 x i32>* %c)
 }
 
 define void @fcmp_oeq_v32f32(<32 x float>* %a, <32 x float>* %b, <32 x i32>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v32f32:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1024-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG]]/z, [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024-NEXT: mov [[SEXT:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: st1w { [[SEXT]].s }, [[PG]], [x2]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcmp_oeq_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x float>, <32 x float>* %a
   %op2 = load <32 x float>, <32 x float>* %b
   %cmp = fcmp oeq <32 x float> %op1, %op2
@@ -214,14 +229,15 @@ define void @fcmp_oeq_v32f32(<32 x float>* %a, <32 x float>* %b, <32 x i32>* %c)
 }
 
 define void @fcmp_oeq_v64f32(<64 x float>* %a, <64 x float>* %b, <64 x i32>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v64f32:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2048-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG]]/z, [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048-NEXT: mov [[SEXT:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: st1w { [[SEXT]].s }, [[PG]], [x2]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcmp_oeq_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x float>, <64 x float>* %a
   %op2 = load <64 x float>, <64 x float>* %b
   %cmp = fcmp oeq <64 x float> %op1, %op2
@@ -233,8 +249,9 @@ define void @fcmp_oeq_v64f32(<64 x float>* %a, <64 x float>* %b, <64 x i32>* %c)
 ; Don't use SVE for 64-bit vectors.
 define <1 x i64> @fcmp_oeq_v1f64(<1 x double> %op1, <1 x double> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v1f64:
-; CHECK: fcmeq d0, d0, d1
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq d0, d0, d1
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <1 x double> %op1, %op2
   %sext = sext <1 x i1> %cmp to <1 x i64>
   ret <1 x i64> %sext
@@ -243,8 +260,9 @@ define <1 x i64> @fcmp_oeq_v1f64(<1 x double> %op1, <1 x double> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x i64> @fcmp_oeq_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
 ; CHECK-LABEL: fcmp_oeq_v2f64:
-; CHECK: fcmeq v0.2d, v0.2d, v1.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmeq v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
   %cmp = fcmp oeq <2 x double> %op1, %op2
   %sext = sext <2 x i1> %cmp to <2 x i64>
   ret <2 x i64> %sext
@@ -252,13 +270,14 @@ define <2 x i64> @fcmp_oeq_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
 
 define void @fcmp_oeq_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x i64>* %c) #0 {
 ; CHECK-LABEL: fcmp_oeq_v4f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq [[CMP:p[0-9]+]].d, [[PG]]/z, [[OP1]].d, [[OP2]].d
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].d, [[CMP]]/z, #-1
-; CHECK-NEXT: st1d { [[SEXT]].d }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; CHECK-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1d { z0.d }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
   %cmp = fcmp oeq <4 x double> %op1, %op2
@@ -268,29 +287,32 @@ define void @fcmp_oeq_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x i64>* %c) #
 }
 
 define void @fcmp_oeq_v8f64(<8 x double>* %a, <8 x double>* %b, <8 x i64>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v8f64:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].d, [[PG]]/z, [[OP1]].d, [[OP2]].d
-; VBITS_GE_512-NEXT: mov [[SEXT:z[0-9]+]].d, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: st1d { [[SEXT]].d }, [[PG]], [x2]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[OP1_LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[OP1_HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: fcmeq [[CMP_HI:p[0-9]+]].d, [[PG]]/z, [[OP1_HI]].d, [[OP2_HI]].d
-; VBITS_EQ_256-DAG: mov [[SEXT_HI:z[0-9]+]].d, [[CMP_HI]]/z, #-1
-; VBITS_EQ_256-DAG: fcmeq [[CMP_LO:p[0-9]+]].d, [[PG]]/z, [[OP1_LO]].d, [[OP2_LO]].d
-; VBITS_EQ_256-DAG: mov [[SEXT_LO:z[0-9]+]].d, [[CMP_LO]]/z, #-1
-; VBITS_EQ_256-DAG: st1d { [[SEXT_LO]].d }, [[PG]], [x2]
-; VBITS_EQ_256-DAG: st1d { [[SEXT_HI]].d }, [[PG]], [x2, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: fcmp_oeq_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    fcmeq p1.d, p0/z, z0.d, z2.d
+; VBITS_EQ_256-NEXT:    fcmeq p2.d, p0/z, z1.d, z3.d
+; VBITS_EQ_256-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    mov z1.d, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x2, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x2]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcmp_oeq_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %op2 = load <8 x double>, <8 x double>* %b
   %cmp = fcmp oeq <8 x double> %op1, %op2
@@ -300,14 +322,15 @@ define void @fcmp_oeq_v8f64(<8 x double>* %a, <8 x double>* %b, <8 x i64>* %c) #
 }
 
 define void @fcmp_oeq_v16f64(<16 x double>* %a, <16 x double>* %b, <16 x i64>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v16f64:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].d, [[PG]]/z, [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024-NEXT: mov [[SEXT:z[0-9]+]].d, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: st1d { [[SEXT]].d }, [[PG]], [x2]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcmp_oeq_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x2]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %op2 = load <16 x double>, <16 x double>* %b
   %cmp = fcmp oeq <16 x double> %op1, %op2
@@ -317,14 +340,15 @@ define void @fcmp_oeq_v16f64(<16 x double>* %a, <16 x double>* %b, <16 x i64>* %
 }
 
 define void @fcmp_oeq_v32f64(<32 x double>* %a, <32 x double>* %b, <32 x i64>* %c) #0 {
-; CHECK-LABEL: fcmp_oeq_v32f64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].d, [[PG]]/z, [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048-NEXT: mov [[SEXT:z[0-9]+]].d, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: st1d { [[SEXT]].d }, [[PG]], [x2]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcmp_oeq_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    mov z0.d, p1/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x2]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x double>, <32 x double>* %a
   %op2 = load <32 x double>, <32 x double>* %b
   %cmp = fcmp oeq <32 x double> %op1, %op2
@@ -339,16 +363,17 @@ define void @fcmp_oeq_v32f64(<32 x double>* %a, <32 x double>* %b, <32 x i64>* %
 
 define void @fcmp_ueq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ueq_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmne [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmne p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ueq <16 x half> %op1, %op2
@@ -363,13 +388,14 @@ define void @fcmp_ueq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_one_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_one_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmne [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmne p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp one <16 x half> %op1, %op2
@@ -384,16 +410,17 @@ define void @fcmp_one_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_une_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_une_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp une <16 x half> %op1, %op2
@@ -408,13 +435,14 @@ define void @fcmp_une_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ogt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ogt_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ogt <16 x half> %op1, %op2
@@ -429,16 +457,17 @@ define void @fcmp_ogt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ugt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ugt_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ugt <16 x half> %op1, %op2
@@ -453,13 +482,14 @@ define void @fcmp_ugt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_olt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_olt_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp olt <16 x half> %op1, %op2
@@ -474,16 +504,17 @@ define void @fcmp_olt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ult_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ult_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ult <16 x half> %op1, %op2
@@ -498,13 +529,14 @@ define void @fcmp_ult_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_oge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_oge_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp oge <16 x half> %op1, %op2
@@ -519,16 +551,17 @@ define void @fcmp_oge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_uge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_uge_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp uge <16 x half> %op1, %op2
@@ -543,13 +576,14 @@ define void @fcmp_uge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ole_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ole_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ole <16 x half> %op1, %op2
@@ -564,16 +598,17 @@ define void @fcmp_ole_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ule_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ule_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ule <16 x half> %op1, %op2
@@ -588,13 +623,14 @@ define void @fcmp_ule_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_uno_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_uno_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmuo [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp uno <16 x half> %op1, %op2
@@ -609,16 +645,17 @@ define void @fcmp_uno_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_ord_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ord_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov [[INV:w[0-9]+]], #65535
-; CHECK-NEXT: fcmuo [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: mov [[TMP:z[0-9]+]].h, [[INV]]
-; CHECK-NEXT: eor [[SEXT]].d, [[SEXT]].d, [[TMP]].d
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    fcmuo p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    eor z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp ord <16 x half> %op1, %op2
@@ -633,13 +670,14 @@ define void @fcmp_ord_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #
 
 define void @fcmp_eq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_eq_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast oeq <16 x half> %op1, %op2
@@ -654,13 +692,14 @@ define void @fcmp_eq_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0
 
 define void @fcmp_ne_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ne_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmne [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmne p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast one <16 x half> %op1, %op2
@@ -675,13 +714,14 @@ define void @fcmp_ne_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0
 
 define void @fcmp_gt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_gt_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast ogt <16 x half> %op1, %op2
@@ -696,13 +736,14 @@ define void @fcmp_gt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0
 
 define void @fcmp_lt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_lt_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmgt [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmgt p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast olt <16 x half> %op1, %op2
@@ -717,13 +758,14 @@ define void @fcmp_lt_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0
 
 define void @fcmp_ge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_ge_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast oge <16 x half> %op1, %op2
@@ -738,13 +780,14 @@ define void @fcmp_ge_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0
 
 define void @fcmp_le_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i16>* %c) #0 {
 ; CHECK-LABEL: fcmp_le_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmge [[CMP:p[0-9]+]].h, [[PG]]/z, [[OP2]].h, [[OP1]].h
-; CHECK-NEXT: mov [[SEXT:z[0-9]+]].h, [[CMP]]/z, #-1
-; CHECK-NEXT: st1h { [[SEXT]].h }, [[PG]], [x2]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    mov z0.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    st1h { z0.h }, p0, [x2]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %cmp = fcmp fast ole <16 x half> %op1, %op2

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
index 10e51e11253b7..e898f38c7ade6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -27,8 +27,10 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @fcvt_v2f16_v2f32(<2 x half> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f32:
-; CHECK: fcvtl v0.4s, v0.4h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %res = fpext <2 x half> %op1 to <2 x float>
   ret <2 x float> %res
 }
@@ -36,20 +38,22 @@ define <2 x float> @fcvt_v2f16_v2f32(<2 x half> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @fcvt_v4f16_v4f32(<4 x half> %op1) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f32:
-; CHECK: fcvtl v0.4s, v0.4h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    ret
   %res = fpext <4 x half> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f32:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[UPK]].h
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x half>, <8 x half>* %a
   %res = fpext <8 x half> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -57,28 +61,42 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
 }
 
 define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f16_v16f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: ld1h { [[VEC:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: st1h { [[VEC:z[0-9]+]].h }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: fcvt [[RES_LO:z[0-9]+]].s, [[PG2]]/m, [[UPK_LO]].h
-; VBITS_EQ_256-DAG: fcvt [[RES_HI:z[0-9]+]].s, [[PG2]]/m, [[UPK_HI]].h
-; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG2]], [x1, x[[NUMELTS]], lsl #2]
+; VBITS_EQ_256-LABEL: fcvt_v16f16_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z1.h
+; VBITS_EQ_256-NEXT:    fcvt z0.s, p0/m, z0.h
+; VBITS_EQ_256-NEXT:    fcvt z1.s, p0/m, z1.h
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v16f16_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    fcvt z0.s, p0/m, z0.h
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %res = fpext <16 x half> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -86,14 +104,15 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
 }
 
 define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f16_v32f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v32f16_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    fcvt z0.s, p0/m, z0.h
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x half>, <32 x half>* %a
   %res = fpext <32 x half> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -101,14 +120,15 @@ define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 {
 }
 
 define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v64f16_v64f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v64f16_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    fcvt z0.s, p0/m, z0.h
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x half>, <64 x half>* %a
   %res = fpext <64 x half> %op1 to <64 x float>
   store <64 x float> %res, <64 x float>* %b
@@ -122,8 +142,9 @@ define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @fcvt_v1f16_v1f64(<1 x half> %op1) #0 {
 ; CHECK-LABEL: fcvt_v1f16_v1f64:
-; CHECK: fcvt d0, h0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    ret
   %res = fpext <1 x half> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -131,24 +152,28 @@ define <1 x double> @fcvt_v1f16_v1f64(<1 x half> %op1) #0 {
 ; v2f16 is not legal for NEON, so use SVE
 define <2 x double> @fcvt_v2f16_v2f64(<2 x half> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z0.h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: fcvt z0.d, [[PG]]/m, [[UPK2]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %res = fpext <2 x half> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f64:
-; CHECK: ldr d[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].h
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x half>, <4 x half>* %a
   %res = fpext <4 x half> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -156,28 +181,32 @@ define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
 }
 
 define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v8f16_v8f64:
-; VBITS_GE_512: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].h
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ext v[[HI:[0-9]+]].16b, v[[OP]].16b, v[[OP]].16b, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[OP]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK2_HI]].s
-; VBITS_EQ_256-DAG: fcvt [[RES_LO:z[0-9]+]].d, [[PG]]/m, [[UPK2_LO]].h
-; VBITS_EQ_256-DAG: fcvt [[RES_HI:z[0-9]+]].d, [[PG]]/m, [[UPK2_HI]].h
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG]], [x1, x[[NUMELTS]], lsl #3]
+; VBITS_EQ_256-LABEL: fcvt_v8f16_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr q0, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    fcvt z1.d, p0/m, z1.h
+; VBITS_EQ_256-NEXT:    fcvt z0.d, p0/m, z0.h
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v8f16_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    fcvt z0.d, p0/m, z0.h
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x half>, <8 x half>* %a
   %res = fpext <8 x half> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -185,15 +214,16 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
 }
 
 define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f16_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].h
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v16f16_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    fcvt z0.d, p0/m, z0.h
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %res = fpext <16 x half> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -201,15 +231,16 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
 }
 
 define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f16_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].h
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v32f16_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    fcvt z0.d, p0/m, z0.h
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x half>, <32 x half>* %a
   %res = fpext <32 x half> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -223,8 +254,10 @@ define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @fcvt_v1f32_v1f64(<1 x float> %op1) #0 {
 ; CHECK-LABEL: fcvt_v1f32_v1f64:
-; CHECK: fcvtl v0.2d, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %res = fpext <1 x float> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -232,20 +265,22 @@ define <1 x double> @fcvt_v1f32_v1f64(<1 x float> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @fcvt_v2f32_v2f64(<2 x float> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f32_v2f64:
-; CHECK: fcvtl v0.2d, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtl v0.2d, v0.2s
+; CHECK-NEXT:    ret
   %res = fpext <2 x float> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f64:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[OP]].s
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK]].s
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x float>, <4 x float>* %a
   %res = fpext <4 x float> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -253,28 +288,42 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
 }
 
 define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v8f32_v8f64:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG1]]/m, [[UPK]].s
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: ld1w { [[VEC:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: st1w { [[VEC:z[0-9]+]].s }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, z[[LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, z[[HI]].s
-; VBITS_EQ_256-DAG: fcvt [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK_LO]].s
-; VBITS_EQ_256-DAG: fcvt [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK_HI]].s
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG2]], [x1, x[[NUMELTS]], lsl #3]
+; VBITS_EQ_256-LABEL: fcvt_v8f32_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    fcvt z0.d, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    fcvt z1.d, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v8f32_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    fcvt z0.d, p0/m, z0.s
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %res = fpext <8 x float> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -282,14 +331,15 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
 }
 
 define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f32_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v16f32_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    fcvt z0.d, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %res = fpext <16 x float> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -297,14 +347,15 @@ define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 {
 }
 
 define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f32_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v32f32_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    fcvt z0.d, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x float>, <32 x float>* %a
   %res = fpext <32 x float> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -318,8 +369,10 @@ define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x half> @fcvt_v2f32_v2f16(<2 x float> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f32_v2f16:
-; CHECK: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = fptrunc <2 x float> %op1 to <2 x half>
   ret <2 x half> %res
 }
@@ -327,50 +380,57 @@ define <2 x half> @fcvt_v2f32_v2f16(<2 x float> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x half> @fcvt_v4f32_v4f16(<4 x float> %op1) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f16:
-; CHECK: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = fptrunc <4 x float> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @fcvt_v8f32_v8f16(<8 x float>* %a) #0 {
 ; CHECK-LABEL: fcvt_v8f32_v8f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].s
-; CHECK-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; CHECK-NEXT: uzp1 z0.h, [[CVT]].h, [[CVT]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %res = fptrunc <8 x float> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @fcvt_v16f32_v16f16(<16 x float>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f32_v16f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_512-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG1]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].h, vl8
-; VBITS_EQ_256-DAG: fcvt [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].s
-; VBITS_EQ_256-DAG: fcvt [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].h, [[CVT_LO]].h, [[CVT_LO]].h
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].h, [[CVT_HI]].h, [[CVT_HI]].h
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].h, [[PG3]], [[RES_LO]].h, [[RES_HI]].h
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: st1h { [[RES]].h }, [[PG4]], [x1]
+; VBITS_EQ_256-LABEL: fcvt_v16f32_v16f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.s
+; VBITS_EQ_256-NEXT:    ptrue p1.h, vl8
+; VBITS_EQ_256-NEXT:    fcvt z0.h, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    fcvt z1.h, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    splice z1.h, p1, z1.h, z0.h
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v16f32_v16f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s
+; VBITS_GE_512-NEXT:    fcvt z0.h, p0/m, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %res = fptrunc <16 x float> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -378,15 +438,16 @@ define void @fcvt_v16f32_v16f16(<16 x float>* %a, <16 x half>* %b) #0 {
 }
 
 define void @fcvt_v32f32_v32f16(<32 x float>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f32_v32f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_1024-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v32f32_v32f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s
+; VBITS_GE_1024-NEXT:    fcvt z0.h, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x float>, <32 x float>* %a
   %res = fptrunc <32 x float> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -394,15 +455,16 @@ define void @fcvt_v32f32_v32f16(<32 x float>* %a, <32 x half>* %b) #0 {
 }
 
 define void @fcvt_v64f32_v64f16(<64 x float>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: fcvt_v64f32_v64f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].h, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v64f32_v64f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s
+; VBITS_GE_2048-NEXT:    fcvt z0.h, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x float>, <64 x float>* %a
   %res = fptrunc <64 x float> %op1 to <64 x half>
   store <64 x half> %res, <64 x half>* %b
@@ -416,8 +478,9 @@ define void @fcvt_v64f32_v64f16(<64 x float>* %a, <64 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x half> @fcvt_v1f64_v1f16(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvt_v1f64_v1f16:
-; CHECK: fcvt h0, d0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt h0, d0
+; CHECK-NEXT:    ret
   %res = fptrunc <1 x double> %op1 to <1 x half>
   ret <1 x half> %res
 }
@@ -425,68 +488,80 @@ define <1 x half> @fcvt_v1f64_v1f16(<1 x double> %op1) #0 {
 ; v2f16 is not legal for NEON, so use SVE
 define <2 x half> @fcvt_v2f64_v2f16(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f64_v2f16:
-; CHECK: ptrue [[PG:p[0-9]+]].d
-; CHECK-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG]]/m, z0.d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %res = fptrunc <2 x double> %op1 to <2 x half>
   ret <2 x half> %res
 }
 
 define <4 x half> @fcvt_v4f64_v4f16(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvt_v4f64_v4f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptrunc <4 x double> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @fcvt_v8f64_v8f16(<8 x double>* %a) #0 {
-; CHECK-LABEL: fcvt_v8f64_v8f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG1]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: fcvt [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: fcvt [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[UZP_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[UZP_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: uzp1 z0.h, [[UZP_LO]].h, [[UZP_LO]].h
-; VBITS_EQ_256-DAG: uzp1 z[[RES_HI:[0-9]+]].h, [[UZP_HI]].h, [[UZP_HI]].h
-; VBITS_EQ_256-DAG: mov v0.d[1], v[[RES_HI]].d[0]
+; VBITS_EQ_256-LABEL: fcvt_v8f64_v8f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    fcvt z0.h, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    fcvt z1.h, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z2.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    mov v0.d[1], v2.d[0]
+; VBITS_EQ_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v8f64_v8f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    fcvt z0.h, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptrunc <8 x double> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @fcvt_v16f64_v16f16(<16 x double>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f64_v16f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v16f64_v16f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    fcvt z0.h, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %res = fptrunc <16 x double> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -494,16 +569,17 @@ define void @fcvt_v16f64_v16f16(<16 x double>* %a, <16 x half>* %b) #0 {
 }
 
 define void @fcvt_v32f64_v32f16(<32 x double>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f64_v32f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: fcvt [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v32f64_v32f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    fcvt z0.h, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x double>, <32 x double>* %a
   %res = fptrunc <32 x double> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -517,8 +593,10 @@ define void @fcvt_v32f64_v32f16(<32 x double>* %a, <32 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x float> @fcvt_v1f64_v1f32(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvt_v1f64_v1f32:
-; CHECK: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = fptrunc <1 x double> %op1 to <1 x float>
   ret <1 x float> %res
 }
@@ -526,51 +604,57 @@ define <1 x float> @fcvt_v1f64_v1f32(<1 x double> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x float> @fcvt_v2f64_v2f32(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvt_v2f64_v2f32:
-; CHECK: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = fptrunc <2 x double> %op1 to <2 x float>
   ret <2 x float> %res
 }
 
 define <4 x float> @fcvt_v4f64_v4f32(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvt_v4f64_v4f32:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: fcvt [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 z0.s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptrunc <4 x double> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @fcvt_v8f64_v8f32(<8 x double>* %a, <8 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v8f64_v8f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: fcvt [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG1]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].s, vl4
-; VBITS_EQ_256-DAG: fcvt [[CVT_LO:z[0-9]+]].s, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: fcvt [[CVT_HI:z[0-9]+]].s, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].s, [[PG3]], [[RES_LO]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].s, vl8
-; VBITS_EQ_256-NEXT: st1w { [[RES]].s }, [[PG4]], [x1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: fcvt_v8f64_v8f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    ptrue p1.s, vl4
+; VBITS_EQ_256-NEXT:    fcvt z0.s, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    fcvt z1.s, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    splice z1.s, p1, z1.s, z0.s
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: fcvt_v8f64_v8f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    fcvt z0.s, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptrunc <8 x double> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -578,15 +662,16 @@ define void @fcvt_v8f64_v8f32(<8 x double>* %a, <8 x float>* %b) #0 {
 }
 
 define void @fcvt_v16f64_v16f32(<16 x double>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v16f64_v16f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: fcvt [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: fcvt_v16f64_v16f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    fcvt z0.s, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %res = fptrunc <16 x double> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -594,15 +679,16 @@ define void @fcvt_v16f64_v16f32(<16 x double>* %a, <16 x float>* %b) #0 {
 }
 
 define void @fcvt_v32f64_v32f32(<32 x double>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: fcvt_v32f64_v32f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: fcvt [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: fcvt_v32f64_v32f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    fcvt z0.s, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x double>, <32 x double>* %a
   %res = fptrunc <32 x double> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
index 232387df25a43..5db8082d2c2f9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -22,41 +22,44 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, i1 %mask) #0 {
-; CHECK: select_v4f16:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm w8, ne
-; CHECK-NEXT: dup v2.4h, w8
-; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm w8, ne
+; CHECK-NEXT:    dup v2.4h, w8
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <4 x half> %op1, <4 x half> %op2
   ret <4 x half> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, i1 %mask) #0 {
-; CHECK: select_v8f16:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm w8, ne
-; CHECK-NEXT: dup v2.8h, w8
-; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm w8, ne
+; CHECK-NEXT:    dup v2.8h, w8
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <8 x half> %op1, <8 x half> %op2
   ret <8 x half> %sel
 }
 
 define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, i1 %mask) #0 {
-; CHECK: select_v16f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
-; CHECK-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG1]]/z, [x1]
-; CHECK-NEXT: mov [[TMP1:z[0-9]+]].h, w[[AND]]
-; CHECK-NEXT: and [[TMP2:z[0-9]+]].h, [[TMP1]].h, #0x1
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].h
-; CHECK-NEXT: cmpne [[PRES:p[0-9]+]].h, [[PG2]]/z, [[TMP2]].h, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].h, [[PRES]], [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: st1h { [[RES]].h }, [[PG1]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
+; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    cmpne p1.h, p1/z, z2.h, #0
+; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load volatile <16 x half>, <16 x half>* %a
   %op2 = load volatile <16 x half>, <16 x half>* %b
   %sel = select i1 %mask, <16 x half> %op1, <16 x half> %op2
@@ -65,18 +68,19 @@ define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, i1 %mask) #0 {
-; CHECK: select_v32f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
-; VBITS_GE_512-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: mov [[TMP1:z[0-9]+]].h, w[[AND]]
-; VBITS_GE_512-NEXT: and [[TMP2:z[0-9]+]].h, [[TMP1]].h, #0x1
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h
-; VBITS_GE_512-NEXT: cmpne [[PRES:p[0-9]+]].h, [[PG2]]/z, [[TMP2]].h, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].h, [[PRES]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG1]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: select_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z2.h, w8
+; VBITS_GE_512-NEXT:    and z2.h, z2.h, #0x1
+; VBITS_GE_512-NEXT:    ptrue p1.h
+; VBITS_GE_512-NEXT:    cmpne p1.h, p1/z, z2.h, #0
+; VBITS_GE_512-NEXT:    sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load volatile <32 x half>, <32 x half>* %a
   %op2 = load volatile <32 x half>, <32 x half>* %b
   %sel = select i1 %mask, <32 x half> %op1, <32 x half> %op2
@@ -85,18 +89,19 @@ define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, i1 %mask) #0 {
-; CHECK: select_v64f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
-; VBITS_GE_1024-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov [[TMP1:z[0-9]+]].h, w[[AND]]
-; VBITS_GE_1024-NEXT: and [[TMP2:z[0-9]+]].h, [[TMP1]].h, #0x1
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].h
-; VBITS_GE_1024-NEXT: cmpne [[PRES:p[0-9]+]].h, [[PG2]]/z, [[TMP2]].h, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].h, [[PRES]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG1]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: select_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov z2.h, w8
+; VBITS_GE_1024-NEXT:    and z2.h, z2.h, #0x1
+; VBITS_GE_1024-NEXT:    ptrue p1.h
+; VBITS_GE_1024-NEXT:    cmpne p1.h, p1/z, z2.h, #0
+; VBITS_GE_1024-NEXT:    sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load volatile <64 x half>, <64 x half>* %a
   %op2 = load volatile <64 x half>, <64 x half>* %b
   %sel = select i1 %mask, <64 x half> %op1, <64 x half> %op2
@@ -105,18 +110,19 @@ define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, i1 %mask) #0 {
 }
 
 define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, i1 %mask) #0 {
-; CHECK: select_v128f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
-; VBITS_GE_2048-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[TMP1:z[0-9]+]].h, w[[AND]]
-; VBITS_GE_2048-NEXT: and [[TMP2:z[0-9]+]].h, [[TMP1]].h, #0x1
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].h
-; VBITS_GE_2048-NEXT: cmpne [[PRES:p[0-9]+]].h, [[PG2]]/z, [[TMP2]].h, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].h, [[PRES]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG1]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: select_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    and w8, w2, #0x1
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.h, w8
+; VBITS_GE_2048-NEXT:    and z2.h, z2.h, #0x1
+; VBITS_GE_2048-NEXT:    ptrue p1.h
+; VBITS_GE_2048-NEXT:    cmpne p1.h, p1/z, z2.h, #0
+; VBITS_GE_2048-NEXT:    sel z0.h, p1, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load volatile <128 x half>, <128 x half>* %a
   %op2 = load volatile <128 x half>, <128 x half>* %b
   %sel = select i1 %mask, <128 x half> %op1, <128 x half> %op2
@@ -126,41 +132,44 @@ define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, i1 %mask) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, i1 %mask) #0 {
-; CHECK: select_v2f32:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm w8, ne
-; CHECK-NEXT: dup v2.2s, w8
-; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm w8, ne
+; CHECK-NEXT:    dup v2.2s, w8
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <2 x float> %op1, <2 x float> %op2
   ret <2 x float> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, i1 %mask) #0 {
-; CHECK: select_v4f32:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm w8, ne
-; CHECK-NEXT: dup v2.4s, w8
-; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm w8, ne
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <4 x float> %op1, <4 x float> %op2
   ret <4 x float> %sel
 }
 
 define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, i1 %mask) #0 {
-; CHECK: select_v8f32:
-; CHECK: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; CHECK-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG1]]/z, [x1]
-; CHECK-NEXT: mov [[TMP1:z[0-9]+]].s, w[[AND]]
-; CHECK-NEXT: and [[TMP2:z[0-9]+]].s, [[TMP1]].s, #0x1
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].s
-; CHECK-NEXT: cmpne [[PRES:p[0-9]+]].s, [[PG2]]/z, [[TMP2]].s, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].s, [[PRES]], [[OP1]].s, [[OP2]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG1]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    mov z2.s, w8
+; CHECK-NEXT:    and z2.s, z2.s, #0x1
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    cmpne p1.s, p1/z, z2.s, #0
+; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load volatile <8 x float>, <8 x float>* %a
   %op2 = load volatile <8 x float>, <8 x float>* %b
   %sel = select i1 %mask, <8 x float> %op1, <8 x float> %op2
@@ -169,18 +178,19 @@ define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, i1 %mask) #0 {
-; CHECK: select_v16f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
-; VBITS_GE_512-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: mov [[TMP1:z[0-9]+]].s, w[[AND]]
-; VBITS_GE_512-NEXT: and [[TMP2:z[0-9]+]].s, [[TMP1]].s, #0x1
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_512-NEXT: cmpne [[PRES:p[0-9]+]].s, [[PG2]]/z, [[TMP2]].s, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].s, [[PRES]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: select_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z2.s, w8
+; VBITS_GE_512-NEXT:    and z2.s, z2.s, #0x1
+; VBITS_GE_512-NEXT:    ptrue p1.s
+; VBITS_GE_512-NEXT:    cmpne p1.s, p1/z, z2.s, #0
+; VBITS_GE_512-NEXT:    sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load volatile <16 x float>, <16 x float>* %a
   %op2 = load volatile <16 x float>, <16 x float>* %b
   %sel = select i1 %mask, <16 x float> %op1, <16 x float> %op2
@@ -189,18 +199,19 @@ define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, i1 %mask) #0 {
-; CHECK: select_v32f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
-; VBITS_GE_1024-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov [[TMP1:z[0-9]+]].s, w[[AND]]
-; VBITS_GE_1024-NEXT: and [[TMP2:z[0-9]+]].s, [[TMP1]].s, #0x1
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_1024-NEXT: cmpne [[PRES:p[0-9]+]].s, [[PG2]]/z, [[TMP2]].s, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].s, [[PRES]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG1]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: select_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov z2.s, w8
+; VBITS_GE_1024-NEXT:    and z2.s, z2.s, #0x1
+; VBITS_GE_1024-NEXT:    ptrue p1.s
+; VBITS_GE_1024-NEXT:    cmpne p1.s, p1/z, z2.s, #0
+; VBITS_GE_1024-NEXT:    sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load volatile <32 x float>, <32 x float>* %a
   %op2 = load volatile <32 x float>, <32 x float>* %b
   %sel = select i1 %mask, <32 x float> %op1, <32 x float> %op2
@@ -209,18 +220,19 @@ define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, i1 %mask) #0 {
 }
 
 define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, i1 %mask) #0 {
-; CHECK: select_v64f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
-; VBITS_GE_2048-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[TMP1:z[0-9]+]].s, w[[AND]]
-; VBITS_GE_2048-NEXT: and [[TMP2:z[0-9]+]].s, [[TMP1]].s, #0x1
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_2048-NEXT: cmpne [[PRES:p[0-9]+]].s, [[PG2]]/z, [[TMP2]].s, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].s, [[PRES]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG1]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: select_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    and w8, w2, #0x1
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.s, w8
+; VBITS_GE_2048-NEXT:    and z2.s, z2.s, #0x1
+; VBITS_GE_2048-NEXT:    ptrue p1.s
+; VBITS_GE_2048-NEXT:    cmpne p1.s, p1/z, z2.s, #0
+; VBITS_GE_2048-NEXT:    sel z0.s, p1, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load volatile <64 x float>, <64 x float>* %a
   %op2 = load volatile <64 x float>, <64 x float>* %b
   %sel = select i1 %mask, <64 x float> %op1, <64 x float> %op2
@@ -230,41 +242,44 @@ define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, i1 %mask) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, i1 %mask) #0 {
-; CHECK: select_v1f64:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm x8, ne
-; CHECK-NEXT: fmov d2, x8
-; CHECK-NEXT: bif v0.8b, v1.8b, v2.8b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm x8, ne
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <1 x double> %op1, <1 x double> %op2
   ret <1 x double> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, i1 %mask) #0 {
-; CHECK: select_v2f64:
-; CHECK: tst w0, #0x1
-; CHECK-NEXT: csetm x8, ne
-; CHECK-NEXT: dup v2.2d, x8
-; CHECK-NEXT: bif v0.16b, v1.16b, v2.16b
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm x8, ne
+; CHECK-NEXT:    dup v2.2d, x8
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select i1 %mask, <2 x double> %op1, <2 x double> %op2
   ret <2 x double> %sel
 }
 
 define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, i1 %mask) #0 {
-; CHECK: select_v4f64:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
-; CHECK-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; CHECK-NEXT: mov [[TMP1:z[0-9]+]].d, x[[AND]]
-; CHECK-NEXT: and [[TMP2:z[0-9]+]].d, [[TMP1]].d, #0x1
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: cmpne [[PRES:p[0-9]+]].d, [[PG2]]/z, [[TMP2]].d, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].d, [[PRES]], [[OP1]].d, [[OP2]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG1]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: select_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    mov z2.d, x8
+; CHECK-NEXT:    and z2.d, z2.d, #0x1
+; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    cmpne p1.d, p1/z, z2.d, #0
+; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load volatile <4 x double>, <4 x double>* %a
   %op2 = load volatile <4 x double>, <4 x double>* %b
   %sel = select i1 %mask, <4 x double> %op1, <4 x double> %op2
@@ -273,18 +288,19 @@ define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, i1 %mask) #0 {
-; CHECK: select_v8f64:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
-; VBITS_GE_512-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: mov [[TMP1:z[0-9]+]].d, x[[AND]]
-; VBITS_GE_512-NEXT: and [[TMP2:z[0-9]+]].d, [[TMP1]].d, #0x1
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: cmpne [[PRES:p[0-9]+]].d, [[PG2]]/z, [[TMP2]].d, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].d, [[PRES]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: select_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z2.d, x8
+; VBITS_GE_512-NEXT:    and z2.d, z2.d, #0x1
+; VBITS_GE_512-NEXT:    ptrue p1.d
+; VBITS_GE_512-NEXT:    cmpne p1.d, p1/z, z2.d, #0
+; VBITS_GE_512-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load volatile <8 x double>, <8 x double>* %a
   %op2 = load volatile <8 x double>, <8 x double>* %b
   %sel = select i1 %mask, <8 x double> %op1, <8 x double> %op2
@@ -293,18 +309,19 @@ define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, i1 %mask) #0 {
-; CHECK: select_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
-; VBITS_GE_1024-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov [[TMP1:z[0-9]+]].d, x[[AND]]
-; VBITS_GE_1024-NEXT: and [[TMP2:z[0-9]+]].d, [[TMP1]].d, #0x1
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: cmpne [[PRES:p[0-9]+]].d, [[PG2]]/z, [[TMP2]].d, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].d, [[PRES]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: select_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    and w8, w2, #0x1
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov z2.d, x8
+; VBITS_GE_1024-NEXT:    and z2.d, z2.d, #0x1
+; VBITS_GE_1024-NEXT:    ptrue p1.d
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load volatile <16 x double>, <16 x double>* %a
   %op2 = load volatile <16 x double>, <16 x double>* %b
   %sel = select i1 %mask, <16 x double> %op1, <16 x double> %op2
@@ -313,18 +330,19 @@ define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, i1 %mask) #0 {
 }
 
 define void @select_v32f64(<32 x double>* %a, <32 x double>* %b, i1 %mask) #0 {
-; CHECK: select_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
-; VBITS_GE_2048-NEXT: and w[[AND:[0-9]+]], w2, #0x1
-; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[TMP1:z[0-9]+]].d, x[[AND]]
-; VBITS_GE_2048-NEXT: and [[TMP2:z[0-9]+]].d, [[TMP1]].d, #0x1
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: cmpne [[PRES:p[0-9]+]].d, [[PG2]]/z, [[TMP2]].d, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].d, [[PRES]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: select_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    and w8, w2, #0x1
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.d, x8
+; VBITS_GE_2048-NEXT:    and z2.d, z2.d, #0x1
+; VBITS_GE_2048-NEXT:    ptrue p1.d
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load volatile <32 x double>, <32 x double>* %a
   %op2 = load volatile <32 x double>, <32 x double>* %b
   %sel = select i1 %mask, <32 x double> %op1, <32 x double> %op2

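(For reference, every select-with-scalar-condition test in the file above
reduces to the same IR shape; a minimal sketch, using an illustrative
function name and element type rather than ones copied from the tests, and
omitting the #0 attribute group that supplies the SVE target features:

  define void @select_example(<8 x double>* %a, <8 x double>* %b, i1 %mask) {
    %op1 = load volatile <8 x double>, <8 x double>* %a
    %op2 = load volatile <8 x double>, <8 x double>* %b
    %sel = select i1 %mask, <8 x double> %op1, <8 x double> %op2
    store <8 x double> %sel, <8 x double>* %a
    ret void
  }

As the updated check lines show, the scalar condition is broadcast into a
vector register, masked with #0x1, converted to a predicate with cmpne, and
consumed by sel before the result is stored back to the first operand's
address.)
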
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
index d984995ac5565..6b3f7e4f35110 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-vselect.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -23,8 +23,11 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v4f16:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.4h, v2.4h, #15
+; CHECK-NEXT:    sshr v2.4h, v2.4h, #15
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <4 x i1> %mask, <4 x half> %op1, <4 x half> %op2
   ret <4 x half> %sel
 }
@@ -32,24 +35,72 @@ define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, <4 x i1> %mask
 ; Don't use SVE for 128-bit vectors.
 define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v8f16:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
+; CHECK-NEXT:    shl v2.8h, v2.8h, #15
+; CHECK-NEXT:    sshr v2.8h, v2.8h, #15
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <8 x i1> %mask, <8 x half> %op1, <8 x half> %op2
   ret <8 x half> %sel
 }
 
 define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; CHECK: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrh w8, [x2]
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    sbfx w9, w8, #15, #1
+; CHECK-NEXT:    strh w9, [sp, #30]
+; CHECK-NEXT:    sbfx w9, w8, #14, #1
+; CHECK-NEXT:    strh w9, [sp, #28]
+; CHECK-NEXT:    sbfx w9, w8, #13, #1
+; CHECK-NEXT:    strh w9, [sp, #26]
+; CHECK-NEXT:    sbfx w9, w8, #12, #1
+; CHECK-NEXT:    strh w9, [sp, #24]
+; CHECK-NEXT:    sbfx w9, w8, #11, #1
+; CHECK-NEXT:    strh w9, [sp, #22]
+; CHECK-NEXT:    sbfx w9, w8, #10, #1
+; CHECK-NEXT:    strh w9, [sp, #20]
+; CHECK-NEXT:    sbfx w9, w8, #9, #1
+; CHECK-NEXT:    strh w9, [sp, #18]
+; CHECK-NEXT:    sbfx w9, w8, #8, #1
+; CHECK-NEXT:    strh w9, [sp, #16]
+; CHECK-NEXT:    sbfx w9, w8, #7, #1
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    sbfx w9, w8, #6, #1
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    sbfx w9, w8, #5, #1
+; CHECK-NEXT:    strh w9, [sp, #10]
+; CHECK-NEXT:    sbfx w9, w8, #4, #1
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    sbfx w9, w8, #3, #1
+; CHECK-NEXT:    strh w9, [sp, #6]
+; CHECK-NEXT:    sbfx w9, w8, #2, #1
+; CHECK-NEXT:    strh w9, [sp, #4]
+; CHECK-NEXT:    sbfx w9, w8, #1, #1
+; CHECK-NEXT:    sbfx w8, w8, #0, #1
+; CHECK-NEXT:    strh w9, [sp, #2]
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    strh w8, [sp]
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; CHECK-NEXT:    sel z0.h, p1, z1.h, z2.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
@@ -59,17 +110,93 @@ define void @select_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x i1>* %c) #0 {
 }
 
 define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %c) #0 {
-; CHECK-LABEL: select_v32f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_512: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldr w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ptrue p1.h
+; VBITS_GE_512-NEXT:    asr w9, w8, #31
+; VBITS_GE_512-NEXT:    strh w9, [sp, #62]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #60]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #58]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #56]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #54]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #52]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #50]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #46]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #44]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #42]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #40]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #38]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #36]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #34]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #32]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #30]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #28]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #26]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #24]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #22]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #20]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #18]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #16]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #14]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #12]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #10]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #8]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #6]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #4]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_512-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #2]
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    strh w8, [sp]
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_512-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x half>, <32 x half>* %a
   %op2 = load <32 x half>, <32 x half>* %b
@@ -79,17 +206,157 @@ define void @select_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %c) #0 {
 }
 
 define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, <64 x i1>* %c) #0 {
-; CHECK-LABEL: select_v64f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_1024: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_1024-NEXT:    ldr x8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ptrue p1.h
+; VBITS_GE_1024-NEXT:    asr x9, x8, #63
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #126]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #124]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #122]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #120]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #118]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #116]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #114]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #110]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #108]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #106]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #104]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #102]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #100]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #98]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #94]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #92]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #90]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #88]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #86]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #84]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #82]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #80]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #78]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #76]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #74]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #72]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #70]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #68]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #66]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #64]
+; VBITS_GE_1024-NEXT:    asr w9, w8, #31
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #62]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #60]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #58]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #56]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #54]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #52]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #50]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #46]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #44]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #42]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #40]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #38]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #36]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #34]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #32]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #30]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #28]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #26]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #24]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #22]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #20]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #18]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #16]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #14]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #12]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #10]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #8]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #6]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #4]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #2]
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    strh w8, [sp]
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_1024-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x half>, <64 x half>* %a
   %op2 = load <64 x half>, <64 x half>* %b
@@ -99,17 +366,286 @@ define void @select_v64f16(<64 x half>* %a, <64 x half>* %b, <64 x i1>* %c) #0 {
 }
 
 define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, <128 x i1>* %c) #0 {
-; CHECK-LABEL: select_v128f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_2048: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_2048-NEXT:    ldr x8, [x2, #8]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ptrue p1.h
+; VBITS_GE_2048-NEXT:    asr x9, x8, #63
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #254]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #252]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #250]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #248]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #246]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #244]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #242]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #240]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #238]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #236]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #234]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #232]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #230]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #228]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #226]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #224]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #222]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #220]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #218]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #216]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #214]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #212]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #210]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #208]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #206]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #204]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #202]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #200]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #198]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #196]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #194]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #192]
+; VBITS_GE_2048-NEXT:    asr w9, w8, #31
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #190]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #188]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #186]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #184]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #182]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #180]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #178]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #176]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #174]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #172]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #170]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #168]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #166]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #164]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #162]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #160]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #158]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #156]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #154]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #152]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #150]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #148]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #146]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #144]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #142]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #140]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #138]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #136]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #134]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #132]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #130]
+; VBITS_GE_2048-NEXT:    strh w8, [sp, #128]
+; VBITS_GE_2048-NEXT:    ldr x8, [x2]
+; VBITS_GE_2048-NEXT:    mov x9, sp
+; VBITS_GE_2048-NEXT:    asr x10, x8, #63
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #126]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #124]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #122]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #120]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #118]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #116]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #114]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #112]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #110]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #108]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #106]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #104]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #102]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #100]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #98]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #96]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #94]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #92]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #90]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #88]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #86]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #84]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #82]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #80]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #78]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #76]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #74]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #72]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #70]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #68]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #66]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #64]
+; VBITS_GE_2048-NEXT:    asr w10, w8, #31
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #62]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #60]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #58]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #56]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #54]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #52]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #50]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #48]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #46]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #44]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #42]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #40]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #38]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #36]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #34]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #32]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #30]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #28]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #26]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #24]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #22]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #20]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #18]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #16]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #14]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #12]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #10]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #8]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #6]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #4]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #2]
+; VBITS_GE_2048-NEXT:    strh w8, [sp]
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <128 x i1>, <128 x i1>* %c
   %op1 = load <128 x half>, <128 x half>* %a
   %op2 = load <128 x half>, <128 x half>* %b
@@ -121,8 +657,11 @@ define void @select_v128f16(<128 x half>* %a, <128 x half>* %b, <128 x i1>* %c)
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v2f32:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.2s, v2.2s, #31
+; CHECK-NEXT:    sshr v2.2s, v2.2s, #31
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <2 x i1> %mask, <2 x float> %op1, <2 x float> %op2
   ret <2 x float> %sel
 }
@@ -130,24 +669,52 @@ define <2 x float> @select_v2f32(<2 x float> %op1, <2 x float> %op2, <2 x i1> %m
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v4f32:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.4s, v2.4h, #0
+; CHECK-NEXT:    shl v2.4s, v2.4s, #31
+; CHECK-NEXT:    sshr v2.4s, v2.4s, #31
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <4 x i1> %mask, <4 x float> %op1, <4 x float> %op2
   ret <4 x float> %sel
 }
 
 define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v8f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; CHECK: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrb w8, [x2]
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    sbfx w10, w8, #7, #1
+; CHECK-NEXT:    sbfx w11, w8, #6, #1
+; CHECK-NEXT:    stp w11, w10, [sp, #24]
+; CHECK-NEXT:    sbfx w10, w8, #3, #1
+; CHECK-NEXT:    sbfx w11, w8, #2, #1
+; CHECK-NEXT:    sbfx w12, w8, #5, #1
+; CHECK-NEXT:    sbfx w13, w8, #4, #1
+; CHECK-NEXT:    stp w11, w10, [sp, #8]
+; CHECK-NEXT:    sbfx w10, w8, #1, #1
+; CHECK-NEXT:    sbfx w8, w8, #0, #1
+; CHECK-NEXT:    stp w13, w12, [sp, #16]
+; CHECK-NEXT:    stp w8, w10, [sp]
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; CHECK-NEXT:    sel z0.s, p1, z1.s, z2.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x float>, <8 x float>* %a
   %op2 = load <8 x float>, <8 x float>* %b
@@ -157,17 +724,53 @@ define void @select_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %c) #0 {
 }
 
 define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, <16 x i1>* %c) #0 {
-; CHECK-LABEL: select_v16f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_512: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldrh w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    ptrue p1.s
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #14, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #56]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #6, #1
+; VBITS_GE_512-NEXT:    sbfx w12, w8, #13, #1
+; VBITS_GE_512-NEXT:    sbfx w13, w8, #12, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #24]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #2, #1
+; VBITS_GE_512-NEXT:    sbfx w14, w8, #11, #1
+; VBITS_GE_512-NEXT:    sbfx w15, w8, #10, #1
+; VBITS_GE_512-NEXT:    sbfx w16, w8, #9, #1
+; VBITS_GE_512-NEXT:    sbfx w17, w8, #8, #1
+; VBITS_GE_512-NEXT:    stp w13, w12, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx w12, w8, #5, #1
+; VBITS_GE_512-NEXT:    sbfx w13, w8, #4, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #8]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_512-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_512-NEXT:    stp w15, w14, [sp, #40]
+; VBITS_GE_512-NEXT:    stp w17, w16, [sp, #32]
+; VBITS_GE_512-NEXT:    stp w13, w12, [sp, #16]
+; VBITS_GE_512-NEXT:    stp w8, w10, [sp]
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_512-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x float>, <16 x float>* %a
   %op2 = load <16 x float>, <16 x float>* %b
@@ -177,17 +780,80 @@ define void @select_v16f32(<16 x float>* %a, <16 x float>* %b, <16 x i1>* %c) #0
 }
 
 define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, <32 x i1>* %c) #0 {
-; CHECK-LABEL: select_v32f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_1024: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #224
+; VBITS_GE_1024-NEXT:    str x19, [sp, #16] // 8-byte Folded Spill
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 32
+; VBITS_GE_1024-NEXT:    .cfi_offset w19, -16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -24
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -32
+; VBITS_GE_1024-NEXT:    ldr w8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    ptrue p1.s
+; VBITS_GE_1024-NEXT:    asr w10, w8, #31
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #30, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #120]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #14, #1
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #29, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #28, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #56]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #6, #1
+; VBITS_GE_1024-NEXT:    sbfx w14, w8, #27, #1
+; VBITS_GE_1024-NEXT:    sbfx w15, w8, #26, #1
+; VBITS_GE_1024-NEXT:    sbfx w16, w8, #25, #1
+; VBITS_GE_1024-NEXT:    sbfx w17, w8, #24, #1
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #13, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #12, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #24]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #2, #1
+; VBITS_GE_1024-NEXT:    sbfx w18, w8, #23, #1
+; VBITS_GE_1024-NEXT:    sbfx w2, w8, #22, #1
+; VBITS_GE_1024-NEXT:    sbfx w3, w8, #21, #1
+; VBITS_GE_1024-NEXT:    sbfx w4, w8, #20, #1
+; VBITS_GE_1024-NEXT:    sbfx w5, w8, #19, #1
+; VBITS_GE_1024-NEXT:    sbfx w6, w8, #18, #1
+; VBITS_GE_1024-NEXT:    sbfx w7, w8, #17, #1
+; VBITS_GE_1024-NEXT:    sbfx w19, w8, #16, #1
+; VBITS_GE_1024-NEXT:    stp w15, w14, [sp, #104]
+; VBITS_GE_1024-NEXT:    stp w17, w16, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx w14, w8, #11, #1
+; VBITS_GE_1024-NEXT:    sbfx w15, w8, #10, #1
+; VBITS_GE_1024-NEXT:    sbfx w16, w8, #9, #1
+; VBITS_GE_1024-NEXT:    sbfx w17, w8, #8, #1
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #5, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #4, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #8]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    stp w2, w18, [sp, #88]
+; VBITS_GE_1024-NEXT:    stp w4, w3, [sp, #80]
+; VBITS_GE_1024-NEXT:    stp w6, w5, [sp, #72]
+; VBITS_GE_1024-NEXT:    stp w19, w7, [sp, #64]
+; VBITS_GE_1024-NEXT:    stp w15, w14, [sp, #40]
+; VBITS_GE_1024-NEXT:    stp w17, w16, [sp, #32]
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #16]
+; VBITS_GE_1024-NEXT:    stp w8, w10, [sp]
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_1024-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldr x19, [sp, #16] // 8-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x float>, <32 x float>* %a
   %op2 = load <32 x float>, <32 x float>* %b
@@ -197,17 +863,177 @@ define void @select_v32f32(<32 x float>* %a, <32 x float>* %b, <32 x i1>* %c) #0
 }
 
 define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, <64 x i1>* %c) #0 {
-; CHECK-LABEL: select_v64f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_2048: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-96]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #672
+; VBITS_GE_2048-NEXT:    stp x28, x27, [sp, #16] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x26, x25, [sp, #32] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 96
+; VBITS_GE_2048-NEXT:    .cfi_offset w19, -8
+; VBITS_GE_2048-NEXT:    .cfi_offset w20, -16
+; VBITS_GE_2048-NEXT:    .cfi_offset w21, -24
+; VBITS_GE_2048-NEXT:    .cfi_offset w22, -32
+; VBITS_GE_2048-NEXT:    .cfi_offset w23, -40
+; VBITS_GE_2048-NEXT:    .cfi_offset w24, -48
+; VBITS_GE_2048-NEXT:    .cfi_offset w25, -56
+; VBITS_GE_2048-NEXT:    .cfi_offset w26, -64
+; VBITS_GE_2048-NEXT:    .cfi_offset w27, -72
+; VBITS_GE_2048-NEXT:    .cfi_offset w28, -80
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -88
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -96
+; VBITS_GE_2048-NEXT:    ldr x8, [x2]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    add x9, sp, #256
+; VBITS_GE_2048-NEXT:    ptrue p1.s
+; VBITS_GE_2048-NEXT:    asr x10, x8, #63
+; VBITS_GE_2048-NEXT:    str w10, [sp, #508]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x8, #62, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #404]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_2048-NEXT:    sbfx x12, x8, #61, #1
+; VBITS_GE_2048-NEXT:    sbfx x13, x8, #60, #1
+; VBITS_GE_2048-NEXT:    sbfx x14, x8, #59, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #504]
+; VBITS_GE_2048-NEXT:    sbfx x11, x8, #36, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #328]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_2048-NEXT:    sbfx x15, x8, #58, #1
+; VBITS_GE_2048-NEXT:    sbfx x16, x8, #57, #1
+; VBITS_GE_2048-NEXT:    sbfx x17, x8, #56, #1
+; VBITS_GE_2048-NEXT:    sbfx x18, x8, #55, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #500]
+; VBITS_GE_2048-NEXT:    sbfx x12, x8, #35, #1
+; VBITS_GE_2048-NEXT:    str w13, [sp, #496]
+; VBITS_GE_2048-NEXT:    sbfx x13, x8, #34, #1
+; VBITS_GE_2048-NEXT:    str w14, [sp, #492]
+; VBITS_GE_2048-NEXT:    sbfx x14, x8, #33, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #400]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #17, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #292]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_2048-NEXT:    sbfx x2, x8, #54, #1
+; VBITS_GE_2048-NEXT:    sbfx x3, x8, #53, #1
+; VBITS_GE_2048-NEXT:    sbfx x4, x8, #52, #1
+; VBITS_GE_2048-NEXT:    sbfx x5, x8, #51, #1
+; VBITS_GE_2048-NEXT:    sbfx x6, x8, #50, #1
+; VBITS_GE_2048-NEXT:    sbfx x7, x8, #49, #1
+; VBITS_GE_2048-NEXT:    sbfx x19, x8, #48, #1
+; VBITS_GE_2048-NEXT:    sbfx x20, x8, #47, #1
+; VBITS_GE_2048-NEXT:    sbfx x21, x8, #46, #1
+; VBITS_GE_2048-NEXT:    sbfx x22, x8, #45, #1
+; VBITS_GE_2048-NEXT:    str w15, [sp, #488]
+; VBITS_GE_2048-NEXT:    sbfx x15, x8, #32, #1
+; VBITS_GE_2048-NEXT:    str w16, [sp, #484]
+; VBITS_GE_2048-NEXT:    asr w16, w8, #31
+; VBITS_GE_2048-NEXT:    str w17, [sp, #480]
+; VBITS_GE_2048-NEXT:    sbfx w17, w8, #30, #1
+; VBITS_GE_2048-NEXT:    str w18, [sp, #476]
+; VBITS_GE_2048-NEXT:    sbfx w18, w8, #29, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #396]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #392]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #388]
+; VBITS_GE_2048-NEXT:    sbfx w12, w8, #16, #1
+; VBITS_GE_2048-NEXT:    sbfx w13, w8, #15, #1
+; VBITS_GE_2048-NEXT:    sbfx w14, w8, #14, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #324]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #8, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #272]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_2048-NEXT:    sbfx x23, x8, #44, #1
+; VBITS_GE_2048-NEXT:    sbfx x24, x8, #43, #1
+; VBITS_GE_2048-NEXT:    sbfx x25, x8, #42, #1
+; VBITS_GE_2048-NEXT:    sbfx x26, x8, #41, #1
+; VBITS_GE_2048-NEXT:    sbfx x27, x8, #40, #1
+; VBITS_GE_2048-NEXT:    sbfx x28, x8, #39, #1
+; VBITS_GE_2048-NEXT:    sbfx x30, x8, #38, #1
+; VBITS_GE_2048-NEXT:    str w2, [sp, #472]
+; VBITS_GE_2048-NEXT:    sbfx w2, w8, #28, #1
+; VBITS_GE_2048-NEXT:    str w3, [sp, #468]
+; VBITS_GE_2048-NEXT:    sbfx w3, w8, #27, #1
+; VBITS_GE_2048-NEXT:    str w4, [sp, #464]
+; VBITS_GE_2048-NEXT:    sbfx w4, w8, #26, #1
+; VBITS_GE_2048-NEXT:    str w5, [sp, #460]
+; VBITS_GE_2048-NEXT:    str w6, [sp, #456]
+; VBITS_GE_2048-NEXT:    sbfx w5, w8, #25, #1
+; VBITS_GE_2048-NEXT:    str w7, [sp, #452]
+; VBITS_GE_2048-NEXT:    str w19, [sp, #448]
+; VBITS_GE_2048-NEXT:    sbfx w6, w8, #24, #1
+; VBITS_GE_2048-NEXT:    str w20, [sp, #444]
+; VBITS_GE_2048-NEXT:    str w21, [sp, #440]
+; VBITS_GE_2048-NEXT:    sbfx w7, w8, #23, #1
+; VBITS_GE_2048-NEXT:    str w22, [sp, #436]
+; VBITS_GE_2048-NEXT:    sbfx w19, w8, #22, #1
+; VBITS_GE_2048-NEXT:    sbfx w20, w8, #21, #1
+; VBITS_GE_2048-NEXT:    sbfx w21, w8, #20, #1
+; VBITS_GE_2048-NEXT:    sbfx w22, w8, #19, #1
+; VBITS_GE_2048-NEXT:    str w15, [sp, #384]
+; VBITS_GE_2048-NEXT:    str w16, [sp, #380]
+; VBITS_GE_2048-NEXT:    str w17, [sp, #376]
+; VBITS_GE_2048-NEXT:    str w18, [sp, #372]
+; VBITS_GE_2048-NEXT:    sbfx w15, w8, #13, #1
+; VBITS_GE_2048-NEXT:    sbfx w16, w8, #12, #1
+; VBITS_GE_2048-NEXT:    sbfx w17, w8, #11, #1
+; VBITS_GE_2048-NEXT:    sbfx w18, w8, #10, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #320]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #316]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #312]
+; VBITS_GE_2048-NEXT:    sbfx w12, w8, #7, #1
+; VBITS_GE_2048-NEXT:    sbfx w13, w8, #6, #1
+; VBITS_GE_2048-NEXT:    sbfx w14, w8, #5, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #288]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #3, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #264]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    str w23, [sp, #432]
+; VBITS_GE_2048-NEXT:    str w24, [sp, #428]
+; VBITS_GE_2048-NEXT:    str w25, [sp, #424]
+; VBITS_GE_2048-NEXT:    str w26, [sp, #420]
+; VBITS_GE_2048-NEXT:    str w27, [sp, #416]
+; VBITS_GE_2048-NEXT:    str w28, [sp, #412]
+; VBITS_GE_2048-NEXT:    str w30, [sp, #408]
+; VBITS_GE_2048-NEXT:    str w2, [sp, #368]
+; VBITS_GE_2048-NEXT:    str w3, [sp, #364]
+; VBITS_GE_2048-NEXT:    str w4, [sp, #360]
+; VBITS_GE_2048-NEXT:    str w5, [sp, #356]
+; VBITS_GE_2048-NEXT:    str w6, [sp, #352]
+; VBITS_GE_2048-NEXT:    str w7, [sp, #348]
+; VBITS_GE_2048-NEXT:    str w19, [sp, #344]
+; VBITS_GE_2048-NEXT:    str w20, [sp, #340]
+; VBITS_GE_2048-NEXT:    str w21, [sp, #336]
+; VBITS_GE_2048-NEXT:    str w22, [sp, #332]
+; VBITS_GE_2048-NEXT:    str w15, [sp, #308]
+; VBITS_GE_2048-NEXT:    str w16, [sp, #304]
+; VBITS_GE_2048-NEXT:    str w17, [sp, #300]
+; VBITS_GE_2048-NEXT:    str w18, [sp, #296]
+; VBITS_GE_2048-NEXT:    str w12, [sp, #284]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #280]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #276]
+; VBITS_GE_2048-NEXT:    str w11, [sp, #268]
+; VBITS_GE_2048-NEXT:    str w10, [sp, #260]
+; VBITS_GE_2048-NEXT:    str w8, [sp, #256]
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x26, x25, [sp, #32] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x28, x27, [sp, #16] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #96 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x float>, <64 x float>* %a
   %op2 = load <64 x float>, <64 x float>* %b
@@ -219,8 +1045,12 @@ define void @select_v64f32(<64 x float>* %a, <64 x float>* %b, <64 x i1>* %c) #0
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, <1 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v1f64:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm x8, ne
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <1 x i1> %mask, <1 x double> %op1, <1 x double> %op2
   ret <1 x double> %sel
 }
@@ -228,24 +1058,49 @@ define <1 x double> @select_v1f64(<1 x double> %op1, <1 x double> %op2, <1 x i1>
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: select_v2f64:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <2 x i1> %mask, <2 x double> %op1, <2 x double> %op2
   ret <2 x double> %sel
 }
 
 define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x i1>* %c) #0 {
 ; CHECK-LABEL: select_v4f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; CHECK: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrb w8, [x2]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    lsr w10, w8, #3
+; CHECK-NEXT:    lsr w11, w8, #2
+; CHECK-NEXT:    sbfx x10, x10, #0, #1
+; CHECK-NEXT:    sbfx x11, x11, #0, #1
+; CHECK-NEXT:    stp x11, x10, [sp, #16]
+; CHECK-NEXT:    sbfx x10, x8, #0, #1
+; CHECK-NEXT:    lsr w8, w8, #1
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    stp x10, x8, [sp]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; CHECK-NEXT:    sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <4 x i1>, <4 x i1>* %c
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
@@ -255,17 +1110,48 @@ define void @select_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x i1>* %c) #0 {
 }
 
 define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, <8 x i1>* %c) #0 {
-; CHECK-LABEL: select_v8f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_512: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldrb w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    ptrue p1.d
+; VBITS_GE_512-NEXT:    lsr w10, w8, #7
+; VBITS_GE_512-NEXT:    lsr w11, w8, #6
+; VBITS_GE_512-NEXT:    lsr w12, w8, #5
+; VBITS_GE_512-NEXT:    lsr w13, w8, #4
+; VBITS_GE_512-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_512-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_512-NEXT:    lsr w10, w8, #3
+; VBITS_GE_512-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_512-NEXT:    lsr w11, w8, #2
+; VBITS_GE_512-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_512-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_512-NEXT:    sbfx x10, x8, #0, #1
+; VBITS_GE_512-NEXT:    lsr w8, w8, #1
+; VBITS_GE_512-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_512-NEXT:    stp x10, x8, [sp]
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_512-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x double>, <8 x double>* %a
   %op2 = load <8 x double>, <8 x double>* %b
@@ -275,17 +1161,68 @@ define void @select_v8f64(<8 x double>* %a, <8 x double>* %b, <8 x i1>* %c) #0 {
 }
 
 define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, <16 x i1>* %c) #0 {
-; CHECK-LABEL: select_v16f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_1024: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_1024-NEXT:    ldrh w8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    ptrue p1.d
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #15
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #14
+; VBITS_GE_1024-NEXT:    lsr w12, w8, #13
+; VBITS_GE_1024-NEXT:    lsr w13, w8, #12
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w14, w8, #11
+; VBITS_GE_1024-NEXT:    lsr w15, w8, #10
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w16, w8, #9
+; VBITS_GE_1024-NEXT:    lsr w17, w8, #8
+; VBITS_GE_1024-NEXT:    stp x12, x11, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #7
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #6
+; VBITS_GE_1024-NEXT:    stp x13, x12, [sp, #80]
+; VBITS_GE_1024-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w12, w8, #5
+; VBITS_GE_1024-NEXT:    stp x14, x13, [sp, #64]
+; VBITS_GE_1024-NEXT:    lsr w13, w8, #4
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #3
+; VBITS_GE_1024-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #2
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w8, w8, #1
+; VBITS_GE_1024-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_1024-NEXT:    stp x10, x8, [sp]
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x double>, <16 x double>* %a
   %op2 = load <16 x double>, <16 x double>* %b
@@ -295,17 +1232,142 @@ define void @select_v16f64(<16 x double>* %a, <16 x double>* %b, <16 x i1>* %c)
 }
 
 define void @select_v32f64(<32 x double>* %a, <32 x double>* %b, <32 x i1>* %c) #0 {
-; CHECK-LABEL: select_v32f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_2048: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #480
+; VBITS_GE_2048-NEXT:    str x19, [sp, #16] // 8-byte Folded Spill
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 32
+; VBITS_GE_2048-NEXT:    .cfi_offset w19, -16
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -24
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -32
+; VBITS_GE_2048-NEXT:    ldr w8, [x2]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    mov x9, sp
+; VBITS_GE_2048-NEXT:    ptrue p1.d
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #31, #1
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #30, #2
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #29, #3
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #28, #4
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    ubfx x14, x8, #27, #5
+; VBITS_GE_2048-NEXT:    ubfx x15, x8, #26, #6
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #240]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x16, x8, #25, #7
+; VBITS_GE_2048-NEXT:    ubfx x17, x8, #24, #8
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #224]
+; VBITS_GE_2048-NEXT:    // kill: def $w14 killed $w14 killed $x14 def $x14
+; VBITS_GE_2048-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w15 killed $w15 killed $x15 def $x15
+; VBITS_GE_2048-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x18, x8, #23, #9
+; VBITS_GE_2048-NEXT:    ubfx x2, x8, #22, #10
+; VBITS_GE_2048-NEXT:    stp x13, x12, [sp, #208]
+; VBITS_GE_2048-NEXT:    // kill: def $w16 killed $w16 killed $x16 def $x16
+; VBITS_GE_2048-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w17 killed $w17 killed $x17 def $x17
+; VBITS_GE_2048-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x3, x8, #21, #11
+; VBITS_GE_2048-NEXT:    ubfx x4, x8, #20, #12
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #15, #17
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #14, #18
+; VBITS_GE_2048-NEXT:    stp x14, x13, [sp, #192]
+; VBITS_GE_2048-NEXT:    // kill: def $w18 killed $w18 killed $x18 def $x18
+; VBITS_GE_2048-NEXT:    sbfx x14, x18, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w2 killed $w2 killed $x2 def $x2
+; VBITS_GE_2048-NEXT:    sbfx x15, x2, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x5, x8, #19, #13
+; VBITS_GE_2048-NEXT:    ubfx x6, x8, #18, #14
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #13, #19
+; VBITS_GE_2048-NEXT:    stp x15, x14, [sp, #176]
+; VBITS_GE_2048-NEXT:    // kill: def $w3 killed $w3 killed $x3 def $x3
+; VBITS_GE_2048-NEXT:    sbfx x15, x3, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w4 killed $w4 killed $x4 def $x4
+; VBITS_GE_2048-NEXT:    sbfx x16, x4, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    ubfx x7, x8, #17, #15
+; VBITS_GE_2048-NEXT:    ubfx x19, x8, #16, #16
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #12, #20
+; VBITS_GE_2048-NEXT:    stp x16, x15, [sp, #160]
+; VBITS_GE_2048-NEXT:    // kill: def $w5 killed $w5 killed $x5 def $x5
+; VBITS_GE_2048-NEXT:    sbfx x16, x5, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w6 killed $w6 killed $x6 def $x6
+; VBITS_GE_2048-NEXT:    sbfx x17, x6, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    ubfx x14, x8, #11, #21
+; VBITS_GE_2048-NEXT:    ubfx x15, x8, #10, #22
+; VBITS_GE_2048-NEXT:    stp x17, x16, [sp, #144]
+; VBITS_GE_2048-NEXT:    // kill: def $w7 killed $w7 killed $x7 def $x7
+; VBITS_GE_2048-NEXT:    sbfx x17, x7, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w19 killed $w19 killed $x19 def $x19
+; VBITS_GE_2048-NEXT:    sbfx x18, x19, #0, #1
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #112]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x16, x8, #9, #23
+; VBITS_GE_2048-NEXT:    stp x18, x17, [sp, #128]
+; VBITS_GE_2048-NEXT:    ubfx x17, x8, #8, #24
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #7, #25
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #96]
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #6, #26
+; VBITS_GE_2048-NEXT:    // kill: def $w14 killed $w14 killed $x14 def $x14
+; VBITS_GE_2048-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w15 killed $w15 killed $x15 def $x15
+; VBITS_GE_2048-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_2048-NEXT:    stp x13, x12, [sp, #80]
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #5, #27
+; VBITS_GE_2048-NEXT:    // kill: def $w16 killed $w16 killed $x16 def $x16
+; VBITS_GE_2048-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w17 killed $w17 killed $x17 def $x17
+; VBITS_GE_2048-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    stp x14, x13, [sp, #64]
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #4, #28
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #3, #29
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #2, #30
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #1, #31
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    stp x8, x10, [sp]
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldr x19, [sp, #16] // 8-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x double>, <32 x double>* %a
   %op2 = load <32 x double>, <32 x double>* %b

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
index 6aa1d7bd8670d..3251d74fb085e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-insert-vector-elt.ll
@@ -1,19 +1,19 @@
 ; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16  -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -26,27 +26,32 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @insertelement_v4f16(<4 x half> %op1) #0 {
-; CHECK-LABEL: insertelement_v4f16:
-; CHECK:         fmov h1, #5.00000000
-; CHECK-NEXT:    mov v0.h[3], v1.h[0]
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v4f16:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 def $q0
+; VBITS_GE_256-NEXT:    fmov h1, #5.00000000
+; VBITS_GE_256-NEXT:    mov v0.h[3], v1.h[0]
+; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <4 x half> %op1, half 5.0, i64 3
     ret <4 x half> %r
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <8 x half> @insertelement_v8f16(<8 x half> %op1) #0 {
-; CHECK-LABEL: insertelement_v8f16:
-; CHECK:         fmov h1, #5.00000000
-; CHECK-NEXT:    mov v0.h[7], v1.h[0]
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v8f16:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    fmov h1, #5.00000000
+; VBITS_GE_256-NEXT:    mov v0.h[7], v1.h[0]
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <8 x half> %op1, half 5.0, i64 7
     ret <8 x half> %r
 }
 
 define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 {
-; CHECK-LABEL: insertelement_v16f16:
-; VBITS_GE_256:         ptrue p0.h, vl16
+; VBITS_GE_256-LABEL: insertelement_v16f16:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    mov w9, #15
 ; VBITS_GE_256-NEXT:    mov z1.h, w9
@@ -63,8 +68,9 @@ define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 {
 }
 
 define <32 x half> @insertelement_v32f16(<32 x half>* %a) #0 {
-; CHECK-LABEL: insertelement_v32f16:
-; VBITS_GE_512:         ptrue p0.h, vl32
+; VBITS_GE_512-LABEL: insertelement_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    mov w9, #31
 ; VBITS_GE_512-NEXT:    mov z1.h, w9
@@ -81,17 +87,18 @@ define <32 x half> @insertelement_v32f16(<32 x half>* %a) #0 {
 }
 
 define <64 x half> @insertelement_v64f16(<64 x half>* %a) #0 {
-; CHECK-LABEL: insertelement_v64f16:
-; VBITS_GE_1024:         ptrue   p0.h, vl64
-; VBITS_GE_1024-NEXT:    ld1h    { z0.h }, p0/z, [x0]
-; VBITS_GE_1024-NEXT:    mov     w9, #63
-; VBITS_GE_1024-NEXT:    mov     z1.h, w9
-; VBITS_GE_1024-NEXT:    index   z2.h, #0, #1
-; VBITS_GE_1024-NEXT:    ptrue   p1.h
-; VBITS_GE_1024-NEXT:    cmpeq   p1.h, p1/z, z2.h, z1.h
-; VBITS_GE_1024-NEXT:    fmov    h1, #5.00000000
-; VBITS_GE_1024-NEXT:    mov     z0.h, p1/m, h1
-; VBITS_GE_1024-NEXT:    st1h    { z0.h }, p0, [x8]
+; VBITS_GE_1024-LABEL: insertelement_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    mov w9, #63
+; VBITS_GE_1024-NEXT:    mov z1.h, w9
+; VBITS_GE_1024-NEXT:    index z2.h, #0, #1
+; VBITS_GE_1024-NEXT:    ptrue p1.h
+; VBITS_GE_1024-NEXT:    cmpeq p1.h, p1/z, z2.h, z1.h
+; VBITS_GE_1024-NEXT:    fmov h1, #5.00000000
+; VBITS_GE_1024-NEXT:    mov z0.h, p1/m, h1
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_GE_1024-NEXT:    ret
     %op1 = load <64 x half>, <64 x half>* %a
     %r = insertelement <64 x half> %op1, half 5.0, i64 63
@@ -99,18 +106,19 @@ define <64 x half> @insertelement_v64f16(<64 x half>* %a) #0 {
 }
 
 define <128 x half> @insertelement_v128f16(<128 x half>* %a) #0 {
-; CHECK-LABEL: insertelement_v128f16:
-; VBITS_GE_2048: ptrue   p0.h, vl128
-; VBITS_GE_2048-NEXT: ld1h    { z0.h }, p0/z, [x0]
-; VBITS_GE_2048-NEXT: mov     w9, #127
-; VBITS_GE_2048-NEXT: mov     z1.h, w9
-; VBITS_GE_2048-NEXT: index   z2.h, #0, #1
-; VBITS_GE_2048-NEXT: ptrue   p1.h
-; VBITS_GE_2048-NEXT: cmpeq   p1.h, p1/z, z2.h, z1.h
-; VBITS_GE_2048-NEXT: fmov    h1, #5.00000000
-; VBITS_GE_2048-NEXT: mov     z0.h, p1/m, h1
-; VBITS_GE_2048-NEXT: st1h    { z0.h }, p0, [x8]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: insertelement_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    mov w9, #127
+; VBITS_GE_2048-NEXT:    mov z1.h, w9
+; VBITS_GE_2048-NEXT:    index z2.h, #0, #1
+; VBITS_GE_2048-NEXT:    ptrue p1.h
+; VBITS_GE_2048-NEXT:    cmpeq p1.h, p1/z, z2.h, z1.h
+; VBITS_GE_2048-NEXT:    fmov h1, #5.00000000
+; VBITS_GE_2048-NEXT:    mov z0.h, p1/m, h1
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
     %op1 = load <128 x half>, <128 x half>* %a
     %r = insertelement <128 x half> %op1, half 5.0, i64 127
     ret <128 x half> %r
@@ -118,27 +126,32 @@ define <128 x half> @insertelement_v128f16(<128 x half>* %a) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @insertelement_v2f32(<2 x float> %op1) #0 {
-; CHECK-LABEL: insertelement_v2f32:
-; CHECK:         fmov s1, #5.00000000
-; CHECK-NEXT:    mov v0.s[1], v1.s[0]
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v2f32:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 def $q0
+; VBITS_GE_256-NEXT:    fmov s1, #5.00000000
+; VBITS_GE_256-NEXT:    mov v0.s[1], v1.s[0]
+; VBITS_GE_256-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <2 x float> %op1, float 5.0, i64 1
     ret <2 x float> %r
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @insertelement_v4f32(<4 x float> %op1) #0 {
-; CHECK-LABEL: insertelement_v4f32:
-; CHECK:         fmov s1, #5.00000000
-; CHECK-NEXT:    mov v0.s[3], v1.s[0]
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v4f32:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    fmov s1, #5.00000000
+; VBITS_GE_256-NEXT:    mov v0.s[3], v1.s[0]
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <4 x float> %op1, float 5.0, i64 3
     ret <4 x float> %r
 }
 
 define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 {
-; CHECK-LABEL: insertelement_v8f32:
-; VBITS_GE_256:         ptrue p0.s, vl8
+; VBITS_GE_256-LABEL: insertelement_v8f32:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    mov w9, #7
 ; VBITS_GE_256-NEXT:    mov z1.s, w9
@@ -155,17 +168,18 @@ define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 {
 }
 
 define <16 x float> @insertelement_v16f32(<16 x float>* %a) #0 {
-; CHECK-LABEL: insertelement_v16f32:
-; VBITS_GE_512:         ptrue   p0.s, vl16
-; VBITS_GE_512-NEXT:    ld1w    { z0.s }, p0/z, [x0]
-; VBITS_GE_512-NEXT:    mov     w9, #15
-; VBITS_GE_512-NEXT:    mov     z1.s, w9
-; VBITS_GE_512-NEXT:    index   z2.s, #0, #1
-; VBITS_GE_512-NEXT:    ptrue   p1.s
-; VBITS_GE_512-NEXT:    cmpeq   p1.s, p1/z, z2.s, z1.s
-; VBITS_GE_512-NEXT:    fmov    s1, #5.00000000
-; VBITS_GE_512-NEXT:    mov     z0.s, p1/m, s1
-; VBITS_GE_512-NEXT:    st1w    { z0.s }, p0, [x8]
+; VBITS_GE_512-LABEL: insertelement_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    mov w9, #15
+; VBITS_GE_512-NEXT:    mov z1.s, w9
+; VBITS_GE_512-NEXT:    index z2.s, #0, #1
+; VBITS_GE_512-NEXT:    ptrue p1.s
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p1/z, z2.s, z1.s
+; VBITS_GE_512-NEXT:    fmov s1, #5.00000000
+; VBITS_GE_512-NEXT:    mov z0.s, p1/m, s1
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
     %op1 = load <16 x float>, <16 x float>* %a
     %r = insertelement <16 x float> %op1, float 5.0, i64 15
@@ -173,36 +187,38 @@ define <16 x float> @insertelement_v16f32(<16 x float>* %a) #0 {
 }
 
 define <32 x float> @insertelement_v32f32(<32 x float>* %a) #0 {
-; CHECK-LABEL: insertelement_v32f32:
-; VBITS_GE_1024:        ptrue   p0.s, vl32
-; VBITS_GE_1024-NEXT:   ld1w    { z0.s }, p0/z, [x0]
-; VBITS_GE_1024-NEXT:   mov     w9, #31
-; VBITS_GE_1024-NEXT:   mov     z1.s, w9
-; VBITS_GE_1024-NEXT:   index   z2.s, #0, #1
-; VBITS_GE_1024-NEXT:   ptrue   p1.s
-; VBITS_GE_1024-NEXT:   cmpeq   p1.s, p1/z, z2.s, z1.s
-; VBITS_GE_1024-NEXT:   fmov    s1, #5.00000000
-; VBITS_GE_1024-NEXT:   mov     z0.s, p1/m, s1
-; VBITS_GE_1024-NEXT:   st1w    { z0.s }, p0, [x8]
-; VBITS_GE_1024-NEXT:   ret
+; VBITS_GE_1024-LABEL: insertelement_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    mov w9, #31
+; VBITS_GE_1024-NEXT:    mov z1.s, w9
+; VBITS_GE_1024-NEXT:    index z2.s, #0, #1
+; VBITS_GE_1024-NEXT:    ptrue p1.s
+; VBITS_GE_1024-NEXT:    cmpeq p1.s, p1/z, z2.s, z1.s
+; VBITS_GE_1024-NEXT:    fmov s1, #5.00000000
+; VBITS_GE_1024-NEXT:    mov z0.s, p1/m, s1
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_1024-NEXT:    ret
     %op1 = load <32 x float>, <32 x float>* %a
     %r = insertelement <32 x float> %op1, float 5.0, i64 31
     ret <32 x float> %r
 }
 
 define <64 x float> @insertelement_v64f32(<64 x float>* %a) #0 {
-; CHECK-LABEL: insertelement_v64f32:
-; VBITS_GE_2048:        ptrue   p0.s, vl64
-; VBITS_GE_2048-NEXT:   ld1w    { z0.s }, p0/z, [x0]
-; VBITS_GE_2048-NEXT:   mov     w9, #63
-; VBITS_GE_2048-NEXT:   mov     z1.s, w9
-; VBITS_GE_2048-NEXT:   index   z2.s, #0, #1
-; VBITS_GE_2048-NEXT:   ptrue   p1.s
-; VBITS_GE_2048-NEXT:   cmpeq   p1.s, p1/z, z2.s, z1.s
-; VBITS_GE_2048-NEXT:   fmov    s1, #5.00000000
-; VBITS_GE_2048-NEXT:   mov     z0.s, p1/m, s1
-; VBITS_GE_2048-NEXT:   st1w    { z0.s }, p0, [x8]
-; VBITS_GE_2048-NEXT:   ret
+; VBITS_GE_2048-LABEL: insertelement_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    mov w9, #63
+; VBITS_GE_2048-NEXT:    mov z1.s, w9
+; VBITS_GE_2048-NEXT:    index z2.s, #0, #1
+; VBITS_GE_2048-NEXT:    ptrue p1.s
+; VBITS_GE_2048-NEXT:    cmpeq p1.s, p1/z, z2.s, z1.s
+; VBITS_GE_2048-NEXT:    fmov s1, #5.00000000
+; VBITS_GE_2048-NEXT:    mov z0.s, p1/m, s1
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
     %op1 = load <64 x float>, <64 x float>* %a
     %r = insertelement <64 x float> %op1, float 5.0, i64 63
     ret <64 x float> %r
@@ -210,26 +226,29 @@ define <64 x float> @insertelement_v64f32(<64 x float>* %a) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @insertelement_v1f64(<1 x double> %op1) #0 {
-; CHECK-LABEL: insertelement_v1f64:
-; CHECK:         fmov d0, #5.00000000
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v1f64:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    fmov d0, #5.00000000
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <1 x double> %op1, double 5.0, i64 0
     ret <1 x double> %r
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @insertelement_v2f64(<2 x double> %op1) #0 {
-; CHECK-LABEL: insertelement_v2f64:
-; CHECK:         fmov d1, #5.00000000
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    ret
+; VBITS_GE_256-LABEL: insertelement_v2f64:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    fmov d1, #5.00000000
+; VBITS_GE_256-NEXT:    mov v0.d[1], v1.d[0]
+; VBITS_GE_256-NEXT:    ret
     %r = insertelement <2 x double> %op1, double 5.0, i64 1
     ret <2 x double> %r
 }
 
 define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 {
-; CHECK-LABEL: insertelement_v4f64:
-; VBITS_GE_256:         ptrue p0.d, vl4
+; VBITS_GE_256-LABEL: insertelement_v4f64:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
 ; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    mov w9, #3
 ; VBITS_GE_256-NEXT:    mov z1.d, x9
@@ -246,17 +265,18 @@ define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 {
 }
 
 define <8 x double> @insertelement_v8f64(<8 x double>* %a) #0 {
-; CHECK-LABEL: insertelement_v8f64:
-; VBITS_GE_512:         ptrue   p0.d, vl8
-; VBITS_GE_512-NEXT:    ld1d    { z0.d }, p0/z, [x0]
-; VBITS_GE_512-NEXT:    mov     w9, #7
-; VBITS_GE_512-NEXT:    mov     z1.d, x9
-; VBITS_GE_512-NEXT:    index   z2.d, #0, #1
-; VBITS_GE_512-NEXT:    ptrue   p1.d
-; VBITS_GE_512-NEXT:    cmpeq   p1.d, p1/z, z2.d, z1.d
-; VBITS_GE_512-NEXT:    fmov    d1, #5.00000000
-; VBITS_GE_512-NEXT:    mov     z0.d, p1/m, d1
-; VBITS_GE_512-NEXT:    st1d    { z0.d }, p0, [x8]
+; VBITS_GE_512-LABEL: insertelement_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    mov w9, #7
+; VBITS_GE_512-NEXT:    mov z1.d, x9
+; VBITS_GE_512-NEXT:    index z2.d, #0, #1
+; VBITS_GE_512-NEXT:    ptrue p1.d
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p1/z, z2.d, z1.d
+; VBITS_GE_512-NEXT:    fmov d1, #5.00000000
+; VBITS_GE_512-NEXT:    mov z0.d, p1/m, d1
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
     %op1 = load <8 x double>, <8 x double>* %a
     %r = insertelement <8 x double> %op1, double 5.0, i64 7
@@ -264,17 +284,18 @@ define <8 x double> @insertelement_v8f64(<8 x double>* %a) #0 {
 }
 
 define <16 x double> @insertelement_v16f64(<16 x double>* %a) #0 {
-; CHECK-LABEL: insertelement_v16f64:
-; VBITS_GE_1024:         ptrue   p0.d, vl16
-; VBITS_GE_1024-NEXT:    ld1d    { z0.d }, p0/z, [x0]
-; VBITS_GE_1024-NEXT:    mov     w9, #15
-; VBITS_GE_1024-NEXT:    mov     z1.d, x9
-; VBITS_GE_1024-NEXT:    index   z2.d, #0, #1
-; VBITS_GE_1024-NEXT:    ptrue   p1.d
-; VBITS_GE_1024-NEXT:    cmpeq   p1.d, p1/z, z2.d, z1.d
-; VBITS_GE_1024-NEXT:    fmov    d1, #5.00000000
-; VBITS_GE_1024-NEXT:    mov     z0.d, p1/m, d1
-; VBITS_GE_1024-NEXT:    st1d    { z0.d }, p0, [x8]
+; VBITS_GE_1024-LABEL: insertelement_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    mov w9, #15
+; VBITS_GE_1024-NEXT:    mov z1.d, x9
+; VBITS_GE_1024-NEXT:    index z2.d, #0, #1
+; VBITS_GE_1024-NEXT:    ptrue p1.d
+; VBITS_GE_1024-NEXT:    cmpeq p1.d, p1/z, z2.d, z1.d
+; VBITS_GE_1024-NEXT:    fmov d1, #5.00000000
+; VBITS_GE_1024-NEXT:    mov z0.d, p1/m, d1
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x8]
 ; VBITS_GE_1024-NEXT:    ret
     %op1 = load <16 x double>, <16 x double>* %a
     %r = insertelement <16 x double> %op1, double 5.0, i64 15
@@ -282,17 +303,18 @@ define <16 x double> @insertelement_v16f64(<16 x double>* %a) #0 {
 }
 
 define <32 x double> @insertelement_v32f64(<32 x double>* %a) #0 {
-; CHECK-LABEL: insertelement_v32f64:
-; VBITS_GE_2048:         ptrue   p0.d, vl32
-; VBITS_GE_2048-NEXT:    ld1d    { z0.d }, p0/z, [x0]
-; VBITS_GE_2048-NEXT:    mov     w9, #31
-; VBITS_GE_2048-NEXT:    mov     z1.d, x9
-; VBITS_GE_2048-NEXT:    index   z2.d, #0, #1
-; VBITS_GE_2048-NEXT:    ptrue   p1.d
-; VBITS_GE_2048-NEXT:    cmpeq   p1.d, p1/z, z2.d, z1.d
-; VBITS_GE_2048-NEXT:    fmov    d1, #5.00000000
-; VBITS_GE_2048-NEXT:    mov     z0.d, p1/m, d1
-; VBITS_GE_2048-NEXT:    st1d    { z0.d }, p0, [x8]
+; VBITS_GE_2048-LABEL: insertelement_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    mov w9, #31
+; VBITS_GE_2048-NEXT:    mov z1.d, x9
+; VBITS_GE_2048-NEXT:    index z2.d, #0, #1
+; VBITS_GE_2048-NEXT:    ptrue p1.d
+; VBITS_GE_2048-NEXT:    cmpeq p1.d, p1/z, z2.d, z1.d
+; VBITS_GE_2048-NEXT:    fmov d1, #5.00000000
+; VBITS_GE_2048-NEXT:    mov z0.d, p1/m, d1
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
 ; VBITS_GE_2048-NEXT:    ret
     %op1 = load <32 x double>, <32 x double>* %a
     %r = insertelement <32 x double> %op1, double 5.0, i64 31

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
index 2af8926cd1e37..756b6def8b784 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
@@ -1,19 +1,19 @@
 ; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16  -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK,VBITS_GE_256,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256,VBITS_EQ_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256,VBITS_EQ_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
-; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512,VBITS_EQ_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_EQ_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 ; VBYTES represents the useful byte size of a vector register from the code
 ; generator's point of view. It is clamped to power-of-2 values because
@@ -35,17 +35,27 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Don't use SVE for 64-bit vectors.
 define <8 x i8> @smulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: smulh_v8i8:
-; CHECK: smull v0.8h, v0.8b, v1.8b
-; CHECK: ushr v1.8h, v0.8h, #8
-; CHECK: umov w8, v1.h[0]
-; CHECK: fmov s0, w8
-; CHECK: umov w8, v1.h[1]
-; CHECK: mov v0.b[1], w8
-; CHECK: umov w8, v1.h[2]
-; CHECK: mov v0.b[2], w8
-; CHECK: umov w8, v1.h[3]
-; CHECK: mov v0.b[3], w8
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    ushr v1.8h, v0.8h, #8
+; CHECK-NEXT:    umov w8, v1.h[0]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    umov w8, v1.h[1]
+; CHECK-NEXT:    mov v0.b[1], w8
+; CHECK-NEXT:    umov w8, v1.h[2]
+; CHECK-NEXT:    mov v0.b[2], w8
+; CHECK-NEXT:    umov w8, v1.h[3]
+; CHECK-NEXT:    mov v0.b[3], w8
+; CHECK-NEXT:    umov w8, v1.h[4]
+; CHECK-NEXT:    mov v0.b[4], w8
+; CHECK-NEXT:    umov w8, v1.h[5]
+; CHECK-NEXT:    mov v0.b[5], w8
+; CHECK-NEXT:    umov w8, v1.h[6]
+; CHECK-NEXT:    mov v0.b[6], w8
+; CHECK-NEXT:    umov w8, v1.h[7]
+; CHECK-NEXT:    mov v0.b[7], w8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %insert = insertelement <8 x i16> undef, i16 8, i64 0
   %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
   %1 = sext <8 x i8> %op1 to <8 x i16>
@@ -59,10 +69,11 @@ define <8 x i8> @smulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <16 x i8> @smulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 ; CHECK-LABEL: smulh_v16i8:
-; CHECK: smull2 v2.8h, v0.16b, v1.16b
-; CHECK: smull v0.8h, v0.8b, v1.8b
-; CHECK: uzp2 v0.16b, v0.16b, v2.16b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
   %insert = insertelement <16 x i16> undef, i16 8, i64 0
   %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
   %1 = sext <16 x i8> %op1 to <16 x i16>
@@ -74,20 +85,25 @@ define <16 x i8> @smulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 }
 
 define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
-; CHECK-LABEL: smulh_v32i8:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_256-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_256: smulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,32)]]
-; VBITS_GE_512-DAG: ld1sb { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1sb { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_GE_512: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_512: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: smulh_v32i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.b, vl32
+; VBITS_EQ_256-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_256-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: smulh_v32i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1sb { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_512-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_512-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
   %insert = insertelement <32 x i16> undef, i16 8, i64 0
@@ -102,20 +118,25 @@ define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 }
 
 define void @smulh_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
-; CHECK-LABEL: smulh_v64i8:
-; VBITS_EQ_512-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_512: smulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_512: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_EQ_512: ret
-
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,64)]]
-; VBITS_GE_1024-DAG: ld1sb { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1sb { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_GE_1024: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_1024: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: smulh_v64i8:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.b, vl64
+; VBITS_EQ_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_512-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: smulh_v64i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1sb { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_1024-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_1024-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <64 x i8>, <64 x i8>* %a
   %op2 = load <64 x i8>, <64 x i8>* %b
   %insert = insertelement <64 x i16> undef, i16 8, i64 0
@@ -130,20 +151,25 @@ define void @smulh_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
 }
 
 define void @smulh_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
-; CHECK-LABEL: smulh_v128i8:
-; VBITS_EQ_1024-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: smulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_1024: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,128)]]
-; VBITS_GE_2048-DAG: ld1sb { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1sb { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_2048: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: smulh_v128i8:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.b, vl128
+; VBITS_EQ_1024-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_1024-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: smulh_v128i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1sb { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_2048-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <128 x i8>, <128 x i8>* %a
   %op2 = load <128 x i8>, <128 x i8>* %b
   %insert = insertelement <128 x i16> undef, i16 8, i64 0
@@ -158,13 +184,14 @@ define void @smulh_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
 }
 
 define void @smulh_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
-; CHECK-LABEL: smulh_v256i8:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
-; VBITS_GE_2048-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_2048: smulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_GE_2048: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: smulh_v256i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <256 x i8>, <256 x i8>* %a
   %op2 = load <256 x i8>, <256 x i8>* %b
   %insert = insertelement <256 x i16> undef, i16 8, i64 0
@@ -181,15 +208,17 @@ define void @smulh_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <4 x i16> @smulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: smulh_v4i16:
-; CHECK: smull v0.4s, v0.4h, v1.4h
-; CHECK: ushr v0.4s, v0.4s, #16
-; CHECK: mov w8, v0.s[1]
-; CHECK: mov w9, v0.s[2]
-; CHECK: mov w10, v0.s[3]
-; CHECK: mov v0.h[1], w8
-; CHECK: mov v0.h[2], w9
-; CHECK: mov v0.h[3], w10
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ushr v0.4s, v0.4s, #16
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    mov w9, v0.s[2]
+; CHECK-NEXT:    mov w10, v0.s[3]
+; CHECK-NEXT:    mov v0.h[1], w8
+; CHECK-NEXT:    mov v0.h[2], w9
+; CHECK-NEXT:    mov v0.h[3], w10
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %insert = insertelement <4 x i32> undef, i32 16, i64 0
   %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
   %1 = sext <4 x i16> %op1 to <4 x i32>
@@ -203,10 +232,11 @@ define <4 x i16> @smulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <8 x i16> @smulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 ; CHECK-LABEL: smulh_v8i16:
-; CHECK: smull2 v2.4s, v0.8h, v1.8h
-; CHECK: smull v0.4s, v0.4h, v1.4h
-; CHECK: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
   %insert = insertelement <8 x i32> undef, i32 16, i64 0
   %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
   %1 = sext <8 x i16> %op1 to <8 x i32>
@@ -218,21 +248,25 @@ define <8 x i16> @smulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 }
 
 define void @smulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
-; CHECK-LABEL: smulh_v16i16:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,16)]]
-; VBITS_EQ_256-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_256: smulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_256: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,16)]]
-; VBITS_GE_512-DAG: ld1sh { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1sh { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_512: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_512: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: smulh_v16i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: smulh_v16i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1sh { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_512-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_512-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
   %insert = insertelement <16 x i32> undef, i32 16, i64 0
@@ -247,21 +281,25 @@ define void @smulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 }
 
 define void @smulh_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
-; CHECK-LABEL: smulh_v32i16:
-; VBITS_EQ_512: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_512-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_512: smulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_512: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_EQ_512: ret
-
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,32)]]
-; VBITS_GE_1024-DAG: ld1sh { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1sh { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_1024: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: smulh_v32i16:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.h, vl32
+; VBITS_EQ_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: smulh_v32i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1sh { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_1024-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <32 x i16>, <32 x i16>* %a
   %op2 = load <32 x i16>, <32 x i16>* %b
   %insert = insertelement <32 x i32> undef, i32 16, i64 0
@@ -276,21 +314,25 @@ define void @smulh_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
 }
 
 define void @smulh_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
-; CHECK-LABEL: smulh_v64i16:
-; VBITS_EQ_1024: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,64)]]
-; VBITS_EQ_1024-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: smulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_1024: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,64)]]
-; VBITS_GE_2048-DAG: ld1sh { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1sh { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_2048: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: smulh_v64i16:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_EQ_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: smulh_v64i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1sh { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_2048-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <64 x i16>, <64 x i16>* %a
   %op2 = load <64 x i16>, <64 x i16>* %b
   %insert = insertelement <64 x i32> undef, i32 16, i64 0
@@ -305,13 +347,14 @@ define void @smulh_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
 }
 
 define void @smulh_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
-; CHECK-LABEL: smulh_v128i16:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,128)]]
-; VBITS_GE_2048-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048: smulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: smulh_v128i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x i16>, <128 x i16>* %a
   %op2 = load <128 x i16>, <128 x i16>* %b
   %insert = insertelement <128 x i32> undef, i32 16, i64 0
@@ -328,12 +371,13 @@ define void @smulh_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <2 x i32> @smulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
 ; CHECK-LABEL: smulh_v2i32:
-; CHECK: sshll v0.2d, v0.2s, #0
-; CHECK: sshll v1.2d, v1.2s, #0
-; CHECK: ptrue p0.d, vl2
-; CHECK: mul z0.d, p0/m, z0.d, z1.d
-; CHECK: shrn v0.2s, v0.2d, #32
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
   %insert = insertelement <2 x i64> undef, i64 32, i64 0
   %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
   %1 = sext <2 x i32> %op1 to <2 x i64>
@@ -347,10 +391,11 @@ define <2 x i32> @smulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x i32> @smulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 ; CHECK-LABEL: smulh_v4i32:
-; CHECK: smull2 v2.2d, v0.4s, v1.4s
-; CHECK: smull v0.2d, v0.2s, v1.2s
-; CHECK: uzp2 v0.4s, v0.4s, v2.4s
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
   %insert = insertelement <4 x i64> undef, i64 32, i64 0
   %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
   %1 = sext <4 x i32> %op1 to <4 x i64>
@@ -362,21 +407,25 @@ define <4 x i32> @smulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 }
 
 define void @smulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
-; CHECK-LABEL: smulh_v8i32:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
-; VBITS_EQ_256-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_256: smulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_256: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,8)]]
-; VBITS_GE_512-DAG: ld1sw { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1sw { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_512: lsr [[RES]].d, [[PG]]/m, [[RES]].d, #32
-; VBITS_GE_512: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: smulh_v8i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: smulh_v8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1sw { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_512-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
   %insert = insertelement <8 x i64> undef, i64 32, i64 0
@@ -391,20 +440,25 @@ define void @smulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 }
 
 define void @smulh_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
-; CHECK-LABEL: smulh_v16i32:
-; VBITS_EQ_512: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,16)]]
-; VBITS_EQ_512-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_512: smulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_512: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_EQ_512: ret
-
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,16)]]
-; VBITS_GE_1024-DAG: ld1sw { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1sw { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: smulh_v16i32:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.s, vl16
+; VBITS_EQ_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: smulh_v16i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1sw { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_1024-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <16 x i32>, <16 x i32>* %a
   %op2 = load <16 x i32>, <16 x i32>* %b
   %insert = insertelement <16 x i64> undef, i64 32, i64 0
@@ -419,21 +473,25 @@ define void @smulh_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
 }
 
 define void @smulh_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
-; CHECK-LABEL: smulh_v32i32:
-; VBITS_EQ_1024: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_1024-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: smulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_1024: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,32)]]
-; VBITS_GE_2048-DAG: ld1sw { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1sw { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048: lsr [[RES]].d, [[PG]]/m, [[RES]].d, #32
-; VBITS_GE_2048: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: smulh_v32i32:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_EQ_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: smulh_v32i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <32 x i32>, <32 x i32>* %a
   %op2 = load <32 x i32>, <32 x i32>* %b
   %insert = insertelement <32 x i64> undef, i64 32, i64 0
@@ -448,13 +506,14 @@ define void @smulh_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
 }
 
 define void @smulh_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
-; CHECK-LABEL: smulh_v64i32:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,64)]]
-; VBITS_GE_2048-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048: smulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: smulh_v64i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %op2 = load <64 x i32>, <64 x i32>* %b
   %insert = insertelement <64 x i64> undef, i64 32, i64 0
@@ -471,9 +530,13 @@ define void @smulh_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <1 x i64> @smulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
 ; CHECK-LABEL: smulh_v1i64:
-; CHECK: ptrue p0.d, vl1
-; CHECK: smulh z0.d, p0/m, z0.d, z1.d
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %insert = insertelement <1 x i128> undef, i128 64, i128 0
   %splat = shufflevector <1 x i128> %insert, <1 x i128> undef, <1 x i32> zeroinitializer
   %1 = sext <1 x i64> %op1 to <1 x i128>
@@ -487,9 +550,13 @@ define <1 x i64> @smulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <2 x i64> @smulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 ; CHECK-LABEL: smulh_v2i64:
-; CHECK: ptrue p0.d, vl2
-; CHECK: smulh z0.d, p0/m, z0.d, z1.d
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %insert = insertelement <2 x i128> undef, i128 64, i128 0
   %splat = shufflevector <2 x i128> %insert, <2 x i128> undef, <2 x i32> zeroinitializer
   %1 = sext <2 x i64> %op1 to <2 x i128>
@@ -502,12 +569,13 @@ define <2 x i64> @smulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 
 define void @smulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-LABEL: smulh_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,4)]]
-; VBITS_GE_256-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256: smulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_256: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
   %insert = insertelement <4 x i128> undef, i128 64, i128 0
@@ -522,13 +590,14 @@ define void @smulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 }
 
 define void @smulh_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
-; CHECK-LABEL: smulh_v8i64:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,8)]]
-; VBITS_GE_512-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512: smulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_512: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: smulh_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %op2 = load <8 x i64>, <8 x i64>* %b
   %insert = insertelement <8 x i128> undef, i128 64, i128 0
@@ -543,13 +612,14 @@ define void @smulh_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
 }
 
 define void @smulh_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
-; CHECK-LABEL: smulh_v16i64:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,16)]]
-; VBITS_GE_1024-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024: smulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: smulh_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %op2 = load <16 x i64>, <16 x i64>* %b
   %insert = insertelement <16 x i128> undef, i128 64, i128 0
@@ -564,13 +634,14 @@ define void @smulh_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
 }
 
 define void @smulh_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
-; CHECK-LABEL: smulh_v32i64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,32)]]
-; VBITS_GE_2048-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048: smulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: smulh_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %op2 = load <32 x i64>, <32 x i64>* %b
   %insert = insertelement <32 x i128> undef, i128 64, i128 0
@@ -591,17 +662,27 @@ define void @smulh_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <8 x i8> @umulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: umulh_v8i8:
-; CHECK: umull v0.8h, v0.8b, v1.8b
-; CHECK: ushr v1.8h, v0.8h, #8
-; CHECK: umov w8, v1.h[0]
-; CHECK: fmov s0, w8
-; CHECK: umov w8, v1.h[1]
-; CHECK: mov v0.b[1], w8
-; CHECK: umov w8, v1.h[2]
-; CHECK: mov v0.b[2], w8
-; CHECK: umov w8, v1.h[3]
-; CHECK: mov v0.b[3], w8
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    ushr v1.8h, v0.8h, #8
+; CHECK-NEXT:    umov w8, v1.h[0]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    umov w8, v1.h[1]
+; CHECK-NEXT:    mov v0.b[1], w8
+; CHECK-NEXT:    umov w8, v1.h[2]
+; CHECK-NEXT:    mov v0.b[2], w8
+; CHECK-NEXT:    umov w8, v1.h[3]
+; CHECK-NEXT:    mov v0.b[3], w8
+; CHECK-NEXT:    umov w8, v1.h[4]
+; CHECK-NEXT:    mov v0.b[4], w8
+; CHECK-NEXT:    umov w8, v1.h[5]
+; CHECK-NEXT:    mov v0.b[5], w8
+; CHECK-NEXT:    umov w8, v1.h[6]
+; CHECK-NEXT:    mov v0.b[6], w8
+; CHECK-NEXT:    umov w8, v1.h[7]
+; CHECK-NEXT:    mov v0.b[7], w8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %insert = insertelement <8 x i16> undef, i16 8, i64 0
   %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
   %1 = zext <8 x i8> %op1 to <8 x i16>
@@ -615,10 +696,11 @@ define <8 x i8> @umulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <16 x i8> @umulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 ; CHECK-LABEL: umulh_v16i8:
-; CHECK: umull2 v2.8h, v0.16b, v1.16b
-; CHECK: umull v0.8h, v0.8b, v1.8b
-; CHECK: uzp2 v0.16b, v0.16b, v2.16b
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.8h, v0.16b, v1.16b
+; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT:    uzp2 v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
   %insert = insertelement <16 x i16> undef, i16 8, i64 0
   %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
   %1 = zext <16 x i8> %op1 to <16 x i16>
@@ -630,21 +712,25 @@ define <16 x i8> @umulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 }
 
 define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
-; CHECK-LABEL: umulh_v32i8:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_256-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_256: umulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_256: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,32)]]
-; VBITS_GE_512-DAG: ld1b { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1b { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBIGS_GE_512: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_512: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: umulh_v32i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.b, vl32
+; VBITS_EQ_256-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_256-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: umulh_v32i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1b { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_512-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_512-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
   %insert = insertelement <32 x i16> undef, i16 8, i64 0
@@ -659,19 +745,25 @@ define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 }
 
 define void @umulh_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
-; CHECK-LABEL: umulh_v64i8:
-; VBITS_EQ_512: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,64)]]
-; VBITS_EQ_512-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_512: umulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_512: ret
-
-; VBITS_GE_1024-DAG: ld1b { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1b { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBIGS_GE_1024: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_1024: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: umulh_v64i8:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.b, vl64
+; VBITS_EQ_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_512-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: umulh_v64i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1b { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_1024-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_1024-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <64 x i8>, <64 x i8>* %a
   %op2 = load <64 x i8>, <64 x i8>* %b
   %insert = insertelement <64 x i16> undef, i16 8, i64 0
@@ -686,21 +778,25 @@ define void @umulh_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
 }
 
 define void @umulh_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
-; CHECK-LABEL: umulh_v128i8:
-; VBITS_EQ_1024: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,128)]]
-; VBITS_EQ_1024-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: umulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_EQ_1024: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,128)]]
-; VBITS_GE_2048-DAG: ld1b { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1b { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBIGS_GE_2048: lsr [[RES]].h, [[PG]]/m, [[RES]].h, #8
-; VBITS_GE_2048: st1b { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: umulh_v128i8:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.b, vl128
+; VBITS_EQ_1024-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; VBITS_EQ_1024-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: umulh_v128i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1b { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    lsr z0.h, p0/m, z0.h, #8
+; VBITS_GE_2048-NEXT:    st1b { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <128 x i8>, <128 x i8>* %a
   %op2 = load <128 x i8>, <128 x i8>* %b
   %insert = insertelement <128 x i16> undef, i16 8, i64 0
@@ -715,13 +811,14 @@ define void @umulh_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
 }
 
 define void @umulh_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
-; CHECK-LABEL: umulh_v256i8:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
-; VBITS_GE_2048-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_2048: umulh [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
-; VBITS_GE_2048: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: umulh_v256i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
+; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <256 x i8>, <256 x i8>* %a
   %op2 = load <256 x i8>, <256 x i8>* %b
   %insert = insertelement <256 x i16> undef, i16 8, i64 0
@@ -738,15 +835,17 @@ define void @umulh_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <4 x i16> @umulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: umulh_v4i16:
-; CHECK: umull v0.4s, v0.4h, v1.4h
-; CHECK: ushr v0.4s, v0.4s, #16
-; CHECK: mov w8, v0.s[1]
-; CHECK: mov w9, v0.s[2]
-; CHECK: mov w10, v0.s[3]
-; CHECK: mov v0.h[1], w8
-; CHECK: mov v0.h[2], w9
-; CHECK: mov v0.h[3], w10
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ushr v0.4s, v0.4s, #16
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    mov w9, v0.s[2]
+; CHECK-NEXT:    mov w10, v0.s[3]
+; CHECK-NEXT:    mov v0.h[1], w8
+; CHECK-NEXT:    mov v0.h[2], w9
+; CHECK-NEXT:    mov v0.h[3], w10
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %insert = insertelement <4 x i32> undef, i32 16, i64 0
   %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
   %1 = zext <4 x i16> %op1 to <4 x i32>
@@ -760,10 +859,11 @@ define <4 x i16> @umulh_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <8 x i16> @umulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 ; CHECK-LABEL: umulh_v8i16:
-; CHECK: umull2 v2.4s, v0.8h, v1.8h
-; CHECK: umull v0.4s, v0.4h, v1.4h
-; CHECK: uzp2 v0.8h, v0.8h, v2.8h
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.4s, v0.8h, v1.8h
+; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
   %insert = insertelement <8 x i32> undef, i32 16, i64 0
   %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
   %1 = zext <8 x i16> %op1 to <8 x i32>
@@ -775,21 +875,25 @@ define <8 x i16> @umulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
 }
 
 define void @umulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
-; CHECK-LABEL: umulh_v16i16:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,16)]]
-; VBITS_EQ_256-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_256: umulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_256: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,16)]]
-; VBITS_GE_512-DAG: ld1h { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1h { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_512: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_512: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: umulh_v16i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: umulh_v16i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_512-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_512-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
   %insert = insertelement <16 x i32> undef, i32 16, i64 0
@@ -804,21 +908,25 @@ define void @umulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 }
 
 define void @umulh_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
-; CHECK-LABEL: umulh_v32i16:
-; VBITS_EQ_512: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_512-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_512: umulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_512: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_EQ_512: ret
-
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,32)]]
-; VBITS_GE_1024-DAG: ld1h { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1h { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_1024: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: umulh_v32i16:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.h, vl32
+; VBITS_EQ_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: umulh_v32i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_1024-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <32 x i16>, <32 x i16>* %a
   %op2 = load <32 x i16>, <32 x i16>* %b
   %insert = insertelement <32 x i32> undef, i32 16, i64 0
@@ -833,20 +941,25 @@ define void @umulh_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
 }
 
 define void @umulh_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
-; CHECK-LABEL: umulh_v64i16:
-; VBITS_EQ_1024: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,64)]]
-; VBITS_EQ_1024-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: umulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,64)]]
-; VBITS_GE_2048-DAG: ld1h { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1h { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048: lsr [[RES]].s, [[PG]]/m, [[RES]].s, #16
-; VBITS_GE_2048: st1h { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: umulh_v64i16:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_EQ_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; VBITS_EQ_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: umulh_v64i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    lsr z0.s, p0/m, z0.s, #16
+; VBITS_GE_2048-NEXT:    st1h { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <64 x i16>, <64 x i16>* %a
   %op2 = load <64 x i16>, <64 x i16>* %b
   %insert = insertelement <64 x i32> undef, i32 16, i64 0
@@ -861,13 +974,14 @@ define void @umulh_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
 }
 
 define void @umulh_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
-; CHECK-LABEL: umulh_v128i16:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl[[#min(VBYTES,128)]]
-; VBITS_GE_2048-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048: umulh [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: umulh_v128i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x i16>, <128 x i16>* %a
   %op2 = load <128 x i16>, <128 x i16>* %b
   %insert = insertelement <128 x i32> undef, i32 16, i64 0
@@ -884,12 +998,13 @@ define void @umulh_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <2 x i32> @umulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
 ; CHECK-LABEL: umulh_v2i32:
-; CHECK: ushll v0.2d, v0.2s, #0
-; CHECK: ushll v1.2d, v1.2s, #0
-; CHECK: ptrue p0.d, vl2
-; CHECK: mul z0.d, p0/m, z0.d, z1.d
-; CHECK: shrn v0.2s, v0.2d, #32
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    shrn v0.2s, v0.2d, #32
+; CHECK-NEXT:    ret
   %insert = insertelement <2 x i64> undef, i64 32, i64 0
   %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
   %1 = zext <2 x i32> %op1 to <2 x i64>
@@ -903,10 +1018,11 @@ define <2 x i32> @umulh_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x i32> @umulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 ; CHECK-LABEL: umulh_v4i32:
-; CHECK: umull2 v2.2d, v0.4s, v1.4s
-; CHECK: umull v0.2d, v0.2s, v1.2s
-; CHECK: uzp2 v0.4s, v0.4s, v2.4s
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
+; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT:    uzp2 v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    ret
   %insert = insertelement <4 x i64> undef, i64 32, i64 0
   %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
   %1 = zext <4 x i32> %op1 to <4 x i64>
@@ -918,21 +1034,25 @@ define <4 x i32> @umulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
 }
 
 define void @umulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
-; CHECK-LABEL: umulh_v8i32:
-; VBITS_EQ_256: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,8)]]
-; VBITS_EQ_256-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_256: umulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_256: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_EQ_256: ret
-
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,8)]]
-; VBITS_GE_512-DAG: ld1w { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1w { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_512: lsr [[RES]].d, [[PG]]/m, [[RES]].d, #32
-; VBITS_GE_512: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_EQ_256-LABEL: umulh_v8i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: umulh_v8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_512-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
   %insert = insertelement <8 x i64> undef, i64 32, i64 0
@@ -947,20 +1067,25 @@ define void @umulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 }
 
 define void @umulh_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
-; CHECK-LABEL: umulh_v16i32:
-; VBITS_EQ_512: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,16)]]
-; VBITS_EQ_512-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_512-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_512: umulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_512: st1w { [[RES]].s }, [[PG]], [x0]
-
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,16)]]
-; VBITS_GE_1024-DAG: ld1w { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1w { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024: lsr [[RES]].d, [[PG]]/m, [[RES]].d, #32
-; VBITS_GE_1024: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_EQ_512-LABEL: umulh_v16i32:
+; VBITS_EQ_512:       // %bb.0:
+; VBITS_EQ_512-NEXT:    ptrue p0.s, vl16
+; VBITS_EQ_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_512-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_512-NEXT:    ret
+;
+; VBITS_GE_1024-LABEL: umulh_v16i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_1024-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
+
   %op1 = load <16 x i32>, <16 x i32>* %a
   %op2 = load <16 x i32>, <16 x i32>* %b
   %insert = insertelement <16 x i64> undef, i64 32, i64 0
@@ -975,21 +1100,25 @@ define void @umulh_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
 }
 
 define void @umulh_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
-; CHECK-LABEL: umulh_v32i32:
-; VBITS_EQ_1024: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,32)]]
-; VBITS_EQ_1024-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_1024-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_1024: umulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_EQ_1024: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_EQ_1024: ret
-
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,32)]]
-; VBITS_GE_2048-DAG: ld1w { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1w { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048: lsr [[RES]].d, [[PG]]/m, [[RES]].d, #32
-; VBITS_GE_2048: st1w { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_EQ_1024-LABEL: umulh_v32i32:
+; VBITS_EQ_1024:       // %bb.0:
+; VBITS_EQ_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_EQ_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_EQ_1024-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; VBITS_EQ_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_EQ_1024-NEXT:    ret
+;
+; VBITS_GE_2048-LABEL: umulh_v32i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    lsr z0.d, p0/m, z0.d, #32
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
+
   %op1 = load <32 x i32>, <32 x i32>* %a
   %op2 = load <32 x i32>, <32 x i32>* %b
   %insert = insertelement <32 x i64> undef, i64 32, i64 0
@@ -1004,13 +1133,14 @@ define void @umulh_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
 }
 
 define void @umulh_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
-; CHECK-LABEL: umulh_v64i32:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl[[#min(VBYTES,64)]]
-; VBITS_GE_2048-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048: umulh [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: umulh_v64i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %op2 = load <64 x i32>, <64 x i32>* %b
   %insert = insertelement <64 x i64> undef, i64 32, i64 0
@@ -1027,9 +1157,13 @@ define void @umulh_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <1 x i64> @umulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
 ; CHECK-LABEL: umulh_v1i64:
-; CHECK: ptrue p0.d, vl1
-; CHECK: umulh z0.d, p0/m, z0.d, z1.d
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %insert = insertelement <1 x i128> undef, i128 64, i128 0
   %splat = shufflevector <1 x i128> %insert, <1 x i128> undef, <1 x i32> zeroinitializer
   %1 = zext <1 x i64> %op1 to <1 x i128>
@@ -1043,9 +1177,13 @@ define <1 x i64> @umulh_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
 ; Vector i64 multiplications are not legal for NEON so use SVE when available.
 define <2 x i64> @umulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 ; CHECK-LABEL: umulh_v2i64:
-; CHECK: ptrue p0.d, vl2
-; CHECK: umulh z0.d, p0/m, z0.d, z1.d
-; CHECK: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %insert = insertelement <2 x i128> undef, i128 64, i128 0
   %splat = shufflevector <2 x i128> %insert, <2 x i128> undef, <2 x i32> zeroinitializer
   %1 = zext <2 x i64> %op1 to <2 x i128>
@@ -1058,12 +1196,13 @@ define <2 x i64> @umulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
 
 define void @umulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-LABEL: umulh_v4i64:
-; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,4)]]
-; VBITS_GE_256-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_256-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_256: umulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_256: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_256: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
   %insert = insertelement <4 x i128> undef, i128 64, i128 0
@@ -1078,13 +1217,14 @@ define void @umulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 }
 
 define void @umulh_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
-; CHECK-LABEL: umulh_v8i64:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,8)]]
-; VBITS_GE_512-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512: umulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_512: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: umulh_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %op2 = load <8 x i64>, <8 x i64>* %b
   %insert = insertelement <8 x i128> undef, i128 64, i128 0
@@ -1099,13 +1239,14 @@ define void @umulh_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
 }
 
 define void @umulh_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
-; CHECK-LABEL: umulh_v16i64:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,16)]]
-; VBITS_GE_1024-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024: umulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: umulh_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %op2 = load <16 x i64>, <16 x i64>* %b
   %insert = insertelement <16 x i128> undef, i128 64, i128 0
@@ -1120,13 +1261,14 @@ define void @umulh_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
 }
 
 define void @umulh_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
-; CHECK-LABEL: umulh_v32i64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl[[#min(VBYTES,32)]]
-; VBITS_GE_2048-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048: umulh [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: umulh_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %op2 = load <32 x i64>, <32 x i64>* %b
   %insert = insertelement <32 x i128> undef, i128 64, i128 0

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
index b13fa82ca73d1..650cddd37df36 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -27,8 +27,9 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @ucvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v4i16_v4f16:
-; CHECK: ucvtf v0.4h, v0.4h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.4h, v0.4h
+; CHECK-NEXT:    ret
   %res = uitofp <4 x i16> %op1 to <4 x half>
   ret <4 x half> %res
 }
@@ -36,10 +37,11 @@ define <4 x half> @ucvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define void @ucvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i16_v8f16:
-; CHECK: ldr q0, [x0]
-; CHECK-NEXT: ucvtf v0.8h, v0.8h
-; CHECK-NEXT: str q0, [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ucvtf v0.8h, v0.8h
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = uitofp <8 x i16> %op1 to <8 x half>
   store <8 x half> %res, <8 x half>* %b
@@ -48,11 +50,12 @@ define void @ucvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 
 define void @ucvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i16_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = uitofp <16 x i16> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -60,23 +63,25 @@ define void @ucvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 }
 
 define void @ucvtf_v32i16_v32f16(<32 x i16>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i16_v32f16:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #16
-; VBITS_EQ_256-DAG: ld1h { [[LO:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1h { [[HI:z[0-9]+]].h }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].h, [[PG]]/m, [[LO]].h
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].h, [[PG]]/m, [[HI]].h
-; VBITS_EQ_256-DAG: st1h { [[RES_LO]].h }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1h { [[RES_HI]].h }, [[PG]], [x1, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v32i16_v32f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ucvtf z0.h, p0/m, z0.h
+; VBITS_EQ_256-NEXT:    ucvtf z1.h, p0/m, z1.h
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v32i16_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ucvtf z0.h, p0/m, z0.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = uitofp <32 x i16> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -84,12 +89,13 @@ define void @ucvtf_v32i16_v32f16(<32 x i16>* %a, <32 x half>* %b) #0 {
 }
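
The VBITS_EQ_256 lines above show the type legalisation in action: a <32 x i16> operand is 512 bits wide, so a strictly 256-bit machine processes it as two 16-element halves, with the high half addressed at element offset 16 (the x8, lsl #1 operand). A minimal sketch of that split in plain IR, with the function name and the explicit pointer arithmetic invented here for illustration only:

define void @ucvtf_v32i16_v32f16_split(<32 x i16>* %a, <32 x half>* %b) {
  ; Treat each 512-bit vector as two adjacent 256-bit halves.
  %a.lo = bitcast <32 x i16>* %a to <16 x i16>*
  %a.hi = getelementptr <16 x i16>, <16 x i16>* %a.lo, i64 1
  %lo = load <16 x i16>, <16 x i16>* %a.lo
  %hi = load <16 x i16>, <16 x i16>* %a.hi
  ; Each half is converted independently, matching the checks above.
  %lo.f = uitofp <16 x i16> %lo to <16 x half>
  %hi.f = uitofp <16 x i16> %hi to <16 x half>
  %b.lo = bitcast <32 x half>* %b to <16 x half>*
  %b.hi = getelementptr <16 x half>, <16 x half>* %b.lo, i64 1
  store <16 x half> %lo.f, <16 x half>* %b.lo
  store <16 x half> %hi.f, <16 x half>* %b.hi
  ret void
}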
 
 define void @ucvtf_v64i16_v64f16(<64 x i16>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v64i16_v64f16:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v64i16_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ucvtf z0.h, p0/m, z0.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <64 x i16>, <64 x i16>* %a
   %res = uitofp <64 x i16> %op1 to <64 x half>
   store <64 x half> %res, <64 x half>* %b
@@ -97,12 +103,13 @@ define void @ucvtf_v64i16_v64f16(<64 x i16>* %a, <64 x half>* %b) #0 {
 }
 
 define void @ucvtf_v128i16_v128f16(<128 x i16>* %a, <128 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v128i16_v128f16:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v128i16_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ucvtf z0.h, p0/m, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x i16>, <128 x i16>* %a
   %res = uitofp <128 x i16> %op1 to <128 x half>
   store <128 x half> %res, <128 x half>* %b
@@ -116,10 +123,11 @@ define void @ucvtf_v128i16_v128f16(<128 x i16>* %a, <128 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @ucvtf_v2i16_v2f32(<2 x i16> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i16_v2f32:
-; CHECK: movi d1, #0x00ffff0000ffff
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: ucvtf v0.2s, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ucvtf v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i16> %op1 to <2 x float>
   ret <2 x float> %res
 }
@@ -127,20 +135,23 @@ define <2 x float> @ucvtf_v2i16_v2f32(<2 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @ucvtf_v4i16_v4f32(<4 x i16> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v4i16_v4f32:
-; CHECK: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ret
   %res = uitofp <4 x i16> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @ucvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i16_v8f32:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[UPK]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = uitofp <8 x i16> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -148,29 +159,41 @@ define void @ucvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 }
 
 define void @ucvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i16_v16f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: ld1h { [[VEC:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1h { [[VEC:z[0-9]+]].h }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].s, [[PG2]]/m, [[UPK_LO]].s
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].s, [[PG2]]/m, [[UPK_HI]].s
-; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG2]], [x1, x[[NUMELTS]], lsl #2]
+; VBITS_EQ_256-LABEL: ucvtf_v16i16_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z1.h
+; VBITS_EQ_256-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    ucvtf z1.s, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v16i16_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = uitofp <16 x i16> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -178,14 +201,15 @@ define void @ucvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 }
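
The VBITS_EQ_256 body above is the "fixed type extract_subvector codegen is poor currently" case the old comment described: the vector is spilled to a 32-byte-aligned stack slot and reloaded as two q registers before being unpacked. What the legaliser is conceptually doing is just splitting the conversion in two; a minimal IR sketch of that intent (names invented, and the shuffles stand in for extract_subvector):

define void @ucvtf_v16i16_v16f32_split(<16 x i16>* %a, <16 x float>* %b) {
  %v = load <16 x i16>, <16 x i16>* %a
  ; Extract the low and high <8 x i16> subvectors.
  %lo = shufflevector <16 x i16> %v, <16 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %hi = shufflevector <16 x i16> %v, <16 x i16> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %lo.f = uitofp <8 x i16> %lo to <8 x float>
  %hi.f = uitofp <8 x i16> %hi to <8 x float>
  %b.lo = bitcast <16 x float>* %b to <8 x float>*
  %b.hi = getelementptr <8 x float>, <8 x float>* %b.lo, i64 1
  store <8 x float> %lo.f, <8 x float>* %b.lo
  store <8 x float> %hi.f, <8 x float>* %b.hi
  ret void
}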
 
 define void @ucvtf_v32i16_v32f32(<32 x i16>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i16_v32f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v32i16_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = uitofp <32 x i16> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -193,14 +217,15 @@ define void @ucvtf_v32i16_v32f32(<32 x i16>* %a, <32 x float>* %b) #0 {
 }
 
 define void @ucvtf_v64i16_v64f32(<64 x i16>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v64i16_v64f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v64i16_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i16>, <64 x i16>* %a
   %res = uitofp <64 x i16> %op1 to <64 x float>
   store <64 x float> %res, <64 x float>* %b
@@ -214,11 +239,14 @@ define void @ucvtf_v64i16_v64f32(<64 x i16>* %a, <64 x float>* %b) #0 {
 ; v1i16 is preferred to be widened to v4i16, which pushes the output into SVE types, so use SVE
 define <1 x double> @ucvtf_v1i16_v1f64(<1 x i16> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v1i16_v1f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z0.h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: ucvtf z0.d, [[PG]]/m, [[UPK2]].d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %res = uitofp <1 x i16> %op1 to <1 x double>
   ret <1 x double> %res
 }
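
For reference, the widening the comment above describes amounts to placing the single element in lane 0 of a <4 x i16> vector, converting all four lanes, and keeping only lane 0. A hypothetical sketch of that shape, not something the test checks directly:

define <1 x double> @ucvtf_v1i16_widen_sketch(<1 x i16> %op1) {
  ; Widen v1i16 to v4i16; the extra lanes are undefined.
  %wide = shufflevector <1 x i16> %op1, <1 x i16> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  %cvt = uitofp <4 x i16> %wide to <4 x double>
  ; Only lane 0 carries the result.
  %res = shufflevector <4 x double> %cvt, <4 x double> undef, <1 x i32> <i32 0>
  ret <1 x double> %res
}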
@@ -226,24 +254,26 @@ define <1 x double> @ucvtf_v1i16_v1f64(<1 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @ucvtf_v2i16_v2f64(<2 x i16> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i16_v2f64:
-; CHECK: movi d1, #0x00ffff0000ffff
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i16> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @ucvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i16_v4f64:
-; CHECK: ldr d[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i16>, <4 x i16>* %a
   %res = uitofp <4 x i16> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -251,29 +281,31 @@ define void @ucvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 }
 
 define void @ucvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v8i16_v8f64:
-; VBITS_GE_512: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ext v[[HI:[0-9]+]].16b, v[[LO:[0-9]+]].16b, v[[OP]].16b, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK2_LO]].d
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK2_HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG2]], [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v8i16_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr q0, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    ucvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v8i16_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = uitofp <8 x i16> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -281,15 +313,16 @@ define void @ucvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
 }
 
 define void @ucvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i16_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v16i16_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = uitofp <16 x i16> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -297,15 +330,16 @@ define void @ucvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 }
 
 define void @ucvtf_v32i16_v32f64(<32 x i16>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i16_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK]].s
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v32i16_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = uitofp <32 x i16> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -319,9 +353,11 @@ define void @ucvtf_v32i16_v32f64(<32 x i16>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i32_v2f16:
-; CHECK: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i32> %op1 to <2 x half>
   ret <2 x half> %res
 }
@@ -329,52 +365,57 @@ define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v4i32_v4f16:
-; CHECK: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = uitofp <4 x i32> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].s
-; CHECK-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; CHECK-NEXT: uzp1 z0.h, [[CVT]].h, [[CVT]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = uitofp <8 x i32> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i32_v16f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_512-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].h, vl8
-; VBITS_EQ_256-DAG: ucvtf [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].s
-; VBITS_EQ_256-DAG: ucvtf [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].h, [[CVT_LO]].h, [[CVT_LO]].h
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].h, [[CVT_HI]].h, [[CVT_HI]].h
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].h, [[PG3]], [[RES_LO]].h, [[RES_HI]].h
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].h, vl16
-; VBITS_EQ_256-NEXT: st1h { [[RES]].h }, [[PG4]], [x1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v16i32_v16f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.s
+; VBITS_EQ_256-NEXT:    ptrue p1.h, vl8
+; VBITS_EQ_256-NEXT:    ucvtf z0.h, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    ucvtf z1.h, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    splice z1.h, p1, z1.h, z0.h
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v16i32_v16f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s
+; VBITS_GE_512-NEXT:    ucvtf z0.h, p0/m, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = uitofp <16 x i32> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -382,15 +423,16 @@ define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
 }
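
In the VBITS_EQ_256 path above, splice z1.h, p1, z1.h, z0.h with a vl8 predicate concatenates the first eight halves of each narrowed result into one 16-element vector. Its data movement is equivalent to this hypothetical IR shuffle (operand names invented):

define <16 x half> @splice_vl8_sketch(<8 x half> %lo, <8 x half> %hi) {
  ; All eight elements of %lo followed by all eight of %hi.
  %res = shufflevector <8 x half> %lo, <8 x half> %hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x half> %res
}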
 
 define void @ucvtf_v32i32_v32f16(<32 x i32>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i32_v32f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_1024-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v32i32_v32f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s
+; VBITS_GE_1024-NEXT:    ucvtf z0.h, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = uitofp <32 x i32> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -398,15 +440,16 @@ define void @ucvtf_v32i32_v32f16(<32 x i32>* %a, <32 x half>* %b) #0 {
 }
 
 define void @ucvtf_v64i32_v64f16(<64 x i32>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v64i32_v64f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].h, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v64i32_v64f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s
+; VBITS_GE_2048-NEXT:    ucvtf z0.h, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %res = uitofp <64 x i32> %op1 to <64 x half>
   store <64 x half> %res, <64 x half>* %b
@@ -420,8 +463,9 @@ define void @ucvtf_v64i32_v64f16(<64 x i32>* %a, <64 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @ucvtf_v2i32_v2f32(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i32_v2f32:
-; CHECK: ucvtf v0.2s, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i32> %op1 to <2 x float>
   ret <2 x float> %res
 }
@@ -429,19 +473,21 @@ define <2 x float> @ucvtf_v2i32_v2f32(<2 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @ucvtf_v4i32_v4f32(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v4i32_v4f32:
-; CHECK: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ret
   %res = uitofp <4 x i32> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @ucvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = uitofp <8 x i32> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -449,23 +495,25 @@ define void @ucvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 }
 
 define void @ucvtf_v16i32_v16f32(<16 x i32>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i32_v16f32:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].s, [[PG]]/m, [[LO]].s
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].s, [[PG]]/m, [[HI]].s
-; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG]], [x1, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v16i32_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    ucvtf z1.s, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v16i32_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = uitofp <16 x i32> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -473,12 +521,13 @@ define void @ucvtf_v16i32_v16f32(<16 x i32>* %a, <16 x float>* %b) #0 {
 }
 
 define void @ucvtf_v32i32_v32f32(<32 x i32>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i32_v32f32:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v32i32_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = uitofp <32 x i32> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -486,12 +535,13 @@ define void @ucvtf_v32i32_v32f32(<32 x i32>* %a, <32 x float>* %b) #0 {
 }
 
 define void @ucvtf_v64i32_v64f32(<64 x i32>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v64i32_v64f32:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v64i32_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %res = uitofp <64 x i32> %op1 to <64 x float>
   store <64 x float> %res, <64 x float>* %b
@@ -505,9 +555,11 @@ define void @ucvtf_v64i32_v64f32(<64 x i32>* %a, <64 x float>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @ucvtf_v1i32_v1f64(<1 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v1i32_v1f64:
-; CHECK: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %res = uitofp <1 x i32> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -515,21 +567,23 @@ define <1 x double> @ucvtf_v1i32_v1f64(<1 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @ucvtf_v2i32_v2f64(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i32_v2f64:
-; CHECK: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i32> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @ucvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i32_v4f64:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[OP]].s
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i32>, <4 x i32>* %a
   %res = uitofp <4 x i32> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -537,29 +591,41 @@ define void @ucvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 }
 
 define void @ucvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v8i32_v8f64:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG1]]/m, [[UPK]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: ld1w { [[VEC:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1w { [[VEC:z[0-9]+]].s }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS]], #4
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, z[[LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, z[[HI]].s
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK_LO]].d
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK_HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG2]], [x1, x[[NUMELTS]], lsl #3]
+; VBITS_EQ_256-LABEL: ucvtf_v8i32_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    ucvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v8i32_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = uitofp <8 x i32> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -567,14 +633,15 @@ define void @ucvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 }
 
 define void @ucvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i32_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v16i32_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = uitofp <16 x i32> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -582,14 +649,15 @@ define void @ucvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
 }
 
 define void @ucvtf_v32i32_v32f64(<32 x i32>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i32_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v32i32_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = uitofp <32 x i32> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -604,9 +672,11 @@ define void @ucvtf_v32i32_v32f64(<32 x i32>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x half> @ucvtf_v1i64_v1f16(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v1i64_v1f16:
-; CHECK: fmov x8, d0
-; CHECK-NEXT: ucvtf h0, x8
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ucvtf h0, x8
+; CHECK-NEXT:    ret
   %res = uitofp <1 x i64> %op1 to <1 x half>
   ret <1 x half> %res
 }
@@ -614,69 +684,79 @@ define <1 x half> @ucvtf_v1i64_v1f16(<1 x i64> %op1) #0 {
 ; v2f16 is not legal for NEON, so use SVE
 define <2 x half> @ucvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i64_v2f16:
-; CHECK: ptrue [[PG:p[0-9]+]].d
-; CHECK-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG]]/m, z0.d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i64> %op1 to <2 x half>
   ret <2 x half> %res
 }
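
The ucvtf z0.h, p0/m, z0.d above leaves one half-precision result in the bottom of each 64-bit lane; the two uzp1 instructions then compact those lanes. uzp1 takes the even-indexed elements of its first source followed by the even-indexed elements of its second, so with both operands the same register it halves the spacing of the useful lanes each time. A hypothetical one-step IR equivalent (operand names invented):

define <8 x half> @uzp1_h_sketch(<8 x half> %x, <8 x half> %y) {
  ; Even-indexed elements of %x, then even-indexed elements of %y.
  %res = shufflevector <8 x half> %x, <8 x half> %y, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  ret <8 x half> %res
}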
 
 define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = uitofp <4 x i64> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
-; CHECK-LABEL: ucvtf_v8i64_v8f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: ucvtf [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: ucvtf [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[UZP_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[UZP_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: uzp1 z0.h, [[UZP_LO]].h, [[UZP_LO]].h
-; VBITS_EQ_256-DAG: uzp1 z[[RES_HI:[0-9]+]].h, [[UZP_HI]].h, [[UZP_HI]].h
-; VBITS_EQ_256-NEXT: mov v0.d[1], v[[RES_HI]].d[0]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v8i64_v8f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    ucvtf z0.h, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    ucvtf z1.h, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z2.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    mov v0.d[1], v2.d[0]
+; VBITS_EQ_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v8i64_v8f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    ucvtf z0.h, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = uitofp <8 x i64> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @ucvtf_v16i64_v16f16(<16 x i64>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i64_v16f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v16i64_v16f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    ucvtf z0.h, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = uitofp <16 x i64> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -684,16 +764,17 @@ define void @ucvtf_v16i64_v16f16(<16 x i64>* %a, <16 x half>* %b) #0 {
 }
 
 define void @ucvtf_v32i64_v32f16(<32 x i64>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i64_v32f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: ucvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v32i64_v32f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    ucvtf z0.h, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = uitofp <32 x i64> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -707,9 +788,11 @@ define void @ucvtf_v32i64_v32f16(<32 x i64>* %a, <32 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x float> @ucvtf_v1i64_v1f32(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v1i64_v1f32:
-; CHECK: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = uitofp <1 x i64> %op1 to <1 x float>
   ret <1 x float> %res
 }
@@ -717,52 +800,57 @@ define <1 x float> @ucvtf_v1i64_v1f32(<1 x i64> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i64_v2f32:
-; CHECK: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i64> %op1 to <2 x float>
   ret <2 x float> %res
 }
 
 define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f32:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: ucvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 z0.s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = uitofp <4 x i64> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v8i64_v8f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: ucvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].s, vl4
-; VBITS_EQ_256-DAG: ucvtf [[CVT_LO:z[0-9]+]].s, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: ucvtf [[CVT_HI:z[0-9]+]].s, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].s, [[PG3]], [[RES_LO]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].s, vl8
-; VBITS_EQ_256-NEXT: st1w { [[RES]].s }, [[PG4]], [x1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v8i64_v8f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    ptrue p1.s, vl4
+; VBITS_EQ_256-NEXT:    ucvtf z0.s, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    ucvtf z1.s, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    splice z1.s, p1, z1.s, z0.s
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v8i64_v8f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    ucvtf z0.s, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = uitofp <8 x i64> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -770,15 +858,16 @@ define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
 }
 
 define void @ucvtf_v16i64_v16f32(<16 x i64>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i64_v16f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: ucvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v16i64_v16f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    ucvtf z0.s, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = uitofp <16 x i64> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -786,15 +875,16 @@ define void @ucvtf_v16i64_v16f32(<16 x i64>* %a, <16 x float>* %b) #0 {
 }
 
 define void @ucvtf_v32i64_v32f32(<32 x i64>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i64_v32f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: ucvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v32i64_v32f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    ucvtf z0.s, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = uitofp <32 x i64> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -808,9 +898,11 @@ define void @ucvtf_v32i64_v32f32(<32 x i64>* %a, <32 x float>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @ucvtf_v1i64_v1f64(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v1i64_v1f64:
-; CHECK: fmov x8, d0
-; CHECK-NEXT: ucvtf d0, x8
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ucvtf d0, x8
+; CHECK-NEXT:    ret
   %res = uitofp <1 x i64> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -818,19 +910,21 @@ define <1 x double> @ucvtf_v1i64_v1f64(<1 x i64> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @ucvtf_v2i64_v2f64(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i64_v2f64:
-; CHECK: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = uitofp <2 x i64> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @ucvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = uitofp <4 x i64> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -838,23 +932,25 @@ define void @ucvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 }
 
 define void @ucvtf_v8i64_v8f64(<8 x i64>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v8i64_v8f64:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ucvtf [[RES_LO:z[0-9]+]].d, [[PG]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: ucvtf [[RES_HI:z[0-9]+]].d, [[PG]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG]], [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: ucvtf_v8i64_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    ucvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: ucvtf_v8i64_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = uitofp <8 x i64> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -862,12 +958,13 @@ define void @ucvtf_v8i64_v8f64(<8 x i64>* %a, <8 x double>* %b) #0 {
 }
 
 define void @ucvtf_v16i64_v16f64(<16 x i64>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v16i64_v16f64:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: ucvtf_v16i64_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = uitofp <16 x i64> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -875,12 +972,13 @@ define void @ucvtf_v16i64_v16f64(<16 x i64>* %a, <16 x double>* %b) #0 {
 }
 
 define void @ucvtf_v32i64_v32f64(<32 x i64>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: ucvtf_v32i64_v32f64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ucvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: ucvtf_v32i64_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = uitofp <32 x i64> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -894,8 +992,9 @@ define void @ucvtf_v32i64_v32f64(<32 x i64>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <4 x half> @scvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
 ; CHECK-LABEL: scvtf_v4i16_v4f16:
-; CHECK: scvtf v0.4h, v0.4h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.4h, v0.4h
+; CHECK-NEXT:    ret
   %res = sitofp <4 x i16> %op1 to <4 x half>
   ret <4 x half> %res
 }
@@ -903,10 +1002,11 @@ define <4 x half> @scvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define void @scvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 ; CHECK-LABEL: scvtf_v8i16_v8f16:
-; CHECK: ldr q0, [x0]
-; CHECK-NEXT: scvtf v0.8h, v0.8h
-; CHECK-NEXT: str q0, [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    scvtf v0.8h, v0.8h
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = sitofp <8 x i16> %op1 to <8 x half>
   store <8 x half> %res, <8 x half>* %b
@@ -915,11 +1015,12 @@ define void @scvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 
 define void @scvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 ; CHECK-LABEL: scvtf_v16i16_v16f16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    scvtf z0.h, p0/m, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = sitofp <16 x i16> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -927,23 +1028,25 @@ define void @scvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 }
 
 define void @scvtf_v32i16_v32f16(<32 x i16>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i16_v32f16:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #16
-; VBITS_EQ_256-DAG: ld1h { [[LO:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1h { [[HI:z[0-9]+]].h }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].h, [[PG]]/m, [[LO]].h
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].h, [[PG]]/m, [[HI]].h
-; VBITS_EQ_256-DAG: st1h { [[RES_LO]].h }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1h { [[RES_HI]].h }, [[PG]], [x1, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v32i16_v32f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    scvtf z0.h, p0/m, z0.h
+; VBITS_EQ_256-NEXT:    scvtf z1.h, p0/m, z1.h
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v32i16_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    scvtf z0.h, p0/m, z0.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = sitofp <32 x i16> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -951,12 +1054,13 @@ define void @scvtf_v32i16_v32f16(<32 x i16>* %a, <32 x half>* %b) #0 {
 }
 
 define void @scvtf_v64i16_v64f16(<64 x i16>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v64i16_v64f16:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v64i16_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    scvtf z0.h, p0/m, z0.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <64 x i16>, <64 x i16>* %a
   %res = sitofp <64 x i16> %op1 to <64 x half>
   store <64 x half> %res, <64 x half>* %b
@@ -964,12 +1068,13 @@ define void @scvtf_v64i16_v64f16(<64 x i16>* %a, <64 x half>* %b) #0 {
 }
 
 define void @scvtf_v128i16_v128f16(<128 x i16>* %a, <128 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v128i16_v128f16:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v128i16_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    scvtf z0.h, p0/m, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x i16>, <128 x i16>* %a
   %res = sitofp <128 x i16> %op1 to <128 x half>
   store <128 x half> %res, <128 x half>* %b
@@ -983,10 +1088,11 @@ define void @scvtf_v128i16_v128f16(<128 x i16>* %a, <128 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @scvtf_v2i16_v2f32(<2 x i16> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i16_v2f32:
-; CHECK: shl v0.2s, v0.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
-; CHECK-NEXT: scvtf v0.2s, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    scvtf v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i16> %op1 to <2 x float>
   ret <2 x float> %res
 }
@@ -994,20 +1100,23 @@ define <2 x float> @scvtf_v2i16_v2f32(<2 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @scvtf_v4i16_v4f32(<4 x i16> %op1) #0 {
 ; CHECK-LABEL: scvtf_v4i16_v4f32:
-; CHECK: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ret
   %res = sitofp <4 x i16> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @scvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: scvtf_v8i16_v8f32:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: sunpklo [[UPK:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[UPK]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    scvtf z0.s, p0/m, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = sitofp <8 x i16> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -1015,29 +1124,41 @@ define void @scvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 }
 
 define void @scvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i16_v16f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: sunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: ld1h { [[VEC:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1h { [[VEC:z[0-9]+]].h }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: sunpklo [[UPK_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: sunpklo [[UPK_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].s, [[PG2]]/m, [[UPK_LO]].s
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].s, [[PG2]]/m, [[UPK_HI]].s
-; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG2]], [x1, x[[NUMELTS]], lsl #2]
+; VBITS_EQ_256-LABEL: scvtf_v16i16_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    sunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    sunpklo z1.s, z1.h
+; VBITS_EQ_256-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    scvtf z1.s, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v16i16_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = sitofp <16 x i16> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -1045,14 +1166,15 @@ define void @scvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 }
 
 define void @scvtf_v32i16_v32f32(<32 x i16>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i16_v32f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: sunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v32i16_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = sitofp <32 x i16> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -1060,14 +1182,15 @@ define void @scvtf_v32i16_v32f32(<32 x i16>* %a, <32 x float>* %b) #0 {
 }
 
 define void @scvtf_v64i16_v64f32(<64 x i16>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v64i16_v64f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: sunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v64i16_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i16>, <64 x i16>* %a
   %res = sitofp <64 x i16> %op1 to <64 x float>
   store <64 x float> %res, <64 x float>* %b
@@ -1081,11 +1204,14 @@ define void @scvtf_v64i16_v64f32(<64 x i16>* %a, <64 x float>* %b) #0 {
 ; v1i16 is preferred to be widened to v4i16, which pushes the output into SVE types, so use SVE
 define <1 x double> @scvtf_v1i16_v1f64(<1 x i16> %op1) #0 {
 ; CHECK-LABEL: scvtf_v1i16_v1f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: sunpklo [[UPK1:z[0-9]+]].s, z0.h
-; CHECK-NEXT: sunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: scvtf z0.d, [[PG]]/m, [[UPK2]].d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %res = sitofp <1 x i16> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -1093,24 +1219,26 @@ define <1 x double> @scvtf_v1i16_v1f64(<1 x i16> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @scvtf_v2i16_v2f64(<2 x i16> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i16_v2f64:
-; CHECK: shl v0.2s, v0.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
-; CHECK-NEXT: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i16> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @scvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: scvtf_v4i16_v4f64:
-; CHECK: ldr d[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: sunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: sunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i16>, <4 x i16>* %a
   %res = sitofp <4 x i16> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -1118,29 +1246,31 @@ define void @scvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 }
 
 define void @scvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v8i16_v8f64:
-; VBITS_GE_512: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: sunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; VBITS_GE_512-NEXT: sunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ext v[[HI:[0-9]+]].16b, v[[LO:[0-9]+]].16b, v[[OP]].16b, #8
-; VBITS_EQ_256-DAG: sunpklo [[UPK1_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: sunpklo [[UPK1_HI:z[0-9]+]].s, z[[HI]].h
-; VBITS_EQ_256-DAG: sunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: sunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK2_LO]].d
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK2_HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG2]], [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v8i16_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr q0, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    sunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; VBITS_EQ_256-NEXT:    sunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    sunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    sunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    scvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v8i16_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i16>, <8 x i16>* %a
   %res = sitofp <8 x i16> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -1148,15 +1278,16 @@ define void @scvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
 }
 
 define void @scvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i16_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: sunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: sunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v16i16_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %res = sitofp <16 x i16> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -1164,15 +1295,16 @@ define void @scvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 }
 
 define void @scvtf_v32i16_v32f64(<32 x i16>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i16_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: sunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: sunpklo [[UPK2:z[0-9]+]].d, [[UPK]].s
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v32i16_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %res = sitofp <32 x i16> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -1186,9 +1318,11 @@ define void @scvtf_v32i16_v32f64(<32 x i16>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i32_v2f16:
-; CHECK: scvtf v0.4s, v0.4s
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i32> %op1 to <2 x half>
   ret <2 x half> %res
 }
@@ -1196,52 +1330,57 @@ define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v4i32_v4f16:
-; CHECK: scvtf v0.4s, v0.4s
-; CHECK-NEXT: fcvtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
   %res = sitofp <4 x i32> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @scvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].s
-; CHECK-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; CHECK-NEXT: uzp1 z0.h, [[CVT]].h, [[CVT]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    scvtf z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = sitofp <8 x i32> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @scvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i32_v16f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_512-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].h, vl8
-; VBITS_EQ_256-DAG: scvtf [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].s
-; VBITS_EQ_256-DAG: scvtf [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].h, [[CVT_LO]].h, [[CVT_LO]].h
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].h, [[CVT_HI]].h, [[CVT_HI]].h
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].h, [[PG3]], [[RES_LO]].h, [[RES_HI]].h
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].h, vl16
-; VBITS_EQ_256-NEXT: st1h { [[RES]].h }, [[PG4]], [x1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v16i32_v16f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.s
+; VBITS_EQ_256-NEXT:    ptrue p1.h, vl8
+; VBITS_EQ_256-NEXT:    scvtf z0.h, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    scvtf z1.h, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    splice z1.h, p1, z1.h, z0.h
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v16i32_v16f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s
+; VBITS_GE_512-NEXT:    scvtf z0.h, p0/m, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = sitofp <16 x i32> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -1249,15 +1388,16 @@ define void @scvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
 }
 
 define void @scvtf_v32i32_v32f16(<32 x i32>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i32_v32f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_1024-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v32i32_v32f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s
+; VBITS_GE_1024-NEXT:    scvtf z0.h, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = sitofp <32 x i32> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -1265,15 +1405,16 @@ define void @scvtf_v32i32_v32f16(<32 x i32>* %a, <32 x half>* %b) #0 {
 }
 
 define void @scvtf_v64i32_v64f16(<64 x i32>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v64i32_v64f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].h, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v64i32_v64f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s
+; VBITS_GE_2048-NEXT:    scvtf z0.h, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %res = sitofp <64 x i32> %op1 to <64 x half>
   store <64 x half> %res, <64 x half>* %b
@@ -1287,8 +1428,9 @@ define void @scvtf_v64i32_v64f16(<64 x i32>* %a, <64 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <2 x float> @scvtf_v2i32_v2f32(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i32_v2f32:
-; CHECK: scvtf v0.2s, v0.2s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i32> %op1 to <2 x float>
   ret <2 x float> %res
 }
@@ -1296,19 +1438,21 @@ define <2 x float> @scvtf_v2i32_v2f32(<2 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <4 x float> @scvtf_v4i32_v4f32(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v4i32_v4f32:
-; CHECK: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ret
   %res = sitofp <4 x i32> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @scvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    scvtf z0.s, p0/m, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = sitofp <8 x i32> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -1316,23 +1460,25 @@ define void @scvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 }
 
 define void @scvtf_v16i32_v16f32(<16 x i32>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i32_v16f32:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].s, [[PG]]/m, [[LO]].s
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].s, [[PG]]/m, [[HI]].s
-; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG]], [x1, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v16i32_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    scvtf z1.s, p0/m, z1.s
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v16i32_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = sitofp <16 x i32> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -1340,12 +1486,13 @@ define void @scvtf_v16i32_v16f32(<16 x i32>* %a, <16 x float>* %b) #0 {
 }
 
 define void @scvtf_v32i32_v32f32(<32 x i32>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i32_v32f32:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v32i32_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = sitofp <32 x i32> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -1353,12 +1500,13 @@ define void @scvtf_v32i32_v32f32(<32 x i32>* %a, <32 x float>* %b) #0 {
 }
 
 define void @scvtf_v64i32_v64f32(<64 x i32>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v64i32_v64f32:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v64i32_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %res = sitofp <64 x i32> %op1 to <64 x float>
   store <64 x float> %res, <64 x float>* %b
@@ -1372,9 +1520,11 @@ define void @scvtf_v64i32_v64f32(<64 x i32>* %a, <64 x float>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @scvtf_v1i32_v1f64(<1 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v1i32_v1f64:
-; CHECK: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
   %res = sitofp <1 x i32> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -1382,21 +1532,23 @@ define <1 x double> @scvtf_v1i32_v1f64(<1 x i32> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @scvtf_v2i32_v2f64(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i32_v2f64:
-; CHECK: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i32> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @scvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: scvtf_v4i32_v4f64:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: sunpklo [[UPK:z[0-9]+]].d, z[[OP]].s
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i32>, <4 x i32>* %a
   %res = sitofp <4 x i32> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -1404,29 +1556,41 @@ define void @scvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 }
 
 define void @scvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v8i32_v8f64:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: sunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG1]]/m, [[UPK]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: ld1w { [[VEC:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1w { [[VEC:z[0-9]+]].s }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: sunpklo [[UPK_LO:z[0-9]+]].d, z[[LO]].s
-; VBITS_EQ_256-DAG: sunpklo [[UPK_HI:z[0-9]+]].d, z[[HI]].s
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK_LO]].d
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK_HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG2]], [x1, x[[NUMELTS]], lsl #3]
+; VBITS_EQ_256-LABEL: scvtf_v8i32_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    sunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    sunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    scvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v8i32_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = sitofp <8 x i32> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -1434,14 +1598,15 @@ define void @scvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 }
 
 define void @scvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i32_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: sunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v16i32_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = sitofp <16 x i32> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -1449,14 +1614,15 @@ define void @scvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
 }
 
 define void @scvtf_v32i32_v32f64(<32 x i32>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i32_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: sunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v32i32_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %res = sitofp <32 x i32> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b
@@ -1471,9 +1637,11 @@ define void @scvtf_v32i32_v32f64(<32 x i32>* %a, <32 x double>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x half> @scvtf_v1i64_v1f16(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v1i64_v1f16:
-; CHECK: fmov x8, d0
-; CHECK-NEXT: scvtf h0, x8
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf h0, x8
+; CHECK-NEXT:    ret
   %res = sitofp <1 x i64> %op1 to <1 x half>
   ret <1 x half> %res
 }
@@ -1481,69 +1649,79 @@ define <1 x half> @scvtf_v1i64_v1f16(<1 x i64> %op1) #0 {
 ; v2f16 is not legal for NEON, so use SVE
 define <2 x half> @scvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i64_v2f16:
-; CHECK: ptrue [[PG:p[0-9]+]].d
-; CHECK-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG]]/m, z0.d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    scvtf z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i64> %op1 to <2 x half>
   ret <2 x half> %res
 }
 
 define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f16:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    scvtf z0.h, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = sitofp <4 x i64> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
 define <8 x half> @scvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
-; CHECK-LABEL: scvtf_v8i64_v8f16:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: uzp1 z0.h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: scvtf [[CVT_LO:z[0-9]+]].h, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: scvtf [[CVT_HI:z[0-9]+]].h, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[UZP_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[UZP_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: uzp1 z[[RES_LO:[0-9]+]].h, [[UZP_LO]].h, [[UZP_LO]].h
-; VBITS_EQ_256-DAG: uzp1 z[[RES_HI:[0-9]+]].h, [[UZP_HI]].h, [[UZP_HI]].h
-; VBITS_EQ_256-NEXT: mov v[[RES_LO]].d[1], v[[RES_HI]].d[0]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v8i64_v8f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    scvtf z0.h, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    scvtf z1.h, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z2.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    mov v0.d[1], v2.d[0]
+; VBITS_EQ_256-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v8i64_v8f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    scvtf z0.h, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = sitofp <8 x i64> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
 define void @scvtf_v16i64_v16f16(<16 x i64>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i64_v16f16:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v16i64_v16f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    scvtf z0.h, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = sitofp <16 x i64> %op1 to <16 x half>
   store <16 x half> %res, <16 x half>* %b
@@ -1551,16 +1729,17 @@ define void @scvtf_v16i64_v16f16(<16 x i64>* %a, <16 x half>* %b) #0 {
 }
 
 define void @scvtf_v32i64_v32f16(<32 x i64>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i64_v32f16:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: scvtf [[CVT:z[0-9]+]].h, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[UZP]].h, [[UZP]].h
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v32i64_v32f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    scvtf z0.h, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = sitofp <32 x i64> %op1 to <32 x half>
   store <32 x half> %res, <32 x half>* %b
@@ -1574,9 +1753,11 @@ define void @scvtf_v32i64_v32f16(<32 x i64>* %a, <32 x half>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x float> @scvtf_v1i64_v1f32(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v1i64_v1f32:
-; CHECK: scvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = sitofp <1 x i64> %op1 to <1 x float>
   ret <1 x float> %res
 }
@@ -1584,52 +1765,57 @@ define <1 x float> @scvtf_v1i64_v1f32(<1 x i64> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i64_v2f32:
-; CHECK: scvtf v0.2d, v0.2d
-; CHECK-NEXT: fcvtn v0.2s, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i64> %op1 to <2 x float>
   ret <2 x float> %res
 }
 
 define <4 x float> @scvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f32:
-; CHECK: ptrue [[PG1:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ptrue [[PG2:p[0-9]+]].d
-; CHECK-NEXT: scvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; CHECK-NEXT: uzp1 z0.s, [[CVT]].s, [[CVT]].s
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = sitofp <4 x i64> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
 define void @scvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v8i64_v8f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_512-NEXT: scvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_512-NEXT: ptrue [[PG3:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d
-; VBITS_EQ_256-DAG: ptrue [[PG3:p[0-9]+]].s, vl4
-; VBITS_EQ_256-DAG: scvtf [[CVT_LO:z[0-9]+]].s, [[PG2]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: scvtf [[CVT_HI:z[0-9]+]].s, [[PG2]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: uzp1 [[RES_LO:z[0-9]+]].s, [[CVT_LO]].s, [[CVT_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[RES_HI:z[0-9]+]].s, [[CVT_HI]].s, [[CVT_HI]].s
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].s, [[PG3]], [[RES_LO]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: ptrue [[PG4:p[0-9]+]].s, vl8
-; VBITS_EQ_256-NEXT: st1w { [[RES]].s }, [[PG4]], [x1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v8i64_v8f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p0.d
+; VBITS_EQ_256-NEXT:    ptrue p1.s, vl4
+; VBITS_EQ_256-NEXT:    scvtf z0.s, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    scvtf z1.s, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    splice z1.s, p1, z1.s, z0.s
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v8i64_v8f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d
+; VBITS_GE_512-NEXT:    scvtf z0.s, p0/m, z0.d
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = sitofp <8 x i64> %op1 to <8 x float>
   store <8 x float> %res, <8 x float>* %b
@@ -1637,15 +1823,16 @@ define void @scvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
 }
 
 define void @scvtf_v16i64_v16f32(<16 x i64>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i64_v16f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_1024-NEXT: scvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_1024-NEXT: ptrue [[PG3:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v16i64_v16f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d
+; VBITS_GE_1024-NEXT:    scvtf z0.s, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = sitofp <16 x i64> %op1 to <16 x float>
   store <16 x float> %res, <16 x float>* %b
@@ -1653,15 +1840,16 @@ define void @scvtf_v16i64_v16f32(<16 x i64>* %a, <16 x float>* %b) #0 {
 }
 
 define void @scvtf_v32i64_v32f32(<32 x i64>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i64_v32f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d
-; VBITS_GE_2048-NEXT: scvtf [[CVT:z[0-9]+]].s, [[PG2]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].s, [[CVT]].s, [[CVT]].s
-; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG3]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v32i64_v32f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d
+; VBITS_GE_2048-NEXT:    scvtf z0.s, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = sitofp <32 x i64> %op1 to <32 x float>
   store <32 x float> %res, <32 x float>* %b
@@ -1675,9 +1863,11 @@ define void @scvtf_v32i64_v32f32(<32 x i64>* %a, <32 x float>* %b) #0 {
 ; Don't use SVE for 64-bit vectors.
 define <1 x double> @scvtf_v1i64_v1f64(<1 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v1i64_v1f64:
-; CHECK: fmov x8, d0
-; CHECK-NEXT: scvtf d0, x8
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    scvtf d0, x8
+; CHECK-NEXT:    ret
   %res = sitofp <1 x i64> %op1 to <1 x double>
   ret <1 x double> %res
 }
@@ -1685,19 +1875,21 @@ define <1 x double> @scvtf_v1i64_v1f64(<1 x i64> %op1) #0 {
 ; Don't use SVE for 128-bit vectors.
 define <2 x double> @scvtf_v2i64_v2f64(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i64_v2f64:
-; CHECK: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = sitofp <2 x i64> %op1 to <2 x double>
   ret <2 x double> %res
 }
 
 define void @scvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = sitofp <4 x i64> %op1 to <4 x double>
   store <4 x double> %res, <4 x double>* %b
@@ -1705,23 +1897,25 @@ define void @scvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 }
 
 define void @scvtf_v8i64_v8f64(<8 x i64>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v8i64_v8f64:
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_512-NEXT: ret
-
-; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: scvtf [[RES_LO:z[0-9]+]].d, [[PG]]/m, [[LO]].d
-; VBITS_EQ_256-DAG: scvtf [[RES_HI:z[0-9]+]].d, [[PG]]/m, [[HI]].d
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG]], [x1]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG]], [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: scvtf_v8i64_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    scvtf z1.d, p0/m, z1.d
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: scvtf_v8i64_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = sitofp <8 x i64> %op1 to <8 x double>
   store <8 x double> %res, <8 x double>* %b
@@ -1729,12 +1923,13 @@ define void @scvtf_v8i64_v8f64(<8 x i64>* %a, <8 x double>* %b) #0 {
 }
 
 define void @scvtf_v16i64_v16f64(<16 x i64>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v16i64_v16f64:
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: scvtf_v16i64_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %res = sitofp <16 x i64> %op1 to <16 x double>
   store <16 x double> %res, <16 x double>* %b
@@ -1742,12 +1937,13 @@ define void @scvtf_v16i64_v16f64(<16 x i64>* %a, <16 x double>* %b) #0 {
 }
 
 define void @scvtf_v32i64_v32f64(<32 x i64>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: scvtf_v32i64_v32f64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: scvtf [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: scvtf_v32i64_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %res = sitofp <32 x i64> %op1 to <32 x double>
   store <32 x double> %res, <32 x double>* %b

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
index 4b5a8a81a8c42..f5ef3ea541cfa 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-vselect.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -22,34 +22,116 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Don't use SVE for 64-bit vectors.
 define <8 x i8> @select_v8i8(<8 x i8> %op1, <8 x i8> %op2, <8 x i1> %mask) #0 {
-; CHECK: select_v8i8:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK-LABEL: select_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.8b, v2.8b, #7
+; CHECK-NEXT:    sshr v2.8b, v2.8b, #7
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <8 x i1> %mask, <8 x i8> %op1, <8 x i8> %op2
   ret <8 x i8> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask) #0 {
-; CHECK: select_v16i8:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK-LABEL: select_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.16b, v2.16b, #7
+; CHECK-NEXT:    sshr v2.16b, v2.16b, #7
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <16 x i1> %mask, <16 x i8> %op1, <16 x i8> %op2
   ret <16 x i8> %sel
 }
 
 define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b, <32 x i1>* %c) #0 {
-; CHECK: select_v32i8:
-; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].b
-; CHECK: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
-; CHECK-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
-; CHECK: ret
+; CHECK-LABEL: select_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    asr w9, w8, #31
+; CHECK-NEXT:    strb w9, [sp, #31]
+; CHECK-NEXT:    sbfx w9, w8, #30, #1
+; CHECK-NEXT:    strb w9, [sp, #30]
+; CHECK-NEXT:    sbfx w9, w8, #29, #1
+; CHECK-NEXT:    strb w9, [sp, #29]
+; CHECK-NEXT:    sbfx w9, w8, #28, #1
+; CHECK-NEXT:    strb w9, [sp, #28]
+; CHECK-NEXT:    sbfx w9, w8, #27, #1
+; CHECK-NEXT:    strb w9, [sp, #27]
+; CHECK-NEXT:    sbfx w9, w8, #26, #1
+; CHECK-NEXT:    strb w9, [sp, #26]
+; CHECK-NEXT:    sbfx w9, w8, #25, #1
+; CHECK-NEXT:    strb w9, [sp, #25]
+; CHECK-NEXT:    sbfx w9, w8, #24, #1
+; CHECK-NEXT:    strb w9, [sp, #24]
+; CHECK-NEXT:    sbfx w9, w8, #23, #1
+; CHECK-NEXT:    strb w9, [sp, #23]
+; CHECK-NEXT:    sbfx w9, w8, #22, #1
+; CHECK-NEXT:    strb w9, [sp, #22]
+; CHECK-NEXT:    sbfx w9, w8, #21, #1
+; CHECK-NEXT:    strb w9, [sp, #21]
+; CHECK-NEXT:    sbfx w9, w8, #20, #1
+; CHECK-NEXT:    strb w9, [sp, #20]
+; CHECK-NEXT:    sbfx w9, w8, #19, #1
+; CHECK-NEXT:    strb w9, [sp, #19]
+; CHECK-NEXT:    sbfx w9, w8, #18, #1
+; CHECK-NEXT:    strb w9, [sp, #18]
+; CHECK-NEXT:    sbfx w9, w8, #17, #1
+; CHECK-NEXT:    strb w9, [sp, #17]
+; CHECK-NEXT:    sbfx w9, w8, #16, #1
+; CHECK-NEXT:    strb w9, [sp, #16]
+; CHECK-NEXT:    sbfx w9, w8, #15, #1
+; CHECK-NEXT:    strb w9, [sp, #15]
+; CHECK-NEXT:    sbfx w9, w8, #14, #1
+; CHECK-NEXT:    strb w9, [sp, #14]
+; CHECK-NEXT:    sbfx w9, w8, #13, #1
+; CHECK-NEXT:    strb w9, [sp, #13]
+; CHECK-NEXT:    sbfx w9, w8, #12, #1
+; CHECK-NEXT:    strb w9, [sp, #12]
+; CHECK-NEXT:    sbfx w9, w8, #11, #1
+; CHECK-NEXT:    strb w9, [sp, #11]
+; CHECK-NEXT:    sbfx w9, w8, #10, #1
+; CHECK-NEXT:    strb w9, [sp, #10]
+; CHECK-NEXT:    sbfx w9, w8, #9, #1
+; CHECK-NEXT:    strb w9, [sp, #9]
+; CHECK-NEXT:    sbfx w9, w8, #8, #1
+; CHECK-NEXT:    strb w9, [sp, #8]
+; CHECK-NEXT:    sbfx w9, w8, #7, #1
+; CHECK-NEXT:    strb w9, [sp, #7]
+; CHECK-NEXT:    sbfx w9, w8, #6, #1
+; CHECK-NEXT:    strb w9, [sp, #6]
+; CHECK-NEXT:    sbfx w9, w8, #5, #1
+; CHECK-NEXT:    strb w9, [sp, #5]
+; CHECK-NEXT:    sbfx w9, w8, #4, #1
+; CHECK-NEXT:    strb w9, [sp, #4]
+; CHECK-NEXT:    sbfx w9, w8, #3, #1
+; CHECK-NEXT:    strb w9, [sp, #3]
+; CHECK-NEXT:    sbfx w9, w8, #2, #1
+; CHECK-NEXT:    strb w9, [sp, #2]
+; CHECK-NEXT:    sbfx w9, w8, #1, #1
+; CHECK-NEXT:    sbfx w8, w8, #0, #1
+; CHECK-NEXT:    strb w9, [sp, #1]
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    strb w8, [sp]
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x9]
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0]
+; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x1]
+; CHECK-NEXT:    and z0.b, z0.b, #0x1
+; CHECK-NEXT:    cmpne p1.b, p1/z, z0.b, #0
+; CHECK-NEXT:    sel z0.b, p1, z1.b, z2.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
@@ -59,17 +141,157 @@ define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b, <32 x i1>* %c) #0 {
 }
 
 define void @select_v64i8(<64 x i8>* %a, <64 x i8>* %b, <64 x i1>* %c) #0 {
-; CHECK: select_v64i8:
-; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,64)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].b
-; VBITS_GE_512: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
-; VBITS_GE_512-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v64i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldr x8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_512-NEXT:    ptrue p1.b
+; VBITS_GE_512-NEXT:    asr x9, x8, #63
+; VBITS_GE_512-NEXT:    strb w9, [sp, #63]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #62]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #61]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #60]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #59]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #58]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #57]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #56]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #55]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #54]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #53]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #52]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #51]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #50]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #49]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #47]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #46]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #45]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #44]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #43]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #42]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #41]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #40]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #39]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #38]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #37]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #36]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #35]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #34]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #33]
+; VBITS_GE_512-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #32]
+; VBITS_GE_512-NEXT:    asr w9, w8, #31
+; VBITS_GE_512-NEXT:    strb w9, [sp, #31]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #30]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #29]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #28]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #27]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #26]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #25]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #24]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #23]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #22]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #21]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #20]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #19]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #18]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #17]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #16]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #15]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #14]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #13]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #12]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #11]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #10]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #9]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #8]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #7]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #6]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #5]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #4]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #3]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #2]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_512-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_512-NEXT:    strb w9, [sp, #1]
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    strb w8, [sp]
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1b { z1.b }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1b { z2.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.b, z0.b, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.b, p1/z, z0.b, #0
+; VBITS_GE_512-NEXT:    sel z0.b, p1, z1.b, z2.b
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i8>, <64 x i8>* %a
   %op2 = load <64 x i8>, <64 x i8>* %b
@@ -79,17 +301,286 @@ define void @select_v64i8(<64 x i8>* %a, <64 x i8>* %b, <64 x i1>* %c) #0 {
 }
 
 define void @select_v128i8(<128 x i8>* %a, <128 x i8>* %b, <128 x i1>* %c) #0 {
-; CHECK: select_v128i8:
-; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,128)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].b
-; VBITS_GE_1024: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
-; VBITS_GE_1024-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v128i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_1024-NEXT:    ldr x8, [x2, #8]
+; VBITS_GE_1024-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_1024-NEXT:    ptrue p1.b
+; VBITS_GE_1024-NEXT:    asr x9, x8, #63
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #127]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #126]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #125]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #124]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #123]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #122]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #121]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #120]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #119]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #118]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #117]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #116]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #115]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #114]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #113]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #111]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #110]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #109]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #108]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #107]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #106]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #105]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #104]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #103]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #102]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #101]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #100]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #99]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #98]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #97]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #96]
+; VBITS_GE_1024-NEXT:    asr w9, w8, #31
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #95]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #94]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #93]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #92]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #91]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #90]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #89]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #88]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #87]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #86]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #85]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #84]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #83]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #82]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #81]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #80]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #79]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #78]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #77]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #76]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #75]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #74]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #73]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #72]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #71]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #70]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #69]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #68]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #67]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #66]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    strb w9, [sp, #65]
+; VBITS_GE_1024-NEXT:    strb w8, [sp, #64]
+; VBITS_GE_1024-NEXT:    ldr x8, [x2]
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    asr x10, x8, #63
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #63]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #62, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #62]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #61, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #61]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #60, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #60]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #59, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #59]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #58, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #58]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #57, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #57]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #56, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #56]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #55, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #55]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #54, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #54]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #53, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #53]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #52, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #52]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #51, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #51]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #50, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #50]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #49, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #49]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #48, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #47, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #47]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #46, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #46]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #45, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #45]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #44, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #44]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #43, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #43]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #42, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #42]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #41, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #41]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #40, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #40]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #39, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #39]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #38, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #38]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #37]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #36, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #36]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #35, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #35]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #34, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #34]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #33, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #33]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #32, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #32]
+; VBITS_GE_1024-NEXT:    asr w10, w8, #31
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #31]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #30, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #30]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #29, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #29]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #28, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #28]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #27, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #27]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #26, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #26]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #25, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #25]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #24, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #24]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #23, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #23]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #22, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #22]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #21, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #21]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #20, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #20]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #19, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #19]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #18]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #17, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #17]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #16, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #16]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #15]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #14, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #14]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #13, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #13]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #12, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #12]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #11, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #11]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #10, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #10]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #9]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #8, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #8]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #7]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #6, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #6]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #5, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #5]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #4]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #3]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #2]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    strb w10, [sp, #1]
+; VBITS_GE_1024-NEXT:    strb w8, [sp]
+; VBITS_GE_1024-NEXT:    ld1b { z0.b }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1b { z1.b }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1b { z2.b }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.b, z0.b, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.b, p1/z, z0.b, #0
+; VBITS_GE_1024-NEXT:    sel z0.b, p1, z1.b, z2.b
+; VBITS_GE_1024-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <128 x i1>, <128 x i1>* %c
   %op1 = load <128 x i8>, <128 x i8>* %a
   %op2 = load <128 x i8>, <128 x i8>* %b
@@ -99,17 +590,544 @@ define void @select_v128i8(<128 x i8>* %a, <128 x i8>* %b, <128 x i1>* %c) #0 {
 }
 
 define void @select_v256i8(<256 x i8>* %a, <256 x i8>* %b, <256 x i1>* %c) #0 {
-; CHECK: select_v256i8:
-; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].b
-; VBITS_GE_2048: ld1b { [[MASK:z[0-9]+]].b }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].b, [[MASK]].b, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].b, [[PG1]]/z, [[AND]].b, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].b, [[COND]], [[OP1]].b, [[OP2]].b
-; VBITS_GE_2048-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v256i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_2048-NEXT:    ldr x8, [x2, #24]
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
+; VBITS_GE_2048-NEXT:    ptrue p1.b
+; VBITS_GE_2048-NEXT:    asr x9, x8, #63
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #255]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #254]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #253]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #252]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #251]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #250]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #249]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #248]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #247]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #246]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #245]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #244]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #243]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #242]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #241]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #240]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #239]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #238]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #237]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #236]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #235]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #234]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #233]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #232]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #231]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #230]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #229]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #228]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #227]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #226]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #225]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #224]
+; VBITS_GE_2048-NEXT:    asr w9, w8, #31
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #223]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #222]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #221]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #220]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #219]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #218]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #217]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #216]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #215]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #214]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #213]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #212]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #211]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #210]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #209]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #208]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #207]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #206]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #205]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #204]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #203]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #202]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #201]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #200]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #199]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #198]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #197]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #196]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #195]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #194]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #193]
+; VBITS_GE_2048-NEXT:    strb w8, [sp, #192]
+; VBITS_GE_2048-NEXT:    ldr x8, [x2, #16]
+; VBITS_GE_2048-NEXT:    asr x9, x8, #63
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #191]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #190]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #189]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #188]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #187]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #186]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #185]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #184]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #183]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #182]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #181]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #180]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #179]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #178]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #177]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #176]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #175]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #174]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #173]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #172]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #171]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #170]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #169]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #168]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #167]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #166]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #165]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #164]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #163]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #162]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #161]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #160]
+; VBITS_GE_2048-NEXT:    asr w9, w8, #31
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #159]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #158]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #157]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #156]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #155]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #154]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #153]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #152]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #151]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #150]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #149]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #148]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #147]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #146]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #145]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #144]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #143]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #142]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #141]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #140]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #139]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #138]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #137]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #136]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #135]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #134]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #133]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #132]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #131]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #130]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #129]
+; VBITS_GE_2048-NEXT:    strb w8, [sp, #128]
+; VBITS_GE_2048-NEXT:    ldr x8, [x2, #8]
+; VBITS_GE_2048-NEXT:    asr x9, x8, #63
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #127]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #126]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #125]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #124]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #123]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #122]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #121]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #120]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #119]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #118]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #117]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #116]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #115]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #114]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #113]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #112]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #111]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #110]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #109]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #108]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #107]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #106]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #105]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #104]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #103]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #102]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #101]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #100]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #99]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #98]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #97]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #96]
+; VBITS_GE_2048-NEXT:    asr w9, w8, #31
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #95]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #94]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #93]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #92]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #91]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #90]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #89]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #88]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #87]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #86]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #85]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #84]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #83]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #82]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #81]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #80]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #79]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #78]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #77]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #76]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #75]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #74]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #73]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #72]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #71]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #70]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #69]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #68]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #67]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #66]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strb w9, [sp, #65]
+; VBITS_GE_2048-NEXT:    strb w8, [sp, #64]
+; VBITS_GE_2048-NEXT:    ldr x8, [x2]
+; VBITS_GE_2048-NEXT:    mov x9, sp
+; VBITS_GE_2048-NEXT:    asr x10, x8, #63
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #63]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #62]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #61]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #60]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #59]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #58]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #57]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #56]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #55]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #54]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #53]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #52]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #51]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #50]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #49]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #48]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #47]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #46]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #45]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #44]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #43]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #42]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #41]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #40]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #39]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #38]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #37]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #36]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #35]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #34]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #33]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #32]
+; VBITS_GE_2048-NEXT:    asr w10, w8, #31
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #31]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #30]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #29]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #28]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #27]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #26]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #25]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #24]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #23]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #22]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #21]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #20]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #19]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #18]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #17]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #16]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #15]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #14]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #13]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #12]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #11]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #10]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #9]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #8]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #7]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #6]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #5]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #4]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #3]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #2]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strb w10, [sp, #1]
+; VBITS_GE_2048-NEXT:    strb w8, [sp]
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1b { z1.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1b { z2.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.b, z0.b, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.b, p1/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    sel z0.b, p1, z1.b, z2.b
+; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <256 x i1>, <256 x i1>* %c
   %op1 = load <256 x i8>, <256 x i8>* %a
   %op2 = load <256 x i8>, <256 x i8>* %b
@@ -120,34 +1138,85 @@ define void @select_v256i8(<256 x i8>* %a, <256 x i8>* %b, <256 x i1>* %c) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <4 x i16> @select_v4i16(<4 x i16> %op1, <4 x i16> %op2, <4 x i1> %mask) #0 {
-; CHECK: select_v4i16:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK-LABEL: select_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.4h, v2.4h, #15
+; CHECK-NEXT:    sshr v2.4h, v2.4h, #15
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <4 x i1> %mask, <4 x i16> %op1, <4 x i16> %op2
   ret <4 x i16> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) #0 {
-; CHECK: select_v8i16:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK-LABEL: select_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
+; CHECK-NEXT:    shl v2.8h, v2.8h, #15
+; CHECK-NEXT:    sshr v2.8h, v2.8h, #15
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <8 x i1> %mask, <8 x i16> %op1, <8 x i16> %op2
   ret <8 x i16> %sel
 }
 
 define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %c) #0 {
-; CHECK: select_v16i16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; CHECK: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; CHECK: ret
+; CHECK-LABEL: select_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrh w8, [x2]
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ptrue p1.h
+; CHECK-NEXT:    sbfx w9, w8, #15, #1
+; CHECK-NEXT:    strh w9, [sp, #30]
+; CHECK-NEXT:    sbfx w9, w8, #14, #1
+; CHECK-NEXT:    strh w9, [sp, #28]
+; CHECK-NEXT:    sbfx w9, w8, #13, #1
+; CHECK-NEXT:    strh w9, [sp, #26]
+; CHECK-NEXT:    sbfx w9, w8, #12, #1
+; CHECK-NEXT:    strh w9, [sp, #24]
+; CHECK-NEXT:    sbfx w9, w8, #11, #1
+; CHECK-NEXT:    strh w9, [sp, #22]
+; CHECK-NEXT:    sbfx w9, w8, #10, #1
+; CHECK-NEXT:    strh w9, [sp, #20]
+; CHECK-NEXT:    sbfx w9, w8, #9, #1
+; CHECK-NEXT:    strh w9, [sp, #18]
+; CHECK-NEXT:    sbfx w9, w8, #8, #1
+; CHECK-NEXT:    strh w9, [sp, #16]
+; CHECK-NEXT:    sbfx w9, w8, #7, #1
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    sbfx w9, w8, #6, #1
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    sbfx w9, w8, #5, #1
+; CHECK-NEXT:    strh w9, [sp, #10]
+; CHECK-NEXT:    sbfx w9, w8, #4, #1
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    sbfx w9, w8, #3, #1
+; CHECK-NEXT:    strh w9, [sp, #6]
+; CHECK-NEXT:    sbfx w9, w8, #2, #1
+; CHECK-NEXT:    strh w9, [sp, #4]
+; CHECK-NEXT:    sbfx w9, w8, #1, #1
+; CHECK-NEXT:    sbfx w8, w8, #0, #1
+; CHECK-NEXT:    strh w9, [sp, #2]
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    strh w8, [sp]
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; CHECK-NEXT:    sel z0.h, p1, z1.h, z2.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
@@ -157,17 +1226,93 @@ define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %c) #0 {
 }
 
 define void @select_v32i16(<32 x i16>* %a, <32 x i16>* %b, <32 x i1>* %c) #0 {
-; CHECK: select_v32i16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_512: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v32i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldr w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ptrue p1.h
+; VBITS_GE_512-NEXT:    asr w9, w8, #31
+; VBITS_GE_512-NEXT:    strh w9, [sp, #62]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #60]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #58]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #56]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #54]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #52]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #50]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #46]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #44]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #42]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #40]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #38]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #36]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #34]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #32]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #30]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #28]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #26]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #24]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #22]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #20]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #18]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #16]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #14]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #12]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #10]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #8]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #6]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #4]
+; VBITS_GE_512-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_512-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_512-NEXT:    strh w9, [sp, #2]
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    strh w8, [sp]
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_512-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i16>, <32 x i16>* %a
   %op2 = load <32 x i16>, <32 x i16>* %b
@@ -177,17 +1322,157 @@ define void @select_v32i16(<32 x i16>* %a, <32 x i16>* %b, <32 x i1>* %c) #0 {
 }
 
 define void @select_v64i16(<64 x i16>* %a, <64 x i16>* %b, <64 x i1>* %c) #0 {
-; CHECK: select_v64i16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_1024: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v64i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_1024-NEXT:    ldr x8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ptrue p1.h
+; VBITS_GE_1024-NEXT:    asr x9, x8, #63
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #126]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #124]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #122]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #120]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #118]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #116]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #114]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #110]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #108]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #106]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #104]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #102]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #100]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #98]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #94]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #92]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #90]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #88]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #86]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #84]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #82]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #80]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #78]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #76]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #74]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #72]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #70]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #68]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #66]
+; VBITS_GE_1024-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #64]
+; VBITS_GE_1024-NEXT:    asr w9, w8, #31
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #62]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #60]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #58]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #56]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #54]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #52]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #50]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #46]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #44]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #42]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #40]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #38]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #36]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #34]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #32]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #30]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #28]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #26]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #24]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #22]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #20]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #18]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #16]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #14]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #12]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #10]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #8]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #6]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #4]
+; VBITS_GE_1024-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    strh w9, [sp, #2]
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    strh w8, [sp]
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_1024-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i16>, <64 x i16>* %a
   %op2 = load <64 x i16>, <64 x i16>* %b
@@ -197,17 +1482,286 @@ define void @select_v64i16(<64 x i16>* %a, <64 x i16>* %b, <64 x i1>* %c) #0 {
 }
 
 define void @select_v128i16(<128 x i16>* %a, <128 x i16>* %b, <128 x i1>* %c) #0 {
-; CHECK: select_v128i16:
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].h
-; VBITS_GE_2048: ld1h { [[MASK:z[0-9]+]].h }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].h, [[MASK]].h, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].h, [[PG1]]/z, [[AND]].h, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].h, [[COND]], [[OP1]].h, [[OP2]].h
-; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v128i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #496
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_2048-NEXT:    ldr x8, [x2, #8]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ptrue p1.h
+; VBITS_GE_2048-NEXT:    asr x9, x8, #63
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #254]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #252]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #250]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #248]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #246]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #244]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #242]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #240]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #238]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #236]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #234]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #232]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #230]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #228]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #226]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #224]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #222]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #220]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #218]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #216]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #214]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #212]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #210]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #208]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #206]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #204]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #202]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #200]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #198]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #196]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #194]
+; VBITS_GE_2048-NEXT:    sbfx x9, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #192]
+; VBITS_GE_2048-NEXT:    asr w9, w8, #31
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #190]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #188]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #186]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #184]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #182]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #180]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #178]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #176]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #174]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #172]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #170]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #168]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #166]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #164]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #162]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #160]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #158]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #156]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #154]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #152]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #150]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #148]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #146]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #144]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #142]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #140]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #138]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #136]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #134]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #132]
+; VBITS_GE_2048-NEXT:    sbfx w9, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strh w9, [sp, #130]
+; VBITS_GE_2048-NEXT:    strh w8, [sp, #128]
+; VBITS_GE_2048-NEXT:    ldr x8, [x2]
+; VBITS_GE_2048-NEXT:    mov x9, sp
+; VBITS_GE_2048-NEXT:    asr x10, x8, #63
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #126]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #62, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #124]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #61, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #122]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #60, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #120]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #59, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #118]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #58, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #116]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #57, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #114]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #56, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #112]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #55, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #110]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #54, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #108]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #53, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #106]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #52, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #104]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #51, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #102]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #50, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #100]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #49, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #98]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #48, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #96]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #47, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #94]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #46, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #92]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #45, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #90]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #44, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #88]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #43, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #86]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #42, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #84]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #41, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #82]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #40, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #80]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #39, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #78]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #38, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #76]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #74]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #36, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #72]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #35, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #70]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #34, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #68]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #33, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #66]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #32, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #64]
+; VBITS_GE_2048-NEXT:    asr w10, w8, #31
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #62]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #30, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #60]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #29, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #58]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #28, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #56]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #27, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #54]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #26, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #52]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #25, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #50]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #24, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #48]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #23, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #46]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #22, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #44]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #21, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #42]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #20, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #40]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #19, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #38]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #36]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #17, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #34]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #16, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #32]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #30]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #14, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #28]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #13, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #26]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #12, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #24]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #11, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #22]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #10, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #20]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #18]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #8, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #16]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #14]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #6, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #12]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #5, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #10]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #8]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #6]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #4]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    strh w10, [sp, #2]
+; VBITS_GE_2048-NEXT:    strh w8, [sp]
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.h, z0.h, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.h, p1/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    sel z0.h, p1, z1.h, z2.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <128 x i1>, <128 x i1>* %c
   %op1 = load <128 x i16>, <128 x i16>* %a
   %op2 = load <128 x i16>, <128 x i16>* %b
@@ -218,34 +1772,65 @@ define void @select_v128i16(<128 x i16>* %a, <128 x i16>* %b, <128 x i1>* %c) #0
 
 ; Don't use SVE for 64-bit vectors.
 define <2 x i32> @select_v2i32(<2 x i32> %op1, <2 x i32> %op2, <2 x i1> %mask) #0 {
-; CHECK: select_v2i32:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK-LABEL: select_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    shl v2.2s, v2.2s, #31
+; CHECK-NEXT:    sshr v2.2s, v2.2s, #31
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <2 x i1> %mask, <2 x i32> %op1, <2 x i32> %op2
   ret <2 x i32> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) #0 {
-; CHECK: select_v4i32:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK-LABEL: select_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.4s, v2.4h, #0
+; CHECK-NEXT:    shl v2.4s, v2.4s, #31
+; CHECK-NEXT:    sshr v2.4s, v2.4s, #31
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <4 x i1> %mask, <4 x i32> %op1, <4 x i32> %op2
   ret <4 x i32> %sel
 }
 
 define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %c) #0 {
-; CHECK: select_v8i32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; CHECK: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; CHECK: ret
+; CHECK-LABEL: select_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrb w8, [x2]
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    sbfx w10, w8, #7, #1
+; CHECK-NEXT:    sbfx w11, w8, #6, #1
+; CHECK-NEXT:    stp w11, w10, [sp, #24]
+; CHECK-NEXT:    sbfx w10, w8, #3, #1
+; CHECK-NEXT:    sbfx w11, w8, #2, #1
+; CHECK-NEXT:    sbfx w12, w8, #5, #1
+; CHECK-NEXT:    sbfx w13, w8, #4, #1
+; CHECK-NEXT:    stp w11, w10, [sp, #8]
+; CHECK-NEXT:    sbfx w10, w8, #1, #1
+; CHECK-NEXT:    sbfx w8, w8, #0, #1
+; CHECK-NEXT:    stp w13, w12, [sp, #16]
+; CHECK-NEXT:    stp w8, w10, [sp]
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; CHECK-NEXT:    sel z0.s, p1, z1.s, z2.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
@@ -255,17 +1840,53 @@ define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %c) #0 {
 }
 
 define void @select_v16i32(<16 x i32>* %a, <16 x i32>* %b, <16 x i1>* %c) #0 {
-; CHECK: select_v16i32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_512: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldrh w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    ptrue p1.s
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #14, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #56]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #6, #1
+; VBITS_GE_512-NEXT:    sbfx w12, w8, #13, #1
+; VBITS_GE_512-NEXT:    sbfx w13, w8, #12, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #24]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_512-NEXT:    sbfx w11, w8, #2, #1
+; VBITS_GE_512-NEXT:    sbfx w14, w8, #11, #1
+; VBITS_GE_512-NEXT:    sbfx w15, w8, #10, #1
+; VBITS_GE_512-NEXT:    sbfx w16, w8, #9, #1
+; VBITS_GE_512-NEXT:    sbfx w17, w8, #8, #1
+; VBITS_GE_512-NEXT:    stp w13, w12, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx w12, w8, #5, #1
+; VBITS_GE_512-NEXT:    sbfx w13, w8, #4, #1
+; VBITS_GE_512-NEXT:    stp w11, w10, [sp, #8]
+; VBITS_GE_512-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_512-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_512-NEXT:    stp w15, w14, [sp, #40]
+; VBITS_GE_512-NEXT:    stp w17, w16, [sp, #32]
+; VBITS_GE_512-NEXT:    stp w13, w12, [sp, #16]
+; VBITS_GE_512-NEXT:    stp w8, w10, [sp]
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_512-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i32>, <16 x i32>* %a
   %op2 = load <16 x i32>, <16 x i32>* %b
@@ -275,17 +1896,80 @@ define void @select_v16i32(<16 x i32>* %a, <16 x i32>* %b, <16 x i1>* %c) #0 {
 }
 
 define void @select_v32i32(<32 x i32>* %a, <32 x i32>* %b, <32 x i1>* %c) #0 {
-; CHECK: select_v32i32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_1024: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v32i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #224
+; VBITS_GE_1024-NEXT:    str x19, [sp, #16] // 8-byte Folded Spill
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 32
+; VBITS_GE_1024-NEXT:    .cfi_offset w19, -16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -24
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -32
+; VBITS_GE_1024-NEXT:    ldr w8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    ptrue p1.s
+; VBITS_GE_1024-NEXT:    asr w10, w8, #31
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #30, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #120]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #15, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #14, #1
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #29, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #28, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #56]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #7, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #6, #1
+; VBITS_GE_1024-NEXT:    sbfx w14, w8, #27, #1
+; VBITS_GE_1024-NEXT:    sbfx w15, w8, #26, #1
+; VBITS_GE_1024-NEXT:    sbfx w16, w8, #25, #1
+; VBITS_GE_1024-NEXT:    sbfx w17, w8, #24, #1
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #13, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #12, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #24]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #3, #1
+; VBITS_GE_1024-NEXT:    sbfx w11, w8, #2, #1
+; VBITS_GE_1024-NEXT:    sbfx w18, w8, #23, #1
+; VBITS_GE_1024-NEXT:    sbfx w2, w8, #22, #1
+; VBITS_GE_1024-NEXT:    sbfx w3, w8, #21, #1
+; VBITS_GE_1024-NEXT:    sbfx w4, w8, #20, #1
+; VBITS_GE_1024-NEXT:    sbfx w5, w8, #19, #1
+; VBITS_GE_1024-NEXT:    sbfx w6, w8, #18, #1
+; VBITS_GE_1024-NEXT:    sbfx w7, w8, #17, #1
+; VBITS_GE_1024-NEXT:    sbfx w19, w8, #16, #1
+; VBITS_GE_1024-NEXT:    stp w15, w14, [sp, #104]
+; VBITS_GE_1024-NEXT:    stp w17, w16, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx w14, w8, #11, #1
+; VBITS_GE_1024-NEXT:    sbfx w15, w8, #10, #1
+; VBITS_GE_1024-NEXT:    sbfx w16, w8, #9, #1
+; VBITS_GE_1024-NEXT:    sbfx w17, w8, #8, #1
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx w12, w8, #5, #1
+; VBITS_GE_1024-NEXT:    sbfx w13, w8, #4, #1
+; VBITS_GE_1024-NEXT:    stp w11, w10, [sp, #8]
+; VBITS_GE_1024-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_1024-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_1024-NEXT:    stp w2, w18, [sp, #88]
+; VBITS_GE_1024-NEXT:    stp w4, w3, [sp, #80]
+; VBITS_GE_1024-NEXT:    stp w6, w5, [sp, #72]
+; VBITS_GE_1024-NEXT:    stp w19, w7, [sp, #64]
+; VBITS_GE_1024-NEXT:    stp w15, w14, [sp, #40]
+; VBITS_GE_1024-NEXT:    stp w17, w16, [sp, #32]
+; VBITS_GE_1024-NEXT:    stp w13, w12, [sp, #16]
+; VBITS_GE_1024-NEXT:    stp w8, w10, [sp]
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_1024-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldr x19, [sp, #16] // 8-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i32>, <32 x i32>* %a
   %op2 = load <32 x i32>, <32 x i32>* %b
@@ -295,17 +1979,177 @@ define void @select_v32i32(<32 x i32>* %a, <32 x i32>* %b, <32 x i1>* %c) #0 {
 }
 
 define void @select_v64i32(<64 x i32>* %a, <64 x i32>* %b, <64 x i1>* %c) #0 {
-; CHECK: select_v64i32:
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].s
-; VBITS_GE_2048: ld1w { [[MASK:z[0-9]+]].s }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].s, [[MASK]].s, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].s, [[PG1]]/z, [[AND]].s, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].s, [[COND]], [[OP1]].s, [[OP2]].s
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v64i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-96]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #672
+; VBITS_GE_2048-NEXT:    stp x28, x27, [sp, #16] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x26, x25, [sp, #32] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 96
+; VBITS_GE_2048-NEXT:    .cfi_offset w19, -8
+; VBITS_GE_2048-NEXT:    .cfi_offset w20, -16
+; VBITS_GE_2048-NEXT:    .cfi_offset w21, -24
+; VBITS_GE_2048-NEXT:    .cfi_offset w22, -32
+; VBITS_GE_2048-NEXT:    .cfi_offset w23, -40
+; VBITS_GE_2048-NEXT:    .cfi_offset w24, -48
+; VBITS_GE_2048-NEXT:    .cfi_offset w25, -56
+; VBITS_GE_2048-NEXT:    .cfi_offset w26, -64
+; VBITS_GE_2048-NEXT:    .cfi_offset w27, -72
+; VBITS_GE_2048-NEXT:    .cfi_offset w28, -80
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -88
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -96
+; VBITS_GE_2048-NEXT:    ldr x8, [x2]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    add x9, sp, #256
+; VBITS_GE_2048-NEXT:    ptrue p1.s
+; VBITS_GE_2048-NEXT:    asr x10, x8, #63
+; VBITS_GE_2048-NEXT:    str w10, [sp, #508]
+; VBITS_GE_2048-NEXT:    sbfx x10, x8, #37, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x8, #62, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #404]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #18, #1
+; VBITS_GE_2048-NEXT:    sbfx x12, x8, #61, #1
+; VBITS_GE_2048-NEXT:    sbfx x13, x8, #60, #1
+; VBITS_GE_2048-NEXT:    sbfx x14, x8, #59, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #504]
+; VBITS_GE_2048-NEXT:    sbfx x11, x8, #36, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #328]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #9, #1
+; VBITS_GE_2048-NEXT:    sbfx x15, x8, #58, #1
+; VBITS_GE_2048-NEXT:    sbfx x16, x8, #57, #1
+; VBITS_GE_2048-NEXT:    sbfx x17, x8, #56, #1
+; VBITS_GE_2048-NEXT:    sbfx x18, x8, #55, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #500]
+; VBITS_GE_2048-NEXT:    sbfx x12, x8, #35, #1
+; VBITS_GE_2048-NEXT:    str w13, [sp, #496]
+; VBITS_GE_2048-NEXT:    sbfx x13, x8, #34, #1
+; VBITS_GE_2048-NEXT:    str w14, [sp, #492]
+; VBITS_GE_2048-NEXT:    sbfx x14, x8, #33, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #400]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #17, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #292]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #4, #1
+; VBITS_GE_2048-NEXT:    sbfx x2, x8, #54, #1
+; VBITS_GE_2048-NEXT:    sbfx x3, x8, #53, #1
+; VBITS_GE_2048-NEXT:    sbfx x4, x8, #52, #1
+; VBITS_GE_2048-NEXT:    sbfx x5, x8, #51, #1
+; VBITS_GE_2048-NEXT:    sbfx x6, x8, #50, #1
+; VBITS_GE_2048-NEXT:    sbfx x7, x8, #49, #1
+; VBITS_GE_2048-NEXT:    sbfx x19, x8, #48, #1
+; VBITS_GE_2048-NEXT:    sbfx x20, x8, #47, #1
+; VBITS_GE_2048-NEXT:    sbfx x21, x8, #46, #1
+; VBITS_GE_2048-NEXT:    sbfx x22, x8, #45, #1
+; VBITS_GE_2048-NEXT:    str w15, [sp, #488]
+; VBITS_GE_2048-NEXT:    sbfx x15, x8, #32, #1
+; VBITS_GE_2048-NEXT:    str w16, [sp, #484]
+; VBITS_GE_2048-NEXT:    asr w16, w8, #31
+; VBITS_GE_2048-NEXT:    str w17, [sp, #480]
+; VBITS_GE_2048-NEXT:    sbfx w17, w8, #30, #1
+; VBITS_GE_2048-NEXT:    str w18, [sp, #476]
+; VBITS_GE_2048-NEXT:    sbfx w18, w8, #29, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #396]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #392]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #388]
+; VBITS_GE_2048-NEXT:    sbfx w12, w8, #16, #1
+; VBITS_GE_2048-NEXT:    sbfx w13, w8, #15, #1
+; VBITS_GE_2048-NEXT:    sbfx w14, w8, #14, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #324]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #8, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #272]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #2, #1
+; VBITS_GE_2048-NEXT:    sbfx x23, x8, #44, #1
+; VBITS_GE_2048-NEXT:    sbfx x24, x8, #43, #1
+; VBITS_GE_2048-NEXT:    sbfx x25, x8, #42, #1
+; VBITS_GE_2048-NEXT:    sbfx x26, x8, #41, #1
+; VBITS_GE_2048-NEXT:    sbfx x27, x8, #40, #1
+; VBITS_GE_2048-NEXT:    sbfx x28, x8, #39, #1
+; VBITS_GE_2048-NEXT:    sbfx x30, x8, #38, #1
+; VBITS_GE_2048-NEXT:    str w2, [sp, #472]
+; VBITS_GE_2048-NEXT:    sbfx w2, w8, #28, #1
+; VBITS_GE_2048-NEXT:    str w3, [sp, #468]
+; VBITS_GE_2048-NEXT:    sbfx w3, w8, #27, #1
+; VBITS_GE_2048-NEXT:    str w4, [sp, #464]
+; VBITS_GE_2048-NEXT:    sbfx w4, w8, #26, #1
+; VBITS_GE_2048-NEXT:    str w5, [sp, #460]
+; VBITS_GE_2048-NEXT:    str w6, [sp, #456]
+; VBITS_GE_2048-NEXT:    sbfx w5, w8, #25, #1
+; VBITS_GE_2048-NEXT:    str w7, [sp, #452]
+; VBITS_GE_2048-NEXT:    str w19, [sp, #448]
+; VBITS_GE_2048-NEXT:    sbfx w6, w8, #24, #1
+; VBITS_GE_2048-NEXT:    str w20, [sp, #444]
+; VBITS_GE_2048-NEXT:    str w21, [sp, #440]
+; VBITS_GE_2048-NEXT:    sbfx w7, w8, #23, #1
+; VBITS_GE_2048-NEXT:    str w22, [sp, #436]
+; VBITS_GE_2048-NEXT:    sbfx w19, w8, #22, #1
+; VBITS_GE_2048-NEXT:    sbfx w20, w8, #21, #1
+; VBITS_GE_2048-NEXT:    sbfx w21, w8, #20, #1
+; VBITS_GE_2048-NEXT:    sbfx w22, w8, #19, #1
+; VBITS_GE_2048-NEXT:    str w15, [sp, #384]
+; VBITS_GE_2048-NEXT:    str w16, [sp, #380]
+; VBITS_GE_2048-NEXT:    str w17, [sp, #376]
+; VBITS_GE_2048-NEXT:    str w18, [sp, #372]
+; VBITS_GE_2048-NEXT:    sbfx w15, w8, #13, #1
+; VBITS_GE_2048-NEXT:    sbfx w16, w8, #12, #1
+; VBITS_GE_2048-NEXT:    sbfx w17, w8, #11, #1
+; VBITS_GE_2048-NEXT:    sbfx w18, w8, #10, #1
+; VBITS_GE_2048-NEXT:    str w12, [sp, #320]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #316]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #312]
+; VBITS_GE_2048-NEXT:    sbfx w12, w8, #7, #1
+; VBITS_GE_2048-NEXT:    sbfx w13, w8, #6, #1
+; VBITS_GE_2048-NEXT:    sbfx w14, w8, #5, #1
+; VBITS_GE_2048-NEXT:    str w11, [sp, #288]
+; VBITS_GE_2048-NEXT:    sbfx w11, w8, #3, #1
+; VBITS_GE_2048-NEXT:    str w10, [sp, #264]
+; VBITS_GE_2048-NEXT:    sbfx w10, w8, #1, #1
+; VBITS_GE_2048-NEXT:    sbfx w8, w8, #0, #1
+; VBITS_GE_2048-NEXT:    str w23, [sp, #432]
+; VBITS_GE_2048-NEXT:    str w24, [sp, #428]
+; VBITS_GE_2048-NEXT:    str w25, [sp, #424]
+; VBITS_GE_2048-NEXT:    str w26, [sp, #420]
+; VBITS_GE_2048-NEXT:    str w27, [sp, #416]
+; VBITS_GE_2048-NEXT:    str w28, [sp, #412]
+; VBITS_GE_2048-NEXT:    str w30, [sp, #408]
+; VBITS_GE_2048-NEXT:    str w2, [sp, #368]
+; VBITS_GE_2048-NEXT:    str w3, [sp, #364]
+; VBITS_GE_2048-NEXT:    str w4, [sp, #360]
+; VBITS_GE_2048-NEXT:    str w5, [sp, #356]
+; VBITS_GE_2048-NEXT:    str w6, [sp, #352]
+; VBITS_GE_2048-NEXT:    str w7, [sp, #348]
+; VBITS_GE_2048-NEXT:    str w19, [sp, #344]
+; VBITS_GE_2048-NEXT:    str w20, [sp, #340]
+; VBITS_GE_2048-NEXT:    str w21, [sp, #336]
+; VBITS_GE_2048-NEXT:    str w22, [sp, #332]
+; VBITS_GE_2048-NEXT:    str w15, [sp, #308]
+; VBITS_GE_2048-NEXT:    str w16, [sp, #304]
+; VBITS_GE_2048-NEXT:    str w17, [sp, #300]
+; VBITS_GE_2048-NEXT:    str w18, [sp, #296]
+; VBITS_GE_2048-NEXT:    str w12, [sp, #284]
+; VBITS_GE_2048-NEXT:    str w13, [sp, #280]
+; VBITS_GE_2048-NEXT:    str w14, [sp, #276]
+; VBITS_GE_2048-NEXT:    str w11, [sp, #268]
+; VBITS_GE_2048-NEXT:    str w10, [sp, #260]
+; VBITS_GE_2048-NEXT:    str w8, [sp, #256]
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.s, z0.s, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.s, p1/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    sel z0.s, p1, z1.s, z2.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x26, x25, [sp, #32] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x28, x27, [sp, #16] // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #96 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <64 x i1>, <64 x i1>* %c
   %op1 = load <64 x i32>, <64 x i32>* %a
   %op2 = load <64 x i32>, <64 x i32>* %b
@@ -316,34 +2160,63 @@ define void @select_v64i32(<64 x i32>* %a, <64 x i32>* %b, <64 x i1>* %c) #0 {
 
 ; Don't use SVE for 64-bit vectors.
 define <1 x i64> @select_v1i64(<1 x i64> %op1, <1 x i64> %op2, <1 x i1> %mask) #0 {
-; CHECK: select_v1i64:
-; CHECK: bif v0.8b, v1.8b, v2.8b
-; CHECK: ret
+; CHECK-LABEL: select_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x1
+; CHECK-NEXT:    csetm x8, ne
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ret
   %sel = select <1 x i1> %mask, <1 x i64> %op1, <1 x i64> %op2
   ret <1 x i64> %sel
 }
 
 ; Don't use SVE for 128-bit vectors.
 define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) #0 {
-; CHECK: select_v2i64:
-; CHECK: bif v0.16b, v1.16b, v2.16b
-; CHECK: ret
+; CHECK-LABEL: select_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
   %sel = select <2 x i1> %mask, <2 x i64> %op1, <2 x i64> %op2
   ret <2 x i64> %sel
 }
 
 define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b, <4 x i1>* %c) #0 {
-; CHECK: select_v4i64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; CHECK: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; CHECK-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; CHECK-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; CHECK: ret
+; CHECK-LABEL: select_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldrb w8, [x2]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    ptrue p1.d
+; CHECK-NEXT:    lsr w10, w8, #3
+; CHECK-NEXT:    lsr w11, w8, #2
+; CHECK-NEXT:    sbfx x10, x10, #0, #1
+; CHECK-NEXT:    sbfx x11, x11, #0, #1
+; CHECK-NEXT:    stp x11, x10, [sp, #16]
+; CHECK-NEXT:    sbfx x10, x8, #0, #1
+; CHECK-NEXT:    lsr w8, w8, #1
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    stp x10, x8, [sp]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; CHECK-NEXT:    sel z0.d, p1, z1.d, z2.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %mask = load <4 x i1>, <4 x i1>* %c
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
@@ -353,17 +2226,48 @@ define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b, <4 x i1>* %c) #0 {
 }
 
 define void @select_v8i64(<8 x i64>* %a, <8 x i64>* %b, <8 x i1>* %c) #0 {
-; CHECK: select_v8i64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_512: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_512-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_512-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: select_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_512-NEXT:    sub x9, sp, #112
+; VBITS_GE_512-NEXT:    mov x29, sp
+; VBITS_GE_512-NEXT:    and sp, x9, #0xffffffffffffffc0
+; VBITS_GE_512-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_512-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_512-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_512-NEXT:    ldrb w8, [x2]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    mov x9, sp
+; VBITS_GE_512-NEXT:    ptrue p1.d
+; VBITS_GE_512-NEXT:    lsr w10, w8, #7
+; VBITS_GE_512-NEXT:    lsr w11, w8, #6
+; VBITS_GE_512-NEXT:    lsr w12, w8, #5
+; VBITS_GE_512-NEXT:    lsr w13, w8, #4
+; VBITS_GE_512-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_512-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_512-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_512-NEXT:    lsr w10, w8, #3
+; VBITS_GE_512-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_512-NEXT:    lsr w11, w8, #2
+; VBITS_GE_512-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_512-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_512-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_512-NEXT:    sbfx x10, x8, #0, #1
+; VBITS_GE_512-NEXT:    lsr w8, w8, #1
+; VBITS_GE_512-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_512-NEXT:    stp x10, x8, [sp]
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_512-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_512-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    mov sp, x29
+; VBITS_GE_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_512-NEXT:    ret
   %mask = load <8 x i1>, <8 x i1>* %c
   %op1 = load <8 x i64>, <8 x i64>* %a
   %op2 = load <8 x i64>, <8 x i64>* %b
@@ -373,17 +2277,68 @@ define void @select_v8i64(<8 x i64>* %a, <8 x i64>* %b, <8 x i1>* %c) #0 {
 }
 
 define void @select_v16i64(<16 x i64>* %a, <16 x i64>* %b, <16 x i1>* %c) #0 {
-; CHECK: select_v16i64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_1024: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_1024-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_1024-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_1024: ret
+; VBITS_GE_1024-LABEL: select_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_GE_1024-NEXT:    sub x9, sp, #240
+; VBITS_GE_1024-NEXT:    mov x29, sp
+; VBITS_GE_1024-NEXT:    and sp, x9, #0xffffffffffffff80
+; VBITS_GE_1024-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_GE_1024-NEXT:    .cfi_offset w30, -8
+; VBITS_GE_1024-NEXT:    .cfi_offset w29, -16
+; VBITS_GE_1024-NEXT:    ldrh w8, [x2]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    mov x9, sp
+; VBITS_GE_1024-NEXT:    ptrue p1.d
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #15
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #14
+; VBITS_GE_1024-NEXT:    lsr w12, w8, #13
+; VBITS_GE_1024-NEXT:    lsr w13, w8, #12
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w14, w8, #11
+; VBITS_GE_1024-NEXT:    lsr w15, w8, #10
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #112]
+; VBITS_GE_1024-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w16, w8, #9
+; VBITS_GE_1024-NEXT:    lsr w17, w8, #8
+; VBITS_GE_1024-NEXT:    stp x12, x11, [sp, #96]
+; VBITS_GE_1024-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #7
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #6
+; VBITS_GE_1024-NEXT:    stp x13, x12, [sp, #80]
+; VBITS_GE_1024-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w12, w8, #5
+; VBITS_GE_1024-NEXT:    stp x14, x13, [sp, #64]
+; VBITS_GE_1024-NEXT:    lsr w13, w8, #4
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_1024-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w10, w8, #3
+; VBITS_GE_1024-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_1024-NEXT:    lsr w11, w8, #2
+; VBITS_GE_1024-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_1024-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_1024-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_1024-NEXT:    sbfx x10, x8, #0, #1
+; VBITS_GE_1024-NEXT:    lsr w8, w8, #1
+; VBITS_GE_1024-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_1024-NEXT:    stp x10, x8, [sp]
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    mov sp, x29
+; VBITS_GE_1024-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_GE_1024-NEXT:    ret
   %mask = load <16 x i1>, <16 x i1>* %c
   %op1 = load <16 x i64>, <16 x i64>* %a
   %op2 = load <16 x i64>, <16 x i64>* %b
@@ -393,17 +2348,142 @@ define void @select_v16i64(<16 x i64>* %a, <16 x i64>* %b, <16 x i1>* %c) #0 {
 }
 
 define void @select_v32i64(<32 x i64>* %a, <32 x i64>* %b, <32 x i1>* %c) #0 {
-; CHECK: select_v32i64:
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
-; CHECK: ptrue [[PG1:p[0-9]+]].d
-; VBITS_GE_2048: ld1d { [[MASK:z[0-9]+]].d }, [[PG]]/z, [x9]
-; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: and [[AND:z[0-9]+]].d, [[MASK]].d, #0x1
-; VBITS_GE_2048-NEXT: cmpne [[COND:p[0-9]+]].d, [[PG1]]/z, [[AND]].d, #0
-; VBITS_GE_2048-NEXT: sel [[RES:z[0-9]+]].d, [[COND]], [[OP1]].d, [[OP2]].d
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048: ret
+; VBITS_GE_2048-LABEL: select_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; VBITS_GE_2048-NEXT:    sub x9, sp, #480
+; VBITS_GE_2048-NEXT:    str x19, [sp, #16] // 8-byte Folded Spill
+; VBITS_GE_2048-NEXT:    mov x29, sp
+; VBITS_GE_2048-NEXT:    and sp, x9, #0xffffffffffffff00
+; VBITS_GE_2048-NEXT:    .cfi_def_cfa w29, 32
+; VBITS_GE_2048-NEXT:    .cfi_offset w19, -16
+; VBITS_GE_2048-NEXT:    .cfi_offset w30, -24
+; VBITS_GE_2048-NEXT:    .cfi_offset w29, -32
+; VBITS_GE_2048-NEXT:    ldr w8, [x2]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    mov x9, sp
+; VBITS_GE_2048-NEXT:    ptrue p1.d
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #31, #1
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #30, #2
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #29, #3
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #28, #4
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    ubfx x14, x8, #27, #5
+; VBITS_GE_2048-NEXT:    ubfx x15, x8, #26, #6
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #240]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x16, x8, #25, #7
+; VBITS_GE_2048-NEXT:    ubfx x17, x8, #24, #8
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #224]
+; VBITS_GE_2048-NEXT:    // kill: def $w14 killed $w14 killed $x14 def $x14
+; VBITS_GE_2048-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w15 killed $w15 killed $x15 def $x15
+; VBITS_GE_2048-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x18, x8, #23, #9
+; VBITS_GE_2048-NEXT:    ubfx x2, x8, #22, #10
+; VBITS_GE_2048-NEXT:    stp x13, x12, [sp, #208]
+; VBITS_GE_2048-NEXT:    // kill: def $w16 killed $w16 killed $x16 def $x16
+; VBITS_GE_2048-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w17 killed $w17 killed $x17 def $x17
+; VBITS_GE_2048-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x3, x8, #21, #11
+; VBITS_GE_2048-NEXT:    ubfx x4, x8, #20, #12
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #15, #17
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #14, #18
+; VBITS_GE_2048-NEXT:    stp x14, x13, [sp, #192]
+; VBITS_GE_2048-NEXT:    // kill: def $w18 killed $w18 killed $x18 def $x18
+; VBITS_GE_2048-NEXT:    sbfx x14, x18, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w2 killed $w2 killed $x2 def $x2
+; VBITS_GE_2048-NEXT:    sbfx x15, x2, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x5, x8, #19, #13
+; VBITS_GE_2048-NEXT:    ubfx x6, x8, #18, #14
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #13, #19
+; VBITS_GE_2048-NEXT:    stp x15, x14, [sp, #176]
+; VBITS_GE_2048-NEXT:    // kill: def $w3 killed $w3 killed $x3 def $x3
+; VBITS_GE_2048-NEXT:    sbfx x15, x3, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w4 killed $w4 killed $x4 def $x4
+; VBITS_GE_2048-NEXT:    sbfx x16, x4, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    ubfx x7, x8, #17, #15
+; VBITS_GE_2048-NEXT:    ubfx x19, x8, #16, #16
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #12, #20
+; VBITS_GE_2048-NEXT:    stp x16, x15, [sp, #160]
+; VBITS_GE_2048-NEXT:    // kill: def $w5 killed $w5 killed $x5 def $x5
+; VBITS_GE_2048-NEXT:    sbfx x16, x5, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w6 killed $w6 killed $x6 def $x6
+; VBITS_GE_2048-NEXT:    sbfx x17, x6, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    ubfx x14, x8, #11, #21
+; VBITS_GE_2048-NEXT:    ubfx x15, x8, #10, #22
+; VBITS_GE_2048-NEXT:    stp x17, x16, [sp, #144]
+; VBITS_GE_2048-NEXT:    // kill: def $w7 killed $w7 killed $x7 def $x7
+; VBITS_GE_2048-NEXT:    sbfx x17, x7, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w19 killed $w19 killed $x19 def $x19
+; VBITS_GE_2048-NEXT:    sbfx x18, x19, #0, #1
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #112]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x16, x8, #9, #23
+; VBITS_GE_2048-NEXT:    stp x18, x17, [sp, #128]
+; VBITS_GE_2048-NEXT:    ubfx x17, x8, #8, #24
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #7, #25
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #96]
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #6, #26
+; VBITS_GE_2048-NEXT:    // kill: def $w14 killed $w14 killed $x14 def $x14
+; VBITS_GE_2048-NEXT:    sbfx x12, x14, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w15 killed $w15 killed $x15 def $x15
+; VBITS_GE_2048-NEXT:    sbfx x13, x15, #0, #1
+; VBITS_GE_2048-NEXT:    stp x13, x12, [sp, #80]
+; VBITS_GE_2048-NEXT:    ubfx x12, x8, #5, #27
+; VBITS_GE_2048-NEXT:    // kill: def $w16 killed $w16 killed $x16 def $x16
+; VBITS_GE_2048-NEXT:    sbfx x13, x16, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w17 killed $w17 killed $x17 def $x17
+; VBITS_GE_2048-NEXT:    sbfx x14, x17, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    stp x14, x13, [sp, #64]
+; VBITS_GE_2048-NEXT:    ubfx x13, x8, #4, #28
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w12 killed $w12 killed $x12 def $x12
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #48]
+; VBITS_GE_2048-NEXT:    sbfx x11, x12, #0, #1
+; VBITS_GE_2048-NEXT:    // kill: def $w13 killed $w13 killed $x13 def $x13
+; VBITS_GE_2048-NEXT:    sbfx x12, x13, #0, #1
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #3, #29
+; VBITS_GE_2048-NEXT:    stp x12, x11, [sp, #32]
+; VBITS_GE_2048-NEXT:    ubfx x11, x8, #2, #30
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    // kill: def $w11 killed $w11 killed $x11 def $x11
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x11, x11, #0, #1
+; VBITS_GE_2048-NEXT:    stp x11, x10, [sp, #16]
+; VBITS_GE_2048-NEXT:    ubfx x10, x8, #1, #31
+; VBITS_GE_2048-NEXT:    // kill: def $w10 killed $w10 killed $x10 def $x10
+; VBITS_GE_2048-NEXT:    sbfx x8, x8, #0, #1
+; VBITS_GE_2048-NEXT:    sbfx x10, x10, #0, #1
+; VBITS_GE_2048-NEXT:    stp x8, x10, [sp]
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x9]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    and z0.d, z0.d, #0x1
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    sel z0.d, p1, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    mov sp, x29
+; VBITS_GE_2048-NEXT:    ldr x19, [sp, #16] // 8-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; VBITS_GE_2048-NEXT:    ret
   %mask = load <32 x i1>, <32 x i1>* %c
   %op1 = load <32 x i64>, <32 x i64>* %a
   %op2 = load <32 x i64>, <32 x i64>* %b

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index d330ff77a477e..a347c1f668508 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -26,20 +26,21 @@ target triple = "aarch64-unknown-linux-gnu"
 
 define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2i8:
-; CHECK: ldrb [[VALS_LO:w[0-9]+]], [x0]
-; CHECK-NEXT: ldrb [[VALS_HI:w[0-9]+]], [x0, #1]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
-; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: ld1sb { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].s, vl2
-; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
-; CHECK-NEXT: st1b { z[[XTN]].s }, [[PG1]],  [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrb w8, [x0]
+; CHECK-NEXT:    ldrb w9, [x0, #1]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    cmeq v1.2s, v1.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x i8>, <2 x i8>* %a
   %ptrs = load <2 x i8*>, <2 x i8*>* %b
   %mask = icmp eq <2 x i8> %cval, zeroinitializer
@@ -50,20 +51,21 @@ define void @masked_gather_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
 
 define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4i8:
-; CHECK: ldr s[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; CHECK-NEXT: ushll [[SHL:v[0-9]+]].8h, v[[VALS]].8b, #0
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, [[SHL]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: ld1sb { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ptrue [[PG1:p[0-9]+]].h, vl4
-; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; CHECK-NEXT: st1b { z[[UZP2]].h }, [[PG0]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    cmeq v0.4h, v0.4h, #0
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x i8>, <4 x i8>* %a
   %ptrs = load <4 x i8*>, <4 x i8*>* %b
   %mask = icmp eq <4 x i8> %cval, zeroinitializer
@@ -73,50 +75,53 @@ define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
 }
 
 define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8i8:
-; VBITS_GE_512: ldr d[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8b, v[[VALS]].8b, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
-; VBITS_GE_512-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_512-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_512-NEXT: uzp1 z[[UZP3:[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
-; VBITS_GE_512-NEXT: str d[[UZP3]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
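 ; At VBITS=256 the <8 x i8*> pointer vector needs two <4 x i64> registers, so the
 ; mask is split and widened with zip1/zip2 plus shl/sshr, two half-gathers are
 ; issued, and the results are narrowed and recombined with uzp1.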
-; VBITS_EQ_256-DAG: ldr d[[VALS:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmeq [[ZMSK:v[0-9]+]].8b, v[[VALS]].8b, #0
-; VBITS_EQ_256-DAG: zip1 [[VAL_LO:v[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: zip2 [[VAL_HI:v[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: shl [[SHL_LO:v[0-9]+]].4h, [[VAL_LO]].4h, #8
-; VBITS_EQ_256-DAG: shl [[SHL_HI:v[0-9]+]].4h, [[VAL_HI]].4h, #8
-; VBITS_EQ_256-DAG: sshr v[[SSHR_LO:[0-9]+]].4h, [[SHL_LO]].4h, #8
-; VBITS_EQ_256-DAG: sshr v[[SSHR_HI:[0-9]+]].4h, [[SHL_HI]].4h, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[SSHR_LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[SSHR_HI]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, [[UPK2_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, [[UPK2_HI]].d, #0
-; VBITS_EQ_256-DAG: ld1sb { [[RES_LO:z[0-9]+]].d }, [[MASK_LO]]/z, {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: ld1sb { [[RES_HI:z[0-9]+]].d }, [[MASK_HI]]/z, {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-DAG: uzp1 [[UZP1_LO:z[0-9]+]].s, [[RES_LO]].s, [[RES_LO]].s
-; VBITS_EQ_256-DAG: uzp1 [[UZP1_HI:z[0-9]+]].s, [[RES_HI]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: uzp1 z[[UZP2_LO:[0-9]+]].h, [[UZP1_LO]].h, [[UZP1_LO]].h
-; VBITS_EQ_256-DAG: uzp1 z[[UZP2_HI:[0-9]+]].h, [[UZP1_HI]].h, [[UZP1_HI]].h
-; VBITS_EQ_256-NEXT: uzp1 v[[UZP3:[0-9]+]].8b, v[[UZP2_LO]].8b, v[[UZP2_HI]].8b
-; VBITS_EQ_256-NEXT: str d[[UZP3]], [x0]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_gather_v8i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr d0, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmeq v0.8b, v0.8b, #0
+; VBITS_EQ_256-NEXT:    zip2 v3.8b, v0.8b, v0.8b
+; VBITS_EQ_256-NEXT:    zip1 v0.8b, v0.8b, v0.8b
+; VBITS_EQ_256-NEXT:    shl v3.4h, v3.4h, #8
+; VBITS_EQ_256-NEXT:    shl v0.4h, v0.4h, #8
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    sshr v3.4h, v3.4h, #8
+; VBITS_EQ_256-NEXT:    sshr v0.4h, v0.4h, #8
+; VBITS_EQ_256-NEXT:    uunpklo z3.s, z3.h
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    ld1sb { z0.d }, p1/z, [z1.d]
+; VBITS_EQ_256-NEXT:    ld1sb { z1.d }, p0/z, [z2.d]
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    uzp1 v0.8b, v1.8b, v0.8b
+; VBITS_EQ_256-NEXT:    str d0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_gather_v8i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr d0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmeq v0.8b, v0.8b, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.d }, p0/z, [z1.d]
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_512-NEXT:    str d0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x i8>, <8 x i8>* %a
   %ptrs = load <8 x i8*>, <8 x i8*>* %b
   %mask = icmp eq <8 x i8> %cval, zeroinitializer
@@ -126,21 +131,22 @@ define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
 }
 
 define void @masked_gather_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16i8:
-; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmeq v[[CMP:[0-9]+]].16b, v[[VALS]].16b, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
-; VBITS_GE_1024-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_1024-NEXT: uzp1 z[[UZP3:[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
-; VBITS_GE_1024-NEXT: str q[[UZP3]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ldr q0, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    cmeq v0.16b, v0.16b, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1b { z0.d }, p0/z, [z1.d]
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_1024-NEXT:    str q0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x i8>, <16 x i8>* %a
   %ptrs = load <16 x i8*>, <16 x i8*>* %b
   %mask = icmp eq <16 x i8> %cval, zeroinitializer
@@ -150,23 +156,24 @@ define void @masked_gather_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
 }
 
 define void @masked_gather_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32i8:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].b, vl32
-; VBITS_GE_2048-NEXT: ld1b { [[VALS:z[0-9]+]].b }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].b, [[PG0]]/z, [[VALS]].b, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].b, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].h, [[MONE]].b
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK3]].d, #0
-; VBITS_GE_2048-NEXT: ld1b { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: uzp1 [[UZP3:z[0-9]+]].b, [[UZP2]].b, [[UZP2]].b
-; VBITS_GE_2048-NEXT: st1b { [[UZP3]].b }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p2.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    mov z0.b, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1b { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x i8>, <32 x i8>* %a
   %ptrs = load <32 x i8*>, <32 x i8*>* %b
   %mask = icmp eq <32 x i8> %cval, zeroinitializer
@@ -181,20 +188,21 @@ define void @masked_gather_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
 
 define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2i16:
-; CHECK: ldrh [[VALS_LO:w[0-9]+]], [x0]
-; CHECK-NEXT: ldrh [[VALS_HI:w[0-9]+]], [x0, #2]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
-; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: ld1sh { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
-; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
-; CHECK-NEXT: st1h { z[[RES]].s }, [[PG0]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    ldrh w9, [x0, #2]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    cmeq v1.2s, v1.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, #0
+; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [z0.d]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x i16>, <2 x i16>* %a
   %ptrs = load <2 x i16*>, <2 x i16*>* %b
   %mask = icmp eq <2 x i16> %cval, zeroinitializer
@@ -205,18 +213,19 @@ define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
 
 define void @masked_gather_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4i16:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; CHECK-NEXT: str d[[UZP2]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmeq v0.4h, v0.4h, #0
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x i16>, <4 x i16>* %a
   %ptrs = load <4 x i16*>, <4 x i16*>* %b
   %mask = icmp eq <4 x i16> %cval, zeroinitializer
@@ -226,43 +235,46 @@ define void @masked_gather_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
 }
 
 define void @masked_gather_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8i16:
-; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; VBITS_GE_512-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_512-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_512-NEXT: str q[[UZP2]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
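 ; As for v8i8, the <8 x i16*> pointers are split into two <4 x i64> halves at
 ; VBITS=256; here the mask halves come from ext and the gathered halves are
 ; merged with "mov v1.d[1], v0.d[0]".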
-; VBITS_EQ_256-DAG: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmeq v[[ZMSK:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_EQ_256-DAG: ext v[[ZEXT:[0-9]+]].16b, v[[ZMSK]].16b, v[[ZMSK]].16b, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[ZMSK]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[ZEXT]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, [[UPK2_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, [[UPK2_HI]].d, #0
-; VBITS_EQ_256-DAG: ld1h { [[RES_LO:z[0-9]+]].d }, [[MASK_LO]]/z, {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: ld1h { [[RES_HI:z[0-9]+]].d }, [[MASK_HI]]/z, {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-DAG: uzp1 [[UZP1_LO:z[0-9]+]].s, [[RES_LO]].s, [[RES_LO]].s
-; VBITS_EQ_256-DAG: uzp1 z[[UZP2_LO:[0-9]+]].h, [[UZP1_LO]].h, [[UZP1_LO]].h
-; VBITS_EQ_256-DAG: uzp1 [[UZP1_HI:z[0-9]+]].s, [[RES_HI]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: uzp1 z[[UZP2_HI:[0-9]+]].h, [[UZP1_HI]].h, [[UZP1_HI]].h
-; VBITS_EQ_256-NEXT: mov v[[UZP2_LO]].d[1], v[[UZP2_HI]].d[0]
-; VBITS_EQ_256-NEXT: str q[[UZP2_LO]], [x0]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_gather_v8i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr q0, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmeq v0.8h, v0.8h, #0
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    uunpklo z3.s, z0.h
+; VBITS_EQ_256-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    ld1h { z2.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z2.s, z2.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_EQ_256-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_EQ_256-NEXT:    mov v1.d[1], v0.d[0]
+; VBITS_EQ_256-NEXT:    str q1, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_gather_v8i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmeq v0.8h, v0.8h, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    str q0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x i16>, <8 x i16>* %a
   %ptrs = load <8 x i16*>, <8 x i16*>* %b
   %mask = icmp eq <8 x i16> %cval, zeroinitializer
@@ -272,21 +284,22 @@ define void @masked_gather_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
 }
 
 define void @masked_gather_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16i16:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_1024-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_1024-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p2.h, p0/z, z0.h, #0
+; VBITS_GE_1024-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1h { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x i16>, <16 x i16>* %a
   %ptrs = load <16 x i16*>, <16 x i16*>* %b
   %mask = icmp eq <16 x i16> %cval, zeroinitializer
@@ -296,21 +309,22 @@ define void @masked_gather_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
 }
 
 define void @masked_gather_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32i16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p2.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x i16>, <32 x i16>* %a
   %ptrs = load <32 x i16*>, <32 x i16*>* %b
   %mask = icmp eq <32 x i16> %cval, zeroinitializer
@@ -325,16 +339,17 @@ define void @masked_gather_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
 
 define void @masked_gather_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2i32:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: ld1w { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
-; CHECK-NEXT: str d[[XTN]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    cmeq v0.2s, v0.2s, #0
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x i32>, <2 x i32>* %a
   %ptrs = load <2 x i32*>, <2 x i32*>* %b
   %mask = icmp eq <2 x i32> %cval, zeroinitializer
@@ -345,16 +360,17 @@ define void @masked_gather_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
 
 define void @masked_gather_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4i32:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
-; CHECK-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: uzp1 z[[UZP:[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: str q[[UZP]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x i32>, <4 x i32>* %a
   %ptrs = load <4 x i32*>, <4 x i32*>* %b
   %mask = icmp eq <4 x i32> %cval, zeroinitializer
@@ -364,44 +380,57 @@ define void @masked_gather_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
 }
 
 define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8i32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_512-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_512-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
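 ; The 256-bit case round-trips the <8 x i32> mask through the stack (st1w, then
 ; two ldr q) to split it into two <4 x i64> predicates (hence the frame setup
 ; below) and rejoins the gathered halves with splice.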
-; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG1]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: mov [[MONE:z[0-9]+]].s, [[MASK]]/z, #-1
-; VBITS_EQ_256-DAG: st1w  { [[MONE]].s }, [[PG0]], [x8]
-; VBITS_EQ_256-DAG: ldr q[[CMP_HI:[0-9]+]], [sp, #16]
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, z[[CMP_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG1]]/z, [[UPK_HI]].d, #0
-; VBITS_EQ_256-DAG: ld1w { [[RES_HI:z[0-9]+]].d }, [[MASK_HI]]/z, {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-DAG: ldr q[[CMP_LO:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: uzp1 [[UZP_HI:z[0-9]+]].s, [[RES_HI]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, z[[CMP_LO]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG1]]/z, [[UPK_LO]].d, #0
-; VBITS_EQ_256-DAG: ld1w { [[RES_LO:z[0-9]+]].d }, [[MASK_LO]]/z, {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl4
-; VBITS_EQ_256-DAG: uzp1 [[UZP_LO:z[0-9]+]].s, [[RES_LO]].s, [[RES_LO]].s
-; VBITS_EQ_256-DAG: splice [[RES:z[0-9]+]].s, [[PG1]], [[RES_LO]].s, [[RES_HI]].s
-; VBITS_EQ_256-DAG: st1w { [[RES]].s }, [[PG0]], [x0]
+; VBITS_EQ_256-LABEL: masked_gather_v8i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #48
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p1.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p1/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
+; VBITS_EQ_256-NEXT:    mov x8, sp
+; VBITS_EQ_256-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p1/z, [x1]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_EQ_256-NEXT:    ldr q0, [sp, #16]
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    cmpne p2.d, p1/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    ld1w { z0.d }, p2/z, [z1.d]
+; VBITS_EQ_256-NEXT:    ldr q1, [sp]
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p1/z, z1.d, #0
+; VBITS_EQ_256-NEXT:    ld1w { z1.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT:    ptrue p1.s, vl4
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    splice z1.s, p1, z1.s, z0.s
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_gather_v8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p1.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x i32>, <8 x i32>* %a
   %ptrs = load <8 x i32*>, <8 x i32*>* %b
   %mask = icmp eq <8 x i32> %cval, zeroinitializer
@@ -411,19 +440,20 @@ define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
 }
 
 define void @masked_gather_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16i32:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_1024-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_1024-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
+; VBITS_GE_1024-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x i32>, <16 x i32>* %a
   %ptrs = load <16 x i32*>, <16 x i32*>* %b
   %mask = icmp eq <16 x i32> %cval, zeroinitializer
@@ -433,19 +463,20 @@ define void @masked_gather_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
 }
 
 define void @masked_gather_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32i32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x i32>, <32 x i32>* %a
   %ptrs = load <32 x i32*>, <32 x i32*>* %b
   %mask = icmp eq <32 x i32> %cval, zeroinitializer
@@ -461,7 +492,18 @@ define void @masked_gather_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
 ; Scalarize 1 x i64 gathers
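 ; There is no single-element gather instruction, so the mask becomes a scalar
 ; compare-and-branch (cbnz) guarding one scalar load in %cond.load.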
 define void @masked_gather_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v1i64:
-; CHECK-NOT: ptrue
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    // implicit-def: $d0
+; CHECK-NEXT:    cbnz x8, .LBB15_2
+; CHECK-NEXT:  // %bb.1: // %cond.load
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ldr d0, [x8]
+; CHECK-NEXT:  .LBB15_2: // %else
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <1 x i64>, <1 x i64>* %a
   %ptrs = load <1 x i64*>, <1 x i64*>* %b
   %mask = icmp eq <1 x i64> %cval, zeroinitializer
@@ -472,14 +514,15 @@ define void @masked_gather_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
 
 define void @masked_gather_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2i64:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
-; CHECK-NEXT: ld1d { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: str q[[RES]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    cmeq v0.2d, v0.2d, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x i64>, <2 x i64>* %a
   %ptrs = load <2 x i64*>, <2 x i64*>* %b
   %mask = icmp eq <2 x i64> %cval, zeroinitializer
@@ -490,13 +533,14 @@ define void @masked_gather_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
 
 define void @masked_gather_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4i64:
-; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; CHECK-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; CHECK-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x i64>, <4 x i64>* %a
   %ptrs = load <4 x i64*>, <4 x i64*>* %b
   %mask = icmp eq <4 x i64> %cval, zeroinitializer
@@ -506,29 +550,33 @@ define void @masked_gather_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
 }
 
 define void @masked_gather_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_512-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[VALS_LO:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[VALS_HI:z[0-9]+]].d }, [[PG0]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG0]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmpeq [[MASK_LO:p[0-9]+]].d, [[PG0]]/z, [[VALS_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpeq [[MASK_HI:p[0-9]+]].d, [[PG0]]/z, [[VALS_HI]].d, #0
-; VBITS_EQ_256-DAG: ld1d { [[RES_LO:z[0-9]+]].d }, [[MASK_LO]]/z, {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: ld1d { [[RES_HI:z[0-9]+]].d }, [[MASK_HI]]/z, {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG0]], [x0]
-; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG0]], [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_gather_v8i64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    cmpeq p2.d, p0/z, z1.d, #0
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p2/z, [z3.d]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_gather_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+
   %cval = load <8 x i64>, <8 x i64>* %a
   %ptrs = load <8 x i64*>, <8 x i64*>* %b
   %mask = icmp eq <8 x i64> %cval, zeroinitializer
@@ -538,14 +586,15 @@ define void @masked_gather_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
 }
 
 define void @masked_gather_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16i64:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_1024-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x i64>, <16 x i64>* %a
   %ptrs = load <16 x i64*>, <16 x i64*>* %b
   %mask = icmp eq <16 x i64> %cval, zeroinitializer
@@ -555,14 +604,15 @@ define void @masked_gather_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
 }
 
 define void @masked_gather_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32i64:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_2048-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x i64>, <32 x i64>* %a
   %ptrs = load <32 x i64*>, <32 x i64*>* %b
   %mask = icmp eq <32 x i64> %cval, zeroinitializer
@@ -577,31 +627,32 @@ define void @masked_gather_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
 
 define void @masked_gather_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2f16:
-; CHECK: ldr s[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: movi d[[ZERO:[0-9]+]], #0000000000000000
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0
-; CHECK-NEXT: umov w8, v[[CMP]].h[0]
-; CHECK-NEXT: umov w9, v[[CMP]].h[1]
-; CHECK-NEXT: fmov s[[CMP]], w8
-; CHECK-NEXT: mov v[[CMP]].s[1], w9
-; CHECK-NEXT: shl v[[CMP]].2s, v[[CMP]].2s, #16
-; CHECK-NEXT: sshr v[[CMP]].2s, v[[CMP]].2s, #16
-; CHECK-NEXT: fmov w9, s[[CMP]]
-; CHECK-NEXT: mov w8, v[[CMP]].s[1]
-; CHECK-NEXT: mov v[[NCMP:[0-9]+]].h[0], w9
-; CHECK-NEXT: mov v[[NCMP]].h[1], w8
-; CHECK-NEXT: shl v[[SHL:[0-9]+]].4h, v[[NCMP]].4h, #15
-; CHECK-NEXT: sshr v[[SHL]].4h, v[[SHL]].4h, #15
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[SHL]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; CHECK-NEXT: str s[[UZP2]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    movi d2, #0000000000000000
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    fcmeq v0.4h, v0.4h, #0.0
+; CHECK-NEXT:    umov w8, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[1]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    mov v0.s[1], w9
+; CHECK-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    mov v2.h[0], w9
+; CHECK-NEXT:    mov v2.h[1], w8
+; CHECK-NEXT:    shl v0.4h, v2.4h, #15
+; CHECK-NEXT:    sshr v0.4h, v0.4h, #15
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    str s0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x half>, <2 x half>* %a
   %ptrs = load <2 x half*>, <2 x half*>* %b
   %mask = fcmp oeq <2 x half> %cval, zeroinitializer
@@ -612,18 +663,19 @@ define void @masked_gather_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
 
 define void @masked_gather_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4f16:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; CHECK-NEXT: str d[[UZP2]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq v0.4h, v0.4h, #0.0
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x half>, <4 x half>* %a
   %ptrs = load <4 x half*>, <4 x half*>* %b
   %mask = fcmp oeq <4 x half> %cval, zeroinitializer
@@ -633,19 +685,20 @@ define void @masked_gather_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
 }
 
 define void @masked_gather_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8f16:
-; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; VBITS_GE_512-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_512-NEXT: uzp1 z[[UZP2:[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_512-NEXT: str q[[UZP2]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_gather_v8f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq v0.8h, v0.8h, #0.0
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.d }, p0/z, [z1.d]
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    str q0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x half>, <8 x half>* %a
   %ptrs = load <8 x half*>, <8 x half*>* %b
   %mask = fcmp oeq <8 x half> %cval, zeroinitializer
@@ -655,21 +708,22 @@ define void @masked_gather_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
 }
 
 define void @masked_gather_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16f16:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_1024-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_1024-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_1024-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_1024-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1h { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x half>, <16 x half>* %a
   %ptrs = load <16 x half*>, <16 x half*>* %b
   %mask = fcmp oeq <16 x half> %cval, zeroinitializer
@@ -679,21 +733,22 @@ define void @masked_gather_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
 }
 
 define void @masked_gather_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32f16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x half>, <32 x half>* %a
   %ptrs = load <32 x half*>, <32 x half*>* %b
   %mask = fcmp oeq <32 x half> %cval, zeroinitializer
@@ -708,16 +763,17 @@ define void @masked_gather_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
 
 define void @masked_gather_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2f32:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: ld1w { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: xtn v[[XTN:[0-9]+]].2s, v[[RES]].2d
-; CHECK-NEXT: str d[[XTN]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcmeq v0.2s, v0.2s, #0.0
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x float>, <2 x float>* %a
   %ptrs = load <2 x float*>, <2 x float*>* %b
   %mask = fcmp oeq <2 x float> %cval, zeroinitializer
@@ -728,16 +784,17 @@ define void @masked_gather_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
 
 define void @masked_gather_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4f32:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
-; CHECK-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: uzp1 z[[UZP:[0-9]+]].s, [[RES]].s, [[RES]].s
-; CHECK-NEXT: str q[[UZP]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq v0.4s, v0.4s, #0.0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x float>, <4 x float>* %a
   %ptrs = load <4 x float*>, <4 x float*>* %b
   %mask = fcmp oeq <4 x float> %cval, zeroinitializer
@@ -747,19 +804,20 @@ define void @masked_gather_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
 }
 
 define void @masked_gather_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8f32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_512-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_512-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_gather_v8f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p1.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_512-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x float>, <8 x float>* %a
   %ptrs = load <8 x float*>, <8 x float*>* %b
   %mask = fcmp oeq <8 x float> %cval, zeroinitializer
@@ -769,19 +827,20 @@ define void @masked_gather_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
 }
 
 define void @masked_gather_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16f32:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_1024-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_1024-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_1024-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x float>, <16 x float>* %a
   %ptrs = load <16 x float*>, <16 x float*>* %b
   %mask = fcmp oeq <16 x float> %cval, zeroinitializer
@@ -791,19 +850,20 @@ define void @masked_gather_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
 }
 
 define void @masked_gather_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32f32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x float>, <32 x float>* %a
   %ptrs = load <32 x float*>, <32 x float*>* %b
   %mask = fcmp oeq <32 x float> %cval, zeroinitializer
@@ -819,7 +879,18 @@ define void @masked_gather_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
 ; Scalarize 1 x double gathers
 define void @masked_gather_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v1f64:
-; CHECK-NOT: ptrue
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fcmp d0, #0.0
+; CHECK-NEXT:    // implicit-def: $d0
+; CHECK-NEXT:    b.ne .LBB31_2
+; CHECK-NEXT:  // %bb.1: // %cond.load
+; CHECK-NEXT:    ldr d0, [x1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ldr d0, [x8]
+; CHECK-NEXT:  .LBB31_2: // %else
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <1 x double>, <1 x double>* %a
   %ptrs = load <1 x double*>, <1 x double*>* %b
   %mask = fcmp oeq <1 x double> %cval, zeroinitializer
@@ -830,14 +901,15 @@ define void @masked_gather_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
 
 define void @masked_gather_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v2f64:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
-; CHECK-NEXT: ld1d { z[[RES:[0-9]+]].d }, [[MASK]]/z, [z[[PTRS]].d]
-; CHECK-NEXT: str q[[RES]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcmeq v0.2d, v0.2d, #0.0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z1.d]
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <2 x double>, <2 x double>* %a
   %ptrs = load <2 x double*>, <2 x double*>* %b
   %mask = fcmp oeq <2 x double> %cval, zeroinitializer
@@ -848,13 +920,14 @@ define void @masked_gather_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
 
 define void @masked_gather_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_gather_v4f64:
-; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; CHECK-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; CHECK-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; CHECK-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; CHECK-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %cval = load <4 x double>, <4 x double>* %a
   %ptrs = load <4 x double*>, <4 x double*>* %b
   %mask = fcmp oeq <4 x double> %cval, zeroinitializer
@@ -864,14 +937,15 @@ define void @masked_gather_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
 }
 
 define void @masked_gather_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v8f64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_512-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_gather_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %cval = load <8 x double>, <8 x double>* %a
   %ptrs = load <8 x double*>, <8 x double*>* %b
   %mask = fcmp oeq <8 x double> %cval, zeroinitializer
@@ -881,14 +955,15 @@ define void @masked_gather_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
 }
 
 define void @masked_gather_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v16f64:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_1024-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_gather_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %cval = load <16 x double>, <16 x double>* %a
   %ptrs = load <16 x double*>, <16 x double*>* %b
   %mask = fcmp oeq <16 x double> %cval, zeroinitializer
@@ -898,14 +973,15 @@ define void @masked_gather_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
 }
 
 define void @masked_gather_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
-; CHECK-LABEL: masked_gather_v32f64:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_2048-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cval = load <32 x double>, <32 x double>* %a
   %ptrs = load <32 x double*>, <32 x double*>* %b
   %mask = fcmp oeq <32 x double> %cval, zeroinitializer
@@ -919,21 +995,22 @@ define void @masked_gather_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_scaled_sext_f16(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_scaled_sext_f16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #1]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_scaled_sext_f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [x2, z1.d, lsl #1]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -946,19 +1023,20 @@ define void @masked_gather_32b_scaled_sext_f16(<32 x half>* %a, <32 x i32>* %b,
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_scaled_sext_f32(<32 x float>* %a, <32 x i32>* %b, float* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_scaled_sext_f32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #2]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_scaled_sext_f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [x2, z1.d, lsl #2]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -971,14 +1049,15 @@ define void @masked_gather_32b_scaled_sext_f32(<32 x float>* %a, <32 x i32>* %b,
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %b, double* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_scaled_sext_f64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG]]/z, [[VALS]].d, #0.0
-; VBITS_GE_2048-NEXT: ld1d { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #3]
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_scaled_sext_f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.d, p0/z, z0.d, #0.0
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p1/z, [x2, z1.d, lsl #3]
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x double>, <32 x double>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -991,21 +1070,22 @@ define void @masked_gather_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %b
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_scaled_zext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #1]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_scaled_zext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [x2, z1.d, lsl #1]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = zext <32 x i32> %idxs to <32 x i64>
@@ -1018,21 +1098,22 @@ define void @masked_gather_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_unscaled_sext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_unscaled_sext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -1046,21 +1127,22 @@ define void @masked_gather_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_gather_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_gather_32b_unscaled_zext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: ld1h { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP1:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: uzp1 [[UZP2:z[0-9]+]].h, [[UZP1]].h, [[UZP1]].h
-; VBITS_GE_2048-NEXT: st1h { [[UZP2]].h }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_32b_unscaled_zext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.h, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p1/z, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = zext <32 x i32> %idxs to <32 x i64>
@@ -1073,19 +1155,20 @@ define void @masked_gather_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8
 }
 
 define void @masked_gather_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %base) #0 {
-; CHECK-LABEL: masked_gather_64b_scaled:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d, lsl #2]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_64b_scaled:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [x2, z1.d, lsl #2]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i64>, <32 x i64>* %b
   %ptrs = getelementptr float, float* %base, <32 x i64> %idxs
@@ -1096,19 +1179,20 @@ define void @masked_gather_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %
 }
 
 define void @masked_gather_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_gather_64b_unscaled:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_64b_unscaled:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i64>, <32 x i64>* %b
   %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %idxs
@@ -1121,21 +1205,22 @@ define void @masked_gather_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %b
 
 ; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
 define void @masked_gather_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %off) #0 {
-; CHECK-LABEL: masked_gather_vec_plus_reg:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, x2
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS_ADD]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_vec_plus_reg:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.d, x2
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    add z1.d, p1/m, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %bases = load <32 x i8*>, <32 x i8*>* %b
   %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 %off
@@ -1148,21 +1233,22 @@ define void @masked_gather_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %o
 
 ; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
 define void @masked_gather_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_gather_vec_plus_imm:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, #4
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS_ADD]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_vec_plus_imm:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.d, #4 // =0x4
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    add z1.d, p1/m, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %bases = load <32 x i8*>, <32 x i8*>* %b
   %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 4
@@ -1174,21 +1260,22 @@ define void @masked_gather_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 {
 }
 
 define void @masked_gather_passthru(<32 x float>* %a, <32 x float*>* %b, <32 x float>* %c) #0 {
-; CHECK-LABEL: masked_gather_passthru:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: ld1w { [[PT:z[0-9]+]].s }, [[PG0]]/z, [x2]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: sel [[SEL:z[0-9]+]].s, [[CMP]], [[UZP]].s, [[PT]].s
-; VBITS_GE_2048-NEXT: st1w { [[SEL]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_passthru:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    ld1w { z2.s }, p0/z, [x2]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    sel z0.s, p2, z0.s, z2.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %ptrs = load <32 x float*>, <32 x float*>* %b
   %passthru = load <32 x float>, <32 x float>* %c
@@ -1199,19 +1286,20 @@ define void @masked_gather_passthru(<32 x float>* %a, <32 x float*>* %b, <32 x f
 }
 
 define void @masked_gather_passthru_0(<32 x float>* %a, <32 x float*>* %b) #0 {
-; CHECK-LABEL: masked_gather_passthru_0:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: ld1w { [[RES:z[0-9]+]].d }, [[MASK]]/z, {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: uzp1 [[UZP:z[0-9]+]].s, [[RES]].s, [[RES]].s
-; VBITS_GE_2048-NEXT: st1w { [[UZP]].s }, [[PG0]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_gather_passthru_0:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p2.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
+; VBITS_GE_2048-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %cvals = load <32 x float>, <32 x float>* %a
   %ptrs = load <32 x float*>, <32 x float*>* %b
   %mask = fcmp oeq <32 x float> %cvals, zeroinitializer

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
index 6c361382f7e84..6ff9f3bc1094e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=16  -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -D#VBYTES=16  -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -25,26 +25,28 @@ target triple = "aarch64-unknown-linux-gnu"
 ;
 define <2 x half> @masked_load_v2f16(<2 x half>* %ap, <2 x half>* %bp) #0 {
 ; CHECK-LABEL: masked_load_v2f16:
-; CHECK: ldr s[[N0:[0-9]+]], [x0]
-; CHECK-NEXT: ldr s[[N1:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].h, vl4
-; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].4h, v[[N0]].4h, v[[N1]].4h
-; CHECK-NEXT: umov [[W0:w[0-9]+]], v[[N2]].h[0]
-; CHECK-NEXT: umov [[W1:w[0-9]+]], v[[N2]].h[1]
-; CHECK-NEXT: fmov s[[V0:[0-9]+]], [[W0]]
-; CHECK-NEXT: mov v[[V0]].s[1], [[W1]]
-; CHECK-NEXT: shl v[[V0]].2s, v[[V0]].2s, #16
-; CHECK-NEXT: sshr v[[V0]].2s, v[[V0]].2s, #16
-; CHECK-NEXT: movi [[D0:d[0-9]+]], #0000000000000000
-; CHECK-NEXT: fmov [[W1]], s[[V0]]
-; CHECK-NEXT: mov [[W0]], v[[V0]].s[1]
-; CHECK-NEXT: mov [[V1:v[0-9]+]].h[0], [[W1]]
-; CHECK-NEXT: mov [[V1]].h[1], [[W0]]
-; CHECK-NEXT: shl v[[V0]].4h, [[V1]].4h, #15
-; CHECK-NEXT: sshr v[[V0]].4h, v[[V0]].4h, #15
-; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].h, [[PG0]]/z, z[[N2]].h, #0
-; CHECK-NEXT: ld1h { z0.h }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ldr s1, [x1]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    fcmeq v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    umov w8, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[1]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    mov v0.s[1], w9
+; CHECK-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    movi d1, #0000000000000000
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    mov v1.h[0], w9
+; CHECK-NEXT:    mov v1.h[1], w8
+; CHECK-NEXT:    shl v0.4h, v1.4h, #15
+; CHECK-NEXT:    sshr v0.4h, v0.4h, #15
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %ap
   %b = load <2 x half>, <2 x half>* %bp
   %mask = fcmp oeq <2 x half> %a, %b
@@ -54,13 +56,15 @@ define <2 x half> @masked_load_v2f16(<2 x half>* %ap, <2 x half>* %bp) #0 {
 
 define <2 x float> @masked_load_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_load_v2f32:
-; CHECK: ldr d[[N0:[0-9]+]], [x0]
-; CHECK-NEXT: ldr d[[N1:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl2
-; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].2s, v[[N0]].2s, v[[N1]].2s
-; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, z[[N2]].s, #0
-; CHECK-NEXT: ld1w { z0.s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    fcmeq v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %ap
   %b = load <2 x float>, <2 x float>* %bp
   %mask = fcmp oeq <2 x float> %a, %b
@@ -70,13 +74,15 @@ define <2 x float> @masked_load_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 {
 
 define <4 x float> @masked_load_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_load_v4f32:
-; CHECK: ldr q[[N0:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[N1:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].s, vl4
-; CHECK-NEXT: fcmeq v[[N2:[0-9]+]].4s, v[[N0]].4s, v[[N1]].4s
-; CHECK-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, z[[N2]].s, #0
-; CHECK-NEXT: ld1w { z0.s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fcmeq v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %ap
   %b = load <4 x float>, <4 x float>* %bp
   %mask = fcmp oeq <4 x float> %a, %b
@@ -86,13 +92,14 @@ define <4 x float> @masked_load_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 {
 
 define <8 x float> @masked_load_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_load_v8f32:
-; CHECK: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; CHECK-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; CHECK-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; CHECK-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x0]
-; CHECK-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
+; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %ap
   %b = load <8 x float>, <8 x float>* %bp
   %mask = fcmp oeq <8 x float> %a, %b
@@ -101,14 +108,15 @@ define <8 x float> @masked_load_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 {
 }
 
 define <16 x float> @masked_load_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 {
-; CHECK-LABEL: masked_load_v16f32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %ap
   %b = load <16 x float>, <16 x float>* %bp
   %mask = fcmp oeq <16 x float> %a, %b
@@ -117,14 +125,15 @@ define <16 x float> @masked_load_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0
 }
 
 define <32 x float> @masked_load_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 {
-; CHECK-LABEL: masked_load_v32f32:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
-; VBITS_GE_1024-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_1024-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_1024-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_load_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_1024-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %ap
   %b = load <32 x float>, <32 x float>* %bp
   %mask = fcmp oeq <32 x float> %a, %b
@@ -133,15 +142,15 @@ define <32 x float> @masked_load_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0
 }
 
 define <64 x float> @masked_load_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 {
-; CHECK-LABEL: masked_load_v64f32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
-; VBITS_GE_2048-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_2048-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_2048-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_2048-NEXT: ret
-
+; VBITS_GE_2048-LABEL: masked_load_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
   %a = load <64 x float>, <64 x float>* %ap
   %b = load <64 x float>, <64 x float>* %bp
   %mask = fcmp oeq <64 x float> %a, %b
@@ -150,14 +159,15 @@ define <64 x float> @masked_load_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0
 }
 
 define <64 x i8> @masked_load_v64i8(<64 x i8>* %ap, <64 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_v64i8:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl64
-; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_v64i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.b, p0/z, z0.b, z1.b
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <64 x i8>, <64 x i8>* %ap
   %b = load <64 x i8>, <64 x i8>* %bp
   %mask = icmp eq <64 x i8> %a, %b
@@ -166,14 +176,15 @@ define <64 x i8> @masked_load_v64i8(<64 x i8>* %ap, <64 x i8>* %bp) #0 {
 }
 
 define <32 x i16> @masked_load_v32i16(<32 x i16>* %ap, <32 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_v32i16:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG0]], [x8]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: masked_load_v32i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.h, p0/z, z0.h, z1.h
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %ap
   %b = load <32 x i16>, <32 x i16>* %bp
   %mask = icmp eq <32 x i16> %a, %b
@@ -182,14 +193,15 @@ define <32 x i16> @masked_load_v32i16(<32 x i16>* %ap, <32 x i16>* %bp) #0 {
 }
 
 define <16 x i32> @masked_load_v16i32(<16 x i32>* %ap, <16 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_v16i32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_v16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %ap
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %a, %b
@@ -198,14 +210,15 @@ define <16 x i32> @masked_load_v16i32(<16 x i32>* %ap, <16 x i32>* %bp) #0 {
 }
 
 define <8 x i64> @masked_load_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_v8i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %ap
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %a, %b
@@ -214,15 +227,16 @@ define <8 x i64> @masked_load_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 {
 }
 
 define <8 x i64> @masked_load_passthru_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_passthru_v8i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_passthru_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %ap
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %a, %b
@@ -231,15 +245,16 @@ define <8 x i64> @masked_load_passthru_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0
 }
 
 define <8 x double> @masked_load_passthru_v8f64(<8 x double>* %ap, <8 x double>* %bp) #0 {
-; CHECK-LABEL: masked_load_passthru_v8f64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: sel [[Z2:z[0-9]+]].d, [[PG1]], [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: st1d { [[Z2]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_passthru_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p1.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    sel z0.d, p1, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %ap
   %b = load <8 x double>, <8 x double>* %bp
   %mask = fcmp oeq <8 x double> %a, %b
@@ -248,17 +263,18 @@ define <8 x double> @masked_load_passthru_v8f64(<8 x double>* %ap, <8 x double>*
 }
 
 define <32 x i16> @masked_load_sext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v32i8i16:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32
-; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG0]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].h, [[PG2]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].h }, [[PG3]]/z, [x0]
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v32i8i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_512-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    cmpne p1.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.h }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i8>, <32 x i8>* %bp
   %mask = icmp eq <32 x i8> %b, zeroinitializer
   %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
@@ -267,16 +283,17 @@ define <32 x i16> @masked_load_sext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0
 }
 
 define <16 x i32> @masked_load_sext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v16i8i32:
-; VBITS_GE_512: ldr q0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v16i8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    cmeq v0.16b, v0.16b, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i8>, <16 x i8>* %bp
   %mask = icmp eq <16 x i8> %b, zeroinitializer
   %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
@@ -285,17 +302,18 @@ define <16 x i32> @masked_load_sext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0
 }
 
 define <8 x i64> @masked_load_sext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i8i64:
-; VBITS_GE_512: ldr d0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr d0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmeq v0.8b, v0.8b, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i8>, <8 x i8>* %bp
   %mask = icmp eq <8 x i8> %b, zeroinitializer
   %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
@@ -304,17 +322,18 @@ define <8 x i64> @masked_load_sext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
 }
 
 define <16 x i32> @masked_load_sext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v16i16i32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].s, [[PG2]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1sh { [[Z0]].s }, [[PG3]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v16i16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1sh { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i16>, <16 x i16>* %bp
   %mask = icmp eq <16 x i16> %b, zeroinitializer
   %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
@@ -323,16 +342,17 @@ define <16 x i32> @masked_load_sext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp)
 }
 
 define <8 x i64> @masked_load_sext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i16i64:
-; VBITS_GE_512: ldr q0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
-; VBITS_GE_512-NEXT: ld1sh { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i16i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmeq v0.8h, v0.8h, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sh { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i16>, <8 x i16>* %bp
   %mask = icmp eq <8 x i16> %b, zeroinitializer
   %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
@@ -341,17 +361,18 @@ define <8 x i64> @masked_load_sext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
 }
 
 define <8 x i64> @masked_load_sext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i32i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG0]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].d, [[PG2]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1sw { [[Z0]].d }, [[PG3]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i32i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sw { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i32>, <8 x i32>* %bp
   %mask = icmp eq <8 x i32> %b, zeroinitializer
   %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
@@ -360,17 +381,18 @@ define <8 x i64> @masked_load_sext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
 }
 
 define <32 x i16> @masked_load_zext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v32i8i16:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32
-; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG0]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].h, [[PG2]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].h }, [[PG3]]/z, [x0]
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v32i8i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_512-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    cmpne p1.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.h }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i8>, <32 x i8>* %bp
   %mask = icmp eq <32 x i8> %b, zeroinitializer
   %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
@@ -379,16 +401,17 @@ define <32 x i16> @masked_load_zext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0
 }
 
 define <16 x i32> @masked_load_zext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v16i8i32:
-; VBITS_GE_512: ldr q0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v16i8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    cmeq v0.16b, v0.16b, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i8>, <16 x i8>* %bp
   %mask = icmp eq <16 x i8> %b, zeroinitializer
   %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
@@ -397,17 +420,18 @@ define <16 x i32> @masked_load_zext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0
 }
 
 define <8 x i64> @masked_load_zext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i8i64:
-; VBITS_GE_512: ldr d0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v8i8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr d0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmeq v0.8b, v0.8b, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i8>, <8 x i8>* %bp
   %mask = icmp eq <8 x i8> %b, zeroinitializer
   %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
@@ -416,17 +440,18 @@ define <8 x i64> @masked_load_zext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 {
 }
 
 define <16 x i32> @masked_load_zext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v16i16i32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].s, [[PG2]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].s }, [[PG3]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v16i16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i16>, <16 x i16>* %bp
   %mask = icmp eq <16 x i16> %b, zeroinitializer
   %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
@@ -435,16 +460,17 @@ define <16 x i32> @masked_load_zext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp)
 }
 
 define <8 x i64> @masked_load_zext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i16i64:
-; VBITS_GE_512: ldr q0, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, #0
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v8i16i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x1]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmeq v0.8h, v0.8h, #0
+; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i16>, <8 x i16>* %bp
   %mask = icmp eq <8 x i16> %b, zeroinitializer
   %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
@@ -453,16 +479,18 @@ define <8 x i64> @masked_load_zext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 {
 }
 
 define <8 x i64> @masked_load_zext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i32i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG0]]/z, #-1
-; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].d, [[PG2]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].d }, [[PG3]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-LABEL: masked_load_zext_v8i32i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i32>, <8 x i32>* %bp
   %mask = icmp eq <8 x i32> %b, zeroinitializer
   %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
@@ -471,13 +499,14 @@ define <8 x i64> @masked_load_zext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 {
 }
 
 define <32 x i16> @masked_load_sext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v32i8i16_m16:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v32i8i16_m16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.h }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i16>, <32 x i16>* %bp
   %mask = icmp eq <32 x i16> %b, zeroinitializer
   %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
@@ -486,13 +515,14 @@ define <32 x i16> @masked_load_sext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp
 }
 
 define <16 x i32> @masked_load_sext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v16i8i32_m32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v16i8i32_m32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %b, zeroinitializer
   %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
@@ -501,13 +531,14 @@ define <16 x i32> @masked_load_sext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp
 }
 
 define <8 x i64> @masked_load_sext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i8i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1sb { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i8i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sb { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
@@ -516,13 +547,14 @@ define <8 x i64> @masked_load_sext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0
 }
 
 define <16 x i32> @masked_load_sext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v16i16i32_m32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1sh { [[Z0]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v16i16i32_m32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1sh { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %b, zeroinitializer
   %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
@@ -531,13 +563,14 @@ define <16 x i32> @masked_load_sext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %
 }
 
 define <8 x i64> @masked_load_sext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i16i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1sh { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i16i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sh { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
@@ -546,13 +579,14 @@ define <8 x i64> @masked_load_sext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp)
 }
 
 define <8 x i64> @masked_load_sext_v8i32i64_m64(<8 x i32>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_sext_v8i32i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1sw { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_sext_v8i32i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1sw { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
@@ -561,13 +595,14 @@ define <8 x i64> @masked_load_sext_v8i32i64_m64(<8 x i32>* %ap, <8 x i64>* %bp)
 }
 
 define <32 x i16> @masked_load_zext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v32i8i16_m16:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v32i8i16_m16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.h, p0/z, z0.h, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.h }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i16>, <32 x i16>* %bp
   %mask = icmp eq <32 x i16> %b, zeroinitializer
   %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
@@ -576,13 +611,14 @@ define <32 x i16> @masked_load_zext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp
 }
 
 define <16 x i32> @masked_load_zext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v16i8i32_m32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v16i8i32_m32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %b, zeroinitializer
   %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
@@ -591,13 +627,14 @@ define <16 x i32> @masked_load_zext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp
 }
 
 define <8 x i64> @masked_load_zext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i8i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v8i8i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1b { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
@@ -606,13 +643,14 @@ define <8 x i64> @masked_load_zext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0
 }
 
 define <16 x i32> @masked_load_zext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v16i16i32_m32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v16i16i32_m32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.s }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %b, zeroinitializer
   %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
@@ -621,13 +659,14 @@ define <16 x i32> @masked_load_zext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %
 }
 
 define <8 x i64> @masked_load_zext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i16i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v8i16i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1h { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
@@ -636,13 +675,14 @@ define <8 x i64> @masked_load_zext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp)
 }
 
 define <8 x i64> @masked_load_zext_v8i32i64_m64(<8 x i32>* %ap, <8 x i64>* %bp) #0 {
-; CHECK-LABEL: masked_load_zext_v8i32i64_m64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, #0
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].d }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_load_zext_v8i32i64_m64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p1/z, [x0]
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %b, zeroinitializer
   %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)

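A note on the check style being removed in the hunks above: those tests relied on FileCheck numeric variables, where -D#VBYTES=<n> on the RUN line defines a numeric variable and an expression such as [[#min(div(VBYTES,4),8)]] is evaluated against the emitted assembly at match time. A minimal illustrative sketch (not part of the patch) of how such a line matches, assuming a 256-bit run so that VBYTES=32:

; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32
; Here min(div(32,4),8) evaluates to 8, so the check below matches "ptrue p0.s, vl8".
; CHECK: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]

The regenerated checks instead match the concrete vl immediates (vl8, vl16, vl32, vl64) directly.
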
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
index e6e890020f21b..0921b8904eeb6 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -26,18 +26,19 @@ target triple = "aarch64-unknown-linux-gnu"
 
 define void @masked_scatter_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2i8:
-; CHECK: ldrb [[VALS_LO:w[0-9]+]], [x0]
-; CHECK-NEXT: ldrb [[VALS_HI:w[0-9]+]], [x0, #1]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
-; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: st1b { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrb w8, [x0]
+; CHECK-NEXT:    ldrb w9, [x0, #1]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    cmeq v2.2s, v1.2s, #0
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1b { z1.d }, p0, [z0.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x i8>, <2 x i8>* %a
   %ptrs = load <2 x i8*>, <2 x i8*>* %b
   %mask = icmp eq <2 x i8> %vals, zeroinitializer
@@ -47,18 +48,19 @@ define void @masked_scatter_v2i8(<2 x i8>* %a, <2 x i8*>* %b) #0 {
 
 define void @masked_scatter_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4i8:
-; CHECK: ldr s[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: ushll [[SHL:v[0-9]+]].8h, v[[VALS]].8b, #0
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, [[SHL]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; CHECK-NEXT: uunpklo z[[UPK2:[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, z[[UPK2]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; CHECK-NEXT: st1b { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    cmeq v2.4h, v0.4h, #0
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1b { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x i8>, <4 x i8>* %a
   %ptrs = load <4 x i8*>, <4 x i8*>* %b
   %mask = icmp eq <4 x i8> %vals, zeroinitializer
@@ -67,49 +69,52 @@ define void @masked_scatter_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 {
 }
 
 define void @masked_scatter_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8i8:
-; VBITS_GE_512: ldr d[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8b, v[[VALS]].8b, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
-; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, z[[VALS]].b
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
-; VBITS_GE_512-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ldr d[[VALS:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: cmeq [[ZMSK:v[0-9]+]].8b, v[[VALS]].8b, #0
-; VBITS_EQ_256-DAG: zip1 v[[VAL_LO:[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: zip2 v[[VAL_HI:[0-9]+]].8b, [[ZMSK]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: shl [[SHL_LO:v[0-9]+]].4h, v[[VAL_LO]].4h, #8
-; VBITS_EQ_256-DAG: shl [[SHL_HI:v[0-9]+]].4h, v[[VAL_HI]].4h, #8
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: sshr v[[SSHR_LO:[0-9]+]].4h, [[SHL_LO]].4h, #8
-; VBITS_EQ_256-DAG: sshr v[[SSHR_HI:[0-9]+]].4h, [[SHL_HI]].4h, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VAL_LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[VAL_HI]].h
-; VBITS_EQ_256-DAG: uunpklo z[[UPK2_LO:[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo z[[UPK2_HI:[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, z[[UPK2_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, z[[UPK2_HI]].d, #0
-; VBITS_EQ_256-DAG: zip1 v[[VALS2_LO:[0-9]+]].8b, v[[VALS]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: zip2 v[[VALS2_HI:[0-9]+]].8b, v[[VALS]].8b, v[[VALS]].8b
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VALS2_LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[VALS2_HI]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: st1b { [[UPK2_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: st1b { [[UPK2_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_scatter_v8i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr d0, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmeq v3.8b, v0.8b, #0
+; VBITS_EQ_256-NEXT:    zip1 v4.8b, v3.8b, v0.8b
+; VBITS_EQ_256-NEXT:    zip2 v3.8b, v3.8b, v0.8b
+; VBITS_EQ_256-NEXT:    shl v4.4h, v4.4h, #8
+; VBITS_EQ_256-NEXT:    shl v3.4h, v3.4h, #8
+; VBITS_EQ_256-NEXT:    sshr v4.4h, v4.4h, #8
+; VBITS_EQ_256-NEXT:    sshr v3.4h, v3.4h, #8
+; VBITS_EQ_256-NEXT:    uunpklo z4.s, z4.h
+; VBITS_EQ_256-NEXT:    uunpklo z3.s, z3.h
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    uunpklo z4.d, z4.s
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z4.d, #0
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    zip1 v3.8b, v0.8b, v0.8b
+; VBITS_EQ_256-NEXT:    zip2 v0.8b, v0.8b, v0.8b
+; VBITS_EQ_256-NEXT:    uunpklo z3.s, z3.h
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    st1b { z3.d }, p1, [z2.d]
+; VBITS_EQ_256-NEXT:    st1b { z0.d }, p0, [z1.d]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_scatter_v8i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr d0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmeq v2.8b, v0.8b, #0
+; VBITS_GE_512-NEXT:    uunpklo z2.h, z2.b
+; VBITS_GE_512-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_512-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    st1b { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x i8>, <8 x i8>* %a
   %ptrs = load <8 x i8*>, <8 x i8*>* %b
   %mask = icmp eq <8 x i8> %vals, zeroinitializer
@@ -118,20 +123,21 @@ define void @masked_scatter_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 {
 }
 
 define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16i8:
-; VBITS_GE_1024: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmeq v[[CMP:[0-9]+]].16b, v[[VALS]].16b, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].h, z[[CMP]].b
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, z[[VALS]].b
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK3]].d, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
-; VBITS_GE_1024-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ldr q0, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    cmeq v2.16b, v0.16b, #0
+; VBITS_GE_1024-NEXT:    uunpklo z2.h, z2.b
+; VBITS_GE_1024-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_1024-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    st1b { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x i8>, <16 x i8>* %a
   %ptrs = load <16 x i8*>, <16 x i8*>* %b
   %mask = icmp eq <16 x i8> %vals, zeroinitializer
@@ -140,22 +146,23 @@ define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 {
 }
 
 define void @masked_scatter_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32i8:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].b, vl32
-; VBITS_GE_2048-NEXT: ld1b { [[VALS:z[0-9]+]].b }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].b, [[PG0]]/z, [[VALS]].b, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].b, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].h, [[MONE]].b
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].h, [[VALS]].b
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].s, [[UPK1]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].s, [[UPKV1]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK3:z[0-9]+]].d, [[UPK2]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK3]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV3:z[0-9]+]].d, [[UPKV2]].s
-; VBITS_GE_2048-NEXT: st1b { [[UPKV3]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    mov z2.b, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.h, z2.b
+; VBITS_GE_2048-NEXT:    uunpklo z0.h, z0.b
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1b { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x i8>, <32 x i8>* %a
   %ptrs = load <32 x i8*>, <32 x i8*>* %b
   %mask = icmp eq <32 x i8> %vals, zeroinitializer
@@ -169,18 +176,19 @@ define void @masked_scatter_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 {
 
 define void @masked_scatter_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2i16:
-; CHECK: ldrh [[VALS_LO:w[0-9]+]], [x0]
-; CHECK-NEXT: ldrh [[VALS_HI:w[0-9]+]], [x0, #2]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fmov s[[VALS:[0-9]+]], [[VALS_LO]]
-; CHECK-NEXT: mov v[[VALS]].s[1], [[VALS_HI]]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: st1h { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    ldrh w9, [x0, #2]
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    cmeq v2.2s, v1.2s, #0
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1h { z1.d }, p0, [z0.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x i16>, <2 x i16>* %a
   %ptrs = load <2 x i16*>, <2 x i16*>* %b
   %mask = icmp eq <2 x i16> %vals, zeroinitializer
@@ -190,17 +198,18 @@ define void @masked_scatter_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 {
 
 define void @masked_scatter_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4i16:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; CHECK-NEXT: uunpklo z[[UPK2:[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, z[[UPK2]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmeq v2.4h, v0.4h, #0
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x i16>, <4 x i16>* %a
   %ptrs = load <4 x i16*>, <4 x i16*>* %b
   %mask = icmp eq <4 x i16> %vals, zeroinitializer
@@ -209,41 +218,44 @@ define void @masked_scatter_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 {
 }
 
 define void @masked_scatter_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8i16:
-; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: cmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_512-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmeq v[[ZMSK:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_EQ_256-DAG: ext v[[EXT:[0-9]+]].16b, v[[VALS]].16b, v[[VALS]].16b, #8
-; VBITS_EQ_256-DAG: ext v[[ZEXT:[0-9]+]].16b, v[[ZMSK]].16b, v[[ZMSK]].16b, #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[ZMSK]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[ZEXT]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG]]/z, [[UPK2_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG]]/z, [[UPK2_HI]].d, #0
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].s, z[[VALS]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].s, z[[EXT]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_LO:z[0-9]+]].d, [[UPK1_LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK2_HI:z[0-9]+]].d, [[UPK1_HI]].s
-; VBITS_EQ_256-DAG: st1h { [[UPK2_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: st1h { [[UPK2_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_scatter_v8i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ldr q0, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmeq v3.8h, v0.8h, #0
+; VBITS_EQ_256-NEXT:    uunpklo z4.s, z3.h
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    uunpklo z4.d, z4.s
+; VBITS_EQ_256-NEXT:    ext v3.16b, v3.16b, v3.16b, #8
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z4.d, #0
+; VBITS_EQ_256-NEXT:    uunpklo z4.s, z0.h
+; VBITS_EQ_256-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; VBITS_EQ_256-NEXT:    uunpklo z3.s, z3.h
+; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    uunpklo z4.d, z4.s
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    st1h { z4.d }, p1, [z2.d]
+; VBITS_EQ_256-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_scatter_v8i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmeq v2.8h, v0.8h, #0
+; VBITS_GE_512-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x i16>, <8 x i16>* %a
   %ptrs = load <8 x i16*>, <8 x i16*>* %b
   %mask = icmp eq <8 x i16> %vals, zeroinitializer
@@ -252,20 +264,21 @@ define void @masked_scatter_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 {
 }
 
 define void @masked_scatter_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16i16:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_1024-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_1024-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x i16>, <16 x i16>* %a
   %ptrs = load <16 x i16*>, <16 x i16*>* %b
   %mask = icmp eq <16 x i16> %vals, zeroinitializer
@@ -274,20 +287,21 @@ define void @masked_scatter_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 {
 }
 
 define void @masked_scatter_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32i16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x i16>, <32 x i16>* %a
   %ptrs = load <32 x i16*>, <32 x i16*>* %b
   %mask = icmp eq <32 x i16> %vals, zeroinitializer
@@ -301,15 +315,16 @@ define void @masked_scatter_v32i16(<32 x i16>* %a, <32 x i16*>* %b) #0 {
 
 define void @masked_scatter_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2i32:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: ushll v[[SHL2:[0-9]+]].2d, v[[VALS]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHL]].d, #0
-; CHECK-NEXT: st1w { z[[SHL2]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    cmeq v2.2s, v0.2s, #0
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x i32>, <2 x i32>* %a
   %ptrs = load <2 x i32*>, <2 x i32*>* %b
   %mask = icmp eq <2 x i32> %vals, zeroinitializer
@@ -319,15 +334,16 @@ define void @masked_scatter_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 {
 
 define void @masked_scatter_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4i32:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV:z[0-9]+]].d, z[[VALS]].s
-; CHECK-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmeq v2.4s, v0.4s, #0
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x i32>, <4 x i32>* %a
   %ptrs = load <4 x i32*>, <4 x i32*>* %b
   %mask = icmp eq <4 x i32> %vals, zeroinitializer
@@ -336,44 +352,57 @@ define void @masked_scatter_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 {
 }
 
 define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8i32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_512-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG1]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmpeq [[MASK:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_EQ_256-DAG: add x8, sp, #32
-; VBITS_EQ_256-DAG: mov x9, sp
-; VBITS_EQ_256-DAG: mov [[MONE:z[0-9]+]].s, [[MASK]]/z, #-1
-; VBITS_EQ_256-DAG: st1w  { [[MONE]].s }, [[PG0]], [x8]
-; VBITS_EQ_256-DAG: st1w  { [[VALS]].s }, [[PG0]], [x9]
-; VBITS_EQ_256-DAG: ldr q[[CMP_LO:[0-9]+]], [sp, #32]
-; VBITS_EQ_256-DAG: ldr q[[VAL_LO:[0-9]+]], [sp]
-; VBITS_EQ_256-DAG: uunpklo [[UPKC_LO:z[0-9]+]].d, z[[CMP_LO]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_LO:p[0-9]+]].d, [[PG1]]/z, [[UPKC_LO]].d, #0
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_LO:z[0-9]+]].d, z[[VAL_LO]].s
-; VBITS_EQ_256-DAG: st1w { [[UPK1_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: ldr q[[CMP_HI:[0-9]+]], [sp, #48]
-; VBITS_EQ_256-DAG: ldr q[[VAL_HI:[0-9]+]], [sp, #16]
-; VBITS_EQ_256-DAG: uunpklo [[UPKC_HI:z[0-9]+]].d, z[[CMP_HI]].s
-; VBITS_EQ_256-DAG: cmpne [[MASK_HI:p[0-9]+]].d, [[PG1]]/z, [[UPKC_HI]].d, #0
-; VBITS_EQ_256-DAG: uunpklo [[UPK1_HI:z[0-9]+]].d, z[[VAL_HI]].s
-; VBITS_EQ_256-DAG: st1w { [[UPK1_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
+; VBITS_EQ_256-LABEL: masked_scatter_v8i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; VBITS_EQ_256-NEXT:    sub x9, sp, #80
+; VBITS_EQ_256-NEXT:    mov x29, sp
+; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
+; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
+; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
+; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
+; VBITS_EQ_256-NEXT:    ptrue p1.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p1/z, [x0]
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    cmpeq p2.s, p1/z, z0.s, #0
+; VBITS_EQ_256-NEXT:    add x8, sp, #32
+; VBITS_EQ_256-NEXT:    mov x9, sp
+; VBITS_EQ_256-NEXT:    mov z3.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    st1w { z3.s }, p1, [x8]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p1, [x9]
+; VBITS_EQ_256-NEXT:    ldr q0, [sp, #32]
+; VBITS_EQ_256-NEXT:    ldr q3, [sp]
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z3.s
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    st1w { z3.d }, p1, [z2.d]
+; VBITS_EQ_256-NEXT:    ldr q0, [sp, #48]
+; VBITS_EQ_256-NEXT:    ldr q2, [sp, #16]
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z2.s
+; VBITS_EQ_256-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_EQ_256-NEXT:    mov sp, x29
+; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_scatter_v8i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p1.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_512-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x i32>, <8 x i32>* %a
   %ptrs = load <8 x i32*>, <8 x i32*>* %b
   %mask = icmp eq <8 x i32> %vals, zeroinitializer
@@ -382,18 +411,19 @@ define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
 }
 
 define void @masked_scatter_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16i32:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_1024-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_1024-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x i32>, <16 x i32>* %a
   %ptrs = load <16 x i32*>, <16 x i32*>* %b
   %mask = icmp eq <16 x i32> %vals, zeroinitializer
@@ -402,18 +432,19 @@ define void @masked_scatter_v16i32(<16 x i32>* %a, <16 x i32*>* %b) #0 {
 }
 
 define void @masked_scatter_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32i32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x i32>, <32 x i32>* %a
   %ptrs = load <32 x i32*>, <32 x i32*>* %b
   %mask = icmp eq <32 x i32> %vals, zeroinitializer
@@ -428,7 +459,16 @@ define void @masked_scatter_v32i32(<32 x i32>* %a, <32 x i32*>* %b) #0 {
 ; Scalarize 1 x i64 scatters
 define void @masked_scatter_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v1i64:
-; CHECK-NOT: ptrue
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    cbnz x8, .LBB15_2
+; CHECK-NEXT:  // %bb.1: // %cond.store
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    str d0, [x8]
+; CHECK-NEXT:  .LBB15_2: // %else
+; CHECK-NEXT:    ret
   %vals = load <1 x i64>, <1 x i64>* %a
   %ptrs = load <1 x i64*>, <1 x i64*>* %b
   %mask = icmp eq <1 x i64> %vals, zeroinitializer
@@ -438,13 +478,14 @@ define void @masked_scatter_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 {
 
 define void @masked_scatter_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2i64:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: cmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
-; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    cmeq v2.2d, v0.2d, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x i64>, <2 x i64>* %a
   %ptrs = load <2 x i64*>, <2 x i64*>* %b
   %mask = icmp eq <2 x i64> %vals, zeroinitializer
@@ -454,12 +495,13 @@ define void @masked_scatter_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 {
 
 define void @masked_scatter_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4i64:
-; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; CHECK-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x i64>, <4 x i64>* %a
   %ptrs = load <4 x i64*>, <4 x i64*>* %b
   %mask = icmp eq <4 x i64> %vals, zeroinitializer
@@ -468,26 +510,29 @@ define void @masked_scatter_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 {
 }
 
 define void @masked_scatter_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8i64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG0:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[VALS_LO:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_EQ_256-DAG: ld1d { [[VALS_HI:z[0-9]+]].d }, [[PG0]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_LO:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[PTRS_HI:z[0-9]+]].d }, [[PG0]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: cmpeq [[MASK_LO:p[0-9]+]].d, [[PG0]]/z, [[VALS_LO]].d, #0
-; VBITS_EQ_256-DAG: cmpeq [[MASK_HI:p[0-9]+]].d, [[PG0]]/z, [[VALS_HI]].d, #0
-; VBITS_EQ_256-DAG: st1d { [[VALS_LO]].d }, [[MASK_LO]], {{\[}}[[PTRS_LO]].d]
-; VBITS_EQ_256-DAG: st1d { [[VALS_HI]].d }, [[MASK_HI]], {{\[}}[[PTRS_HI]].d]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: masked_scatter_v8i64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    cmpeq p1.d, p0/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    cmpeq p0.d, p0/z, z1.d, #0
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [z3.d]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p1, [z2.d]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: masked_scatter_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x i64>, <8 x i64>* %a
   %ptrs = load <8 x i64*>, <8 x i64*>* %b
   %mask = icmp eq <8 x i64> %vals, zeroinitializer
@@ -496,13 +541,14 @@ define void @masked_scatter_v8i64(<8 x i64>* %a, <8 x i64*>* %b) #0 {
 }
 
 define void @masked_scatter_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16i64:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_1024-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x i64>, <16 x i64>* %a
   %ptrs = load <16 x i64*>, <16 x i64*>* %b
   %mask = icmp eq <16 x i64> %vals, zeroinitializer
@@ -511,13 +557,14 @@ define void @masked_scatter_v16i64(<16 x i64>* %a, <16 x i64*>* %b) #0 {
 }
 
 define void @masked_scatter_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32i64:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_2048-NEXT: cmpeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0
-; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x i64>, <32 x i64>* %a
   %ptrs = load <32 x i64*>, <32 x i64*>* %b
   %mask = icmp eq <32 x i64> %vals, zeroinitializer
@@ -531,30 +578,31 @@ define void @masked_scatter_v32i64(<32 x i64>* %a, <32 x i64*>* %b) #0 {
 
 define void @masked_scatter_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2f16:
-; CHECK: ldr s[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: movi d2, #0000000000000000
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0.0
-; CHECK-NEXT: umov w8, v[[CMP]].h[0]
-; CHECK-NEXT: umov w9, v[[CMP]].h[1]
-; CHECK-NEXT: fmov s[[CMP]], w8
-; CHECK-NEXT: mov v[[CMP]].s[1], w9
-; CHECK-NEXT: shl v[[CMP]].2s, v[[CMP]].2s, #16
-; CHECK-NEXT: sshr v[[CMP]].2s, v[[CMP]].2s, #16
-; CHECK-NEXT: fmov w9, s[[CMP]]
-; CHECK-NEXT: mov w8, v[[CMP]].s[1]
-; CHECK-NEXT: mov v[[NCMP:[0-9]+]].h[0], w9
-; CHECK-NEXT: mov v[[NCMP]].h[1], w8
-; CHECK-NEXT: shl v[[NCMP]].4h, v[[NCMP]].4h, #15
-; CHECK-NEXT: sshr v[[NCMP]].4h, v[[NCMP]].4h, #15
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[NCMP]].h
-; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    movi d2, #0000000000000000
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    fcmeq v3.4h, v0.4h, #0.0
+; CHECK-NEXT:    umov w8, v3.h[0]
+; CHECK-NEXT:    umov w9, v3.h[1]
+; CHECK-NEXT:    fmov s3, w8
+; CHECK-NEXT:    mov v3.s[1], w9
+; CHECK-NEXT:    shl v3.2s, v3.2s, #16
+; CHECK-NEXT:    sshr v3.2s, v3.2s, #16
+; CHECK-NEXT:    fmov w9, s3
+; CHECK-NEXT:    mov w8, v3.s[1]
+; CHECK-NEXT:    mov v2.h[0], w9
+; CHECK-NEXT:    mov v2.h[1], w8
+; CHECK-NEXT:    shl v2.4h, v2.4h, #15
+; CHECK-NEXT:    sshr v2.4h, v2.4h, #15
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x half>, <2 x half>* %a
   %ptrs = load <2 x half*>, <2 x half*>* %b
   %mask = fcmp oeq <2 x half> %vals, zeroinitializer
@@ -564,17 +612,18 @@ define void @masked_scatter_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 {
 
 define void @masked_scatter_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4f16:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4h, v[[VALS]].4h, #0
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; CHECK-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; CHECK-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq v2.4h, v0.4h, #0.0
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x half>, <4 x half>* %a
   %ptrs = load <4 x half*>, <4 x half*>* %b
   %mask = fcmp oeq <4 x half> %vals, zeroinitializer
@@ -583,18 +632,19 @@ define void @masked_scatter_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 {
 }
 
 define void @masked_scatter_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8f16:
-; VBITS_GE_512: ldr q[[VALS:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq v[[CMP:[0-9]+]].8h, v[[VALS]].8h, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[CMP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, z[[VALS]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK2]].d, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_512-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_scatter_v8f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ldr q0, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq v2.8h, v0.8h, #0.0
+; VBITS_GE_512-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_512-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_512-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x half>, <8 x half>* %a
   %ptrs = load <8 x half*>, <8 x half*>* %b
   %mask = fcmp oeq <8 x half> %vals, zeroinitializer
@@ -603,20 +653,21 @@ define void @masked_scatter_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 {
 }
 
 define void @masked_scatter_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16f16:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_1024-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_1024-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_1024-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_1024-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x half>, <16 x half>* %a
   %ptrs = load <16 x half*>, <16 x half*>* %b
   %mask = fcmp oeq <16 x half> %vals, zeroinitializer
@@ -625,20 +676,21 @@ define void @masked_scatter_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 {
 }
 
 define void @masked_scatter_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32f16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x half>, <32 x half>* %a
   %ptrs = load <32 x half*>, <32 x half*>* %b
   %mask = fcmp oeq <32 x half> %vals, zeroinitializer
@@ -652,15 +704,16 @@ define void @masked_scatter_v32f16(<32 x half>* %a, <32 x half*>* %b) #0 {
 
 define void @masked_scatter_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2f32:
-; CHECK: ldr d[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2s, v[[VALS]].2s, #0
-; CHECK-NEXT: ushll v[[SHLC:[0-9]+]].2d, v[[CMP]].2s, #0
-; CHECK-NEXT: ushll v[[SHL:[0-9]+]].2d, v[[VALS]].2s, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[SHLC]].d, #0
-; CHECK-NEXT: st1w { z[[SHL]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcmeq v2.2s, v0.2s, #0.0
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x float>, <2 x float>* %a
   %ptrs = load <2 x float*>, <2 x float*>* %b
   %mask = fcmp oeq <2 x float> %vals, zeroinitializer
@@ -670,15 +723,16 @@ define void @masked_scatter_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 {
 
 define void @masked_scatter_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4f32:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].4s, v[[VALS]].4s, #0
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[CMP]].s
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG]]/z, [[UPK]].d, #0
-; CHECK-NEXT: uunpklo [[UPKV:z[0-9]+]].d, z[[VALS]].s
-; CHECK-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq v2.4s, v0.4s, #0.0
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x float>, <4 x float>* %a
   %ptrs = load <4 x float*>, <4 x float*>* %b
   %mask = fcmp oeq <4 x float> %vals, zeroinitializer
@@ -687,18 +741,19 @@ define void @masked_scatter_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 {
 }
 
 define void @masked_scatter_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8f32:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG1:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_512-NEXT: mov [[MONE:z[0-9]]].s, [[CMP]]/z, #-1
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_512-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_512-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_512-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_scatter_v8f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p1.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_512-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_512-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_512-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x float>, <8 x float>* %a
   %ptrs = load <8 x float*>, <8 x float*>* %b
   %mask = fcmp oeq <8 x float> %vals, zeroinitializer
@@ -707,18 +762,19 @@ define void @masked_scatter_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 {
 }
 
 define void @masked_scatter_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16f32:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG1:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_1024-NEXT: mov [[MONE:z[0-9]]].s, [[CMP]]/z, #-1
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_1024-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_1024-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_1024-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p1.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_1024-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_1024-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_1024-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_1024-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_1024-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x float>, <16 x float>* %a
   %ptrs = load <16 x float*>, <16 x float*>* %b
   %mask = fcmp oeq <16 x float> %vals, zeroinitializer
@@ -727,18 +783,19 @@ define void @masked_scatter_v16f32(<16 x float>* %a, <16 x float*>* %b) #0 {
 }
 
 define void @masked_scatter_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32f32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]]].s, [[CMP]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %ptrs = load <32 x float*>, <32 x float*>* %b
   %mask = fcmp oeq <32 x float> %vals, zeroinitializer
@@ -753,7 +810,16 @@ define void @masked_scatter_v32f32(<32 x float>* %a, <32 x float*>* %b) #0 {
 ; Scalarize 1 x double scatters
 define void @masked_scatter_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v1f64:
-; CHECK-NOT: ptrue
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    fcmp d0, #0.0
+; CHECK-NEXT:    b.ne .LBB31_2
+; CHECK-NEXT:  // %bb.1: // %cond.store
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    str d0, [x8]
+; CHECK-NEXT:  .LBB31_2: // %else
+; CHECK-NEXT:    ret
   %vals = load <1 x double>, <1 x double>* %a
   %ptrs = load <1 x double*>, <1 x double*>* %b
   %mask = fcmp oeq <1 x double> %vals, zeroinitializer
@@ -763,13 +829,14 @@ define void @masked_scatter_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 {
 
 define void @masked_scatter_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v2f64:
-; CHECK: ldr q[[VALS:[0-9]+]], [x0]
-; CHECK-NEXT: ldr q[[PTRS:[0-9]+]], [x1]
-; CHECK-NEXT: ptrue [[PG0:p[0-9]+]].d, vl2
-; CHECK-NEXT: fcmeq v[[CMP:[0-9]+]].2d, v[[VALS]].2d, #0
-; CHECK-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG0]]/z, z[[CMP]].d, #0
-; CHECK-NEXT: st1d { z[[VALS]].d }, [[MASK]], [z[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcmeq v2.2d, v0.2d, #0.0
+; CHECK-NEXT:    cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <2 x double>, <2 x double>* %a
   %ptrs = load <2 x double*>, <2 x double*>* %b
   %mask = fcmp oeq <2 x double> %vals, zeroinitializer
@@ -779,12 +846,13 @@ define void @masked_scatter_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 {
 
 define void @masked_scatter_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
 ; CHECK-LABEL: masked_scatter_v4f64:
-; CHECK: ptrue [[PG0:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; CHECK-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; CHECK-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; CHECK-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
+; CHECK-NEXT:    ret
   %vals = load <4 x double>, <4 x double>* %a
   %ptrs = load <4 x double*>, <4 x double*>* %b
   %mask = fcmp oeq <4 x double> %vals, zeroinitializer
@@ -793,13 +861,14 @@ define void @masked_scatter_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 {
 }
 
 define void @masked_scatter_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v8f64:
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_scatter_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:    ret
   %vals = load <8 x double>, <8 x double>* %a
   %ptrs = load <8 x double*>, <8 x double*>* %b
   %mask = fcmp oeq <8 x double> %vals, zeroinitializer
@@ -808,13 +877,14 @@ define void @masked_scatter_v8f64(<8 x double>* %a, <8 x double*>* %b) #0 {
 }
 
 define void @masked_scatter_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v16f64:
-; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_1024-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_scatter_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_1024-NEXT:    ret
   %vals = load <16 x double>, <16 x double>* %a
   %ptrs = load <16 x double*>, <16 x double*>* %b
   %mask = fcmp oeq <16 x double> %vals, zeroinitializer
@@ -823,13 +893,14 @@ define void @masked_scatter_v16f64(<16 x double>* %a, <16 x double*>* %b) #0 {
 }
 
 define void @masked_scatter_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_v32f64:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x double>, <32 x double>* %a
   %ptrs = load <32 x double*>, <32 x double*>* %b
   %mask = fcmp oeq <32 x double> %vals, zeroinitializer
@@ -842,20 +913,21 @@ define void @masked_scatter_v32f64(<32 x double>* %a, <32 x double*>* %b) #0 {
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_scaled_sext_f16(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_scaled_sext_f16:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_scaled_sext_f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [x2, z1.d, lsl #1]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -867,18 +939,19 @@ define void @masked_scatter_32b_scaled_sext_f16(<32 x half>* %a, <32 x i32>* %b,
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_scaled_sext_f32(<32 x float>* %a, <32 x i32>* %b, float* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_scaled_sext_f32:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #2]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_scaled_sext_f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [x2, z1.d, lsl #2]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -890,13 +963,14 @@ define void @masked_scatter_32b_scaled_sext_f32(<32 x float>* %a, <32 x i32>* %b
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %b, double* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_scaled_sext_f64:
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG]]/z, [[VALS]].d, #0.0
-; VBITS_GE_2048-NEXT: st1d { [[VALS]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #3]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_scaled_sext_f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x2, z1.d, lsl #3]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x double>, <32 x double>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -908,20 +982,21 @@ define void @masked_scatter_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, half* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_scaled_zext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #1]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_scaled_zext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [x2, z1.d, lsl #1]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = zext <32 x i32> %idxs to <32 x i64>
@@ -933,20 +1008,21 @@ define void @masked_scatter_32b_scaled_zext(<32 x half>* %a, <32 x i32>* %b, hal
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_unscaled_sext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1sw { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_unscaled_sext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1sw { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = sext <32 x i32> %idxs to <32 x i64>
@@ -959,20 +1035,21 @@ define void @masked_scatter_32b_unscaled_sext(<32 x half>* %a, <32 x i32>* %b, i
 
 ; NOTE: This produces a non-optimal addressing mode due to a temporary workaround
 define void @masked_scatter_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_scatter_32b_unscaled_zext:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[VALS:z[0-9]+]].h }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].h, [[PG0]]/z, [[VALS]].h, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].h, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[MONE]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV1:z[0-9]+]].s, [[VALS]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK2]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV2:z[0-9]+]].d, [[UPKV1]].s
-; VBITS_GE_2048-NEXT: st1h { [[UPKV2]].d }, [[MASK]], [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_32b_unscaled_zext:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.h, p0/z, z0.h, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.s, z2.h
+; VBITS_GE_2048-NEXT:    uunpklo z0.s, z0.h
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1h { z0.d }, p0, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x half>, <32 x half>* %a
   %idxs = load <32 x i32>, <32 x i32>* %b
   %ext = zext <32 x i32> %idxs to <32 x i64>
@@ -984,18 +1061,19 @@ define void @masked_scatter_32b_unscaled_zext(<32 x half>* %a, <32 x i32>* %b, i
 }
 
 define void @masked_scatter_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float* %base) #0 {
-; CHECK-LABEL: masked_scatter_64b_scaled:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], [x2, [[PTRS]].d, lsl #2]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_64b_scaled:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [x2, z1.d, lsl #2]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i64>, <32 x i64>* %b
   %ptrs = getelementptr float, float* %base, <32 x i64> %idxs
@@ -1005,18 +1083,19 @@ define void @masked_scatter_64b_scaled(<32 x float>* %a, <32 x i64>* %b, float*
 }
 
 define void @masked_scatter_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %base) #0 {
-; CHECK-LABEL: masked_scatter_64b_unscaled:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], [x2, [[PTRS]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_64b_unscaled:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [x2, z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %idxs = load <32 x i64>, <32 x i64>* %b
   %byte_ptrs = getelementptr i8, i8* %base, <32 x i64> %idxs
@@ -1028,20 +1107,21 @@ define void @masked_scatter_64b_unscaled(<32 x float>* %a, <32 x i64>* %b, i8* %
 
 ; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
 define void @masked_scatter_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %off) #0 {
-; CHECK-LABEL: masked_scatter_vec_plus_reg:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, x2
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_vec_plus_reg:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.d, x2
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    add z1.d, p1/m, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %bases = load <32 x i8*>, <32 x i8*>* %b
   %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 %off
@@ -1053,20 +1133,21 @@ define void @masked_scatter_vec_plus_reg(<32 x float>* %a, <32 x i8*>* %b, i64 %
 
 ; FIXME: This case does not yet codegen well due to deficiencies in opcode selection
 define void @masked_scatter_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 {
-; CHECK-LABEL: masked_scatter_vec_plus_imm:
-; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ptrue [[PG1:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[VALS:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG1]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov [[OFF:z[0-9]+]].d, #4
-; VBITS_GE_2048-NEXT: fcmeq [[CMP:p[0-9]+]].s, [[PG0]]/z, [[VALS]].s, #0.0
-; VBITS_GE_2048-NEXT: add [[PTRS_ADD:z[0-9]+]].d, [[PG1]]/m, [[PTRS]].d, [[OFF]].d
-; VBITS_GE_2048-NEXT: mov [[MONE:z[0-9]+]].s, [[PG0]]/z, #-1
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[MONE]].s
-; VBITS_GE_2048-NEXT: cmpne [[MASK:p[0-9]+]].d, [[PG1]]/z, [[UPK]].d, #0
-; VBITS_GE_2048-NEXT: uunpklo [[UPKV:z[0-9]+]].d, [[VALS]].s
-; VBITS_GE_2048-NEXT: st1w { [[UPKV]].d }, [[MASK]], {{\[}}[[PTRS_ADD]].d]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_scatter_vec_plus_imm:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ptrue p1.d, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p1/z, [x1]
+; VBITS_GE_2048-NEXT:    mov z2.d, #4 // =0x4
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0
+; VBITS_GE_2048-NEXT:    add z1.d, p1/m, z1.d, z2.d
+; VBITS_GE_2048-NEXT:    mov z2.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_2048-NEXT:    uunpklo z2.d, z2.s
+; VBITS_GE_2048-NEXT:    cmpne p0.d, p1/z, z2.d, #0
+; VBITS_GE_2048-NEXT:    uunpklo z0.d, z0.s
+; VBITS_GE_2048-NEXT:    st1w { z0.d }, p0, [z1.d]
+; VBITS_GE_2048-NEXT:    ret
   %vals = load <32 x float>, <32 x float>* %a
   %bases = load <32 x i8*>, <32 x i8*>* %b
   %byte_ptrs = getelementptr i8, <32 x i8*> %bases, i64 4
@@ -1085,15 +1166,17 @@ define void @masked_scatter_vec_plus_imm(<32 x float>* %a, <32 x i8*>* %b) #0 {
 ; different block to the scatter store.  If not, the problematic bitcast will be
 ; removed before operation legalisation and thus not exercise the combine.
 define void @masked_scatter_bitcast_infinite_loop(<8 x double>* %a, <8 x double*>* %b, i1 %cond) #0 {
-; CHECK-LABEL: masked_scatter_bitcast_infinite_loop
-; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[VALS:z[0-9]+]].d }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: tbz w2, #0, [[LABEL:.*]]
-; VBITS_GE_512-NEXT: ld1d { [[PTRS:z[0-9]+]].d }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[MASK:p[0-9]+]].d, [[PG0]]/z, [[VALS]].d, #0.0
-; VBITS_GE_512-NEXT: st1d { [[VALS]].d }, [[MASK]], {{\[}}[[PTRS]].d]
-; VBITS_GE_512-NEXT: [[LABEL]]:
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_scatter_bitcast_infinite_loop:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    tbz w2, #0, .LBB47_2
+; VBITS_GE_512-NEXT:  // %bb.1: // %bb.1
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p0.d, p0/z, z0.d, #0.0
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [z1.d]
+; VBITS_GE_512-NEXT:  .LBB47_2: // %bb.2
+; VBITS_GE_512-NEXT:    ret
   %vals = load volatile <8 x double>, <8 x double>* %a
   br i1 %cond, label %bb.1, label %bb.2
 

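For context, every scatter test in the file above follows the same IR shape: load the data and pointer vectors, build the predicate with a compare, then call the masked scatter intrinsic, which the hunks elide. A minimal sketch of one such test (the function name and the alignment of 8 are illustrative, not copied from the file; the declaration uses the standard typed-pointer mangling):

define void @masked_scatter_sketch(<8 x double>* %a, <8 x double*>* %b) #0 {
  %vals = load <8 x double>, <8 x double>* %a
  %ptrs = load <8 x double*>, <8 x double*>* %b
  ; Lanes of %vals that compare equal to zero become the active scatter lanes.
  %mask = fcmp oeq <8 x double> %vals, zeroinitializer
  ; Alignment of 8 is an assumption; the tests in the file may use another value.
  call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %vals, <8 x double*> %ptrs, i32 8, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double>, <8 x double*>, i32 immarg, <8 x i1>)

The fcmp against zeroinitializer is what lowers to the "fcmeq ..., #0.0" predicate seen throughout the updated checks.
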
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
index 6f5c5cee303c6..c04def10a0115 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll
@@ -5,15 +5,15 @@
 ; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
 ; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
 ; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
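
A brief note on the idiom being replaced below: the old checks used FileCheck numeric expressions driven by the -D#VBYTES definitions in the RUN lines above. For example, a directive along the lines of

; CHECK: ptrue p0.s, vl[[#min(div(VBYTES,4),16)]]

evaluates, for -D#VBYTES=64, to min(64/4, 16) = 16 and so matches "ptrue p0.s, vl16". The regenerated checks spell the vector length out per prefix instead.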
 
@@ -25,26 +25,27 @@ target triple = "aarch64-unknown-linux-gnu"
 ;;
 define void @masked_store_v2f16(<2 x half>* %ap, <2 x half>* %bp) #0 {
 ; CHECK-LABEL: masked_store_v2f16:
-; CHECK: ldr s0, [x0]
-; CHECK-NEXT: ldr s1, [x1]
-; CHECK-NEXT: movi [[D0:d[0-9]+]], #0000000000000000
-; CHECK-NEXT: ptrue p[[P0:[0-9]+]].h, vl4
-; CHECK-NEXT: fcmeq v[[P1:[0-9]+]].4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
-; CHECK-NEXT: umov [[W0:w[0-9]+]], v[[P1]].h[0]
-; CHECK-NEXT: umov [[W1:w[0-9]+]], v[[P1]].h[1]
-; CHECK-NEXT: fmov s[[V0:[0-9]+]], [[W0]]
-; CHECK-NEXT: mov v[[V0]].s[1], [[W1]]
-; CHECK-NEXT: shl v[[V0]].2s, v[[V0]].2s, #16
-; CHECK-NEXT: sshr v[[V0]].2s, v[[V0]].2s, #16
-; CHECK-NEXT: fmov [[W1]], s[[V0]]
-; CHECK-NEXT: mov [[W0]], v[[V0]].s[1]
-; CHECK-NEXT: mov [[V1:v[0-9]+]].h[0], [[W1]]
-; CHECK-NEXT: mov [[V1]].h[1], [[W0]]
-; CHECK-NEXT: shl v[[V0]].4h, [[V1]].4h, #15
-; CHECK-NEXT: sshr v[[V0]].4h, v[[V0]].4h, #15
-; CHECK-NEXT: cmpne p[[P2:[0-9]+]].h, p[[P0]]/z, z[[P1]].h, #0
-; CHECK-NEXT: st1h { z0.h }, p[[P2]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ldr s1, [x1]
+; CHECK-NEXT:    movi d2, #0000000000000000
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    fcmeq v1.4h, v0.4h, v1.4h
+; CHECK-NEXT:    umov w8, v1.h[0]
+; CHECK-NEXT:    umov w9, v1.h[1]
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    shl v1.2s, v1.2s, #16
+; CHECK-NEXT:    sshr v1.2s, v1.2s, #16
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    mov w8, v1.s[1]
+; CHECK-NEXT:    mov v2.h[0], w9
+; CHECK-NEXT:    mov v2.h[1], w8
+; CHECK-NEXT:    shl v1.4h, v2.4h, #15
+; CHECK-NEXT:    sshr v1.4h, v1.4h, #15
+; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, #0
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
   %a = load <2 x half>, <2 x half>* %ap
   %b = load <2 x half>, <2 x half>* %bp
   %mask = fcmp oeq <2 x half> %a, %b
@@ -55,13 +56,14 @@ define void @masked_store_v2f16(<2 x half>* %ap, <2 x half>* %bp) #0 {
 
 define void @masked_store_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_store_v2f32:
-; CHECK: ldr d0, [x0]
-; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: ptrue p[[P0:[0-9]+]].s, vl2
-; CHECK-NEXT: fcmeq v[[P1:[0-9]+]].2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
-; CHECK-NEXT: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0
-; CHECK-NEXT: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    fcmeq v1.2s, v0.2s, v1.2s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %a = load <2 x float>, <2 x float>* %ap
   %b = load <2 x float>, <2 x float>* %bp
   %mask = fcmp oeq <2 x float> %a, %b
@@ -71,13 +73,14 @@ define void @masked_store_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 {
 
 define void @masked_store_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_store_v4f32:
-; CHECK: ldr q0, [x0]
-; CHECK-NEXT: ldr q1, [x1]
-; CHECK-NEXT: ptrue p[[P0:[0-9]+]].s, vl4
-; CHECK-NEXT: fcmeq v[[P1:[0-9]+]].4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
-; CHECK-NEXT: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0
-; CHECK-NEXT: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fcmeq v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %a = load <4 x float>, <4 x float>* %ap
   %b = load <4 x float>, <4 x float>* %bp
   %mask = fcmp oeq <4 x float> %a, %b
@@ -87,12 +90,13 @@ define void @masked_store_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 {
 
 define void @masked_store_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 {
 ; CHECK-LABEL: masked_store_v8f32:
-; CHECK: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
-; CHECK-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; CHECK-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; CHECK-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; CHECK-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %ap
   %b = load <8 x float>, <8 x float>* %bp
   %mask = fcmp oeq <8 x float> %a, %b
@@ -101,13 +105,14 @@ define void @masked_store_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 {
 }
 
 define void @masked_store_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 {
-; CHECK-LABEL: masked_store_v16f32:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_512-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %ap
   %b = load <16 x float>, <16 x float>* %bp
   %mask = fcmp oeq <16 x float> %a, %b
@@ -116,13 +121,14 @@ define void @masked_store_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 {
 }
 
 define void @masked_store_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 {
-; CHECK-LABEL: masked_store_v32f32:
-; VBITS_GE_1024: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
-; VBITS_GE_1024-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_1024-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_1024-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: masked_store_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %a = load <32 x float>, <32 x float>* %ap
   %b = load <32 x float>, <32 x float>* %bp
   %mask = fcmp oeq <32 x float> %a, %b
@@ -131,13 +137,14 @@ define void @masked_store_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 {
 }
 
 define void @masked_store_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 {
-; CHECK-LABEL: masked_store_v64f32:
-; VBITS_GE_2048: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
-; VBITS_GE_2048-NEXT: ld1w { [[Z0:z[0-9]+]].s }, [[PG0]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[Z1:z[0-9]+]].s }, [[PG0]]/z, [x1]
-; VBITS_GE_2048-NEXT: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_2048-NEXT: st1w { z0.s }, [[PG1]], [x{{[0-9]+}}]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: masked_store_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %a = load <64 x float>, <64 x float>* %ap
   %b = load <64 x float>, <64 x float>* %bp
   %mask = fcmp oeq <64 x float> %a, %b
@@ -146,20 +153,23 @@ define void @masked_store_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 {
 }
 
 define void @masked_store_trunc_v8i64i8(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i8>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v8i64i8:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-NEXT: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s
-; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h
-; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s
-; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h
-; VBITS_GE_512-DAG: uzp1 [[Z0]].b, [[Z0]].b, [[Z0]].b
-; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v8i64i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl8
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    uzp1 z1.b, z1.b, z1.b
+; VBITS_GE_512-NEXT:    cmpne p0.b, p0/z, z1.b, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %ap
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %a, %b
@@ -169,20 +179,21 @@ define void @masked_store_trunc_v8i64i8(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i8>
 }
 
 define void @masked_store_trunc_v8i64i16(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i16>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v8i64i16:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-DAG: ptrue p{{[0-9]+}}.h, vl8
-; VBITS_GE_512-DAG: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: mov [[Z1]].d, p[[P0]]/z, #-1
-; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s
-; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].h, p{{[0-9]+}}/z, [[Z1]].h, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s
-; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v8i64i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    ptrue p1.h, vl8
+; VBITS_GE_512-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_512-NEXT:    cmpne p0.h, p1/z, z1.h, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %ap
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %a, %b
@@ -192,18 +203,19 @@ define void @masked_store_trunc_v8i64i16(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i1
 }
 
 define void @masked_store_trunc_v8i64i32(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i32>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v8i64i32:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[Z0:z[0-9]+]].d }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
-; VBITS_GE_512-DAG: ptrue p{{[0-9]+}}.s, vl8
-; VBITS_GE_512-DAG: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0]].d, [[Z1]].d
-; VBITS_GE_512-NEXT: mov [[Z1]].d, p[[P0]]/z, #-1
-; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].s, p{{[0-9]+}}/z, [[Z1]].s, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v8i64i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    ptrue p1.s, vl8
+; VBITS_GE_512-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
+; VBITS_GE_512-NEXT:    mov z1.d, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_GE_512-NEXT:    cmpne p0.s, p1/z, z1.s, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.s, z0.s, z0.s
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %ap
   %b = load <8 x i64>, <8 x i64>* %bp
   %mask = icmp eq <8 x i64> %a, %b
@@ -213,20 +225,21 @@ define void @masked_store_trunc_v8i64i32(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i3
 }
 
 define void @masked_store_trunc_v16i32i8(<16 x i32>* %ap, <16 x i32>* %bp, <16 x i8>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v16i32i8:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-DAG: ptrue p{{[0-9]+}}.b, vl16
-; VBITS_GE_512-DAG: cmpeq p[[P1:[0-9]+]].s, p[[P0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: mov [[Z1]].s, p[[P0]]/z, #-1
-; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h
-; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h
-; VBITS_GE_512-DAG: uzp1 [[Z0]].b, [[Z0]].b, [[Z0]].b
-; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v16i32i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    ptrue p1.b, vl16
+; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    uzp1 z1.b, z1.b, z1.b
+; VBITS_GE_512-NEXT:    cmpne p0.b, p1/z, z1.b, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %ap
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %a, %b
@@ -236,18 +249,19 @@ define void @masked_store_trunc_v16i32i8(<16 x i32>* %ap, <16 x i32>* %bp, <16 x
 }
 
 define void @masked_store_trunc_v16i32i16(<16 x i32>* %ap, <16 x i32>* %bp, <16 x i16>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v16i32i16:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
-; VBITS_GE_512-DAG: ptrue p{{[0-9]+}}.h, vl16
-; VBITS_GE_512-DAG: cmpeq p[[P1:[0-9]+]].s, p[[P0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: mov [[Z1]].s, p[[P0]]/z, #-1
-; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].h, p{{[0-9]+}}/z, [[Z1]].h, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v16i32i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    ptrue p1.h, vl16
+; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
+; VBITS_GE_512-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.h, z1.h, z1.h
+; VBITS_GE_512-NEXT:    cmpne p0.h, p1/z, z1.h, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.h, z0.h, z0.h
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %ap
   %b = load <16 x i32>, <16 x i32>* %bp
   %mask = icmp eq <16 x i32> %a, %b
@@ -257,18 +271,19 @@ define void @masked_store_trunc_v16i32i16(<16 x i32>* %ap, <16 x i32>* %bp, <16
 }
 
 define void @masked_store_trunc_v32i16i8(<32 x i16>* %ap, <32 x i16>* %bp, <32 x i8>* %dest) #0 {
-; CHECK-LABEL: masked_store_trunc_v32i16i8:
-; VBITS_GE_512: ptrue p[[P0:[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
-; VBITS_GE_512-DAG: ptrue p{{[0-9]+}}.b, vl32
-; VBITS_GE_512-DAG: cmpeq p[[P1:[0-9]+]].h, p[[P0]]/z, [[Z0]].h, [[Z1]].h
-; VBITS_GE_512-NEXT: mov [[Z1]].h, p[[P0]]/z, #-1
-; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b
-; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0
-; VBITS_GE_512-DAG: uzp1 [[Z0]].b, [[Z0]].b, [[Z0]].b
-; VBITS_GE_512-NEXT: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-LABEL: masked_store_trunc_v32i16i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    ptrue p1.b, vl32
+; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
+; VBITS_GE_512-NEXT:    mov z1.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    uzp1 z1.b, z1.b, z1.b
+; VBITS_GE_512-NEXT:    cmpne p0.b, p1/z, z1.b, #0
+; VBITS_GE_512-NEXT:    uzp1 z0.b, z0.b, z0.b
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x2]
+; VBITS_GE_512-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %ap
   %b = load <32 x i16>, <32 x i16>* %bp
   %mask = icmp eq <32 x i16> %a, %b

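The trunc tests above share one IR pattern; the truncate and the intrinsic call fall outside the visible hunks. A representative sketch (the function name and the alignment of 8 are assumptions; the declaration uses the standard typed-pointer mangling):

define void @masked_store_trunc_sketch(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i8>* %dest) #0 {
  %a = load <8 x i64>, <8 x i64>* %ap
  %b = load <8 x i64>, <8 x i64>* %bp
  %mask = icmp eq <8 x i64> %a, %b
  ; The data is narrowed to the store type while the mask keeps its 8 lanes,
  ; which is why the generated code repacks both through uzp1 chains.
  %val = trunc <8 x i64> %a to <8 x i8>
  ; Alignment of 8 is an assumption.
  call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* %dest, i32 8, <8 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, i32 immarg, <8 x i1>)
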
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
index 5dad03dc567f1..96f7865f937a1 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-vector-shuffle.ll
@@ -1,19 +1,19 @@
-; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
-; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
-; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
-; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
-; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
-; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
 
 target triple = "aarch64-unknown-linux-gnu"
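
The shuffle_ext_byone tests that follow all select the last lane of %op1 followed by the leading lanes of %op2, i.e. the concatenation of the two vectors shifted along by one element. Taking the first test as a worked example:

  %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>

Index 7 is the last lane of %op1 and indices 8-14 are lanes 0-6 of %op2, which NEON expresses as the single "ext v0.8b, v0.8b, v1.8b, #7" in the checks. For the wider SVE cases the same shuffle becomes an extract of the final element (a mov from the last lane, or whilels plus lastb once the index no longer fits an immediate) followed by an insr into the second operand.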
 
@@ -22,33 +22,36 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Don't use SVE for 64-bit vectors
 define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8i8
-; CHECK: ext v0.8b, v0.8b, v1.8b, #7
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #7
+; CHECK-NEXT:    ret
   %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
   ret <8 x i8> %ret
 }
 
 ; Don't use SVE for 128-bit vectors
 define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) {
-; CHECK-LABEL: shuffle_ext_byone_v16i8
-; CHECK: ext v0.16b, v0.16b, v1.16b, #15
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #15
+; CHECK-NEXT:    ret
   %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
                                                                    i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
   ret <16 x i8> %ret
 }
 
 define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32i8
-; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
-; CHECK-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].b, [[OP1]].b[31]
-; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; CHECK-NEXT: insr [[OP2]].b, [[TMP]]
-; CHECK-NEXT: st1b { [[OP2]].b }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.b, z0.b[31]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.b, w8
+; CHECK-NEXT:    st1b { z1.b }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
   %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -60,31 +63,34 @@ define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v64i8
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
-; VBITS_GE_512-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].b, [[OP1]].b[63]
-; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; VBITS_GE_512-NEXT: insr [[OP2]].b, [[TMP]]
-; VBITS_GE_512-NEXT: st1b { [[OP2]].b }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_EQ_256-DAG: mov w[[NUMELTS:[0-9]+]], #32
-; VBITS_EQ_256-DAG: ld1b { [[OP1_HI:z[0-9]+]].b }, [[PG]]/z, [x0, x[[NUMELTS]]]
-; VBITS_EQ_256-DAG: ld1b { [[OP2_LO:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1b { [[OP2_HI:z[0-9]+]].b }, [[PG]]/z, [x1, x[[NUMELTS]]]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].b, [[OP1_HI]].b[31]
-; VBITS_EQ_256-DAG: fmov [[TMP1:w[0-9]+]], s[[ELEM1]]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].b, [[OP2_LO]].b[31]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].b, [[TMP1]]
-; VBITS_EQ_256-DAG: fmov [[TMP2:w[0-9]+]], s[[ELEM2]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].b, [[TMP2]]
-; VBITS_EQ_256-DAG: st1b { [[OP2_LO]].b }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1b { [[OP2_HI]].b }, [[PG]], [x0, x[[NUMELTS]]]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v64i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    ptrue p0.b, vl32
+; VBITS_EQ_256-NEXT:    mov w8, #32
+; VBITS_EQ_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
+; VBITS_EQ_256-NEXT:    ld1b { z1.b }, p0/z, [x1, x8]
+; VBITS_EQ_256-NEXT:    ld1b { z2.b }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.b, z0.b[31]
+; VBITS_EQ_256-NEXT:    fmov w9, s0
+; VBITS_EQ_256-NEXT:    mov z3.b, z2.b[31]
+; VBITS_EQ_256-NEXT:    insr z2.b, w9
+; VBITS_EQ_256-NEXT:    fmov w9, s3
+; VBITS_EQ_256-NEXT:    insr z1.b, w9
+; VBITS_EQ_256-NEXT:    st1b { z1.b }, p0, [x0, x8]
+; VBITS_EQ_256-NEXT:    st1b { z2.b }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v64i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.b, z0.b[63]
+; VBITS_GE_512-NEXT:    fmov w8, s0
+; VBITS_GE_512-NEXT:    insr z1.b, w8
+; VBITS_GE_512-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <64 x i8>, <64 x i8>* %a
   %op2 = load <64 x i8>, <64 x i8>* %b
   %ret = shufflevector <64 x i8> %op1, <64 x i8> %op2, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70,
@@ -100,16 +106,17 @@ define void @shuffle_ext_byone_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v128i8
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
-; VBITS_GE_1024-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #127
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].b, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].b
-; VBITS_GE_1024-NEXT: insr [[OP2]].b, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1b { [[OP2]].b }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v128i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_1024-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #127
+; VBITS_GE_1024-NEXT:    whilels p1.b, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb w8, p1, z0.b
+; VBITS_GE_1024-NEXT:    insr z1.b, w8
+; VBITS_GE_1024-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <128 x i8>, <128 x i8>* %a
   %op2 = load <128 x i8>, <128 x i8>* %b
   %ret = shufflevector <128 x i8> %op1, <128 x i8> %op2, <128 x i32> <i32 127,  i32 128,  i32 129,  i32 130,  i32 131,  i32 132,  i32 133,  i32 134,
@@ -133,16 +140,17 @@ define void @shuffle_ext_byone_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v256i8
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
-; VBITS_GE_2048-NEXT: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #255
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].b, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].b
-; VBITS_GE_2048-NEXT: insr [[OP2]].b, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1b { [[OP2]].b }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v256i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1b { z1.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #255
+; VBITS_GE_2048-NEXT:    whilels p1.b, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb w8, p1, z0.b
+; VBITS_GE_2048-NEXT:    insr z1.b, w8
+; VBITS_GE_2048-NEXT:    st1b { z1.b }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <256 x i8>, <256 x i8>* %a
   %op2 = load <256 x i8>, <256 x i8>* %b
   %ret = shufflevector <256 x i8> %op1, <256 x i8> %op2, <256 x i32> <i32 255,  i32 256,  i32 257,  i32 258,  i32 259,  i32 260,  i32 261,  i32 262,
@@ -183,32 +191,35 @@ define void @shuffle_ext_byone_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
 
 ; Don't use SVE for 64-bit vectors
 define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4i16
-; CHECK: ext v0.8b, v0.8b, v1.8b, #6
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #6
+; CHECK-NEXT:    ret
   %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
   ret <4 x i16> %ret
 }
 
 ; Don't use SVE for 128-bit vectors
 define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8i16
-; CHECK: ext v0.16b, v0.16b, v1.16b, #14
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #14
+; CHECK-NEXT:    ret
   %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
   ret <8 x i16> %ret
 }
 
 define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16i16
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[15]
-; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; CHECK-NEXT: insr [[OP2]].h, [[TMP]]
-; CHECK-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.h, z0.h[15]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.h, w8
+; CHECK-NEXT:    st1h { z1.h }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <16 x i16>, <16 x i16>* %a
   %op2 = load <16 x i16>, <16 x i16>* %b
   %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -218,31 +229,34 @@ define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32i16
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[31]
-; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; VBITS_GE_512-NEXT: insr [[OP2]].h, [[TMP]]
-; VBITS_GE_512-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #16
-; VBITS_EQ_256-DAG: ld1h { [[OP1_HI:z[0-9]+]].h }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_LO:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_HI:z[0-9]+]].h }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].h, [[OP1_HI]].h[15]
-; VBITS_EQ_256-DAG: fmov [[TMP1:w[0-9]+]], s[[ELEM1]]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].h, [[OP2_LO]].h[15]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].h, [[TMP1]]
-; VBITS_EQ_256-DAG: fmov [[TMP2:w[0-9]+]], s[[ELEM2]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].h, [[TMP2]]
-; VBITS_EQ_256-DAG: st1h { [[OP2_LO]].h }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1h { [[OP2_HI]].h }, [[PG]], [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v32i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.h, z0.h[15]
+; VBITS_EQ_256-NEXT:    fmov w9, s0
+; VBITS_EQ_256-NEXT:    mov z3.h, z2.h[15]
+; VBITS_EQ_256-NEXT:    insr z2.h, w9
+; VBITS_EQ_256-NEXT:    fmov w9, s3
+; VBITS_EQ_256-NEXT:    insr z1.h, w9
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z2.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v32i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.h, z0.h[31]
+; VBITS_GE_512-NEXT:    fmov w8, s0
+; VBITS_GE_512-NEXT:    insr z1.h, w8
+; VBITS_GE_512-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x i16>, <32 x i16>* %a
   %op2 = load <32 x i16>, <32 x i16>* %b
   %ret = shufflevector <32 x i16> %op1, <32 x i16> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -254,16 +268,17 @@ define void @shuffle_ext_byone_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v64i16
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #63
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].h
-; VBITS_GE_1024-NEXT: insr [[OP2]].h, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v64i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #63
+; VBITS_GE_1024-NEXT:    whilels p1.h, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb w8, p1, z0.h
+; VBITS_GE_1024-NEXT:    insr z1.h, w8
+; VBITS_GE_1024-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <64 x i16>, <64 x i16>* %a
   %op2 = load <64 x i16>, <64 x i16>* %b
   %ret = shufflevector <64 x i16> %op1, <64 x i16> %op2, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70,
@@ -279,16 +294,17 @@ define void @shuffle_ext_byone_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v128i16
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #127
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].h
-; VBITS_GE_2048-NEXT: insr [[OP2]].h, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v128i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #127
+; VBITS_GE_2048-NEXT:    whilels p1.h, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb w8, p1, z0.h
+; VBITS_GE_2048-NEXT:    insr z1.h, w8
+; VBITS_GE_2048-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x i16>, <128 x i16>* %a
   %op2 = load <128 x i16>, <128 x i16>* %b
   %ret = shufflevector <128 x i16> %op1, <128 x i16> %op2, <128 x i32> <i32 127,  i32 128,  i32 129,  i32 130,  i32 131,  i32 132,  i32 133,  i32 134,
@@ -313,32 +329,35 @@ define void @shuffle_ext_byone_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
 
 ; Don't use SVE for 64-bit vectors
 define <2 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v2i32
-; CHECK: ext v0.8b, v0.8b, v1.8b, #4
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #4
+; CHECK-NEXT:    ret
   %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> <i32 1, i32 2>
   ret <2 x i32> %ret
 }
 
 ; Don't use SVE for 128-bit vectors
 define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4i32
-; CHECK: ext v0.16b, v0.16b, v1.16b, #12
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #12
+; CHECK-NEXT:    ret
   %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
   ret <4 x i32> %ret
 }
 
 define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8i32
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[7]
-; CHECK-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; CHECK-NEXT: insr [[OP2]].s, [[TMP]]
-; CHECK-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.s, z0.s[7]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.s, w8
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %op2 = load <8 x i32>, <8 x i32>* %b
   %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -347,32 +366,34 @@ define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16i32
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[15]
-; VBITS_GE_512-NEXT: fmov [[TMP:w[0-9]+]], s[[ELEM]]
-; VBITS_GE_512-NEXT: insr [[OP2]].s, [[TMP]]
-; VBITS_GE_512-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[OP1_HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_LO:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_HI:z[0-9]+]].s }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].s, [[OP1_HI]].s[7]
-; VBITS_EQ_256-DAG: fmov [[TMP1:w[0-9]+]], s[[ELEM1]]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].s, [[OP2_LO]].s[7]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].s, [[TMP1]]
-; VBITS_EQ_256-DAG: fmov [[TMP2:w[0-9]+]], s[[ELEM2]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].s, [[TMP2]]
-; VBITS_EQ_256-DAG: st1w { [[OP2_LO]].s }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1w { [[OP2_HI]].s }, [[PG]], [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ret
-
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v16i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.s, z0.s[7]
+; VBITS_EQ_256-NEXT:    fmov w9, s0
+; VBITS_EQ_256-NEXT:    mov z3.s, z2.s[7]
+; VBITS_EQ_256-NEXT:    insr z2.s, w9
+; VBITS_EQ_256-NEXT:    fmov w9, s3
+; VBITS_EQ_256-NEXT:    insr z1.s, w9
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z2.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.s, z0.s[15]
+; VBITS_GE_512-NEXT:    fmov w8, s0
+; VBITS_GE_512-NEXT:    insr z1.s, w8
+; VBITS_GE_512-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %op2 = load <16 x i32>, <16 x i32>* %b
   %ret = shufflevector <16 x i32> %op1, <16 x i32> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -382,16 +403,17 @@ define void @shuffle_ext_byone_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32i32
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #31
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].s
-; VBITS_GE_1024-NEXT: insr [[OP2]].s, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v32i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #31
+; VBITS_GE_1024-NEXT:    whilels p1.s, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb w8, p1, z0.s
+; VBITS_GE_1024-NEXT:    insr z1.s, w8
+; VBITS_GE_1024-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x i32>, <32 x i32>* %a
   %op2 = load <32 x i32>, <32 x i32>* %b
   %ret = shufflevector <32 x i32> %op1, <32 x i32> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -403,16 +425,17 @@ define void @shuffle_ext_byone_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v64i32
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #63
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:w[0-9]+]], [[WPG]], [[OP1]].s
-; VBITS_GE_2048-NEXT: insr [[OP2]].s, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v64i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #63
+; VBITS_GE_2048-NEXT:    whilels p1.s, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb w8, p1, z0.s
+; VBITS_GE_2048-NEXT:    insr z1.s, w8
+; VBITS_GE_2048-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x i32>, <64 x i32>* %a
   %op2 = load <64 x i32>, <64 x i32>* %b
   %ret = shufflevector <64 x i32> %op1, <64 x i32> %op2, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70,
@@ -429,23 +452,25 @@ define void @shuffle_ext_byone_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
 
 ; Don't use SVE for 128-bit vectors
 define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v2i64
-; CHECK: ext v0.16b, v0.16b, v1.16b, #8
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #8
+; CHECK-NEXT:    ret
   %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> <i32 1, i32 2>
   ret <2 x i64> %ret
 }
 
 define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4i64
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[3]
-; CHECK-NEXT: fmov [[TMP:x[0-9]+]], d[[ELEM]]
-; CHECK-NEXT: insr [[OP2]].d, [[TMP]]
-; CHECK-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.d, z0.d[3]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    insr z1.d, x8
+; CHECK-NEXT:    st1d { z1.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %op2 = load <4 x i64>, <4 x i64>* %b
   %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -454,31 +479,34 @@ define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8i64
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[7]
-; VBITS_GE_512-NEXT: fmov [[TMP:x[0-9]+]], d[[ELEM]]
-; VBITS_GE_512-NEXT: insr [[OP2]].d, [[TMP]]
-; VBITS_GE_512-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[OP1_HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].d, [[OP1_HI]].d[3]
-; VBITS_EQ_256-DAG: fmov [[TMP1:x[0-9]+]], d[[ELEM1]]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].d, [[OP2_LO]].d[3]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].d, [[TMP1]]
-; VBITS_EQ_256-DAG: fmov [[TMP2:x[0-9]+]], d[[ELEM2]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].d, [[TMP2]]
-; VBITS_EQ_256-DAG: st1d { [[OP2_LO]].d }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1d { [[OP2_HI]].d }, [[PG]], [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v8i64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.d, z0.d[3]
+; VBITS_EQ_256-NEXT:    fmov x9, d0
+; VBITS_EQ_256-NEXT:    mov z3.d, z2.d[3]
+; VBITS_EQ_256-NEXT:    insr z2.d, x9
+; VBITS_EQ_256-NEXT:    fmov x9, d3
+; VBITS_EQ_256-NEXT:    insr z1.d, x9
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z2.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.d, z0.d[7]
+; VBITS_GE_512-NEXT:    fmov x8, d0
+; VBITS_GE_512-NEXT:    insr z1.d, x8
+; VBITS_GE_512-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %op2 = load <8 x i64>, <8 x i64>* %b
   %ret = shufflevector <8 x i64> %op1, <8 x i64> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -487,16 +515,17 @@ define void @shuffle_ext_byone_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16i64
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #15
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:x[0-9]+]], [[WPG]], [[OP1]].d
-; VBITS_GE_1024-NEXT: insr [[OP2]].d, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #15
+; VBITS_GE_1024-NEXT:    whilels p1.d, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb x8, p1, z0.d
+; VBITS_GE_1024-NEXT:    insr z1.d, x8
+; VBITS_GE_1024-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x i64>, <16 x i64>* %a
   %op2 = load <16 x i64>, <16 x i64>* %b
   %ret = shufflevector <16 x i64> %op1, <16 x i64> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -506,16 +535,17 @@ define void @shuffle_ext_byone_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32i64
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #31
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:x[0-9]+]], [[WPG]], [[OP1]].d
-; VBITS_GE_2048-NEXT: insr [[OP2]].d, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #31
+; VBITS_GE_2048-NEXT:    whilels p1.d, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb x8, p1, z0.d
+; VBITS_GE_2048-NEXT:    insr z1.d, x8
+; VBITS_GE_2048-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x i64>, <32 x i64>* %a
   %op2 = load <32 x i64>, <32 x i64>* %b
   %ret = shufflevector <32 x i64> %op1, <32 x i64> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -528,31 +558,34 @@ define void @shuffle_ext_byone_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
 
 ; Don't use SVE for 64-bit vectors
 define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4f16
-; CHECK: ext v0.8b, v0.8b, v1.8b, #6
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #6
+; CHECK-NEXT:    ret
   %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
   ret <4 x half> %ret
 }
 
 ; Don't use SVE for 128-bit vectors
 define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8f16
-; CHECK: ext v0.16b, v0.16b, v1.16b, #14
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #14
+; CHECK-NEXT:    ret
   %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
   ret <8 x half> %ret
 }
 
 define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16f16
-; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
-; CHECK-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[15]
-; CHECK-NEXT: insr [[OP2]].h, h[[ELEM]]
-; CHECK-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.h, z0.h[15]
+; CHECK-NEXT:    insr z1.h, h0
+; CHECK-NEXT:    st1h { z1.h }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <16 x half>, <16 x half>* %a
   %op2 = load <16 x half>, <16 x half>* %b
   %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -562,28 +595,31 @@ define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32f16(<32 x half>* %a, <32 x half>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32f16
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].h, [[OP1]].h[31]
-; VBITS_GE_512-NEXT: insr [[OP2]].h, h[[ELEM]]
-; VBITS_GE_512-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #16
-; VBITS_EQ_256-DAG: ld1h { [[OP1_HI:z[0-9]+]].h }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_LO:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1h { [[OP2_HI:z[0-9]+]].h }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].h, [[OP2_LO]].h[15]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].h, [[OP1_HI]].h[15]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].h, h[[ELEM1]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].h, h[[ELEM2]]
-; VBITS_EQ_256-DAG: st1h { [[OP2_LO]].h }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1h { [[OP2_HI]].h }, [[PG]], [x0, x[[NUMELTS]], lsl #1]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v32f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    ld1h { z2.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.h, z0.h[15]
+; VBITS_EQ_256-NEXT:    mov z3.h, z2.h[15]
+; VBITS_EQ_256-NEXT:    insr z2.h, h0
+; VBITS_EQ_256-NEXT:    insr z1.h, h3
+; VBITS_EQ_256-NEXT:    st1h { z1.h }, p0, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z2.h }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.h, z0.h[31]
+; VBITS_GE_512-NEXT:    insr z1.h, h0
+; VBITS_GE_512-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x half>, <32 x half>* %a
   %op2 = load <32 x half>, <32 x half>* %b
   %ret = shufflevector <32 x half> %op1, <32 x half> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -595,16 +631,17 @@ define void @shuffle_ext_byone_v32f16(<32 x half>* %a, <32 x half>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v64f16(<64 x half>* %a, <64 x half>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v64f16
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1024-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #63
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:h[0-9]+]], [[WPG]], [[OP1]].h
-; VBITS_GE_1024-NEXT: insr [[OP2]].h, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #63
+; VBITS_GE_1024-NEXT:    whilels p1.h, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb h0, p1, z0.h
+; VBITS_GE_1024-NEXT:    insr z1.h, h0
+; VBITS_GE_1024-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <64 x half>, <64 x half>* %a
   %op2 = load <64 x half>, <64 x half>* %b
   %ret = shufflevector <64 x half> %op1, <64 x half> %op2, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70,
@@ -620,16 +657,17 @@ define void @shuffle_ext_byone_v64f16(<64 x half>* %a, <64 x half>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v128f16(<128 x half>* %a, <128 x half>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v128f16
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-NEXT: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #127
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].h, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:h[0-9]+]], [[WPG]], [[OP1]].h
-; VBITS_GE_2048-NEXT: insr [[OP2]].h, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1h { [[OP2]].h }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #127
+; VBITS_GE_2048-NEXT:    whilels p1.h, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb h0, p1, z0.h
+; VBITS_GE_2048-NEXT:    insr z1.h, h0
+; VBITS_GE_2048-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <128 x half>, <128 x half>* %a
   %op2 = load <128 x half>, <128 x half>* %b
   %ret = shufflevector <128 x half> %op1, <128 x half> %op2, <128 x i32> <i32 127,  i32 128,  i32 129,  i32 130,  i32 131,  i32 132,  i32 133,  i32 134,
@@ -654,31 +692,34 @@ define void @shuffle_ext_byone_v128f16(<128 x half>* %a, <128 x half>* %b) #0 {
 
 ; Don't use SVE for 64-bit vectors
 define <2 x float> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v2f32
-; CHECK: ext v0.8b, v0.8b, v1.8b, #4
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #4
+; CHECK-NEXT:    ret
   %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> <i32 1, i32 2>
   ret <2 x float> %ret
 }
 
 ; Don't use SVE for 128-bit vectors
 define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4f32
-; CHECK: ext v0.16b, v0.16b, v1.16b, #12
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #12
+; CHECK-NEXT:    ret
   %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
   ret <4 x float> %ret
 }
 
 define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8f32
-; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[7]
-; CHECK-NEXT: insr [[OP2]].s, s[[ELEM]]
-; CHECK-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.s, z0.s[7]
+; CHECK-NEXT:    insr z1.s, s0
+; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %op2 = load <8 x float>, <8 x float>* %b
   %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -687,28 +728,31 @@ define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v16f32(<16 x float>* %a, <16 x float>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16f32
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].s, [[OP1]].s[15]
-; VBITS_GE_512-NEXT: insr [[OP2]].s, s[[ELEM]]
-; VBITS_GE_512-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: ld1w { [[OP1_HI:z[0-9]+]].s }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_LO:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1w { [[OP2_HI:z[0-9]+]].s }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].s, [[OP2_LO]].s[7]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].s, [[OP1_HI]].s[7]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].s, s[[ELEM1]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].s, s[[ELEM2]]
-; VBITS_EQ_256-DAG: st1w { [[OP2_LO]].s }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1w { [[OP2_HI]].s }, [[PG]], [x0, x[[NUMELTS]], lsl #2]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    ld1w { z2.s }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.s, z0.s[7]
+; VBITS_EQ_256-NEXT:    mov z3.s, z2.s[7]
+; VBITS_EQ_256-NEXT:    insr z2.s, s0
+; VBITS_EQ_256-NEXT:    insr z1.s, s3
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z2.s }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.s, z0.s[15]
+; VBITS_GE_512-NEXT:    insr z1.s, s0
+; VBITS_GE_512-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %op2 = load <16 x float>, <16 x float>* %b
   %ret = shufflevector <16 x float> %op1, <16 x float> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -718,16 +762,17 @@ define void @shuffle_ext_byone_v16f32(<16 x float>* %a, <16 x float>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32f32(<32 x float>* %a, <32 x float>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32f32
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #31
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:s[0-9]+]], [[WPG]], [[OP1]].s
-; VBITS_GE_1024-NEXT: insr [[OP2]].s, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #31
+; VBITS_GE_1024-NEXT:    whilels p1.s, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb s0, p1, z0.s
+; VBITS_GE_1024-NEXT:    insr z1.s, s0
+; VBITS_GE_1024-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <32 x float>, <32 x float>* %a
   %op2 = load <32 x float>, <32 x float>* %b
   %ret = shufflevector <32 x float> %op1, <32 x float> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -739,16 +784,17 @@ define void @shuffle_ext_byone_v32f32(<32 x float>* %a, <32 x float>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v64f32(<64 x float>* %a, <64 x float>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v64f32
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #63
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].s, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:s[0-9]+]], [[WPG]], [[OP1]].s
-; VBITS_GE_2048-NEXT: insr [[OP2]].s, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1w { [[OP2]].s }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #63
+; VBITS_GE_2048-NEXT:    whilels p1.s, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb s0, p1, z0.s
+; VBITS_GE_2048-NEXT:    insr z1.s, s0
+; VBITS_GE_2048-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <64 x float>, <64 x float>* %a
   %op2 = load <64 x float>, <64 x float>* %b
   %ret = shufflevector <64 x float> %op1, <64 x float> %op2, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70,
@@ -765,22 +811,24 @@ define void @shuffle_ext_byone_v64f32(<64 x float>* %a, <64 x float>* %b) #0 {
 
 ; Don't use SVE for 128-bit vectors
 define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v2f64
-; CHECK: ext v0.16b, v0.16b, v1.16b, #8
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #8
+; CHECK-NEXT:    ret
   %ret = shufflevector <2 x double> %op1, <2 x double> %op2, <2 x i32> <i32 1, i32 2>
   ret <2 x double> %ret
 }
 
 define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v4f64
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[3]
-; CHECK-NEXT: insr [[OP2]].d, d[[ELEM]]
-; CHECK-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    mov z0.d, z0.d[3]
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    st1d { z1.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
   %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -789,28 +837,31 @@ define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v8f64(<8 x double>* %a, <8 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v8f64
-; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_512-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_512-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP1]].d[7]
-; VBITS_GE_512-NEXT: insr [[OP2]].d, d[[ELEM]]
-; VBITS_GE_512-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_512-NEXT: ret
-
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: ld1d { [[OP1_HI:z[0-9]+]].d }, [[PG]]/z, [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_LO:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_EQ_256-DAG: ld1d { [[OP2_HI:z[0-9]+]].d }, [[PG]]/z, [x1, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-DAG: mov z[[ELEM2:[0-9]+]].d, [[OP2_LO]].d[3]
-; VBITS_EQ_256-DAG: mov z[[ELEM1:[0-9]+]].d, [[OP1_HI]].d[3]
-; VBITS_EQ_256-DAG: insr [[OP2_LO]].d, d[[ELEM1]]
-; VBITS_EQ_256-DAG: insr [[OP2_HI]].d, d[[ELEM2]]
-; VBITS_EQ_256-DAG: st1d { [[OP2_LO]].d }, [[PG]], [x0]
-; VBITS_EQ_256-DAG: st1d { [[OP2_HI]].d }, [[PG]], [x0, x[[NUMELTS]], lsl #3]
-; VBITS_EQ_256-NEXT: ret
+; VBITS_EQ_256-LABEL: shuffle_ext_byone_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
+; VBITS_EQ_256-NEXT:    mov z0.d, z0.d[3]
+; VBITS_EQ_256-NEXT:    mov z3.d, z2.d[3]
+; VBITS_EQ_256-NEXT:    insr z2.d, d0
+; VBITS_EQ_256-NEXT:    insr z1.d, d3
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z2.d }, p0, [x0]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: shuffle_ext_byone_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    mov z0.d, z0.d[7]
+; VBITS_GE_512-NEXT:    insr z1.d, d0
+; VBITS_GE_512-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %op2 = load <8 x double>, <8 x double>* %b
   %ret = shufflevector <8 x double> %op1, <8 x double> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
@@ -819,16 +870,17 @@ define void @shuffle_ext_byone_v8f64(<8 x double>* %a, <8 x double>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v16f64(<16 x double>* %a, <16 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v16f64
-; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_1024-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_1024-NEXT: mov w[[TMP:[0-9]+]], #15
-; VBITS_GE_1024-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]]
-; VBITS_GE_1024-NEXT: lastb [[TMP2:d[0-9]+]], [[WPG]], [[OP1]].d
-; VBITS_GE_1024-NEXT: insr [[OP2]].d, [[TMP2]]
-; VBITS_GE_1024-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_1024-NEXT: ret
+; VBITS_GE_1024-LABEL: shuffle_ext_byone_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_1024-NEXT:    mov w8, #15
+; VBITS_GE_1024-NEXT:    whilels p1.d, xzr, x8
+; VBITS_GE_1024-NEXT:    lastb d0, p1, z0.d
+; VBITS_GE_1024-NEXT:    insr z1.d, d0
+; VBITS_GE_1024-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_1024-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %op2 = load <16 x double>, <16 x double>* %b
   %ret = shufflevector <16 x double> %op1, <16 x double> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22,
@@ -838,16 +890,17 @@ define void @shuffle_ext_byone_v16f64(<16 x double>* %a, <16 x double>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_v32f64(<32 x double>* %a, <32 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_v32f64
-; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; VBITS_GE_2048-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; VBITS_GE_2048-NEXT: mov w[[TMP:[0-9]+]], #31
-; VBITS_GE_2048-NEXT: whilels [[WPG:p[0-9]+]].d, xzr, x[[TMP]]
-; VBITS_GE_2048-NEXT: lastb [[TMP2:d[0-9]+]], [[WPG]], [[OP1]].d
-; VBITS_GE_2048-NEXT: insr [[OP2]].d, [[TMP2]]
-; VBITS_GE_2048-NEXT: st1d { [[OP2]].d }, [[PG]], [x0]
-; VBITS_GE_2048-NEXT: ret
+; VBITS_GE_2048-LABEL: shuffle_ext_byone_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    mov w8, #31
+; VBITS_GE_2048-NEXT:    whilels p1.d, xzr, x8
+; VBITS_GE_2048-NEXT:    lastb d0, p1, z0.d
+; VBITS_GE_2048-NEXT:    insr z1.d, d0
+; VBITS_GE_2048-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_2048-NEXT:    ret
   %op1 = load <32 x double>, <32 x double>* %a
   %op2 = load <32 x double>, <32 x double>* %b
   %ret = shufflevector <32 x double> %op1, <32 x double> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38,
@@ -859,14 +912,15 @@ define void @shuffle_ext_byone_v32f64(<32 x double>* %a, <32 x double>* %b) #0 {
 }
 
 define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_byone_reverse
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov z[[ELEM:[0-9]+]].d, [[OP2]].d[3]
-; CHECK-NEXT: insr [[OP1]].d, d[[ELEM]]
-; CHECK-NEXT: st1d { [[OP1]].d }, [[PG]], [x0]
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_byone_reverse:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    mov z1.d, z1.d[3]
+; CHECK-NEXT:    insr z0.d, d1
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
   %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 7, i32 0, i32 1, i32 2>
@@ -875,21 +929,29 @@ define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 {
 }
 
 define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 {
-; CHECK-LABEL: shuffle_ext_invalid
-; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
-; CHECK-NEXT: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: mov z2.d, [[OP1]].d[3]
-; CHECK-NEXT: mov z3.d, [[OP2]].d[1]
-; CHECK-NEXT: mov z0.d, [[OP1]].d[2]
-; CHECK-NEXT: stp d1, d3, [sp, #16]
-; CHECK-NEXT: stp d0, d2, [sp]
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT: st1d { z0.d }, p0, [x0]
-; CHECK-NEXT: mov sp, x29
-; CHECK-NEXT: ldp x29, x30, [sp], #16
-; CHECK-NEXT: ret
+; CHECK-LABEL: shuffle_ext_invalid:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub x9, sp, #48
+; CHECK-NEXT:    mov x29, sp
+; CHECK-NEXT:    and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    mov z2.d, z0.d[3]
+; CHECK-NEXT:    mov z3.d, z1.d[1]
+; CHECK-NEXT:    mov z0.d, z0.d[2]
+; CHECK-NEXT:    stp d1, d3, [sp, #16]
+; CHECK-NEXT:    stp d0, d2, [sp]
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    mov sp, x29
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %op2 = load <4 x double>, <4 x double>* %b
   %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>