[llvm] 2bda5a6 - [AArch64][SME][NFC]: Enable lowering truncate for enhancement.

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 30 19:54:42 PST 2022


Author: Hassnaa Hamdi
Date: 2022-12-01T03:54:28Z
New Revision: 2bda5a62870d6ebb0757e09590bd835ed20513af

URL: https://github.com/llvm/llvm-project/commit/2bda5a62870d6ebb0757e09590bd835ed20513af
DIFF: https://github.com/llvm/llvm-project/commit/2bda5a62870d6ebb0757e09590bd835ed20513af.diff

LOG: [AArch64][SME][NFC]: Enable lowering truncate for enhancement.

Enable lowering of truncate to enhance the generated code.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7228b200a816..3dd5ccfd2264 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12736,7 +12736,8 @@ SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
   if (!VT.isVector() || VT.isScalableVector())
     return SDValue();
 
-  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
+  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
 
   return SDValue();

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index 5c60abd3b8c6..75cf13137f70 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -161,22 +161,15 @@ define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f32_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT:    ldr d1, [sp, #8]
-; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    and z1.s, z1.s, #0x7fffffff
+; CHECK-NEXT:    and z0.s, z0.s, #0x80000000
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <2 x float>, ptr %ap
   %b = load <2 x double>, ptr %bp
@@ -192,27 +185,19 @@ define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f32_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    mov z2.d, z1.d[1]
-; CHECK-NEXT:    fmov x11, d2
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT:    ldr q1, [sp]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z0.s
 ; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    and z2.s, z2.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z2.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %ap
   %b = load <4 x double>, ptr %bp
@@ -295,29 +280,15 @@ define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f16_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
 ; CHECK-NEXT:    ldr d1, [x0]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    and z1.h, z1.h, #0x7fff
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
 ; CHECK-NEXT:    and z0.h, z0.h, #0x8000
 ; CHECK-NEXT:    orr z0.d, z1.d, z0.d
 ; CHECK-NEXT:    str d0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x half>, ptr %ap
   %b = load <4 x float>, ptr %bp
@@ -364,41 +335,19 @@ define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v8f16_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z5.s, z0.s[2]
 ; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
-; CHECK-NEXT:    mov z6.s, z0.s[1]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z3.s, z1.s[2]
-; CHECK-NEXT:    mov z4.s, z1.s[1]
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q1, [sp]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    ldr q2, [x0]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    splice z1.h, p0, z1.h, z0.h
 ; CHECK-NEXT:    and z1.h, z1.h, #0x8000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    and z2.h, z2.h, #0x7fff
+; CHECK-NEXT:    orr z0.d, z2.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <8 x half>, ptr %ap
   %b = load <8 x float>, ptr %bp

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
index 95cff9d01cdb..e0764eacc62f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
@@ -290,24 +290,11 @@ define <2 x i16> @fcvtzu_v2f32_v2i16(<2 x float> %op1) #0 {
 define <4 x i16> @fcvtzu_v4f32_v4i16(<4 x float> %op1) #0 {
 ; CHECK-LABEL: fcvtzu_v4f32_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    fcvtzu z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptoui <4 x float> %op1 to <4 x i16>
   ret <4 x i16> %res
@@ -316,35 +303,15 @@ define <4 x i16> @fcvtzu_v4f32_v4i16(<4 x float> %op1) #0 {
 define <8 x i16> @fcvtzu_v8f32_v8i16(<8 x float>* %a) #0 {
 ; CHECK-LABEL: fcvtzu_v8f32_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z1.s
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z5.s, z1.s[2]
 ; CHECK-NEXT:    fcvtzu z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, z0.s[3]
-; CHECK-NEXT:    mov z3.s, z0.s[2]
-; CHECK-NEXT:    mov z4.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z2.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %res = fptoui <8 x float> %op1 to <8 x i16>
@@ -354,64 +321,21 @@ define <8 x i16> @fcvtzu_v8f32_v8i16(<8 x float>* %a) #0 {
 define void @fcvtzu_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f32_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    fcvtzu z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z5.s, z0.s[2]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0, #32]
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z1.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z3.s, z1.s[2]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    ldp q6, q7, [x0, #32]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z4.s, z1.s[1]
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    movprfx z1, z7
-; CHECK-NEXT:    fcvtzu z1.s, p0/m, z7.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z2.s, z1.s[2]
-; CHECK-NEXT:    mov z3.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    movprfx z1, z6
-; CHECK-NEXT:    fcvtzu z1.s, p0/m, z6.s
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #2]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z4.s, z1.s[3]
-; CHECK-NEXT:    strh w9, [sp, #24]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strh w10, [sp, #16]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z6.s, z1.s[1]
-; CHECK-NEXT:    strh w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strh w9, [sp, #28]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strh w10, [sp, #26]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    strh w8, [sp, #22]
-; CHECK-NEXT:    strh w9, [sp, #20]
-; CHECK-NEXT:    strh w10, [sp, #18]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    fcvtzu z3.s, p0/m, z3.s
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    fcvtzu z2.s, p0/m, z2.s
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %res = fptoui <16 x float> %op1 to <16 x i16>
@@ -553,17 +477,11 @@ define <1 x i16> @fcvtzu_v1f64_v1i16(<1 x double> %op1) #0 {
 define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzu_v2f64_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptoui <2 x double> %op1 to <2 x i16>
   ret <2 x i16> %res
@@ -572,34 +490,26 @@ define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) #0 {
 define <4 x i16> @fcvtzu_v4f64_v4i16(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzu_v4f64_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #16]
-; CHECK-NEXT:    stp w10, w11, [sp, #8]
-; CHECK-NEXT:    ldp d1, d0, [sp, #8]
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    mov z1.s, z1.s[1]
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strh w8, [sp, #28]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    strh w8, [sp, #12]
 ; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w9, [sp, #30]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #24]
-; CHECK-NEXT:    strh w10, [sp, #26]
-; CHECK-NEXT:    ldr d0, [sp, #24]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    strh w10, [sp, #14]
+; CHECK-NEXT:    strh w8, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptoui <4 x double> %op1 to <4 x i16>
@@ -609,57 +519,40 @@ define <4 x i16> @fcvtzu_v4f64_v4i16(<4 x double>* %a) #0 {
 define <8 x i16> @fcvtzu_v8f64_v8i16(<8 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzu_v8f64_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x10, d4
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    ldp q2, q3, [x0]
-; CHECK-NEXT:    movprfx z1, z3
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.d
-; CHECK-NEXT:    mov z3.d, z1.d[1]
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w10, [sp, #8]
-; CHECK-NEXT:    fmov x8, d3
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    stp w11, w8, [sp, #16]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    stp w9, w12, [sp, #24]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    ldp d0, d2, [sp, #8]
-; CHECK-NEXT:    ldr d1, [sp, #24]
-; CHECK-NEXT:    stp w9, w10, [sp]
-; CHECK-NEXT:    ldr d3, [sp]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    strh w8, [sp, #44]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z1.s, z3.s[1]
-; CHECK-NEXT:    strh w8, [sp, #40]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w10, [sp, #32]
-; CHECK-NEXT:    strh w9, [sp, #42]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w8, [sp, #36]
-; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    mov z4.s, z1.s[1]
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    mov z1.s, z0.s[1]
 ; CHECK-NEXT:    mov z0.s, z2.s[1]
-; CHECK-NEXT:    strh w9, [sp, #34]
-; CHECK-NEXT:    strh w8, [sp, #46]
+; CHECK-NEXT:    mov z2.s, z3.s[1]
+; CHECK-NEXT:    strh w8, [sp]
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w8, [sp, #38]
-; CHECK-NEXT:    ldr q0, [sp, #32]
-; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    strh w10, [sp, #4]
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    strh w8, [sp, #6]
+; CHECK-NEXT:    strh w10, [sp, #10]
+; CHECK-NEXT:    strh w9, [sp, #2]
+; CHECK-NEXT:    ldr q0, [sp], #16
 ; CHECK-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptoui <8 x double> %op1 to <8 x i16>
@@ -669,108 +562,73 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(<8 x double>* %a) #0 {
 define void @fcvtzu_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f64_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
-; CHECK-NEXT:    ldp q0, q1, [x0, #32]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    mov z6.s, z3.s[1]
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT:    mov z3.s, z2.s[1]
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.d
+; CHECK-NEXT:    ldp q0, q1, [x0, #64]
+; CHECK-NEXT:    uzp1 z5.s, z5.s, z5.s
+; CHECK-NEXT:    fmov w10, s5
+; CHECK-NEXT:    mov z5.s, z5.s[1]
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q7, [x0, #96]
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    fmov w8, s4
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    fmov w9, s6
+; CHECK-NEXT:    strh w10, [sp, #4]
+; CHECK-NEXT:    mov z4.s, z4.s[1]
+; CHECK-NEXT:    strh w8, [sp]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    movprfx z3, z7
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.d
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    strh w8, [sp, #10]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    fmov w9, s5
+; CHECK-NEXT:    fmov w10, s4
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d[1]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d6
-; CHECK-NEXT:    mov z6.d, z0.d[1]
-; CHECK-NEXT:    fmov x11, d6
-; CHECK-NEXT:    ldp q7, q1, [x0]
-; CHECK-NEXT:    ldp q2, q3, [x0, #64]
-; CHECK-NEXT:    ldp q4, q5, [x0, #96]
-; CHECK-NEXT:    stp w8, w9, [sp, #32]
-; CHECK-NEXT:    stp w10, w11, [sp, #48]
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    movprfx z0, z7
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z7.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #40]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    movprfx z0, z5
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z5.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    movprfx z0, z4
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #16]
-; CHECK-NEXT:    stp w10, w11, [sp, #8]
-; CHECK-NEXT:    movprfx z0, z3
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z3.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z2
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.d
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    ldp d0, d2, [sp, #32]
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #24]
-; CHECK-NEXT:    ldr d1, [sp, #48]
-; CHECK-NEXT:    ldr d3, [sp]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    stp w10, w11, [sp, #56]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    strh w8, [sp, #76]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w9, [sp, #64]
-; CHECK-NEXT:    strh w8, [sp, #72]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w10, [sp, #74]
-; CHECK-NEXT:    strh w8, [sp, #68]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z2.s[1]
-; CHECK-NEXT:    mov z2.s, z3.s[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    ldr d2, [sp, #24]
-; CHECK-NEXT:    strh w8, [sp, #78]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    ldp d1, d0, [sp, #8]
-; CHECK-NEXT:    strh w9, [sp, #66]
-; CHECK-NEXT:    strh w8, [sp, #70]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z0.s, z1.s[1]
-; CHECK-NEXT:    strh w8, [sp, #92]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    ldr d1, [sp, #56]
-; CHECK-NEXT:    strh w9, [sp, #94]
-; CHECK-NEXT:    strh w8, [sp, #88]
+; CHECK-NEXT:    strh w8, [sp, #28]
 ; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z2.s, z2.s[1]
+; CHECK-NEXT:    mov z3.s, z3.s[1]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    strh w9, [sp, #6]
 ; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    strh w10, [sp, #2]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    strh w8, [sp, #24]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    mov z4.s, z2.s[1]
+; CHECK-NEXT:    mov z2.s, z1.s[1]
+; CHECK-NEXT:    mov z1.s, z0.s[1]
+; CHECK-NEXT:    strh w9, [sp, #20]
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    strh w10, [sp, #16]
 ; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #84]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z1.s[1]
-; CHECK-NEXT:    strh w9, [sp, #80]
-; CHECK-NEXT:    strh w10, [sp, #86]
-; CHECK-NEXT:    strh w8, [sp, #90]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w8, [sp, #82]
-; CHECK-NEXT:    ldp q1, q0, [sp, #64]
+; CHECK-NEXT:    strh w8, [sp, #30]
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    strh w9, [sp, #26]
+; CHECK-NEXT:    strh w10, [sp, #22]
+; CHECK-NEXT:    strh w8, [sp, #18]
+; CHECK-NEXT:    ldp q1, q0, [sp]
 ; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #96
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %res = fptoui <16 x double> %op1 to <16 x i16>
@@ -785,17 +643,11 @@ define void @fcvtzu_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 define <1 x i32> @fcvtzu_v1f64_v1i32(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzu_v1f64_v1i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptoui <1 x double> %op1 to <1 x i32>
   ret <1 x i32> %res
@@ -804,17 +656,11 @@ define <1 x i32> @fcvtzu_v1f64_v1i32(<1 x double> %op1) #0 {
 define <2 x i32> @fcvtzu_v2f64_v2i32(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzu_v2f64_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptoui <2 x double> %op1 to <2 x i32>
   ret <2 x i32> %res
@@ -823,22 +669,15 @@ define <2 x i32> @fcvtzu_v2f64_v2i32(<2 x double> %op1) #0 {
 define <4 x i32> @fcvtzu_v4f64_v4i32(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzu_v4f64_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzu z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z2.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptoui <4 x double> %op1 to <4 x i32>
@@ -848,37 +687,21 @@ define <4 x i32> @fcvtzu_v4f64_v4i32(<4 x double>* %a) #0 {
 define void @fcvtzu_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f64_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ptrue p1.s, vl2
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ldp q3, q2, [x0, #32]
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fcvtzu z1.d, p0/m, z2.d
-; CHECK-NEXT:    movprfx z2, z3
-; CHECK-NEXT:    fcvtzu z2.d, p0/m, z3.d
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    mov z3.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    stp w8, w9, [sp, #24]
-; CHECK-NEXT:    stp w10, w11, [sp, #16]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    fcvtzu z1.d, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    fcvtzu z3.d, p0/m, z3.d
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fcvtzu z2.d, p0/m, z2.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptoui <8 x double> %op1 to <8 x i32>
@@ -1216,24 +1039,11 @@ define <2 x i16> @fcvtzs_v2f32_v2i16(<2 x float> %op1) #0 {
 define <4 x i16> @fcvtzs_v4f32_v4i16(<4 x float> %op1) #0 {
 ; CHECK-LABEL: fcvtzs_v4f32_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptosi <4 x float> %op1 to <4 x i16>
   ret <4 x i16> %res
@@ -1242,35 +1052,15 @@ define <4 x i16> @fcvtzs_v4f32_v4i16(<4 x float> %op1) #0 {
 define <8 x i16> @fcvtzs_v8f32_v8i16(<8 x float>* %a) #0 {
 ; CHECK-LABEL: fcvtzs_v8f32_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z1.s
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z5.s, z1.s[2]
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, z0.s[3]
-; CHECK-NEXT:    mov z3.s, z0.s[2]
-; CHECK-NEXT:    mov z4.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z2.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, <8 x float>* %a
   %res = fptosi <8 x float> %op1 to <8 x i16>
@@ -1280,64 +1070,21 @@ define <8 x i16> @fcvtzs_v8f32_v8i16(<8 x float>* %a) #0 {
 define void @fcvtzs_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f32_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z5.s, z0.s[2]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0, #32]
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z1.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z3.s, z1.s[2]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    ldp q6, q7, [x0, #32]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z4.s, z1.s[1]
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    movprfx z1, z7
-; CHECK-NEXT:    fcvtzs z1.s, p0/m, z7.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z2.s, z1.s[2]
-; CHECK-NEXT:    mov z3.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    movprfx z1, z6
-; CHECK-NEXT:    fcvtzs z1.s, p0/m, z6.s
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #2]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z4.s, z1.s[3]
-; CHECK-NEXT:    strh w9, [sp, #24]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strh w10, [sp, #16]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z6.s, z1.s[1]
-; CHECK-NEXT:    strh w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strh w9, [sp, #28]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strh w10, [sp, #26]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    strh w8, [sp, #22]
-; CHECK-NEXT:    strh w9, [sp, #20]
-; CHECK-NEXT:    strh w10, [sp, #18]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    fcvtzs z3.s, p0/m, z3.s
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    fcvtzs z2.s, p0/m, z2.s
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x float>, <16 x float>* %a
   %res = fptosi <16 x float> %op1 to <16 x i16>
@@ -1481,17 +1228,11 @@ define <1 x i16> @fcvtzs_v1f64_v1i16(<1 x double> %op1) #0 {
 define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzs_v2f64_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptosi <2 x double> %op1 to <2 x i16>
   ret <2 x i16> %res
@@ -1500,34 +1241,26 @@ define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) #0 {
 define <4 x i16> @fcvtzs_v4f64_v4i16(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzs_v4f64_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #16]
-; CHECK-NEXT:    stp w10, w11, [sp, #8]
-; CHECK-NEXT:    ldp d1, d0, [sp, #8]
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    mov z1.s, z1.s[1]
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strh w8, [sp, #28]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    strh w8, [sp, #12]
 ; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w9, [sp, #30]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #24]
-; CHECK-NEXT:    strh w10, [sp, #26]
-; CHECK-NEXT:    ldr d0, [sp, #24]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    strh w10, [sp, #14]
+; CHECK-NEXT:    strh w8, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptosi <4 x double> %op1 to <4 x i16>
@@ -1537,57 +1270,40 @@ define <4 x i16> @fcvtzs_v4f64_v4i16(<4 x double>* %a) #0 {
 define <8 x i16> @fcvtzs_v8f64_v8i16(<8 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzs_v8f64_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x10, d4
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    ldp q2, q3, [x0]
-; CHECK-NEXT:    movprfx z1, z3
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z3.d
-; CHECK-NEXT:    mov z3.d, z1.d[1]
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w10, [sp, #8]
-; CHECK-NEXT:    fmov x8, d3
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    stp w11, w8, [sp, #16]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    stp w9, w12, [sp, #24]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    ldp d0, d2, [sp, #8]
-; CHECK-NEXT:    ldr d1, [sp, #24]
-; CHECK-NEXT:    stp w9, w10, [sp]
-; CHECK-NEXT:    ldr d3, [sp]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    strh w8, [sp, #44]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z1.s, z3.s[1]
-; CHECK-NEXT:    strh w8, [sp, #40]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w10, [sp, #32]
-; CHECK-NEXT:    strh w9, [sp, #42]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w8, [sp, #36]
-; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    mov z4.s, z1.s[1]
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    mov z1.s, z0.s[1]
 ; CHECK-NEXT:    mov z0.s, z2.s[1]
-; CHECK-NEXT:    strh w9, [sp, #34]
-; CHECK-NEXT:    strh w8, [sp, #46]
+; CHECK-NEXT:    mov z2.s, z3.s[1]
+; CHECK-NEXT:    strh w8, [sp]
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w8, [sp, #38]
-; CHECK-NEXT:    ldr q0, [sp, #32]
-; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    strh w10, [sp, #4]
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    strh w8, [sp, #6]
+; CHECK-NEXT:    strh w10, [sp, #10]
+; CHECK-NEXT:    strh w9, [sp, #2]
+; CHECK-NEXT:    ldr q0, [sp], #16
 ; CHECK-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptosi <8 x double> %op1 to <8 x i16>
@@ -1597,108 +1313,73 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(<8 x double>* %a) #0 {
 define void @fcvtzs_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f64_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96
-; CHECK-NEXT:    .cfi_def_cfa_offset 96
-; CHECK-NEXT:    ldp q0, q1, [x0, #32]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    mov z6.s, z3.s[1]
+; CHECK-NEXT:    fcvtzs z4.d, p0/m, z4.d
+; CHECK-NEXT:    mov z3.s, z2.s[1]
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    fcvtzs z5.d, p0/m, z5.d
+; CHECK-NEXT:    ldp q0, q1, [x0, #64]
+; CHECK-NEXT:    uzp1 z5.s, z5.s, z5.s
+; CHECK-NEXT:    fmov w10, s5
+; CHECK-NEXT:    mov z5.s, z5.s[1]
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q7, [x0, #96]
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    fmov w8, s4
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    fmov w9, s6
+; CHECK-NEXT:    strh w10, [sp, #4]
+; CHECK-NEXT:    mov z4.s, z4.s[1]
+; CHECK-NEXT:    strh w8, [sp]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    movprfx z3, z7
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.d
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    strh w8, [sp, #10]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    fmov w9, s5
+; CHECK-NEXT:    fmov w10, s4
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
-; CHECK-NEXT:    mov z6.d, z1.d[1]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d6
-; CHECK-NEXT:    mov z6.d, z0.d[1]
-; CHECK-NEXT:    fmov x11, d6
-; CHECK-NEXT:    ldp q7, q1, [x0]
-; CHECK-NEXT:    ldp q2, q3, [x0, #64]
-; CHECK-NEXT:    ldp q4, q5, [x0, #96]
-; CHECK-NEXT:    stp w8, w9, [sp, #32]
-; CHECK-NEXT:    stp w10, w11, [sp, #48]
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    movprfx z0, z7
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z7.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #40]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    movprfx z0, z5
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z5.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    movprfx z0, z4
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z4.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #16]
-; CHECK-NEXT:    stp w10, w11, [sp, #8]
-; CHECK-NEXT:    movprfx z0, z3
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z3.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z2
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z2.d
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    ldp d0, d2, [sp, #32]
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #24]
-; CHECK-NEXT:    ldr d1, [sp, #48]
-; CHECK-NEXT:    ldr d3, [sp]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    stp w10, w11, [sp, #56]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    strh w8, [sp, #76]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w9, [sp, #64]
-; CHECK-NEXT:    strh w8, [sp, #72]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w10, [sp, #74]
-; CHECK-NEXT:    strh w8, [sp, #68]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z2.s[1]
-; CHECK-NEXT:    mov z2.s, z3.s[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    ldr d2, [sp, #24]
-; CHECK-NEXT:    strh w8, [sp, #78]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    ldp d1, d0, [sp, #8]
-; CHECK-NEXT:    strh w9, [sp, #66]
-; CHECK-NEXT:    strh w8, [sp, #70]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z0.s, z1.s[1]
-; CHECK-NEXT:    strh w8, [sp, #92]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    ldr d1, [sp, #56]
-; CHECK-NEXT:    strh w9, [sp, #94]
-; CHECK-NEXT:    strh w8, [sp, #88]
+; CHECK-NEXT:    strh w8, [sp, #28]
 ; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z2.s, z2.s[1]
+; CHECK-NEXT:    mov z3.s, z3.s[1]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    strh w9, [sp, #6]
 ; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    strh w10, [sp, #2]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    strh w8, [sp, #24]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    mov z4.s, z2.s[1]
+; CHECK-NEXT:    mov z2.s, z1.s[1]
+; CHECK-NEXT:    mov z1.s, z0.s[1]
+; CHECK-NEXT:    strh w9, [sp, #20]
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    strh w10, [sp, #16]
 ; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #84]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z1.s[1]
-; CHECK-NEXT:    strh w9, [sp, #80]
-; CHECK-NEXT:    strh w10, [sp, #86]
-; CHECK-NEXT:    strh w8, [sp, #90]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w8, [sp, #82]
-; CHECK-NEXT:    ldp q1, q0, [sp, #64]
+; CHECK-NEXT:    strh w8, [sp, #30]
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    strh w9, [sp, #26]
+; CHECK-NEXT:    strh w10, [sp, #22]
+; CHECK-NEXT:    strh w8, [sp, #18]
+; CHECK-NEXT:    ldp q1, q0, [sp]
 ; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #96
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %op1 = load <16 x double>, <16 x double>* %a
   %res = fptosi <16 x double> %op1 to <16 x i16>
@@ -1713,17 +1394,11 @@ define void @fcvtzs_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 define <1 x i32> @fcvtzs_v1f64_v1i32(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzs_v1f64_v1i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptosi <1 x double> %op1 to <1 x i32>
   ret <1 x i32> %res
@@ -1732,17 +1407,11 @@ define <1 x i32> @fcvtzs_v1f64_v1i32(<1 x double> %op1) #0 {
 define <2 x i32> @fcvtzs_v2f64_v2i32(<2 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzs_v2f64_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptosi <2 x double> %op1 to <2 x i32>
   ret <2 x i32> %res
@@ -1751,22 +1420,15 @@ define <2 x i32> @fcvtzs_v2f64_v2i32(<2 x double> %op1) #0 {
 define <4 x i32> @fcvtzs_v4f64_v4i32(<4 x double>* %a) #0 {
 ; CHECK-LABEL: fcvtzs_v4f64_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z2.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, <4 x double>* %a
   %res = fptosi <4 x double> %op1 to <4 x i32>
@@ -1776,37 +1438,21 @@ define <4 x i32> @fcvtzs_v4f64_v4i32(<4 x double>* %a) #0 {
 define void @fcvtzs_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f64_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ptrue p1.s, vl2
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ldp q3, q2, [x0, #32]
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.d
-; CHECK-NEXT:    movprfx z2, z3
-; CHECK-NEXT:    fcvtzs z2.d, p0/m, z3.d
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    mov z3.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    stp w8, w9, [sp, #24]
-; CHECK-NEXT:    stp w10, w11, [sp, #16]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    fcvtzs z3.d, p0/m, z3.d
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    fcvtzs z2.d, p0/m, z2.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x double>, <8 x double>* %a
   %res = fptosi <8 x double> %op1 to <8 x i32>

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
index 38e966cdce1a..12695bd22977 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
@@ -10,34 +10,21 @@ target triple = "aarch64-unknown-linux-gnu"
 define <4 x i8> @sdiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 ; CHECK-LABEL: sdiv_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    lsl z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    asr z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    sunpklo z1.s, z1.h
 ; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    sdiv z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sdiv <4 x i8> %op1, %op2
   ret <4 x i8> %res
@@ -46,8 +33,6 @@ define <4 x i8> @sdiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 define <8 x i8> @sdiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: sdiv_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
@@ -60,31 +45,8 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-NEXT:    sdivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    sdiv z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
-; CHECK-NEXT:    mov z1.h, z0.h[7]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z2.h, z0.h[6]
-; CHECK-NEXT:    mov z3.h, z0.h[5]
-; CHECK-NEXT:    mov z4.h, z0.h[4]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z5.h, z0.h[3]
-; CHECK-NEXT:    mov z6.h, z0.h[2]
-; CHECK-NEXT:    mov z0.h, z0.h[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w10, [sp, #11]
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sdiv <8 x i8> %op1, %op2
   ret <8 x i8> %res
@@ -196,27 +158,14 @@ define <2 x i16> @sdiv_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 {
 define <4 x i16> @sdiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: sdiv_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    sunpklo z1.s, z1.h
 ; CHECK-NEXT:    sunpklo z0.s, z0.h
 ; CHECK-NEXT:    sdiv z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sdiv <4 x i16> %op1, %op2
   ret <4 x i16> %res
@@ -363,31 +312,18 @@ define void @sdiv_v4i64(<4 x i64>* %a, <4 x i64>* %b)  #0 {
 define <4 x i8> @udiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 ; CHECK-LABEL: udiv_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI14_0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI14_0]
-; CHECK-NEXT:    and z0.d, z0.d, z2.d
 ; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
 ; CHECK-NEXT:    uunpklo z1.s, z1.h
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    udiv z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = udiv <4 x i8> %op1, %op2
   ret <4 x i8> %res
@@ -396,8 +332,6 @@ define <4 x i8> @udiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 define <8 x i8> @udiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: udiv_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
@@ -410,31 +344,8 @@ define <8 x i8> @udiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-NEXT:    udivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    udiv z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
-; CHECK-NEXT:    mov z1.h, z0.h[7]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z2.h, z0.h[6]
-; CHECK-NEXT:    mov z3.h, z0.h[5]
-; CHECK-NEXT:    mov z4.h, z0.h[4]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z5.h, z0.h[3]
-; CHECK-NEXT:    mov z6.h, z0.h[2]
-; CHECK-NEXT:    mov z0.h, z0.h[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w10, [sp, #11]
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = udiv <8 x i8> %op1, %op2
   ret <8 x i8> %res
@@ -544,27 +455,14 @@ define <2 x i16> @udiv_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 {
 define <4 x i16> @udiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: udiv_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    uunpklo z1.s, z1.h
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    udiv z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = udiv <4 x i16> %op1, %op2
   ret <4 x i16> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
index 3af5e59c7ced..bc90841aebee 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
@@ -77,131 +77,41 @@ define <16 x i8> @smulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-LABEL: smulh_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    adrp x8, .LCPI3_0
 ; CHECK-NEXT:    ptrue p0.h, vl8
-; CHECK-NEXT:    sunpklo z0.h, z2.b
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
-; CHECK-NEXT:    sunpklo z2.h, z2.b
-; CHECK-NEXT:    ldp q4, q5, [x1]
+; CHECK-NEXT:    sunpklo z4.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z1.h, z1.b
+; CHECK-NEXT:    ldp q3, q2, [x1]
+; CHECK-NEXT:    sunpklo z5.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.h, z0.b
 ; CHECK-NEXT:    sunpklo z6.h, z3.b
 ; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
 ; CHECK-NEXT:    sunpklo z3.h, z3.b
-; CHECK-NEXT:    sunpklo z1.h, z4.b
-; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT:    sunpklo z4.h, z4.b
-; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    sunpklo z7.h, z5.b
-; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
-; CHECK-NEXT:    ldr q16, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    sunpklo z5.h, z5.b
-; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z5.h
-; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    sunpklo z7.h, z2.b
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z2.h, z2.b
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z6.h
 ; CHECK-NEXT:    mul z5.h, p0/m, z5.h, z7.h
-; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z4.h
 ; CHECK-NEXT:    movprfx z4, z5
-; CHECK-NEXT:    lsr z4.h, p0/m, z4.h, z16.h
-; CHECK-NEXT:    lsr z3.h, p0/m, z3.h, z16.h
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z5.h, z3.h[7]
-; CHECK-NEXT:    mov z6.h, z3.h[6]
-; CHECK-NEXT:    mov z7.h, z3.h[5]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w9, [sp, #16]
-; CHECK-NEXT:    strb w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    mov z17.h, z3.h[4]
-; CHECK-NEXT:    mov z18.h, z3.h[3]
-; CHECK-NEXT:    mov z19.h, z3.h[2]
-; CHECK-NEXT:    strb w10, [sp, #31]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s18
-; CHECK-NEXT:    strb w9, [sp, #29]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    mov z20.h, z3.h[1]
-; CHECK-NEXT:    mov z3.h, z4.h[7]
-; CHECK-NEXT:    mov z21.h, z4.h[6]
-; CHECK-NEXT:    strb w10, [sp, #28]
-; CHECK-NEXT:    fmov w10, s20
-; CHECK-NEXT:    strb w8, [sp, #27]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #26]
-; CHECK-NEXT:    fmov w9, s21
-; CHECK-NEXT:    mov z22.h, z4.h[5]
-; CHECK-NEXT:    mov z23.h, z4.h[4]
-; CHECK-NEXT:    mov z24.h, z4.h[3]
-; CHECK-NEXT:    strb w10, [sp, #25]
-; CHECK-NEXT:    fmov w10, s22
-; CHECK-NEXT:    strb w8, [sp, #23]
-; CHECK-NEXT:    fmov w8, s23
-; CHECK-NEXT:    strb w9, [sp, #22]
-; CHECK-NEXT:    fmov w9, s24
-; CHECK-NEXT:    mov z25.h, z4.h[2]
-; CHECK-NEXT:    mov z26.h, z4.h[1]
-; CHECK-NEXT:    strb w10, [sp, #21]
-; CHECK-NEXT:    fmov w10, s25
-; CHECK-NEXT:    strb w8, [sp, #20]
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z16.h
-; CHECK-NEXT:    strb w9, [sp, #19]
-; CHECK-NEXT:    fmov w8, s26
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z16.h
-; CHECK-NEXT:    mov z2.h, z1.h[7]
-; CHECK-NEXT:    mov z3.h, z1.h[6]
-; CHECK-NEXT:    strb w10, [sp, #18]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strb w8, [sp, #17]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z4.h, z1.h[5]
-; CHECK-NEXT:    mov z5.h, z1.h[4]
-; CHECK-NEXT:    mov z6.h, z1.h[3]
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strb w8, [sp, #15]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w9, [sp, #14]
-; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    mov z7.h, z1.h[2]
-; CHECK-NEXT:    mov z16.h, z1.h[1]
-; CHECK-NEXT:    mov z1.h, z0.h[7]
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z17.h, z0.h[6]
-; CHECK-NEXT:    mov z18.h, z0.h[5]
-; CHECK-NEXT:    mov z19.h, z0.h[4]
-; CHECK-NEXT:    strb w10, [sp, #10]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #9]
-; CHECK-NEXT:    fmov w8, s18
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    mov z20.h, z0.h[3]
-; CHECK-NEXT:    mov z21.h, z0.h[2]
-; CHECK-NEXT:    mov z22.h, z0.h[1]
-; CHECK-NEXT:    strb w10, [sp, #6]
-; CHECK-NEXT:    fmov w10, s20
-; CHECK-NEXT:    strb w8, [sp, #5]
-; CHECK-NEXT:    fmov w8, s21
-; CHECK-NEXT:    strb w9, [sp, #4]
-; CHECK-NEXT:    fmov w9, s22
-; CHECK-NEXT:    strb w10, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w9, [sp, #1]
-; CHECK-NEXT:    ldp q0, q1, [sp]
-; CHECK-NEXT:    stp q0, q1, [x0]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    lsr z4.h, p0/m, z4.h, z2.h
+; CHECK-NEXT:    lsr z3.h, p0/m, z3.h, z2.h
+; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    uzp1 z2.b, z3.b, z3.b
+; CHECK-NEXT:    uzp1 z3.b, z4.b, z4.b
+; CHECK-NEXT:    splice z2.b, p0, z2.b, z1.b
+; CHECK-NEXT:    splice z3.b, p0, z3.b, z0.b
+; CHECK-NEXT:    stp q2, q3, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b
@@ -516,131 +426,41 @@ define <16 x i8> @umulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
 define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-LABEL: umulh_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    adrp x8, .LCPI17_0
 ; CHECK-NEXT:    ptrue p0.h, vl8
-; CHECK-NEXT:    uunpklo z0.h, z2.b
-; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
-; CHECK-NEXT:    uunpklo z2.h, z2.b
-; CHECK-NEXT:    ldp q4, q5, [x1]
+; CHECK-NEXT:    uunpklo z4.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z1.h, z1.b
+; CHECK-NEXT:    ldp q3, q2, [x1]
+; CHECK-NEXT:    uunpklo z5.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    uunpklo z6.h, z3.b
 ; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
 ; CHECK-NEXT:    uunpklo z3.h, z3.b
-; CHECK-NEXT:    uunpklo z1.h, z4.b
-; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
-; CHECK-NEXT:    uunpklo z4.h, z4.b
-; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    uunpklo z7.h, z5.b
-; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
-; CHECK-NEXT:    ldr q16, [x8, :lo12:.LCPI17_0]
-; CHECK-NEXT:    uunpklo z5.h, z5.b
-; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z5.h
-; CHECK-NEXT:    movprfx z5, z6
+; CHECK-NEXT:    uunpklo z7.h, z2.b
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z2.h, z2.b
+; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI17_0]
+; CHECK-NEXT:    movprfx z3, z4
+; CHECK-NEXT:    mul z3.h, p0/m, z3.h, z6.h
 ; CHECK-NEXT:    mul z5.h, p0/m, z5.h, z7.h
-; CHECK-NEXT:    mul z2.h, p0/m, z2.h, z4.h
 ; CHECK-NEXT:    movprfx z4, z5
-; CHECK-NEXT:    lsr z4.h, p0/m, z4.h, z16.h
-; CHECK-NEXT:    lsr z3.h, p0/m, z3.h, z16.h
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z5.h, z3.h[7]
-; CHECK-NEXT:    mov z6.h, z3.h[6]
-; CHECK-NEXT:    mov z7.h, z3.h[5]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w9, [sp, #16]
-; CHECK-NEXT:    strb w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    mov z17.h, z3.h[4]
-; CHECK-NEXT:    mov z18.h, z3.h[3]
-; CHECK-NEXT:    mov z19.h, z3.h[2]
-; CHECK-NEXT:    strb w10, [sp, #31]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s18
-; CHECK-NEXT:    strb w9, [sp, #29]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    mov z20.h, z3.h[1]
-; CHECK-NEXT:    mov z3.h, z4.h[7]
-; CHECK-NEXT:    mov z21.h, z4.h[6]
-; CHECK-NEXT:    strb w10, [sp, #28]
-; CHECK-NEXT:    fmov w10, s20
-; CHECK-NEXT:    strb w8, [sp, #27]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #26]
-; CHECK-NEXT:    fmov w9, s21
-; CHECK-NEXT:    mov z22.h, z4.h[5]
-; CHECK-NEXT:    mov z23.h, z4.h[4]
-; CHECK-NEXT:    mov z24.h, z4.h[3]
-; CHECK-NEXT:    strb w10, [sp, #25]
-; CHECK-NEXT:    fmov w10, s22
-; CHECK-NEXT:    strb w8, [sp, #23]
-; CHECK-NEXT:    fmov w8, s23
-; CHECK-NEXT:    strb w9, [sp, #22]
-; CHECK-NEXT:    fmov w9, s24
-; CHECK-NEXT:    mov z25.h, z4.h[2]
-; CHECK-NEXT:    mov z26.h, z4.h[1]
-; CHECK-NEXT:    strb w10, [sp, #21]
-; CHECK-NEXT:    fmov w10, s25
-; CHECK-NEXT:    strb w8, [sp, #20]
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z16.h
-; CHECK-NEXT:    strb w9, [sp, #19]
-; CHECK-NEXT:    fmov w8, s26
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z16.h
-; CHECK-NEXT:    mov z2.h, z1.h[7]
-; CHECK-NEXT:    mov z3.h, z1.h[6]
-; CHECK-NEXT:    strb w10, [sp, #18]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strb w8, [sp, #17]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z4.h, z1.h[5]
-; CHECK-NEXT:    mov z5.h, z1.h[4]
-; CHECK-NEXT:    mov z6.h, z1.h[3]
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strb w8, [sp, #15]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w9, [sp, #14]
-; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    mov z7.h, z1.h[2]
-; CHECK-NEXT:    mov z16.h, z1.h[1]
-; CHECK-NEXT:    mov z1.h, z0.h[7]
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z17.h, z0.h[6]
-; CHECK-NEXT:    mov z18.h, z0.h[5]
-; CHECK-NEXT:    mov z19.h, z0.h[4]
-; CHECK-NEXT:    strb w10, [sp, #10]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #9]
-; CHECK-NEXT:    fmov w8, s18
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    mov z20.h, z0.h[3]
-; CHECK-NEXT:    mov z21.h, z0.h[2]
-; CHECK-NEXT:    mov z22.h, z0.h[1]
-; CHECK-NEXT:    strb w10, [sp, #6]
-; CHECK-NEXT:    fmov w10, s20
-; CHECK-NEXT:    strb w8, [sp, #5]
-; CHECK-NEXT:    fmov w8, s21
-; CHECK-NEXT:    strb w9, [sp, #4]
-; CHECK-NEXT:    fmov w9, s22
-; CHECK-NEXT:    strb w10, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w9, [sp, #1]
-; CHECK-NEXT:    ldp q0, q1, [sp]
-; CHECK-NEXT:    stp q0, q1, [x0]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    lsr z4.h, p0/m, z4.h, z2.h
+; CHECK-NEXT:    lsr z3.h, p0/m, z3.h, z2.h
+; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    uzp1 z2.b, z3.b, z3.b
+; CHECK-NEXT:    uzp1 z3.b, z4.b, z4.b
+; CHECK-NEXT:    splice z2.b, p0, z2.b, z1.b
+; CHECK-NEXT:    splice z3.b, p0, z3.b, z0.b
+; CHECK-NEXT:    stp q2, q3, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, <32 x i8>* %a
   %op2 = load <32 x i8>, <32 x i8>* %b

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index 39edeeb61b22..7fa94d4d6342 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -10,36 +10,22 @@ target triple = "aarch64-unknown-linux-gnu"
 define <4 x i8> @srem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 ; CHECK-LABEL: srem_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    ptrue p1.s, vl4
 ; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    lsl z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    asr z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
 ; CHECK-NEXT:    sunpklo z2.s, z1.h
 ; CHECK-NEXT:    sunpklo z3.s, z0.h
 ; CHECK-NEXT:    sdivr z2.s, p1/m, z2.s, z3.s
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
 ; CHECK-NEXT:    mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = srem <4 x i8> %op1, %op2
   ret <4 x i8> %res
@@ -48,8 +34,6 @@ define <4 x i8> @srem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 define <8 x i8> @srem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: srem_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    sunpklo z2.h, z1.b
@@ -63,33 +47,9 @@ define <8 x i8> @srem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-NEXT:    sdivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    uzp1 z2.h, z2.h, z4.h
-; CHECK-NEXT:    mov z3.h, z2.h[7]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z4.h, z2.h[6]
-; CHECK-NEXT:    mov z5.h, z2.h[5]
-; CHECK-NEXT:    mov z6.h, z2.h[4]
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    mov z7.h, z2.h[3]
-; CHECK-NEXT:    mov z16.h, z2.h[2]
-; CHECK-NEXT:    mov z2.h, z2.h[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w10, [sp, #11]
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
 ; CHECK-NEXT:    mls z0.b, p0/m, z2.b, z1.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = srem <8 x i8> %op1, %op2
   ret <8 x i8> %res
@@ -187,8 +147,6 @@ define void @srem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 define <4 x i16> @srem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: srem_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
@@ -196,21 +154,9 @@ define <4 x i16> @srem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-NEXT:    sunpklo z3.s, z0.h
 ; CHECK-NEXT:    sdivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
 ; CHECK-NEXT:    mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = srem <4 x i16> %op1, %op2
   ret <4 x i16> %res
@@ -379,34 +325,20 @@ define void @srem_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 define <4 x i8> @urem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 ; CHECK-LABEL: urem_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI13_0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI13_0]
-; CHECK-NEXT:    and z0.d, z0.d, z2.d
 ; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
 ; CHECK-NEXT:    uunpklo z2.s, z1.h
 ; CHECK-NEXT:    uunpklo z3.s, z0.h
 ; CHECK-NEXT:    udivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
 ; CHECK-NEXT:    mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = urem <4 x i8> %op1, %op2
   ret <4 x i8> %res
@@ -415,8 +347,6 @@ define <4 x i8> @urem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
 define <8 x i8> @urem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-LABEL: urem_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    uunpklo z2.h, z1.b
@@ -430,33 +360,9 @@ define <8 x i8> @urem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
 ; CHECK-NEXT:    udivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    uzp1 z2.h, z2.h, z4.h
-; CHECK-NEXT:    mov z3.h, z2.h[7]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z4.h, z2.h[6]
-; CHECK-NEXT:    mov z5.h, z2.h[5]
-; CHECK-NEXT:    mov z6.h, z2.h[4]
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s6
-; CHECK-NEXT:    mov z7.h, z2.h[3]
-; CHECK-NEXT:    mov z16.h, z2.h[2]
-; CHECK-NEXT:    mov z2.h, z2.h[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w10, [sp, #11]
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
 ; CHECK-NEXT:    mls z0.b, p0/m, z2.b, z1.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = urem <8 x i8> %op1, %op2
   ret <8 x i8> %res
@@ -554,8 +460,6 @@ define void @urem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 define <4 x i16> @urem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-LABEL: urem_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    ptrue p0.s, vl4
@@ -563,21 +467,9 @@ define <4 x i16> @urem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
 ; CHECK-NEXT:    uunpklo z3.s, z0.h
 ; CHECK-NEXT:    udivr z2.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d2, [sp, #8]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
 ; CHECK-NEXT:    mls z0.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %res = urem <4 x i16> %op1, %op2
   ret <4 x i16> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index 2fcb98e0c984..fc66a2a37c7f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -255,24 +255,11 @@ define void @ucvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i32_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = uitofp <2 x i32> %op1 to <2 x half>
   ret <2 x half> %res
@@ -281,24 +268,11 @@ define <2 x half> @ucvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v4i32_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = uitofp <4 x i32> %op1 to <4 x half>
   ret <4 x half> %res
@@ -307,35 +281,15 @@ define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ucvtf z1.h, p0/m, z1.s
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z5.s, z1.s[2]
 ; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, z0.s[3]
-; CHECK-NEXT:    mov z3.s, z0.s[2]
-; CHECK-NEXT:    mov z4.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z2.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = uitofp <8 x i32> %op1 to <8 x half>
@@ -345,64 +299,21 @@ define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i32_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldp q0, q1, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z5.s, z0.s[2]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0, #32]
 ; CHECK-NEXT:    ucvtf z1.h, p0/m, z1.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z3.s, z1.s[2]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    ldp q6, q7, [x0, #32]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z4.s, z1.s[1]
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    movprfx z1, z7
-; CHECK-NEXT:    ucvtf z1.h, p0/m, z7.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z2.s, z1.s[2]
-; CHECK-NEXT:    mov z3.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    movprfx z1, z6
-; CHECK-NEXT:    ucvtf z1.h, p0/m, z6.s
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strh w8, [sp, #2]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z4.s, z1.s[3]
-; CHECK-NEXT:    strh w9, [sp, #24]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strh w10, [sp, #16]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z6.s, z1.s[1]
-; CHECK-NEXT:    strh w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strh w9, [sp, #28]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strh w10, [sp, #26]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    strh w8, [sp, #22]
-; CHECK-NEXT:    strh w9, [sp, #20]
-; CHECK-NEXT:    strh w10, [sp, #18]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p1, z0.h, z1.h
+; CHECK-NEXT:    ucvtf z3.h, p0/m, z3.s
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    ucvtf z2.h, p0/m, z2.s
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x i32>, <16 x i32>* %a
   %res = uitofp <16 x i32> %op1 to <16 x half>
@@ -540,37 +451,19 @@ define <2 x half> @ucvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
 define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z0.s
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #30]
-; CHECK-NEXT:    strh w10, [sp, #28]
-; CHECK-NEXT:    strh w8, [sp, #26]
-; CHECK-NEXT:    ldr d0, [sp, #24]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = uitofp <4 x i64> %op1 to <4 x half>
@@ -580,62 +473,29 @@ define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v8i64_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48
-; CHECK-NEXT:    .cfi_def_cfa_offset 48
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.d
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ptrue p1.s, vl2
+; CHECK-NEXT:    ptrue p2.s
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    fmov x10, d4
-; CHECK-NEXT:    fmov x12, d0
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    ucvtf z3.s, p0/m, z3.d
+; CHECK-NEXT:    fcvt z0.h, p2/m, z0.s
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
 ; CHECK-NEXT:    ucvtf z2.s, p0/m, z2.d
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
 ; CHECK-NEXT:    movprfx z1, z3
-; CHECK-NEXT:    ucvtf z1.s, p0/m, z3.d
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    stp w8, w10, [sp, #24]
-; CHECK-NEXT:    mov z3.d, z1.d[1]
-; CHECK-NEXT:    stp w9, w12, [sp, #16]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    ldr q0, [sp, #16]
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    fmov x8, d3
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    stp w11, w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    stp w9, w10, [sp]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    strh w8, [sp, #40]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    ldr q1, [sp]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #46]
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z3.s, z0.s[1]
-; CHECK-NEXT:    strh w9, [sp, #44]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp, #42]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #32]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strh w9, [sp, #38]
-; CHECK-NEXT:    strh w10, [sp, #36]
-; CHECK-NEXT:    strh w8, [sp, #34]
-; CHECK-NEXT:    ldr q0, [sp, #32]
-; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    fcvt z1.h, p2/m, z3.s
+; CHECK-NEXT:    uzp1 z2.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = uitofp <8 x i64> %op1 to <8 x half>
@@ -649,17 +509,11 @@ define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
 define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: ucvtf_v2i64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = uitofp <2 x i64> %op1 to <2 x float>
   ret <2 x float> %res
@@ -668,22 +522,15 @@ define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.d
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z2.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = uitofp <4 x i64> %op1 to <4 x float>
@@ -693,37 +540,21 @@ define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i64_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ptrue p1.s, vl2
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.d
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ldp q3, q2, [x0, #32]
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    ucvtf z1.s, p0/m, z2.d
-; CHECK-NEXT:    movprfx z2, z3
-; CHECK-NEXT:    ucvtf z2.s, p0/m, z3.d
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    mov z3.d, z0.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    stp w8, w9, [sp, #24]
-; CHECK-NEXT:    stp w10, w11, [sp, #16]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p1, z0.s, z1.s
+; CHECK-NEXT:    ucvtf z3.s, p0/m, z3.d
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    ucvtf z2.s, p0/m, z2.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
+; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i64>, <8 x i64>* %a
   %res = uitofp <8 x i64> %op1 to <8 x float>
@@ -1004,24 +835,11 @@ define void @scvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i32_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    scvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sitofp <2 x i32> %op1 to <2 x half>
   ret <2 x half> %res
@@ -1030,24 +848,11 @@ define <2 x half> @scvtf_v2i32_v2f16(<2 x i32> %op1) #0 {
 define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 ; CHECK-LABEL: scvtf_v4i32_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    scvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sitofp <4 x i32> %op1 to <4 x half>
   ret <4 x half> %res
@@ -1056,35 +861,15 @@ define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
 define <8 x half> @scvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    scvtf z1.h, p0/m, z1.s
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z5.s, z1.s[2]
 ; CHECK-NEXT:    scvtf z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, z0.s[3]
-; CHECK-NEXT:    mov z3.s, z0.s[2]
-; CHECK-NEXT:    mov z4.s, z0.s[1]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w9, [sp]
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, z1.s[1]
-; CHECK-NEXT:    strh w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strh w9, [sp, #10]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp, #6]
-; CHECK-NEXT:    strh w8, [sp, #4]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z2.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, <8 x i32>* %a
   %res = sitofp <8 x i32> %op1 to <8 x half>
@@ -1263,37 +1048,19 @@ define <2 x half> @scvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
 define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    scvtf z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    scvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z0.s
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
-; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #30]
-; CHECK-NEXT:    strh w10, [sp, #28]
-; CHECK-NEXT:    strh w8, [sp, #26]
-; CHECK-NEXT:    ldr d0, [sp, #24]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    movprfx z0, z1
+; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = sitofp <4 x i64> %op1 to <4 x half>
@@ -1307,17 +1074,11 @@ define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 ; CHECK-LABEL: scvtf_v2i64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = sitofp <2 x i64> %op1 to <2 x float>
   ret <2 x float> %res
@@ -1326,22 +1087,15 @@ define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
 define <4 x float> @scvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    scvtf z1.s, p0/m, z1.d
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.d
-; CHECK-NEXT:    mov z2.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    movprfx z0, z1
-; CHECK-NEXT:    scvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    stp w8, w9, [sp, #8]
-; CHECK-NEXT:    stp w10, w11, [sp]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z2.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, <4 x i64>* %a
   %res = sitofp <4 x i64> %op1 to <4 x float>

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
index 6004130a9f02..0b0d1144d4ae 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
@@ -6,66 +6,30 @@ target triple = "aarch64-unknown-linux-gnu"
 define i1 @ptest_v16i1(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ptest_v16i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NEXT:    ptrue p0.s, vl4
-; CHECK-NEXT:    ldp q2, q0, [x0, #32]
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
-; CHECK-NEXT:    ldp q4, q3, [x0]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    mov z0.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z2.s, z1.s
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z5.s, z0.s[3]
-; CHECK-NEXT:    mov z6.s, z0.s[2]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z3.s, z1.s
-; CHECK-NEXT:    fcmne p0.s, p0/z, z4.s, z1.s
-; CHECK-NEXT:    mov z3.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z7.s, z0.s[1]
-; CHECK-NEXT:    mov z0.s, z2.s[3]
-; CHECK-NEXT:    mov z16.s, z2.s[2]
-; CHECK-NEXT:    mov z17.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    mov z1.s, z3.s[3]
-; CHECK-NEXT:    mov z2.s, z3.s[2]
-; CHECK-NEXT:    mov z4.s, z3.s[1]
-; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    ldp q1, q2, [x0, #32]
+; CHECK-NEXT:    ptrue p1.h, vl4
+; CHECK-NEXT:    ldp q3, q4, [x0]
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    fcmne p2.s, p0/z, z2.s, z0.s
+; CHECK-NEXT:    mov z2.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    fcmne p2.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    fcmne p2.s, p0/z, z4.s, z0.s
+; CHECK-NEXT:    fcmne p0.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
 ; CHECK-NEXT:    mov z3.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    mov z18.s, z3.s[3]
-; CHECK-NEXT:    strb w8, [sp, #4]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w8, [sp, #14]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    strb w10, [sp, #9]
-; CHECK-NEXT:    fmov w10, s4
-; CHECK-NEXT:    mov z19.s, z3.s[2]
-; CHECK-NEXT:    mov z20.s, z3.s[1]
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s18
-; CHECK-NEXT:    strb w8, [sp, #6]
-; CHECK-NEXT:    fmov w8, s19
-; CHECK-NEXT:    strb w10, [sp, #5]
-; CHECK-NEXT:    fmov w10, s20
-; CHECK-NEXT:    strb w9, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w10, [sp, #1]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    splice z1.h, p1, z1.h, z2.h
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z2.h, z3.h, z3.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    uzp1 z0.b, z2.b, z2.b
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    ptrue p0.b, vl16
 ; CHECK-NEXT:    orv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w8, s0
@@ -81,128 +45,54 @@ define i1 @ptest_v16i1(ptr %a, ptr %b) #0 {
 define i1 @ptest_or_v16i1(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ptest_or_v16i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    adrp x8, .LCPI1_0
 ; CHECK-NEXT:    ptrue p0.s, vl4
-; CHECK-NEXT:    ldp q2, q1, [x0, #32]
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT:    ldp q4, q3, [x0]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z2.s, z0.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z6.s, z1.s[1]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z3.s, z0.s
-; CHECK-NEXT:    mov z3.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z4.s, z0.s
-; CHECK-NEXT:    mov z7.s, z2.s[3]
-; CHECK-NEXT:    mov z16.s, z2.s[2]
-; CHECK-NEXT:    mov z17.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z4.s, z1.s[2]
-; CHECK-NEXT:    mov z18.s, z1.s[1]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z19.s, z1.s[3]
-; CHECK-NEXT:    strb w8, [sp, #4]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    strb w8, [sp, #14]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strb w10, [sp, #9]
-; CHECK-NEXT:    fmov w10, s18
-; CHECK-NEXT:    mov z20.s, z1.s[2]
-; CHECK-NEXT:    mov z21.s, z1.s[1]
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    strb w8, [sp, #6]
-; CHECK-NEXT:    fmov w8, s20
-; CHECK-NEXT:    strb w10, [sp, #5]
-; CHECK-NEXT:    fmov w10, s21
-; CHECK-NEXT:    strb w9, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w10, [sp, #1]
-; CHECK-NEXT:    ldr q1, [x1, #48]
-; CHECK-NEXT:    ldr q5, [x1]
-; CHECK-NEXT:    ldr q7, [sp]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    ldp q6, q1, [x1, #16]
-; CHECK-NEXT:    mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    strb w8, [sp, #28]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z6.s, z0.s
-; CHECK-NEXT:    mov z6.s, z1.s[3]
-; CHECK-NEXT:    mov z16.s, z1.s[2]
-; CHECK-NEXT:    mov z17.s, z1.s[1]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p0.s, p0/z, z5.s, z0.s
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z18.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    strb w8, [sp, #24]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #20]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z19.s, z1.s[3]
-; CHECK-NEXT:    mov z20.s, z1.s[2]
-; CHECK-NEXT:    strb w10, [sp, #16]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #31]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #30]
-; CHECK-NEXT:    fmov w9, s16
-; CHECK-NEXT:    strb w10, [sp, #29]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #27]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w9, [sp, #26]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strb w10, [sp, #25]
-; CHECK-NEXT:    fmov w10, s18
-; CHECK-NEXT:    mov z21.s, z1.s[1]
-; CHECK-NEXT:    strb w8, [sp, #23]
-; CHECK-NEXT:    fmov w8, s19
-; CHECK-NEXT:    strb w9, [sp, #22]
-; CHECK-NEXT:    fmov w9, s20
-; CHECK-NEXT:    strb w10, [sp, #21]
-; CHECK-NEXT:    fmov w10, s21
+; CHECK-NEXT:    ldp q0, q2, [x0, #32]
+; CHECK-NEXT:    ptrue p1.h, vl4
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    ldp q3, q4, [x0]
+; CHECK-NEXT:    fcmne p2.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    fcmne p3.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z2.s, p3/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    ldp q0, q5, [x1, #32]
+; CHECK-NEXT:    fcmne p2.s, p0/z, z4.s, z1.s
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    mov z4.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    fcmne p2.s, p0/z, z3.s, z1.s
+; CHECK-NEXT:    mov z3.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    fcmne p3.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z4.h
+; CHECK-NEXT:    fcmne p2.s, p0/z, z5.s, z1.s
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    ldp q4, q5, [x1]
+; CHECK-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    fcmne p2.s, p0/z, z5.s, z1.s
+; CHECK-NEXT:    fcmne p0.s, p0/z, z4.s, z1.s
+; CHECK-NEXT:    mov z5.s, p3/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z4.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z5.h, z5.h, z5.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    splice z5.h, p1, z5.h, z0.h
+; CHECK-NEXT:    splice z4.h, p1, z4.h, z1.h
+; CHECK-NEXT:    ptrue p3.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z5.b, z5.b
+; CHECK-NEXT:    uzp1 z1.b, z4.b, z4.b
+; CHECK-NEXT:    splice z3.b, p3, z3.b, z2.b
+; CHECK-NEXT:    splice z1.b, p3, z1.b, z0.b
 ; CHECK-NEXT:    ptrue p0.b, vl16
-; CHECK-NEXT:    strb w8, [sp, #19]
-; CHECK-NEXT:    strb w9, [sp, #18]
-; CHECK-NEXT:    strb w10, [sp, #17]
-; CHECK-NEXT:    ldr q0, [sp, #16]
-; CHECK-NEXT:    orr z0.d, z7.d, z0.d
+; CHECK-NEXT:    orr z0.d, z3.d, z1.d
 ; CHECK-NEXT:    orv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %v0 = bitcast ptr %a to <16 x float>*
   %v1 = load <16 x float>, <16 x float>* %v0, align 4
@@ -224,128 +114,54 @@ declare i1 @llvm.vector.reduce.or.i1.v16i1(<16 x i1>)
 define i1 @ptest_and_v16i1(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ptest_and_v16i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    adrp x8, .LCPI2_0
 ; CHECK-NEXT:    ptrue p0.s, vl4
-; CHECK-NEXT:    ldp q2, q1, [x0, #32]
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_0]
-; CHECK-NEXT:    ldp q4, q3, [x0]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z2.s, z0.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z6.s, z1.s[1]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z3.s, z0.s
-; CHECK-NEXT:    mov z3.s, z1.s[3]
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z4.s, z0.s
-; CHECK-NEXT:    mov z7.s, z2.s[3]
-; CHECK-NEXT:    mov z16.s, z2.s[2]
-; CHECK-NEXT:    mov z17.s, z2.s[1]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    mov z4.s, z1.s[2]
-; CHECK-NEXT:    mov z18.s, z1.s[1]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    mov z19.s, z1.s[3]
-; CHECK-NEXT:    strb w8, [sp, #4]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    strb w8, [sp, #14]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s2
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    fmov w8, s4
-; CHECK-NEXT:    strb w10, [sp, #9]
-; CHECK-NEXT:    fmov w10, s18
-; CHECK-NEXT:    mov z20.s, z1.s[2]
-; CHECK-NEXT:    mov z21.s, z1.s[1]
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s19
-; CHECK-NEXT:    strb w8, [sp, #6]
-; CHECK-NEXT:    fmov w8, s20
-; CHECK-NEXT:    strb w10, [sp, #5]
-; CHECK-NEXT:    fmov w10, s21
-; CHECK-NEXT:    strb w9, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w10, [sp, #1]
-; CHECK-NEXT:    ldr q1, [x1, #48]
-; CHECK-NEXT:    ldr q5, [x1]
-; CHECK-NEXT:    ldr q7, [sp]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    ldp q6, q1, [x1, #16]
-; CHECK-NEXT:    mov z2.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z4.s, z2.s[2]
-; CHECK-NEXT:    mov z2.s, z2.s[1]
-; CHECK-NEXT:    strb w8, [sp, #28]
-; CHECK-NEXT:    fcmne p1.s, p0/z, z1.s, z0.s
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p1.s, p0/z, z6.s, z0.s
-; CHECK-NEXT:    mov z6.s, z1.s[3]
-; CHECK-NEXT:    mov z16.s, z1.s[2]
-; CHECK-NEXT:    mov z17.s, z1.s[1]
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov z1.s, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    fcmne p0.s, p0/z, z5.s, z0.s
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    mov z18.s, z1.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    strb w8, [sp, #24]
-; CHECK-NEXT:    fmov w10, s1
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #20]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z19.s, z1.s[3]
-; CHECK-NEXT:    mov z20.s, z1.s[2]
-; CHECK-NEXT:    strb w10, [sp, #16]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #31]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #30]
-; CHECK-NEXT:    fmov w9, s16
-; CHECK-NEXT:    strb w10, [sp, #29]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    strb w8, [sp, #27]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w9, [sp, #26]
-; CHECK-NEXT:    fmov w9, s5
-; CHECK-NEXT:    strb w10, [sp, #25]
-; CHECK-NEXT:    fmov w10, s18
-; CHECK-NEXT:    mov z21.s, z1.s[1]
-; CHECK-NEXT:    strb w8, [sp, #23]
-; CHECK-NEXT:    fmov w8, s19
-; CHECK-NEXT:    strb w9, [sp, #22]
-; CHECK-NEXT:    fmov w9, s20
-; CHECK-NEXT:    strb w10, [sp, #21]
-; CHECK-NEXT:    fmov w10, s21
+; CHECK-NEXT:    ldp q0, q2, [x0, #32]
+; CHECK-NEXT:    ptrue p1.h, vl4
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    ldp q3, q4, [x0]
+; CHECK-NEXT:    fcmne p2.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    fcmne p3.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z2.s, p3/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    ldp q0, q5, [x1, #32]
+; CHECK-NEXT:    fcmne p2.s, p0/z, z4.s, z1.s
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    mov z4.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    fcmne p2.s, p0/z, z3.s, z1.s
+; CHECK-NEXT:    mov z3.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    fcmne p3.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z4.h
+; CHECK-NEXT:    fcmne p2.s, p0/z, z5.s, z1.s
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    ldp q4, q5, [x1]
+; CHECK-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    fcmne p2.s, p0/z, z5.s, z1.s
+; CHECK-NEXT:    fcmne p0.s, p0/z, z4.s, z1.s
+; CHECK-NEXT:    mov z5.s, p3/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z4.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    uzp1 z5.h, z5.h, z5.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    splice z5.h, p1, z5.h, z0.h
+; CHECK-NEXT:    splice z4.h, p1, z4.h, z1.h
+; CHECK-NEXT:    ptrue p3.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z5.b, z5.b
+; CHECK-NEXT:    uzp1 z1.b, z4.b, z4.b
+; CHECK-NEXT:    splice z3.b, p3, z3.b, z2.b
+; CHECK-NEXT:    splice z1.b, p3, z1.b, z0.b
 ; CHECK-NEXT:    ptrue p0.b, vl16
-; CHECK-NEXT:    strb w8, [sp, #19]
-; CHECK-NEXT:    strb w9, [sp, #18]
-; CHECK-NEXT:    strb w10, [sp, #17]
-; CHECK-NEXT:    ldr q0, [sp, #16]
-; CHECK-NEXT:    and z0.d, z7.d, z0.d
+; CHECK-NEXT:    and z0.d, z3.d, z1.d
 ; CHECK-NEXT:    andv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %v0 = bitcast ptr %a to <16 x float>*
   %v1 = load <16 x float>, <16 x float>* %v0, align 4

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
index 001eec17f794..73c8e2aee5a0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
@@ -6,35 +6,9 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @store_trunc_v8i16i8(<8 x i16>* %ap, <8 x i8>* %dest) #0 {
 ; CHECK-LABEL: store_trunc_v8i16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mov z1.h, z0.h[7]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z2.h, z0.h[6]
-; CHECK-NEXT:    mov z3.h, z0.h[5]
-; CHECK-NEXT:    mov z4.h, z0.h[4]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z5.h, z0.h[3]
-; CHECK-NEXT:    mov z6.h, z0.h[2]
-; CHECK-NEXT:    mov z0.h, z0.h[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    strb w10, [sp, #11]
-; CHECK-NEXT:    strb w8, [sp, #10]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; CHECK-NEXT:    str d0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, <8 x i16>* %ap
   %val = trunc <8 x i16> %a to <8 x i8>
@@ -45,24 +19,10 @@ define void @store_trunc_v8i16i8(<8 x i16>* %ap, <8 x i8>* %dest) #0 {
 define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %ap
   %val = trunc <4 x i32> %a to <4 x i8>
@@ -73,23 +33,9 @@ define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 {
 define void @store_trunc_v4i32i16(<4 x i32>* %ap, <4 x i16>* %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z1.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z0.s[2]
-; CHECK-NEXT:    mov z0.s, z0.s[1]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strh w8, [sp, #8]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    strh w10, [sp, #12]
-; CHECK-NEXT:    strh w8, [sp, #10]
-; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    str d0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, <4 x i32>* %ap
   %val = trunc <4 x i32> %a to <4 x i16>
@@ -97,7 +43,7 @@ define void @store_trunc_v4i32i16(<4 x i32>* %ap, <4 x i16>* %dest) #0 {
   ret void
 }
 
-define void @store_trunc_v2i64i8(<2 x i64>* %ap, <2 x i32>* %dest) vscale_range(2,0) #0 {
+define void @store_trunc_v2i64i8(<2 x i64>* %ap, <2 x i32>* %dest) #0 {
 ; CHECK-LABEL: store_trunc_v2i64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -110,7 +56,7 @@ define void @store_trunc_v2i64i8(<2 x i64>* %ap, <2 x i32>* %dest) vscale_range(
   ret void
 }
 
-define void @store_trunc_v2i256i64(<2 x i256>* %ap, <2 x i64>* %dest) vscale_range(2,0) #0 {
+define void @store_trunc_v2i256i64(<2 x i256>* %ap, <2 x i64>* %dest) #0 {
 ; CHECK-LABEL: store_trunc_v2i256i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #32]

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
index e2a69397d4e3..503f800a88db 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
@@ -7,12 +7,14 @@ target triple = "aarch64-unknown-linux-gnu"
 ; truncate i16 -> i8
 ;
 
-define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) vscale_range(2,0) #0 {
+define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) #0 {
 ; CHECK-LABEL: trunc_v16i16_v16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl16
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <16 x i16>, <16 x i16>* %in
@@ -24,106 +26,18 @@ define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) vscale_range(2,0) #0 {
 define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i16_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z17.h, z1.h[6]
-; CHECK-NEXT:    mov z18.h, z1.h[5]
-; CHECK-NEXT:    mov z19.h, z1.h[4]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.h, z0.h[7]
-; CHECK-NEXT:    mov z3.h, z0.h[6]
-; CHECK-NEXT:    mov z4.h, z0.h[5]
-; CHECK-NEXT:    ldp q22, q23, [x0]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #16]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z5.h, z0.h[4]
-; CHECK-NEXT:    mov z6.h, z0.h[3]
-; CHECK-NEXT:    mov z7.h, z0.h[2]
-; CHECK-NEXT:    strb w10, [sp, #31]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #30]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #29]
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    mov z16.h, z0.h[1]
-; CHECK-NEXT:    mov z0.h, z1.h[7]
-; CHECK-NEXT:    strb w10, [sp, #28]
-; CHECK-NEXT:    fmov w10, s16
-; CHECK-NEXT:    strb w8, [sp, #27]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w9, [sp, #26]
-; CHECK-NEXT:    fmov w9, s17
-; CHECK-NEXT:    mov z20.h, z1.h[3]
-; CHECK-NEXT:    strb w10, [sp, #25]
-; CHECK-NEXT:    fmov w10, s18
-; CHECK-NEXT:    strb w8, [sp, #23]
-; CHECK-NEXT:    fmov w8, s19
-; CHECK-NEXT:    strb w9, [sp, #22]
-; CHECK-NEXT:    fmov w9, s20
-; CHECK-NEXT:    mov z21.h, z1.h[2]
-; CHECK-NEXT:    mov z0.h, z1.h[1]
-; CHECK-NEXT:    strb w10, [sp, #21]
-; CHECK-NEXT:    fmov w10, s21
-; CHECK-NEXT:    strb w8, [sp, #20]
-; CHECK-NEXT:    strb w9, [sp, #19]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w9, s23
-; CHECK-NEXT:    mov z0.h, z23.h[7]
-; CHECK-NEXT:    mov z1.h, z23.h[6]
-; CHECK-NEXT:    strb w10, [sp, #18]
-; CHECK-NEXT:    fmov w10, s22
-; CHECK-NEXT:    strb w8, [sp, #17]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z2.h, z23.h[5]
-; CHECK-NEXT:    mov z3.h, z23.h[4]
-; CHECK-NEXT:    mov z4.h, z23.h[3]
-; CHECK-NEXT:    strb w10, [sp]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    strb w8, [sp, #15]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strb w9, [sp, #14]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    mov z5.h, z23.h[2]
-; CHECK-NEXT:    mov z6.h, z23.h[1]
-; CHECK-NEXT:    mov z7.h, z22.h[7]
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    mov z16.h, z22.h[6]
-; CHECK-NEXT:    mov z17.h, z22.h[5]
-; CHECK-NEXT:    mov z18.h, z22.h[4]
-; CHECK-NEXT:    strb w10, [sp, #10]
-; CHECK-NEXT:    fmov w10, s16
-; CHECK-NEXT:    strb w8, [sp, #9]
-; CHECK-NEXT:    fmov w8, s17
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s18
-; CHECK-NEXT:    mov z19.h, z22.h[3]
-; CHECK-NEXT:    mov z20.h, z22.h[2]
-; CHECK-NEXT:    mov z21.h, z22.h[1]
-; CHECK-NEXT:    strb w10, [sp, #6]
-; CHECK-NEXT:    fmov w10, s19
-; CHECK-NEXT:    strb w8, [sp, #5]
-; CHECK-NEXT:    fmov w8, s20
-; CHECK-NEXT:    strb w9, [sp, #4]
-; CHECK-NEXT:    fmov w9, s21
-; CHECK-NEXT:    strb w10, [sp, #3]
-; CHECK-NEXT:    strb w8, [sp, #2]
-; CHECK-NEXT:    strb w9, [sp, #1]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    splice z3.b, p0, z3.b, z2.b
+; CHECK-NEXT:    add z1.b, z3.b, z3.b
 ; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %a = load <32 x i16>, <32 x i16>* %in
   %b = trunc <32 x i16> %a to <32 x i8>
@@ -133,15 +47,32 @@ define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) vscale_range(8,0) #0 {
+define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v64i16_v64i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl64
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    ldp q0, q1, [x0, #64]
+; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    ldp q2, q3, [x0, #96]
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    splice z2.b, p0, z2.b, z3.b
+; CHECK-NEXT:    uzp1 z4.b, z4.b, z4.b
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z1.b, z5.b, z5.b
+; CHECK-NEXT:    splice z4.b, p0, z4.b, z1.b
+; CHECK-NEXT:    uzp1 z3.b, z6.b, z6.b
+; CHECK-NEXT:    uzp1 z1.b, z7.b, z7.b
+; CHECK-NEXT:    splice z3.b, p0, z3.b, z1.b
+; CHECK-NEXT:    add z1.b, z2.b, z2.b
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z0.b, z4.b, z4.b
+; CHECK-NEXT:    add z1.b, z3.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <64 x i16>, <64 x i16>* %in
   %b = trunc <64 x i16> %a to <64 x i8>
@@ -151,15 +82,54 @@ define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) vscale_range(8
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) vscale_range(16,0) #0 {
+define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v128i16_v128i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl128
-; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.b, vl128
+; CHECK-NEXT:    ldp q0, q1, [x0, #192]
+; CHECK-NEXT:    ptrue p0.b, vl8
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    ldp q2, q3, [x0, #224]
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    ldp q6, q7, [x0, #128]
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    splice z2.b, p0, z2.b, z3.b
+; CHECK-NEXT:    add z2.b, z2.b, z2.b
+; CHECK-NEXT:    uzp1 z6.b, z6.b, z6.b
+; CHECK-NEXT:    ldp q1, q3, [x0, #160]
+; CHECK-NEXT:    uzp1 z7.b, z7.b, z7.b
+; CHECK-NEXT:    splice z6.b, p0, z6.b, z7.b
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    ldp q16, q17, [x0, #64]
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    splice z1.b, p0, z1.b, z3.b
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    uzp1 z16.b, z16.b, z16.b
+; CHECK-NEXT:    ldp q7, q18, [x0, #96]
+; CHECK-NEXT:    uzp1 z17.b, z17.b, z17.b
+; CHECK-NEXT:    splice z16.b, p0, z16.b, z17.b
+; CHECK-NEXT:    uzp1 z7.b, z7.b, z7.b
+; CHECK-NEXT:    ldp q4, q5, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.b, z18.b, z18.b
+; CHECK-NEXT:    splice z7.b, p0, z7.b, z3.b
+; CHECK-NEXT:    uzp1 z4.b, z4.b, z4.b
+; CHECK-NEXT:    ldp q19, q20, [x0]
+; CHECK-NEXT:    uzp1 z3.b, z5.b, z5.b
+; CHECK-NEXT:    stp q0, q2, [x1, #96]
+; CHECK-NEXT:    add z0.b, z6.b, z6.b
+; CHECK-NEXT:    splice z4.b, p0, z4.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x1, #64]
+; CHECK-NEXT:    add z0.b, z16.b, z16.b
+; CHECK-NEXT:    uzp1 z18.b, z19.b, z19.b
+; CHECK-NEXT:    add z1.b, z7.b, z7.b
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z1.b, z4.b, z4.b
+; CHECK-NEXT:    uzp1 z17.b, z20.b, z20.b
+; CHECK-NEXT:    splice z18.b, p0, z18.b, z17.b
+; CHECK-NEXT:    add z0.b, z18.b, z18.b
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <128 x i16>, <128 x i16>* %in
   %b = trunc <128 x i16> %a to <128 x i8>
@@ -172,12 +142,14 @@ define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) vscale_ran
 ; truncate i32 -> i8
 ;
 
-define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) vscale_range(2,0) #0 {
+define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) #0 {
 ; CHECK-LABEL: trunc_v8i32_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl8
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -189,54 +161,20 @@ define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) vscale_range(2,0) #0 {
 define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 {
 ; CHECK-LABEL: trunc_v16i32_v16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z7.s, z1.s[2]
-; CHECK-NEXT:    mov z16.s, z1.s[1]
-; CHECK-NEXT:    ldp q2, q3, [x0]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z4.s, z0.s[3]
-; CHECK-NEXT:    mov z5.s, z0.s[2]
-; CHECK-NEXT:    mov z6.s, z0.s[1]
-; CHECK-NEXT:    strb w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strb w8, [sp, #12]
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    mov z19.s, z2.s[2]
-; CHECK-NEXT:    fmov w10, s3
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    strb w8, [sp]
-; CHECK-NEXT:    fmov w8, s6
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    mov z1.s, z3.s[3]
-; CHECK-NEXT:    strb w10, [sp, #4]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strb w8, [sp, #13]
-; CHECK-NEXT:    fmov w8, s16
-; CHECK-NEXT:    mov z17.s, z3.s[2]
-; CHECK-NEXT:    mov z18.s, z3.s[1]
-; CHECK-NEXT:    strb w10, [sp, #14]
-; CHECK-NEXT:    fmov w10, s7
-; CHECK-NEXT:    strb w9, [sp, #11]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strb w8, [sp, #9]
-; CHECK-NEXT:    fmov w8, s18
-; CHECK-NEXT:    strb w10, [sp, #10]
-; CHECK-NEXT:    fmov w10, s17
-; CHECK-NEXT:    mov z3.s, z2.s[3]
-; CHECK-NEXT:    mov z20.s, z2.s[1]
-; CHECK-NEXT:    strb w9, [sp, #7]
-; CHECK-NEXT:    fmov w9, s3
-; CHECK-NEXT:    strb w10, [sp, #6]
-; CHECK-NEXT:    fmov w10, s19
-; CHECK-NEXT:    strb w8, [sp, #5]
-; CHECK-NEXT:    fmov w8, s20
-; CHECK-NEXT:    strb w9, [sp, #3]
-; CHECK-NEXT:    strb w10, [sp, #2]
-; CHECK-NEXT:    strb w8, [sp, #1]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z2.h
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z3.b, z3.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %in
   %b = trunc <16 x i32> %a to <16 x i8>
@@ -244,16 +182,36 @@ define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) vscale_range(8,0) #0 {
+define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i32_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl32
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ldp q0, q1, [x0, #96]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ptrue p1.b, vl8
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q2, q3, [x0, #64]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK-NEXT:    add z0.b, z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z2.h, p0, z2.h, z3.h
+; CHECK-NEXT:    uzp1 z1.b, z2.b, z2.b
+; CHECK-NEXT:    splice z1.b, p1, z1.b, z0.b
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.h, z5.h, z5.h
+; CHECK-NEXT:    splice z4.h, p0, z4.h, z3.h
+; CHECK-NEXT:    uzp1 z2.h, z6.h, z6.h
+; CHECK-NEXT:    uzp1 z0.h, z7.h, z7.h
+; CHECK-NEXT:    splice z2.h, p0, z2.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z2.b, z2.b
+; CHECK-NEXT:    uzp1 z2.b, z4.b, z4.b
+; CHECK-NEXT:    splice z2.b, p1, z2.b, z0.b
+; CHECK-NEXT:    add z0.b, z1.b, z1.b
+; CHECK-NEXT:    add z1.b, z2.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <32 x i32>, <32 x i32>* %in
   %b = trunc <32 x i32> %a to <32 x i8>
@@ -263,16 +221,61 @@ define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) vscale_range(8
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) vscale_range(16,0) #0 {
+define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v64i32_v64i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl64
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    ldp q0, q1, [x0, #128]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ptrue p1.b, vl8
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q2, q3, [x0, #160]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z2.h, p0, z2.h, z3.h
+; CHECK-NEXT:    uzp1 z2.b, z2.b, z2.b
+; CHECK-NEXT:    ldp q1, q17, [x0, #224]
+; CHECK-NEXT:    splice z0.b, p1, z0.b, z2.b
 ; CHECK-NEXT:    add z0.b, z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    ldp q18, q2, [x0, #192]
+; CHECK-NEXT:    uzp1 z17.h, z17.h, z17.h
+; CHECK-NEXT:    splice z1.h, p0, z1.h, z17.h
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    uzp1 z18.h, z18.h, z18.h
+; CHECK-NEXT:    ldp q4, q5, [x0, #64]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z18.h, p0, z18.h, z2.h
+; CHECK-NEXT:    uzp1 z2.b, z18.b, z18.b
+; CHECK-NEXT:    splice z2.b, p1, z2.b, z1.b
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    ldp q6, q7, [x0, #96]
+; CHECK-NEXT:    uzp1 z5.h, z5.h, z5.h
+; CHECK-NEXT:    splice z4.h, p0, z4.h, z5.h
+; CHECK-NEXT:    uzp1 z4.b, z4.b, z4.b
+; CHECK-NEXT:    uzp1 z6.h, z6.h, z6.h
+; CHECK-NEXT:    ldp q3, q16, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z7.h, z7.h
+; CHECK-NEXT:    splice z6.h, p0, z6.h, z1.h
+; CHECK-NEXT:    uzp1 z1.b, z6.b, z6.b
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z4.b, p1, z4.b, z1.b
+; CHECK-NEXT:    add z1.b, z2.b, z2.b
+; CHECK-NEXT:    ldp q19, q20, [x0, #32]
+; CHECK-NEXT:    uzp1 z16.h, z16.h, z16.h
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z16.h
+; CHECK-NEXT:    add z1.b, z4.b, z4.b
+; CHECK-NEXT:    uzp1 z3.b, z3.b, z3.b
+; CHECK-NEXT:    uzp1 z18.h, z19.h, z19.h
+; CHECK-NEXT:    uzp1 z17.h, z20.h, z20.h
+; CHECK-NEXT:    splice z18.h, p0, z18.h, z17.h
+; CHECK-NEXT:    uzp1 z16.b, z18.b, z18.b
+; CHECK-NEXT:    splice z3.b, p1, z3.b, z16.b
+; CHECK-NEXT:    add z0.b, z3.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <64 x i32>, <64 x i32>* %in
   %b = trunc <64 x i32> %a to <64 x i8>
@@ -285,12 +288,14 @@ define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) vscale_range(1
 ; truncate i32 -> i16
 ;
 
-define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) vscale_range(2,0) #0 {
+define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) #0 {
 ; CHECK-LABEL: trunc_v8i32_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl8
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %in
@@ -302,58 +307,18 @@ define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) vscale_range(2,0) #0 {
 define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 {
 ; CHECK-LABEL: trunc_v16i32_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    mov z5.s, z1.s[2]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z2.s, z0.s[3]
-; CHECK-NEXT:    mov z3.s, z0.s[2]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    ldp q6, q7, [x0]
-; CHECK-NEXT:    strh w8, [sp, #24]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    mov z4.s, z0.s[1]
-; CHECK-NEXT:    mov z0.s, z1.s[3]
-; CHECK-NEXT:    strh w9, [sp, #16]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #30]
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    strh w8, [sp, #28]
-; CHECK-NEXT:    fmov w8, s5
-; CHECK-NEXT:    mov z0.s, z1.s[1]
-; CHECK-NEXT:    strh w9, [sp, #26]
-; CHECK-NEXT:    strh w10, [sp, #22]
-; CHECK-NEXT:    fmov w9, s7
-; CHECK-NEXT:    strh w8, [sp, #20]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    fmov w10, s6
-; CHECK-NEXT:    mov z0.s, z7.s[3]
-; CHECK-NEXT:    mov z1.s, z7.s[2]
-; CHECK-NEXT:    mov z2.s, z7.s[1]
-; CHECK-NEXT:    strh w8, [sp, #18]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    strh w9, [sp, #8]
-; CHECK-NEXT:    fmov w9, s1
-; CHECK-NEXT:    strh w10, [sp]
-; CHECK-NEXT:    fmov w10, s2
-; CHECK-NEXT:    mov z3.s, z6.s[3]
-; CHECK-NEXT:    mov z4.s, z6.s[2]
-; CHECK-NEXT:    mov z5.s, z6.s[1]
-; CHECK-NEXT:    strh w8, [sp, #14]
-; CHECK-NEXT:    fmov w8, s3
-; CHECK-NEXT:    strh w9, [sp, #12]
-; CHECK-NEXT:    fmov w9, s4
-; CHECK-NEXT:    strh w10, [sp, #10]
-; CHECK-NEXT:    fmov w10, s5
-; CHECK-NEXT:    strh w8, [sp, #6]
-; CHECK-NEXT:    strh w9, [sp, #4]
-; CHECK-NEXT:    strh w10, [sp, #2]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z2.h
+; CHECK-NEXT:    add z1.h, z3.h, z3.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %a = load <16 x i32>, <16 x i32>* %in
   %b = trunc <16 x i32> %a to <16 x i16>
@@ -363,15 +328,32 @@ define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) vscale_range(8,0) #0 {
+define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i32_v32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl32
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl32
+; CHECK-NEXT:    ldp q0, q1, [x0, #64]
+; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q2, q3, [x0, #96]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    add z0.h, z0.h, z0.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z2.h, p0, z2.h, z3.h
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z1.h, z5.h, z5.h
+; CHECK-NEXT:    splice z4.h, p0, z4.h, z1.h
+; CHECK-NEXT:    uzp1 z3.h, z6.h, z6.h
+; CHECK-NEXT:    uzp1 z1.h, z7.h, z7.h
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z1.h
+; CHECK-NEXT:    add z1.h, z2.h, z2.h
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z0.h, z4.h, z4.h
+; CHECK-NEXT:    add z1.h, z3.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <32 x i32>, <32 x i32>* %in
   %b = trunc <32 x i32> %a to <32 x i16>
@@ -381,15 +363,54 @@ define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) vscale_range
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) vscale_range(16,0) #0 {
+define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) #0 {
 ; CHECK-LABEL: trunc_v64i32_v64i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl64
-; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl64
+; CHECK-NEXT:    ldp q0, q1, [x0, #192]
+; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q2, q3, [x0, #224]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    add z0.h, z0.h, z0.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    ldp q6, q7, [x0, #128]
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z2.h, p0, z2.h, z3.h
+; CHECK-NEXT:    add z2.h, z2.h, z2.h
+; CHECK-NEXT:    uzp1 z6.h, z6.h, z6.h
+; CHECK-NEXT:    ldp q1, q3, [x0, #160]
+; CHECK-NEXT:    uzp1 z7.h, z7.h, z7.h
+; CHECK-NEXT:    splice z6.h, p0, z6.h, z7.h
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    ldp q16, q17, [x0, #64]
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z1.h, p0, z1.h, z3.h
+; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z16.h, z16.h, z16.h
+; CHECK-NEXT:    ldp q7, q18, [x0, #96]
+; CHECK-NEXT:    uzp1 z17.h, z17.h, z17.h
+; CHECK-NEXT:    splice z16.h, p0, z16.h, z17.h
+; CHECK-NEXT:    uzp1 z7.h, z7.h, z7.h
+; CHECK-NEXT:    ldp q4, q5, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.h, z18.h, z18.h
+; CHECK-NEXT:    splice z7.h, p0, z7.h, z3.h
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    ldp q19, q20, [x0]
+; CHECK-NEXT:    uzp1 z3.h, z5.h, z5.h
+; CHECK-NEXT:    stp q0, q2, [x1, #96]
+; CHECK-NEXT:    add z0.h, z6.h, z6.h
+; CHECK-NEXT:    splice z4.h, p0, z4.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x1, #64]
+; CHECK-NEXT:    add z0.h, z16.h, z16.h
+; CHECK-NEXT:    uzp1 z18.h, z19.h, z19.h
+; CHECK-NEXT:    add z1.h, z7.h, z7.h
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z1.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z17.h, z20.h, z20.h
+; CHECK-NEXT:    splice z18.h, p0, z18.h, z17.h
+; CHECK-NEXT:    add z0.h, z18.h, z18.h
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <64 x i32>, <64 x i32>* %in
   %b = trunc <64 x i32> %a to <64 x i16>
@@ -403,12 +424,14 @@ define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) vscale_range
 ;
 
 ; NOTE: v4i8 is not legal so result i8 elements are held within i16 containers.
-define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) vscale_range(2,0) #0 {
+define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -420,45 +443,58 @@ define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) vscale_range(2,0) #0 {
 define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    strb w9, [sp, #12]
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    strb w8, [sp, #14]
-; CHECK-NEXT:    fmov x8, d3
-; CHECK-NEXT:    strb w9, [sp, #15]
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    mov z1.d, z2.d[1]
-; CHECK-NEXT:    mov z2.d, z3.d[1]
-; CHECK-NEXT:    strb w8, [sp, #8]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    strb w10, [sp, #10]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    strb w8, [sp, #11]
-; CHECK-NEXT:    strb w10, [sp, #13]
-; CHECK-NEXT:    strb w9, [sp, #9]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z1.h, z3.h, z3.h
+; CHECK-NEXT:    splice z1.h, p0, z1.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z1.b, z1.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %in
   %b = trunc <8 x i64> %a to <8 x i8>
   ret <8 x i8> %b
 }
 
-define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) vscale_range(8,0) #0 {
+define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl16
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0, #96]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #64]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z1.h, z2.h, z2.h
+; CHECK-NEXT:    splice z1.h, p1, z1.h, z0.h
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    uzp1 z1.b, z1.b, z1.b
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.s, z5.s, z5.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z6.s, z6.s
+; CHECK-NEXT:    uzp1 z0.s, z7.s, z7.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z0.s
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.h, z2.h, z2.h
+; CHECK-NEXT:    uzp1 z2.h, z4.h, z4.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z2.b, z2.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <16 x i64>, <16 x i64>* %in
@@ -467,17 +503,65 @@ define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) vscale_range(8,0) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) vscale_range(16,0) #0 {
+define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl32
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ldp q0, q1, [x0, #224]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ptrue p1.h, vl4
+; CHECK-NEXT:    ptrue p2.b, vl8
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #192]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK-NEXT:    add z0.b, z0.b, z0.b
-; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    ldp q1, q16, [x0, #160]
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    uzp1 z0.b, z2.b, z2.b
+; CHECK-NEXT:    ldp q3, q17, [x0, #128]
+; CHECK-NEXT:    uzp1 z16.s, z16.s, z16.s
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z16.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z17.s, z17.s, z17.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z17.s
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z1.h
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z1.b, z3.b, z3.b
+; CHECK-NEXT:    splice z1.b, p2, z1.b, z0.b
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    ldp q6, q7, [x0, #64]
+; CHECK-NEXT:    uzp1 z5.s, z5.s, z5.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z5.s
+; CHECK-NEXT:    uzp1 z6.s, z6.s, z6.s
+; CHECK-NEXT:    ldp q18, q19, [x0, #96]
+; CHECK-NEXT:    uzp1 z7.s, z7.s, z7.s
+; CHECK-NEXT:    splice z6.s, p0, z6.s, z7.s
+; CHECK-NEXT:    uzp1 z6.h, z6.h, z6.h
+; CHECK-NEXT:    uzp1 z16.s, z18.s, z18.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-NEXT:    uzp1 z0.s, z19.s, z19.s
+; CHECK-NEXT:    splice z16.s, p0, z16.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z16.h, z16.h
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z6.h, p1, z6.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z6.b, z6.b
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z3.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
+; CHECK-NEXT:    uzp1 z2.b, z3.b, z3.b
+; CHECK-NEXT:    splice z2.b, p2, z2.b, z0.b
+; CHECK-NEXT:    add z0.b, z1.b, z1.b
+; CHECK-NEXT:    add z1.b, z2.b, z2.b
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <32 x i64>, <32 x i64>* %in
   %b = trunc <32 x i64> %a to <32 x i8>
@@ -490,12 +574,14 @@ define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) vscale_range(1
 ; truncate i64 -> i16
 ;
 
-define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) vscale_range(2,0) #0 {
+define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
@@ -507,30 +593,20 @@ define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) vscale_range(2,0) #0 {
 define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov x9, d1
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    strh w9, [sp, #8]
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    fmov x8, d3
-; CHECK-NEXT:    strh w9, [sp, #14]
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    mov z1.d, z2.d[1]
-; CHECK-NEXT:    mov z2.d, z3.d[1]
-; CHECK-NEXT:    strh w8, [sp]
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    strh w10, [sp, #4]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    strh w8, [sp, #6]
-; CHECK-NEXT:    strh w10, [sp, #10]
-; CHECK-NEXT:    strh w9, [sp, #2]
-; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z3.h, z3.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %in
   %b = trunc <8 x i64> %a to <8 x i16>
@@ -538,16 +614,36 @@ define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) vscale_range(8,0) #0 {
+define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl16
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ldp q0, q1, [x0, #96]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #64]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK-NEXT:    add z0.h, z0.h, z0.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z1.h, z2.h, z2.h
+; CHECK-NEXT:    splice z1.h, p1, z1.h, z0.h
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.s, z5.s, z5.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z6.s, z6.s
+; CHECK-NEXT:    uzp1 z0.s, z7.s, z7.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z2.h, z2.h
+; CHECK-NEXT:    uzp1 z2.h, z4.h, z4.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z0.h
+; CHECK-NEXT:    add z0.h, z1.h, z1.h
+; CHECK-NEXT:    add z1.h, z2.h, z2.h
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <16 x i64>, <16 x i64>* %in
   %b = trunc <16 x i64> %a to <16 x i16>
@@ -557,16 +653,61 @@ define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) vscale_range
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) vscale_range(16,0) #0 {
+define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl32
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl32
+; CHECK-NEXT:    ldp q0, q1, [x0, #128]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ptrue p1.h, vl4
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #160]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    ldp q1, q17, [x0, #224]
+; CHECK-NEXT:    splice z0.h, p1, z0.h, z2.h
 ; CHECK-NEXT:    add z0.h, z0.h, z0.h
-; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    ldp q18, q2, [x0, #192]
+; CHECK-NEXT:    uzp1 z17.s, z17.s, z17.s
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z17.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    uzp1 z18.s, z18.s, z18.s
+; CHECK-NEXT:    ldp q4, q5, [x0, #64]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z18.s, p0, z18.s, z2.s
+; CHECK-NEXT:    uzp1 z2.h, z18.h, z18.h
+; CHECK-NEXT:    splice z2.h, p1, z2.h, z1.h
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    ldp q6, q7, [x0, #96]
+; CHECK-NEXT:    uzp1 z5.s, z5.s, z5.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z5.s
+; CHECK-NEXT:    uzp1 z4.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z6.s, z6.s, z6.s
+; CHECK-NEXT:    ldp q3, q16, [x0]
+; CHECK-NEXT:    uzp1 z1.s, z7.s, z7.s
+; CHECK-NEXT:    splice z6.s, p0, z6.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z6.h, z6.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z4.h, p1, z4.h, z1.h
+; CHECK-NEXT:    add z1.h, z2.h, z2.h
+; CHECK-NEXT:    ldp q19, q20, [x0, #32]
+; CHECK-NEXT:    uzp1 z16.s, z16.s, z16.s
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z16.s
+; CHECK-NEXT:    add z1.h, z4.h, z4.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    uzp1 z18.s, z19.s, z19.s
+; CHECK-NEXT:    uzp1 z17.s, z20.s, z20.s
+; CHECK-NEXT:    splice z18.s, p0, z18.s, z17.s
+; CHECK-NEXT:    uzp1 z16.h, z18.h, z18.h
+; CHECK-NEXT:    splice z3.h, p1, z3.h, z16.h
+; CHECK-NEXT:    add z0.h, z3.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <32 x i64>, <32 x i64>* %in
   %b = trunc <32 x i64> %a to <32 x i16>
@@ -579,12 +720,14 @@ define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) vscale_range
 ; truncate i64 -> i32
 ;
 
-define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) vscale_range(2,0) #0 {
+define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %in
@@ -596,30 +739,18 @@ define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) vscale_range(2,0) #0 {
 define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    ldp q1, q0, [x0, #32]
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    ldp q2, q3, [x0]
-; CHECK-NEXT:    mov z4.d, z0.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    mov z0.d, z1.d[1]
-; CHECK-NEXT:    fmov x10, d4
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    mov z0.d, z2.d[1]
-; CHECK-NEXT:    stp w8, w10, [sp, #24]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    mov z1.d, z3.d[1]
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    stp w9, w12, [sp, #16]
-; CHECK-NEXT:    fmov x9, d2
-; CHECK-NEXT:    stp w11, w8, [sp, #8]
-; CHECK-NEXT:    stp w9, w10, [sp]
-; CHECK-NEXT:    ldp q1, q0, [sp]
-; CHECK-NEXT:    add z1.s, z1.s, z1.s
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    add z1.s, z3.s, z3.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
-; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %in
   %b = trunc <8 x i64> %a to <8 x i32>
@@ -629,15 +760,32 @@ define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 {
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) vscale_range(8,0) #0 {
+define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl16
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.s, vl16
+; CHECK-NEXT:    ldp q0, q1, [x0, #64]
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #96]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    add z0.s, z0.s, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q4, q5, [x0]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    ldp q6, q7, [x0, #32]
+; CHECK-NEXT:    uzp1 z1.s, z5.s, z5.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z1.s
+; CHECK-NEXT:    uzp1 z3.s, z6.s, z6.s
+; CHECK-NEXT:    uzp1 z1.s, z7.s, z7.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z1.s
+; CHECK-NEXT:    add z1.s, z2.s, z2.s
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z0.s, z4.s, z4.s
+; CHECK-NEXT:    add z1.s, z3.s, z3.s
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <16 x i64>, <16 x i64>* %in
   %b = trunc <16 x i64> %a to <16 x i32>
@@ -647,15 +795,54 @@ define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) vscale_range
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) vscale_range(16,0) #0 {
+define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl32
-; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
-; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    ldp q0, q1, [x0, #192]
+; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q2, q3, [x0, #224]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    add z0.s, z0.s, z0.s
-; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    ldp q6, q7, [x0, #128]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z2.s, p0, z2.s, z3.s
+; CHECK-NEXT:    add z2.s, z2.s, z2.s
+; CHECK-NEXT:    uzp1 z6.s, z6.s, z6.s
+; CHECK-NEXT:    ldp q1, q3, [x0, #160]
+; CHECK-NEXT:    uzp1 z7.s, z7.s, z7.s
+; CHECK-NEXT:    splice z6.s, p0, z6.s, z7.s
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    ldp q16, q17, [x0, #64]
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    splice z1.s, p0, z1.s, z3.s
+; CHECK-NEXT:    add z1.s, z1.s, z1.s
+; CHECK-NEXT:    uzp1 z16.s, z16.s, z16.s
+; CHECK-NEXT:    ldp q7, q18, [x0, #96]
+; CHECK-NEXT:    uzp1 z17.s, z17.s, z17.s
+; CHECK-NEXT:    splice z16.s, p0, z16.s, z17.s
+; CHECK-NEXT:    uzp1 z7.s, z7.s, z7.s
+; CHECK-NEXT:    ldp q4, q5, [x0, #32]
+; CHECK-NEXT:    uzp1 z3.s, z18.s, z18.s
+; CHECK-NEXT:    splice z7.s, p0, z7.s, z3.s
+; CHECK-NEXT:    uzp1 z4.s, z4.s, z4.s
+; CHECK-NEXT:    ldp q19, q20, [x0]
+; CHECK-NEXT:    uzp1 z3.s, z5.s, z5.s
+; CHECK-NEXT:    stp q0, q2, [x1, #96]
+; CHECK-NEXT:    add z0.s, z6.s, z6.s
+; CHECK-NEXT:    splice z4.s, p0, z4.s, z3.s
+; CHECK-NEXT:    stp q0, q1, [x1, #64]
+; CHECK-NEXT:    add z0.s, z16.s, z16.s
+; CHECK-NEXT:    uzp1 z18.s, z19.s, z19.s
+; CHECK-NEXT:    add z1.s, z7.s, z7.s
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    add z1.s, z4.s, z4.s
+; CHECK-NEXT:    uzp1 z17.s, z20.s, z20.s
+; CHECK-NEXT:    splice z18.s, p0, z18.s, z17.s
+; CHECK-NEXT:    add z0.s, z18.s, z18.s
+; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
   %a = load <32 x i64>, <32 x i64>* %in
   %b = trunc <32 x i64> %a to <32 x i32>


        


More information about the llvm-commits mailing list