[llvm] 41a6bb4 - [LLVM][CodeGen][SVE] Prefer NEON instructions when zeroing Z registers. (#133929)

via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 3 05:15:09 PDT 2025


Author: Paul Walker
Date: 2025-04-03T13:15:05+01:00
New Revision: 41a6bb4c055cf08110676d9bc942f369fb19450d

URL: https://github.com/llvm/llvm-project/commit/41a6bb4c055cf08110676d9bc942f369fb19450d
DIFF: https://github.com/llvm/llvm-project/commit/41a6bb4c055cf08110676d9bc942f369fb19450d.diff

LOG: [LLVM][CodeGen][SVE] Prefer NEON instructions when zeroing Z registers. (#133929)

Several implementations have zero-latency instructions to zero
registers. To-date no implementation has a dedicated SVE instruction but
we can use the NEON equivalent because it is defined to zero bits
128..VL regardless of the immediate used.

NOTE: The relevant instruction is not available in streaming mode, where
the original SVE DUP instruction remains in use.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
    llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
    llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
    llvm/test/CodeGen/AArch64/load-insert-zero.ll
    llvm/test/CodeGen/AArch64/sinksplat.ll
    llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
    llvm/test/CodeGen/AArch64/sve-fcmp.ll
    llvm/test/CodeGen/AArch64/sve-fcvt.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
    llvm/test/CodeGen/AArch64/sve-fp-combine.ll
    llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
    llvm/test/CodeGen/AArch64/sve-int-log.ll
    llvm/test/CodeGen/AArch64/sve-int-reduce.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
    llvm/test/CodeGen/AArch64/sve-knownbits.ll
    llvm/test/CodeGen/AArch64/sve-ld1r.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
    llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
    llvm/test/CodeGen/AArch64/sve-pr92779.ll
    llvm/test/CodeGen/AArch64/sve-split-fcvt.ll
    llvm/test/CodeGen/AArch64/sve-vector-splat.ll
    llvm/test/CodeGen/AArch64/sve-vselect-imm.ll
    llvm/test/CodeGen/AArch64/sve-zeroinit.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-abs-neg.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-counts-not.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-ext.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-fcvt-bfcvt.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-fcvtlt-fcvtx.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-fcvtzsu.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-flogb.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-frint-frecpx-fsqrt.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-rev.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-urecpe-ursqrte-sqabs-sqneg.ll
    llvm/test/CodeGen/AArch64/zeroing-forms-uscvtf.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index f291589e04c6b..a3b1ae55df028 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -7731,6 +7731,7 @@ def MOVIv2d_ns   : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                                 "movi", ".2d",
                    [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
 
+let Predicates = [HasNEON] in {
 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
@@ -7740,6 +7741,23 @@ def : Pat<(v4f32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v8f16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 def : Pat<(v8bf16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
 
+// Prefer NEON instructions when zeroing ZPRs because they are potentially zero-latency.
+let AddedComplexity = 5 in {
+def : Pat<(nxv2i64 (splat_vector (i64 0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv4i32 (splat_vector (i32 0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv8i16 (splat_vector (i32 0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv16i8 (splat_vector (i32 0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv2f64 (splat_vector (f64 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv2f32 (splat_vector (f32 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv4f32 (splat_vector (f32 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv2f16 (splat_vector (f16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv4f16 (splat_vector (f16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv8f16 (splat_vector (f16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv2bf16 (splat_vector (bf16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv4bf16 (splat_vector (bf16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+def : Pat<(nxv8bf16 (splat_vector (bf16 fpimm0))), (SUBREG_TO_REG (i32 0), (MOVIv2d_ns (i32 0)), zsub)>;
+}
+
 def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
 def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
 def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
@@ -7760,6 +7778,7 @@ def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
 def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
 def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
 def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
+}
 
 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
 let isReMaterializable = 1, isAsCheapAsAMove = 1 in

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
index 98f5b4c19a9b9..533e831de0df8 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
@@ -50,10 +50,10 @@ entry:
 define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_add_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
@@ -101,10 +101,10 @@ entry:
 define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_sub_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
@@ -152,10 +152,10 @@ entry:
 define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_conj_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z3.d, z1.d, #0
@@ -204,7 +204,7 @@ define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale
 ; CHECK-LABEL: mul_add_rot_mull:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uzp2 z24.d, z4.d, z5.d
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    uzp1 z4.d, z4.d, z5.d
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z26.d, z24.d

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
index 2fc91125bc0ac..1eed9722f57be 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
@@ -41,8 +41,8 @@ entry:
 define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_add_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z6.d, z4.d, #0
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
@@ -90,8 +90,8 @@ entry:
 define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_sub_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z6.d, z4.d, #270
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #270
@@ -139,8 +139,8 @@ entry:
 define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
 ; CHECK-LABEL: mul_conj_mull:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z0.d, z2.d, #0
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z1.d, z3.d, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
index 80934d2cb98c2..a7442cae84c2d 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-mul-scalable.ll
@@ -46,7 +46,7 @@ entry:
 define <vscale x 8 x half> @complex_mul_v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: complex_mul_v8f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fcmla z2.h, p0/m, z1.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z2.h, p0/m, z1.h, z0.h, #90
@@ -72,8 +72,8 @@ entry:
 define <vscale x 16 x half> @complex_mul_v16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
 ; CHECK-LABEL: complex_mul_v16f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.h, #0 // =0x0
-; CHECK-NEXT:    mov z5.h, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fcmla z5.h, p0/m, z2.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z4.h, p0/m, z3.h, z1.h, #0
@@ -103,10 +103,10 @@ entry:
 define <vscale x 32 x half> @complex_mul_v32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
 ; CHECK-LABEL: complex_mul_v32f16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.h, #0 // =0x0
-; CHECK-NEXT:    mov z25.h, #0 // =0x0
-; CHECK-NEXT:    mov z26.h, #0 // =0x0
-; CHECK-NEXT:    mov z27.h, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    fcmla z24.h, p0/m, z4.h, z0.h, #0
 ; CHECK-NEXT:    fcmla z25.h, p0/m, z5.h, z1.h, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
index 874b5b538f1fd..3cad74b7f5fc6 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f32-mul-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 4 x float> @complex_mul_v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: complex_mul_v4f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcmla z2.s, p0/m, z1.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z2.s, p0/m, z1.s, z0.s, #90
@@ -34,8 +34,8 @@ entry:
 define <vscale x 8 x float> @complex_mul_v8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
 ; CHECK-LABEL: complex_mul_v8f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
-; CHECK-NEXT:    mov z5.s, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcmla z5.s, p0/m, z2.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z4.s, p0/m, z3.s, z1.s, #0
@@ -65,10 +65,10 @@ entry:
 define <vscale x 16 x float> @complex_mul_v16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
 ; CHECK-LABEL: complex_mul_v16f32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.s, #0 // =0x0
-; CHECK-NEXT:    mov z25.s, #0 // =0x0
-; CHECK-NEXT:    mov z26.s, #0 // =0x0
-; CHECK-NEXT:    mov z27.s, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcmla z24.s, p0/m, z4.s, z0.s, #0
 ; CHECK-NEXT:    fcmla z25.s, p0/m, z5.s, z1.s, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
index c9a092f52f159..e3d99fa457bbc 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f64-mul-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 2 x double> @complex_mul_v2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: complex_mul_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z1.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z1.d, z0.d, #90
@@ -34,8 +34,8 @@ entry:
 define <vscale x 4 x double> @complex_mul_v4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
 ; CHECK-LABEL: complex_mul_v4f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.d, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z5.d, p0/m, z2.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z3.d, z1.d, #0
@@ -65,10 +65,10 @@ entry:
 define <vscale x 8 x double> @complex_mul_v8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
 ; CHECK-LABEL: complex_mul_v8f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fcmla z24.d, p0/m, z4.d, z0.d, #0
 ; CHECK-NEXT:    fcmla z25.d, p0/m, z5.d, z1.d, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
index 58a0809ee093f..061fd07489284 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
@@ -46,7 +46,7 @@ entry:
 define <vscale x 8 x i16> @complex_mul_v8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: complex_mul_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z2.h, z1.h, z0.h, #0
 ; CHECK-NEXT:    cmla z2.h, z1.h, z0.h, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -71,8 +71,8 @@ entry:
 define <vscale x 16 x i16> @complex_mul_v16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
 ; CHECK-LABEL: complex_mul_v16i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.h, #0 // =0x0
-; CHECK-NEXT:    mov z5.h, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z5.h, z2.h, z0.h, #0
 ; CHECK-NEXT:    cmla z4.h, z3.h, z1.h, #0
 ; CHECK-NEXT:    cmla z5.h, z2.h, z0.h, #90
@@ -101,10 +101,10 @@ entry:
 define <vscale x 32 x i16> @complex_mul_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
 ; CHECK-LABEL: complex_mul_v32i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.h, #0 // =0x0
-; CHECK-NEXT:    mov z25.h, #0 // =0x0
-; CHECK-NEXT:    mov z26.h, #0 // =0x0
-; CHECK-NEXT:    mov z27.h, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z24.h, z4.h, z0.h, #0
 ; CHECK-NEXT:    cmla z25.h, z5.h, z1.h, #0
 ; CHECK-NEXT:    cmla z27.h, z6.h, z2.h, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
index 0958c60ed7cb0..52caa3279b927 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i32-mul-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 4 x i32> @complex_mul_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: complex_mul_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z2.s, z1.s, z0.s, #0
 ; CHECK-NEXT:    cmla z2.s, z1.s, z0.s, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -33,8 +33,8 @@ entry:
 define <vscale x 8 x i32> @complex_mul_v8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
 ; CHECK-LABEL: complex_mul_v8i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
-; CHECK-NEXT:    mov z5.s, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z5.s, z2.s, z0.s, #0
 ; CHECK-NEXT:    cmla z4.s, z3.s, z1.s, #0
 ; CHECK-NEXT:    cmla z5.s, z2.s, z0.s, #90
@@ -63,10 +63,10 @@ entry:
 define <vscale x 16 x i32> @complex_mul_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
 ; CHECK-LABEL: complex_mul_v16i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.s, #0 // =0x0
-; CHECK-NEXT:    mov z25.s, #0 // =0x0
-; CHECK-NEXT:    mov z26.s, #0 // =0x0
-; CHECK-NEXT:    mov z27.s, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z24.s, z4.s, z0.s, #0
 ; CHECK-NEXT:    cmla z25.s, z5.s, z1.s, #0
 ; CHECK-NEXT:    cmla z27.s, z6.s, z2.s, #0

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
index 30c06838c81bc..bdc21e7828277 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i64-mul-scalable.ll
@@ -7,7 +7,7 @@ target triple = "aarch64"
 define <vscale x 2 x i64> @complex_mul_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: complex_mul_v2i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z2.d, z1.d, z0.d, #0
 ; CHECK-NEXT:    cmla z2.d, z1.d, z0.d, #90
 ; CHECK-NEXT:    mov z0.d, z2.d
@@ -33,8 +33,8 @@ entry:
 define <vscale x 4 x i64> @complex_mul_v4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
 ; CHECK-LABEL: complex_mul_v4i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.d, #0 // =0x0
-; CHECK-NEXT:    mov z5.d, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z5.d, z2.d, z0.d, #0
 ; CHECK-NEXT:    cmla z4.d, z3.d, z1.d, #0
 ; CHECK-NEXT:    cmla z5.d, z2.d, z0.d, #90
@@ -63,10 +63,10 @@ entry:
 define <vscale x 8 x i64> @complex_mul_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
 ; CHECK-LABEL: complex_mul_v8i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z24.d, z4.d, z0.d, #0
 ; CHECK-NEXT:    cmla z25.d, z5.d, z1.d, #0
 ; CHECK-NEXT:    cmla z27.d, z6.d, z2.d, #0
@@ -101,10 +101,10 @@ entry:
 define <vscale x 8 x i64> @complex_minus_mul_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
 ; CHECK-LABEL: complex_minus_mul_v8i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z24.d, #0 // =0x0
-; CHECK-NEXT:    mov z25.d, #0 // =0x0
-; CHECK-NEXT:    mov z26.d, #0 // =0x0
-; CHECK-NEXT:    mov z27.d, #0 // =0x0
+; CHECK-NEXT:    movi v24.2d, #0000000000000000
+; CHECK-NEXT:    movi v25.2d, #0000000000000000
+; CHECK-NEXT:    movi v26.2d, #0000000000000000
+; CHECK-NEXT:    movi v27.2d, #0000000000000000
 ; CHECK-NEXT:    cmla z24.d, z4.d, z0.d, #270
 ; CHECK-NEXT:    cmla z25.d, z5.d, z1.d, #270
 ; CHECK-NEXT:    cmla z27.d, z6.d, z2.d, #270

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index dcc11609ca231..880bd2904154c 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -14,7 +14,7 @@ target triple = "aarch64"
 define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: complex_mul_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #100 // =0x64
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    whilelo p1.d, xzr, x8
@@ -111,7 +111,7 @@ exit.block:                                     ; preds = %vector.body
 define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) {
 ; CHECK-LABEL: complex_mul_predicated_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    mov w11, #100 // =0x64
 ; CHECK-NEXT:    neg x10, x9
@@ -213,7 +213,7 @@ exit.block:                                     ; preds = %vector.body
 define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) {
 ; CHECK-LABEL: complex_mul_predicated_x2_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #100 // =0x64
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    whilelo p1.d, xzr, x8

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 89f790210e193..29be231920305 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -14,7 +14,7 @@ target triple = "aarch64"
 define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: complex_mul_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    mov w10, #100 // =0x64
 ; CHECK-NEXT:    neg x9, x8
@@ -98,20 +98,20 @@ exit.block:                                     ; preds = %vector.body
 define %"class.std::complex" @complex_mul_nonzero_init_v2f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: complex_mul_nonzero_init_v2f64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov d0, #1.00000000
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    fmov d1, #1.00000000
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    fmov d2, #2.00000000
 ; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    neg x9, x8
 ; CHECK-NEXT:    mov w10, #100 // =0x64
-; CHECK-NEXT:    sel z3.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    and x9, x9, x10
 ; CHECK-NEXT:    rdvl x10, #2
-; CHECK-NEXT:    mov z1.d, p0/m, z2.d
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z0.d
+; CHECK-NEXT:    sel z2.d, p0, z2.d, z0.d
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    zip2 z0.d, z1.d, z3.d
-; CHECK-NEXT:    zip1 z1.d, z1.d, z3.d
+; CHECK-NEXT:    zip2 z0.d, z2.d, z1.d
+; CHECK-NEXT:    zip1 z1.d, z2.d, z1.d
 ; CHECK-NEXT:  .LBB1_1: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldr z2, [x0, #1, mul vl]
@@ -183,7 +183,7 @@ exit.block:                                     ; preds = %vector.body
 define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
 ; CHECK-LABEL: complex_mul_v2f64_unrolled:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cntw x8
 ; CHECK-NEXT:    mov w10, #1000 // =0x3e8
 ; CHECK-NEXT:    neg x9, x8
@@ -309,7 +309,7 @@ exit.block:                                     ; preds = %vector.body
 define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalias nocapture noundef readnone %c, [2 x double] %d.coerce, ptr nocapture noundef readonly %s, ptr nocapture noundef writeonly %outs) local_unnamed_addr #0 {
 ; CHECK-LABEL: reduction_mix:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    mov w11, #100 // =0x64
 ; CHECK-NEXT:    neg x10, x9

diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
index 407da6cd6002b..6615313613153 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-splat-scalable.ll
@@ -8,19 +8,19 @@ target triple = "aarch64"
 define <vscale x 4 x double> @complex_mul_const(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
 ; CHECK-LABEL: complex_mul_const:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z5.d, #0 // =0x0
-; CHECK-NEXT:    mov z4.d, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-NEXT:    movi v5.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    fmov z6.d, #3.00000000
 ; CHECK-NEXT:    fmov z7.d, #11.00000000
-; CHECK-NEXT:    fcmla z5.d, p0/m, z1.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z0.d, z2.d, #0
-; CHECK-NEXT:    fcmla z5.d, p0/m, z1.d, z3.d, #90
+; CHECK-NEXT:    fcmla z5.d, p0/m, z1.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z4.d, p0/m, z0.d, z2.d, #90
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    fcmla z5.d, p0/m, z1.d, z3.d, #90
 ; CHECK-NEXT:    zip2 z1.d, z7.d, z6.d
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    zip1 z3.d, z7.d, z6.d
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z5.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z0.d, p0/m, z4.d, z3.d, #0
 ; CHECK-NEXT:    fcmla z2.d, p0/m, z5.d, z1.d, #90
@@ -55,25 +55,26 @@ entry:
 define <vscale x 4 x double> @complex_mul_non_const(<vscale x 4 x double> %a, <vscale x 4 x double> %b, [2 x double] %c) {
 ; CHECK-LABEL: complex_mul_non_const:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z6.d, #0 // =0x0
-; CHECK-NEXT:    mov z7.d, #0 // =0x0
+; CHECK-NEXT:    movi v7.2d, #0000000000000000
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    // kill: def $d5 killed $d5 def $z5
 ; CHECK-NEXT:    // kill: def $d4 killed $d4 def $z4
-; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    movi v6.2d, #0000000000000000
 ; CHECK-NEXT:    mov z5.d, d5
-; CHECK-NEXT:    mov z4.d, d4
-; CHECK-NEXT:    fcmla z6.d, p0/m, z0.d, z2.d, #0
+; CHECK-NEXT:    mov z24.d, d4
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z1.d, z3.d, #0
-; CHECK-NEXT:    zip2 z24.d, z4.d, z5.d
-; CHECK-NEXT:    fcmla z6.d, p0/m, z0.d, z2.d, #90
+; CHECK-NEXT:    fcmla z6.d, p0/m, z0.d, z2.d, #0
 ; CHECK-NEXT:    fcmla z7.d, p0/m, z1.d, z3.d, #90
-; CHECK-NEXT:    zip1 z2.d, z4.d, z5.d
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    zip2 z1.d, z24.d, z5.d
+; CHECK-NEXT:    fcmla z6.d, p0/m, z0.d, z2.d, #90
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    zip1 z2.d, z24.d, z5.d
+; CHECK-NEXT:    fcmla z4.d, p0/m, z7.d, z1.d, #0
 ; CHECK-NEXT:    fcmla z0.d, p0/m, z6.d, z2.d, #0
-; CHECK-NEXT:    fcmla z1.d, p0/m, z7.d, z24.d, #0
+; CHECK-NEXT:    fcmla z4.d, p0/m, z7.d, z1.d, #90
 ; CHECK-NEXT:    fcmla z0.d, p0/m, z6.d, z2.d, #90
-; CHECK-NEXT:    fcmla z1.d, p0/m, z7.d, z24.d, #90
+; CHECK-NEXT:    mov z1.d, z4.d
 ; CHECK-NEXT:    ret
 entry:
   %c.coerce.fca.0.extract = extractvalue [2 x double] %c, 0

diff --git a/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll b/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
index 4cb1d5b2fb345..53126a08db86f 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
@@ -10,37 +10,22 @@ define fastcc i8 @allocno_reload_assign(ptr %p) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d0, xzr
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov z16.d, #0 // =0x0
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
 ; CHECK-NEXT:    uzp1 p0.s, p0.s, p0.s
 ; CHECK-NEXT:    uzp1 p0.h, p0.h, p0.h
 ; CHECK-NEXT:    uzp1 p8.b, p0.b, p0.b
 ; CHECK-NEXT:    mov z0.b, p8/z, #1 // =0x1
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    uunpklo z1.h, z0.b
-; CHECK-NEXT:    uunpkhi z0.h, z0.b
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mvn w8, w8
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    whilelo p0.b, xzr, x8
-; CHECK-NEXT:    uunpklo z2.s, z1.h
-; CHECK-NEXT:    uunpkhi z3.s, z1.h
-; CHECK-NEXT:    uunpklo z5.s, z0.h
-; CHECK-NEXT:    uunpkhi z7.s, z0.h
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    punpklo p2.h, p1.b
 ; CHECK-NEXT:    punpkhi p4.h, p1.b
-; CHECK-NEXT:    uunpklo z0.d, z2.s
-; CHECK-NEXT:    uunpkhi z1.d, z2.s
 ; CHECK-NEXT:    punpklo p6.h, p0.b
-; CHECK-NEXT:    uunpklo z2.d, z3.s
-; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
-; CHECK-NEXT:    uunpklo z4.d, z5.s
-; CHECK-NEXT:    uunpkhi z5.d, z5.s
-; CHECK-NEXT:    uunpklo z6.d, z7.s
-; CHECK-NEXT:    uunpkhi z7.d, z7.s
 ; CHECK-NEXT:    punpklo p1.h, p2.b
 ; CHECK-NEXT:    punpkhi p2.h, p2.b
 ; CHECK-NEXT:    punpklo p3.h, p4.b
@@ -50,14 +35,28 @@ define fastcc i8 @allocno_reload_assign(ptr %p) {
 ; CHECK-NEXT:    punpklo p7.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:  .LBB0_1: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    st1b { z0.d }, p1, [z16.d]
-; CHECK-NEXT:    st1b { z1.d }, p2, [z16.d]
-; CHECK-NEXT:    st1b { z2.d }, p3, [z16.d]
-; CHECK-NEXT:    st1b { z3.d }, p4, [z16.d]
-; CHECK-NEXT:    st1b { z4.d }, p5, [z16.d]
-; CHECK-NEXT:    st1b { z5.d }, p6, [z16.d]
-; CHECK-NEXT:    st1b { z6.d }, p7, [z16.d]
-; CHECK-NEXT:    st1b { z7.d }, p0, [z16.d]
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpkhi z1.s, z1.h
+; CHECK-NEXT:    uunpklo z3.d, z2.s
+; CHECK-NEXT:    uunpkhi z2.d, z2.s
+; CHECK-NEXT:    st1b { z3.d }, p1, [z0.d]
+; CHECK-NEXT:    st1b { z2.d }, p2, [z0.d]
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    st1b { z2.d }, p3, [z0.d]
+; CHECK-NEXT:    uunpkhi z2.h, z0.b
+; CHECK-NEXT:    uunpklo z3.s, z2.h
+; CHECK-NEXT:    uunpkhi z2.s, z2.h
+; CHECK-NEXT:    st1b { z1.d }, p4, [z0.d]
+; CHECK-NEXT:    uunpklo z1.d, z3.s
+; CHECK-NEXT:    st1b { z1.d }, p5, [z0.d]
+; CHECK-NEXT:    uunpkhi z1.d, z3.s
+; CHECK-NEXT:    st1b { z1.d }, p6, [z0.d]
+; CHECK-NEXT:    uunpklo z1.d, z2.s
+; CHECK-NEXT:    st1b { z1.d }, p7, [z0.d]
+; CHECK-NEXT:    uunpkhi z1.d, z2.s
+; CHECK-NEXT:    st1b { z1.d }, p0, [z0.d]
 ; CHECK-NEXT:    str p8, [x0]
 ; CHECK-NEXT:    b .LBB0_1
   br label %1

diff  --git a/llvm/test/CodeGen/AArch64/load-insert-zero.ll b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
index d66944e646dab..8b4cc7bcc0311 100644
--- a/llvm/test/CodeGen/AArch64/load-insert-zero.ll
+++ b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
@@ -921,7 +921,7 @@ define void @predictor_4x4_neon_new(ptr nocapture noundef writeonly %0, i64 noun
 define <vscale x 8 x i8> @loadnxv8i8(ptr %p) {
 ; CHECK-LABEL: loadnxv8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrb w8, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
@@ -944,7 +944,7 @@ define <vscale x 16 x i8> @loadnxv16i8(ptr %p) {
 define <vscale x 4 x i16> @loadnxv4i16(ptr %p) {
 ; CHECK-LABEL: loadnxv4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ptrue p0.s, vl1
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
@@ -967,7 +967,7 @@ define <vscale x 8 x i16> @loadnxv8i16(ptr %p) {
 define <vscale x 2 x i32> @loadnxv2i32(ptr %p) {
 ; CHECK-LABEL: loadnxv2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
@@ -1006,7 +1006,7 @@ define <vscale x 4 x half> @loadnxv4f16(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldr h1, [x0]
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NEXT:    ret
@@ -1033,7 +1033,7 @@ define <vscale x 4 x bfloat> @loadnxv4bf16(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldr h1, [x0]
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NEXT:    ret
@@ -1060,7 +1060,7 @@ define <vscale x 2 x float> @loadnxv2f32(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldr s1, [x0]
 ; CHECK-NEXT:    mov z0.s, p0/m, s1
 ; CHECK-NEXT:    ret
@@ -1095,7 +1095,7 @@ define <vscale x 2 x double> @loadnxv2f64(ptr %p) {
 define <vscale x 8 x i8> @loadnxv8i8_offset(ptr %p) {
 ; CHECK-LABEL: loadnxv8i8_offset:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
 ; CHECK-NEXT:    ptrue p0.h, vl1
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
@@ -1120,7 +1120,7 @@ define <vscale x 16 x i8> @loadnxv16i8_offset(ptr %p) {
 define <vscale x 4 x i16> @loadnxv4i16_offset(ptr %p) {
 ; CHECK-LABEL: loadnxv4i16_offset:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldurh w8, [x0, #1]
 ; CHECK-NEXT:    ptrue p0.s, vl1
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
@@ -1145,7 +1145,7 @@ define <vscale x 8 x i16> @loadnxv8i16_offset(ptr %p) {
 define <vscale x 2 x i32> @loadnxv2i32_offset(ptr %p) {
 ; CHECK-LABEL: loadnxv2i32_offset:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldur w8, [x0, #1]
 ; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
@@ -1187,7 +1187,7 @@ define <vscale x 4 x half> @loadnxv4f16_offset(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldur h1, [x0, #1]
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NEXT:    ret
@@ -1216,7 +1216,7 @@ define <vscale x 4 x bfloat> @loadnxv4bf16_offset(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldur h1, [x0, #1]
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NEXT:    ret
@@ -1245,7 +1245,7 @@ define <vscale x 2 x float> @loadnxv2f32_offset(ptr %p) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ldur s1, [x0, #1]
 ; CHECK-NEXT:    mov z0.s, p0/m, s1
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sinksplat.ll b/llvm/test/CodeGen/AArch64/sinksplat.ll
index e329548f84d24..5743dc7cce580 100644
--- a/llvm/test/CodeGen/AArch64/sinksplat.ll
+++ b/llvm/test/CodeGen/AArch64/sinksplat.ll
@@ -510,7 +510,7 @@ define <vscale x 4 x float> @fmul_scalable(ptr %x, ptr %y) "target-features"="+s
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    sxtw x8, w8
 ; CHECK-NEXT:    mov w9, #1 // =0x1
 ; CHECK-NEXT:    ld1rw { z1.s }, p0/z, [x0]

diff  --git a/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll b/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
index d6484c2483f49..bdfe90c8a6bb7 100644
--- a/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bf16-int-converts.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve,+bf16            < %s | FileCheck %s
-; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sve,+bf16            < %s | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s --check-prefixes=CHECK,STREAMING-SVE
 
 target triple = "aarch64-unknown-linux-gnu"
 
@@ -430,11 +430,17 @@ define <vscale x 8 x i64> @fptoui_nxv8bf16_to_nxv8i64(<vscale x 8 x bfloat> %a)
 
 ; NOTE: f16(-1.875) == bf16(-1.0)
 define <vscale x 2 x bfloat> @sitofp_nxv2i1_to_nxv2bf16(<vscale x 2 x i1> %a) {
-; CHECK-LABEL: sitofp_nxv2i1_to_nxv2bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #-1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: sitofp_nxv2i1_to_nxv2bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: sitofp_nxv2i1_to_nxv2bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = sitofp <vscale x 2 x i1> %a to <vscale x 2 x bfloat>
   ret <vscale x 2 x bfloat> %res
 }
@@ -486,11 +492,17 @@ define <vscale x 2 x bfloat> @sitofp_nxv2i64_to_nxv2bf16(<vscale x 2 x i64> %a)
 }
 
 define <vscale x 4 x bfloat> @sitofp_nxv4i1_to_nxv4bf16(<vscale x 4 x i1> %a) {
-; CHECK-LABEL: sitofp_nxv4i1_to_nxv4bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #-1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: sitofp_nxv4i1_to_nxv4bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: sitofp_nxv4i1_to_nxv4bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = sitofp <vscale x 4 x i1> %a to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %res
 }
@@ -545,11 +557,17 @@ define <vscale x 4 x bfloat> @sitofp_nxv4i64_to_nxv4bf16(<vscale x 4 x i64> %a)
 }
 
 define <vscale x 8 x bfloat> @sitofp_nxv8i1_to_nxv8bf16(<vscale x 8 x i1> %a) {
-; CHECK-LABEL: sitofp_nxv8i1_to_nxv8bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #-1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: sitofp_nxv8i1_to_nxv8bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: sitofp_nxv8i1_to_nxv8bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #-1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = sitofp <vscale x 8 x i1> %a to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %res
 }
@@ -624,11 +642,17 @@ define <vscale x 8 x bfloat> @sitofp_nxv8i64_to_nxv8bf16(<vscale x 8 x i64> %a)
 
 ; NOTE: f16(1.875) == bf16(1.0)
 define <vscale x 2 x bfloat> @uitofp_nxv2i1_to_nxv2bf16(<vscale x 2 x i1> %a) {
-; CHECK-LABEL: uitofp_nxv2i1_to_nxv2bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: uitofp_nxv2i1_to_nxv2bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: uitofp_nxv2i1_to_nxv2bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = uitofp <vscale x 2 x i1> %a to <vscale x 2 x bfloat>
   ret <vscale x 2 x bfloat> %res
 }
@@ -680,11 +704,17 @@ define <vscale x 2 x bfloat> @uitofp_nxv2i64_to_nxv2bf16(<vscale x 2 x i64> %a)
 }
 
 define <vscale x 4 x bfloat> @uitofp_nxv4i1_to_nxv4bf16(<vscale x 4 x i1> %a) {
-; CHECK-LABEL: uitofp_nxv4i1_to_nxv4bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: uitofp_nxv4i1_to_nxv4bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: uitofp_nxv4i1_to_nxv4bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = uitofp <vscale x 4 x i1> %a to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %res
 }
@@ -739,11 +769,17 @@ define <vscale x 4 x bfloat> @uitofp_nxv4i64_to_nxv4bf16(<vscale x 4 x i64> %a)
 }
 
 define <vscale x 8 x bfloat> @uitofp_nxv8i1_to_nxv8bf16(<vscale x 8 x i1> %a) {
-; CHECK-LABEL: uitofp_nxv8i1_to_nxv8bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fmov z0.h, p0/m, #1.87500000
-; CHECK-NEXT:    ret
+; SVE-LABEL: uitofp_nxv8i1_to_nxv8bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: uitofp_nxv8i1_to_nxv8bf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fmov z0.h, p0/m, #1.87500000
+; STREAMING-SVE-NEXT:    ret
   %res = uitofp <vscale x 8 x i1> %a to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-fcmp.ll b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
index fc5e640aed4ae..607cc92eb4505 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
@@ -374,7 +374,7 @@ define <vscale x 4 x i1> @one_zero(<vscale x 4 x float> %x) {
 define <vscale x 4 x i1> @ueq_zero(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: ueq_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.s, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    fcmuo p1.s, p0/z, z0.s, z1.s
 ; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, #0.0

diff  --git a/llvm/test/CodeGen/AArch64/sve-fcvt.ll b/llvm/test/CodeGen/AArch64/sve-fcvt.ll
index 8b8ddb624a040..743623b86f1b0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcvt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcvt.ll
@@ -682,7 +682,7 @@ define <vscale x 2 x i64> @fcvtzu_d_nxv2f64(<vscale x 2 x double> %a) {
 define <vscale x 2 x half> @scvtf_h_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: scvtf_h_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 2 x i1> %a to <vscale x 2 x half>
@@ -722,7 +722,7 @@ define <vscale x 2 x half> @scvtf_h_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 3 x half> @scvtf_h_nxv3i1(<vscale x 3 x i1> %a) {
 ; CHECK-LABEL: scvtf_h_nxv3i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 3 x i1> %a to <vscale x 3 x half>
@@ -742,7 +742,7 @@ define <vscale x 3 x half> @scvtf_h_nxv3i16(<vscale x 3 x i16> %a) {
 define <vscale x 4 x half> @scvtf_h_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: scvtf_h_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 4 x i1> %a to <vscale x 4 x half>
@@ -772,7 +772,7 @@ define <vscale x 4 x half> @scvtf_h_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 7 x half> @scvtf_h_nxv7i1(<vscale x 7 x i1> %a) {
 ; CHECK-LABEL: scvtf_h_nxv7i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 7 x i1> %a to <vscale x 7 x half>
@@ -792,7 +792,7 @@ define <vscale x 7 x half> @scvtf_h_nxv7i16(<vscale x 7 x i16> %a) {
 define <vscale x 8 x half> @scvtf_h_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: scvtf_h_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 8 x i1> %a to <vscale x 8 x half>
@@ -812,7 +812,7 @@ define <vscale x 8 x half> @scvtf_h_nxv8i16(<vscale x 8 x i16> %a) {
 define <vscale x 2 x float> @scvtf_s_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: scvtf_s_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.s, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 2 x i1> %a to <vscale x 2 x float>
@@ -842,7 +842,7 @@ define <vscale x 2 x float> @scvtf_s_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 3 x float> @scvtf_s_nxv3i1(<vscale x 3 x i1> %a) {
 ; CHECK-LABEL: scvtf_s_nxv3i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.s, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 3 x i1> %a to <vscale x 3 x float>
@@ -862,7 +862,7 @@ define <vscale x 3 x float> @scvtf_s_nxv3i32(<vscale x 3 x i32> %a) {
 define <vscale x 4 x float> @scvtf_s_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: scvtf_s_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.s, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 4 x i1> %a to <vscale x 4 x float>
@@ -882,7 +882,7 @@ define <vscale x 4 x float> @scvtf_s_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 2 x double> @scvtf_d_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: scvtf_d_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.d, p0/m, #-1.00000000
 ; CHECK-NEXT:    ret
   %res = sitofp <vscale x 2 x i1> %a to <vscale x 2 x double>
@@ -914,7 +914,7 @@ define <vscale x 2 x double> @scvtf_d_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 2 x half> @ucvtf_h_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ucvtf_h_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 2 x i1> %a to <vscale x 2 x half>
@@ -954,7 +954,7 @@ define <vscale x 2 x half> @ucvtf_h_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 3 x half> @ucvtf_h_nxv3i1(<vscale x 3 x i1> %a) {
 ; CHECK-LABEL: ucvtf_h_nxv3i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 3 x i1> %a to <vscale x 3 x half>
@@ -984,7 +984,7 @@ define <vscale x 3 x half> @ucvtf_h_nxv3i32(<vscale x 3 x i32> %a) {
 define <vscale x 4 x half> @ucvtf_h_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ucvtf_h_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 4 x i1> %a to <vscale x 4 x half>
@@ -1014,7 +1014,7 @@ define <vscale x 4 x half> @ucvtf_h_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 8 x half> @ucvtf_h_nxv8i1(<vscale x 8 x i1> %a) {
 ; CHECK-LABEL: ucvtf_h_nxv8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.h, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 8 x i1> %a to <vscale x 8 x half>
@@ -1034,7 +1034,7 @@ define <vscale x 8 x half> @ucvtf_h_nxv8i16(<vscale x 8 x i16> %a) {
 define <vscale x 2 x float> @ucvtf_s_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ucvtf_s_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.s, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 2 x i1> %a to <vscale x 2 x float>
@@ -1064,7 +1064,7 @@ define <vscale x 2 x float> @ucvtf_s_nxv2i64(<vscale x 2 x i64> %a) {
 define <vscale x 4 x float> @ucvtf_s_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ucvtf_s_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.s, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 4 x i1> %a to <vscale x 4 x float>
@@ -1084,7 +1084,7 @@ define <vscale x 4 x float> @ucvtf_s_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 2 x double> @ucvtf_d_nxv2i1(<vscale x 2 x i1> %a) {
 ; CHECK-LABEL: ucvtf_d_nxv2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    fmov z0.d, p0/m, #1.00000000
 ; CHECK-NEXT:    ret
   %res = uitofp <vscale x 2 x i1> %a to <vscale x 2 x double>

diff  --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index 0c47e7e14183a..b24a9513b83e3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -28,8 +28,8 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tbnz w1, #0, .LBB1_2
 ; CHECK-NEXT:  // %bb.1: // %vector.body
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    mov z1.b, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    umov w8, v0.b[8]
 ; CHECK-NEXT:    mov v1.b[1], v0.b[1]

diff  --git a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
index ddede0feca16a..53aba04028d62 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-combine.ll
@@ -956,9 +956,9 @@ define <vscale x 2 x double> @fsub_d_sel_negzero(<vscale x 2 x double> %a, <vsca
 define <vscale x 8 x half> @fadd_sel_fmul_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: fadd_sel_fmul_h:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-NEXT:    fmul z1.h, z1.h, z2.h
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
-; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
+; CHECK-NEXT:    sel z1.h, p0, z1.h, z3.h
 ; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %fmul = fmul <vscale x 8 x half> %b, %c
@@ -970,9 +970,9 @@ define <vscale x 8 x half> @fadd_sel_fmul_h(<vscale x 8 x half> %a, <vscale x 8
 define <vscale x 4 x float> @fadd_sel_fmul_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: fadd_sel_fmul_s:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-NEXT:    fmul z1.s, z1.s, z2.s
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
-; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
+; CHECK-NEXT:    sel z1.s, p0, z1.s, z3.s
 ; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %fmul = fmul <vscale x 4 x float> %b, %c
@@ -984,9 +984,9 @@ define <vscale x 4 x float> @fadd_sel_fmul_s(<vscale x 4 x float> %a, <vscale x
 define <vscale x 2 x double> @fadd_sel_fmul_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: fadd_sel_fmul_d:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-NEXT:    fmul z1.d, z1.d, z2.d
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
-; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
+; CHECK-NEXT:    sel z1.d, p0, z1.d, z3.d
 ; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %fmul = fmul <vscale x 2 x double> %b, %c

diff  --git a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
index 73bbee094827e..ebec275c92c52 100644
--- a/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
+++ b/llvm/test/CodeGen/AArch64/sve-implicit-zero-filling.ll
@@ -180,7 +180,7 @@ define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vsca
 ; CHECK-NEXT:    uminv d3, p0, z0.d
 ; CHECK-NEXT:    mov z2.d, x8
 ; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    cmpeq p0.d, p1/z, z1.d, z2.d
 ; CHECK-NEXT:    fmov x8, d3
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
@@ -196,7 +196,7 @@ define <vscale x 4 x i64> @zero_fill_type_mismatch(<vscale x 2 x i1> %pg, <vscal
 ; CHECK-LABEL: zero_fill_type_mismatch:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uminv d0, p0, z0.d
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
   %t2 = insertelement <vscale x 4 x i64> zeroinitializer, i64 %t1, i64 0
@@ -211,7 +211,7 @@ define <vscale x 2 x i64> @zero_fill_no_zero_upper_lanes(<vscale x 2 x i1> %pg,
 ; CHECK-LABEL: zero_fill_no_zero_upper_lanes:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z0.d
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    mov z1.d, p0/m, x8

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-log.ll b/llvm/test/CodeGen/AArch64/sve-int-log.ll
index c45d0f437760f..cc5e5e5ddf86c 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log.ll
@@ -40,7 +40,7 @@ define <vscale x 16 x i8> @and_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 define <vscale x 16 x i8> @and_b_zero(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: and_b_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   %res = and <vscale x 16 x i8> %a, zeroinitializer
   ret <vscale x 16 x i8> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
index 6ec18477fe1a0..be936f0fd6d4a 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-reduce.ll
@@ -411,12 +411,12 @@ declare i8 @llvm.vector.reduce.add.nxv12i8(<vscale x 12 x i8>)
 define i8 @uaddv_nxv12i8(<vscale x 12 x i8> %a) {
 ; CHECK-LABEL: uaddv_nxv12i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z1.h, z0.b
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    uunpkhi z2.h, z0.b
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uzp1 z1.h, z2.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
 ; CHECK-NEXT:    uaddv d0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0
@@ -430,15 +430,15 @@ declare i8 @llvm.vector.reduce.umax.nxv14i8(<vscale x 14 x i8>)
 define i8 @umax_nxv14i8(<vscale x 14 x i8> %a) {
 ; CHECK-LABEL: umax_nxv14i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z1.h, z0.b
-; CHECK-NEXT:    mov z3.d, #0 // =0x0
+; CHECK-NEXT:    uunpkhi z2.h, z0.b
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    uunpkhi z2.s, z1.h
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    uunpklo z2.d, z2.s
-; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT:    uunpkhi z3.s, z2.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uzp1 z1.s, z3.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z2.h, z1.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
 ; CHECK-NEXT:    umaxv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
index 73a2292b183ba..36761a344018e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -1352,7 +1352,7 @@ define <vscale x 16 x i8> @asr_i8_all_active(<vscale x 16 x i8> %a) {
 define <vscale x 16 x i8> @asr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
 ; CHECK-LABEL: asr_i8_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.b, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
@@ -1388,7 +1388,7 @@ define <vscale x 8 x i16> @asr_i16_all_active(<vscale x 8 x i16> %a) {
 define <vscale x 8 x i16> @asr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
 ; CHECK-LABEL: asr_i16_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.h, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
@@ -1424,7 +1424,7 @@ define <vscale x 4 x i32> @asr_i32_all_active(<vscale x 4 x i32> %a) {
 define <vscale x 4 x i32> @asr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
 ; CHECK-LABEL: asr_i32_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.s, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
@@ -1460,7 +1460,7 @@ define <vscale x 2 x i64> @asr_i64_all_active(<vscale x 2 x i64> %a) {
 define <vscale x 2 x i64> @asr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
 ; CHECK-LABEL: asr_i64_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
@@ -1688,7 +1688,7 @@ define <vscale x 16 x i8> @lsr_i8_all_active(<vscale x 16 x i8> %a) {
 define <vscale x 16 x i8> @lsr_i8_too_small(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
 ; CHECK-LABEL: lsr_i8_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.b, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
@@ -1724,7 +1724,7 @@ define <vscale x 8 x i16> @lsr_i16_all_active(<vscale x 8 x i16> %a) {
 define <vscale x 8 x i16> @lsr_i16_too_small(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
 ; CHECK-LABEL: lsr_i16_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.h, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
@@ -1760,7 +1760,7 @@ define <vscale x 4 x i32> @lsr_i32_all_active(<vscale x 4 x i32> %a) {
 define <vscale x 4 x i32> @lsr_i32_too_small(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
 ; CHECK-LABEL: lsr_i32_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.s, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
@@ -1796,7 +1796,7 @@ define <vscale x 2 x i64> @lsr_i64_all_active(<vscale x 2 x i64> %a) {
 define <vscale x 2 x i64> @lsr_i64_too_small(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
 ; CHECK-LABEL: lsr_i64_too_small:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
index ed820e0fc8a25..19facd2ef8993 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-merging.ll
@@ -397,7 +397,7 @@ define <vscale x 2 x i64> @bic_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 define <vscale x 2 x i64> @bic_i64_zero_no_unique_reg(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
 ; CHECK-LABEL: bic_i64_zero_no_unique_reg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov z1.d, p0/m, z0.d
 ; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
 ; CHECK-NEXT:    bic z0.d, p0/m, z0.d, z1.d
@@ -414,7 +414,7 @@ define <vscale x 2 x i64> @bic_i64_zero_no_unique_reg(<vscale x 2 x i1> %pg, <vs
 define <vscale x 2 x i64> @bic_i64_zero_no_comm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: bic_i64_zero_no_comm:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    mov z2.d, p0/m, z0.d
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    bic z0.d, p0/m, z0.d, z2.d

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
index 238f188f93815..53106b330efce 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scalar-to-vec.ll
@@ -96,7 +96,7 @@ define <vscale x 2 x double> @dup_f64(<vscale x 2 x double> %a, <vscale x 2 x i1
 define <vscale x 8 x bfloat> @test_svdup_n_bf16_z(<vscale x 8 x i1> %pg, bfloat %op) #0 {
 ; CHECK-LABEL: test_svdup_n_bf16_z:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.h, #0 // =0x0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    mov z1.h, p0/m, h0
 ; CHECK-NEXT:    mov z0.d, z1.d
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
index 2324e3074a420..3a1c6f6731b08 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-shifts-merging.ll
@@ -60,7 +60,7 @@ define <vscale x 2 x i64> @asr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 define <vscale x 16 x i8> @asr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: asr_wide_i8_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
 ; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.d
 ; CHECK-NEXT:    ret
@@ -74,7 +74,7 @@ define <vscale x 16 x i8> @asr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16
 define <vscale x 8 x i16> @asr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: asr_wide_i16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.d
 ; CHECK-NEXT:    ret
@@ -88,7 +88,7 @@ define <vscale x 8 x i16> @asr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8
 define <vscale x 4 x i32> @asr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: asr_wide_i32_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.d
 ; CHECK-NEXT:    ret
@@ -214,7 +214,7 @@ define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 define <vscale x 16 x i8> @lsl_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsl_wide_i8_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
 ; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.d
 ; CHECK-NEXT:    ret
@@ -228,7 +228,7 @@ define <vscale x 16 x i8> @lsl_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16
 define <vscale x 8 x i16> @lsl_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsl_wide_i16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.d
 ; CHECK-NEXT:    ret
@@ -242,7 +242,7 @@ define <vscale x 8 x i16> @lsl_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8
 define <vscale x 4 x i32> @lsl_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsl_wide_i32_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.d
 ; CHECK-NEXT:    ret
@@ -312,7 +312,7 @@ define <vscale x 2 x i64> @lsr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 define <vscale x 16 x i8> @lsr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsr_wide_i8_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
 ; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.d
 ; CHECK-NEXT:    ret
@@ -326,7 +326,7 @@ define <vscale x 16 x i8> @lsr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16
 define <vscale x 8 x i16> @lsr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsr_wide_i16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.d
 ; CHECK-NEXT:    ret
@@ -340,7 +340,7 @@ define <vscale x 8 x i16> @lsr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8
 define <vscale x 4 x i32> @lsr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: lsr_wide_i32_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.d
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve-knownbits.ll b/llvm/test/CodeGen/AArch64/sve-knownbits.ll
index c22d18c7e2ede..7d6ed08173bf4 100644
--- a/llvm/test/CodeGen/AArch64/sve-knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/sve-knownbits.ll
@@ -4,7 +4,7 @@
 define <vscale x 8 x i16> @test_knownzero(<vscale x 8 x i16> %x) {
 ; CHECK-LABEL: test_knownzero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   %a1 = shl <vscale x 8 x i16> %x, splat (i16 8)
   %a2 = and <vscale x 8 x i16> %a1, splat (i16 8)

diff  --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index 43391c16e7cce..a6d7d17fd9eef 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -1250,7 +1250,7 @@ define <vscale x 8 x half> @dup_ld1rh_half_passthruzero_nxv8f16(<vscale x 8 x i1
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rh_half_passthruzero_nxv8f16:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr h1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NO-LD1R-NEXT:    ret
@@ -1266,7 +1266,7 @@ define <vscale x 4 x float> @dup_ld1rs_float_passthruzero_nxv4f32(<vscale x 4 x
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rs_float_passthruzero_nxv4f32:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr s1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.s, p0/m, s1
 ; CHECK-NO-LD1R-NEXT:    ret
@@ -1282,7 +1282,7 @@ define <vscale x 2 x double> @dup_ld1rd_double_passthruzero_nxv2f64(<vscale x 2
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rd_double_passthruzero_nxv2f64:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr d1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.d, p0/m, d1
 ; CHECK-NO-LD1R-NEXT:    ret
@@ -1298,7 +1298,7 @@ define <vscale x 4 x half> @dup_ld1rh_half_passthruzero_nxv4f16(<vscale x 4 x i1
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rh_half_passthruzero_nxv4f16:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr h1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NO-LD1R-NEXT:    ret
@@ -1314,7 +1314,7 @@ define <vscale x 2 x half> @dup_ld1rh_half_passthruzero_nxv2f16(<vscale x 2 x i1
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rh_half_passthruzero_nxv2f16:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr h1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NO-LD1R-NEXT:    ret
@@ -1330,7 +1330,7 @@ define <vscale x 2 x float> @dup_ld1rs_float_passthruzero_nxv2f32(<vscale x 2 x
 ;
 ; CHECK-NO-LD1R-LABEL: dup_ld1rs_float_passthruzero_nxv2f32:
 ; CHECK-NO-LD1R:       // %bb.0:
-; CHECK-NO-LD1R-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NO-LD1R-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NO-LD1R-NEXT:    ldr s1, [x0]
 ; CHECK-NO-LD1R-NEXT:    mov z0.s, p0/m, s1
 ; CHECK-NO-LD1R-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
index da541158276d3..8f2cbbdb55636 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
@@ -76,7 +76,7 @@ define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x p
 define void @masked_scatter_splat_constant_pointer (<vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: masked_scatter_splat_constant_pointer:
 ; CHECK:       // %bb.0: // %vector.body
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    st1w { z0.d }, p1, [z0.d]

diff  --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
index d7bab3297cf29..8d3b12e359f3f 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
@@ -264,7 +264,7 @@ entry:
 define <vscale x 4 x i64> @udot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: udot_8to64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    udot z4.s, z2.b, z3.b
 ; CHECK-NEXT:    sunpklo z2.d, z4.s
 ; CHECK-NEXT:    sunpkhi z3.d, z4.s
@@ -327,7 +327,7 @@ entry:
 define <vscale x 4 x i64> @sdot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
 ; CHECK-LABEL: sdot_8to64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
+; CHECK-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-NEXT:    sdot z4.s, z2.b, z3.b
 ; CHECK-NEXT:    sunpklo z2.d, z4.s
 ; CHECK-NEXT:    sunpkhi z3.d, z4.s
@@ -390,7 +390,7 @@ entry:
 define <vscale x 4 x i64> @usdot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b){
 ; CHECK-I8MM-LABEL: usdot_8to64:
 ; CHECK-I8MM:       // %bb.0: // %entry
-; CHECK-I8MM-NEXT:    mov z4.s, #0 // =0x0
+; CHECK-I8MM-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-I8MM-NEXT:    usdot z4.s, z2.b, z3.b
 ; CHECK-I8MM-NEXT:    sunpklo z2.d, z4.s
 ; CHECK-I8MM-NEXT:    sunpkhi z3.d, z4.s
@@ -523,7 +523,7 @@ entry:
 define <vscale x 4 x i64> @sudot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-I8MM-LABEL: sudot_8to64:
 ; CHECK-I8MM:       // %bb.0: // %entry
-; CHECK-I8MM-NEXT:    mov z4.s, #0 // =0x0
+; CHECK-I8MM-NEXT:    movi v4.2d, #0000000000000000
 ; CHECK-I8MM-NEXT:    usdot z4.s, z3.b, z2.b
 ; CHECK-I8MM-NEXT:    sunpklo z2.d, z4.s
 ; CHECK-I8MM-NEXT:    sunpkhi z3.d, z4.s
@@ -758,11 +758,11 @@ entry:
 define <vscale x 4 x i64> @udot_no_bin_op_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a){
 ; CHECK-LABEL: udot_no_bin_op_8to64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z3.b, #1 // =0x1
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
-; CHECK-NEXT:    udot z4.s, z2.b, z3.b
-; CHECK-NEXT:    sunpklo z2.d, z4.s
-; CHECK-NEXT:    sunpkhi z3.d, z4.s
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    mov z4.b, #1 // =0x1
+; CHECK-NEXT:    udot z3.s, z2.b, z4.b
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
 ; CHECK-NEXT:    add z0.d, z0.d, z2.d
 ; CHECK-NEXT:    add z1.d, z1.d, z3.d
 ; CHECK-NEXT:    ret
@@ -800,11 +800,11 @@ define <vscale x 4 x i64> @udot_no_bin_op_8to64(<vscale x 4 x i64> %acc, <vscale
 define <vscale x 4 x i64> @sdot_no_bin_op_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8> %a){
 ; CHECK-LABEL: sdot_no_bin_op_8to64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z3.b, #1 // =0x1
-; CHECK-NEXT:    mov z4.s, #0 // =0x0
-; CHECK-NEXT:    sdot z4.s, z2.b, z3.b
-; CHECK-NEXT:    sunpklo z2.d, z4.s
-; CHECK-NEXT:    sunpkhi z3.d, z4.s
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
+; CHECK-NEXT:    mov z4.b, #1 // =0x1
+; CHECK-NEXT:    sdot z3.s, z2.b, z4.b
+; CHECK-NEXT:    sunpklo z2.d, z3.s
+; CHECK-NEXT:    sunpkhi z3.d, z3.s
 ; CHECK-NEXT:    add z0.d, z0.d, z2.d
 ; CHECK-NEXT:    add z1.d, z1.d, z3.d
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve-pr92779.ll b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
index 1bb7801b6a047..3f34d79b3bb49 100644
--- a/llvm/test/CodeGen/AArch64/sve-pr92779.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pr92779.ll
@@ -4,8 +4,8 @@
 define void @main(ptr %0) {
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d, vl1
 ; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
 ; CHECK-NEXT:    uzp1 v0.2s, v1.2s, v0.2s

diff  --git a/llvm/test/CodeGen/AArch64/sve-split-fcvt.ll b/llvm/test/CodeGen/AArch64/sve-split-fcvt.ll
index 2fe09f8ac7c5d..2378b226c05e3 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-fcvt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-fcvt.ll
@@ -331,8 +331,8 @@ define <vscale x 4 x double> @scvtf_d_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 4 x double> @scvtf_d_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: scvtf_d_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    fmov z0.d, p1/m, #-1.00000000
@@ -392,8 +392,8 @@ define <vscale x 4 x double> @ucvtf_d_nxv4i32(<vscale x 4 x i32> %a) {
 define <vscale x 4 x double> @ucvtf_d_nxv4i1(<vscale x 4 x i1> %a) {
 ; CHECK-LABEL: ucvtf_d_nxv4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    fmov z0.d, p1/m, #1.00000000

diff  --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 4a75242848343..5cca5539048b5 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -365,7 +365,7 @@ define <vscale x 2 x double> @splat_nxv2f64(double %val) {
 define <vscale x 8 x bfloat> @splat_nxv8bf16_zero() #0 {
 ; CHECK-LABEL: splat_nxv8bf16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 8 x bfloat> zeroinitializer
 }
@@ -373,7 +373,7 @@ define <vscale x 8 x bfloat> @splat_nxv8bf16_zero() #0 {
 define <vscale x 4 x bfloat> @splat_nxv4bf16_zero() #0 {
 ; CHECK-LABEL: splat_nxv4bf16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 4 x bfloat> zeroinitializer
 }
@@ -381,7 +381,7 @@ define <vscale x 4 x bfloat> @splat_nxv4bf16_zero() #0 {
 define <vscale x 2 x bfloat> @splat_nxv2bf16_zero() #0 {
 ; CHECK-LABEL: splat_nxv2bf16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 2 x bfloat> zeroinitializer
 }
@@ -389,7 +389,7 @@ define <vscale x 2 x bfloat> @splat_nxv2bf16_zero() #0 {
 define <vscale x 8 x half> @splat_nxv8f16_zero() {
 ; CHECK-LABEL: splat_nxv8f16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 8 x half> zeroinitializer
 }
@@ -397,7 +397,7 @@ define <vscale x 8 x half> @splat_nxv8f16_zero() {
 define <vscale x 4 x half> @splat_nxv4f16_zero() {
 ; CHECK-LABEL: splat_nxv4f16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 4 x half> zeroinitializer
 }
@@ -405,7 +405,7 @@ define <vscale x 4 x half> @splat_nxv4f16_zero() {
 define <vscale x 2 x half> @splat_nxv2f16_zero() {
 ; CHECK-LABEL: splat_nxv2f16_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 2 x half> zeroinitializer
 }
@@ -413,7 +413,7 @@ define <vscale x 2 x half> @splat_nxv2f16_zero() {
 define <vscale x 4 x float> @splat_nxv4f32_zero() {
 ; CHECK-LABEL: splat_nxv4f32_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 4 x float> zeroinitializer
 }
@@ -421,7 +421,7 @@ define <vscale x 4 x float> @splat_nxv4f32_zero() {
 define <vscale x 2 x float> @splat_nxv2f32_zero() {
 ; CHECK-LABEL: splat_nxv2f32_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 2 x float> zeroinitializer
 }
@@ -429,7 +429,7 @@ define <vscale x 2 x float> @splat_nxv2f32_zero() {
 define <vscale x 2 x double> @splat_nxv2f64_zero() {
 ; CHECK-LABEL: splat_nxv2f64_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   ret <vscale x 2 x double> zeroinitializer
 }
@@ -512,7 +512,7 @@ define <vscale x 2 x bfloat> @splat_nxv2bf16_imm() {
 define <vscale x 4 x i32> @splat_nxv4i32_fold(<vscale x 4 x i32> %x) {
 ; CHECK-LABEL: splat_nxv4i32_fold:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   %r = sub <vscale x 4 x i32> %x, %x
   ret <vscale x 4 x i32> %r
@@ -522,7 +522,7 @@ define <vscale x 4 x i32> @splat_nxv4i32_fold(<vscale x 4 x i32> %x) {
 define <vscale x 4 x float> @splat_nxv4f32_fold(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: splat_nxv4f32_fold:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ret
   %r = fsub nnan <vscale x 4 x float> %x, %x
   ret <vscale x 4 x float> %r

diff  --git a/llvm/test/CodeGen/AArch64/sve-vselect-imm.ll b/llvm/test/CodeGen/AArch64/sve-vselect-imm.ll
index f16b6a4d50bca..6b5b3d6d436cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-vselect-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vselect-imm.ll
@@ -107,9 +107,9 @@ define <vscale x 2 x i64> @sel_64_shifted(<vscale x 2 x i1> %p) {
 define <vscale x 8 x i16> @sel_16_illegal_wrong_extension(<vscale x 8 x i1> %p) {
 ; CHECK-LABEL: sel_16_illegal_wrong_extension:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #128 // =0x80
-; CHECK-NEXT:    mov z1.h, #0 // =0x0
-; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    mov z1.h, #128 // =0x80
+; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 8 x i1> %p, <vscale x 8 x i16> splat (i16 128), <vscale x 8 x i16> zeroinitializer
   ret <vscale x 8 x i16> %sel
@@ -118,9 +118,9 @@ define <vscale x 8 x i16> @sel_16_illegal_wrong_extension(<vscale x 8 x i1> %p)
 define <vscale x 4 x i32> @sel_32_illegal_wrong_extension(<vscale x 4 x i1> %p) {
 ; CHECK-LABEL: sel_32_illegal_wrong_extension:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #128 // =0x80
-; CHECK-NEXT:    mov z1.s, #0 // =0x0
-; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    mov z1.s, #128 // =0x80
+; CHECK-NEXT:    mov z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 4 x i1> %p, <vscale x 4 x i32> splat (i32 128), <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %sel
@@ -129,9 +129,9 @@ define <vscale x 4 x i32> @sel_32_illegal_wrong_extension(<vscale x 4 x i1> %p)
 define <vscale x 2 x i64> @sel_64_illegal_wrong_extension(<vscale x 2 x i1> %p) {
 ; CHECK-LABEL: sel_64_illegal_wrong_extension:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #128 // =0x80
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
-; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    mov z1.d, #128 // =0x80
+; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 2 x i1> %p, <vscale x 2 x i64> splat (i64 128), <vscale x 2 x i64> zeroinitializer
   ret <vscale x 2 x i64> %sel
@@ -140,10 +140,10 @@ define <vscale x 2 x i64> @sel_64_illegal_wrong_extension(<vscale x 2 x i1> %p)
 define <vscale x 8 x i16> @sel_16_illegal_shifted(<vscale x 8 x i1> %p) {
 ; CHECK-LABEL: sel_16_illegal_shifted:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #513 // =0x201
-; CHECK-NEXT:    mov z1.h, #0 // =0x0
-; CHECK-NEXT:    mov z0.h, w8
-; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    mov z0.h, p0/m, z1.h
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 8 x i1> %p, <vscale x 8 x i16> splat (i16 513), <vscale x 8 x i16> zeroinitializer
   ret <vscale x 8 x i16> %sel
@@ -152,10 +152,10 @@ define <vscale x 8 x i16> @sel_16_illegal_shifted(<vscale x 8 x i1> %p) {
 define <vscale x 4 x i32> @sel_32_illegal_shifted(<vscale x 4 x i1> %p) {
 ; CHECK-LABEL: sel_32_illegal_shifted:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #513 // =0x201
-; CHECK-NEXT:    mov z1.s, #0 // =0x0
-; CHECK-NEXT:    mov z0.s, w8
-; CHECK-NEXT:    sel z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    mov z0.s, p0/m, z1.s
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 4 x i1> %p, <vscale x 4 x i32> splat (i32 513), <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %sel
@@ -164,10 +164,10 @@ define <vscale x 4 x i32> @sel_32_illegal_shifted(<vscale x 4 x i1> %p) {
 define <vscale x 2 x i64> @sel_64_illegal_shifted(<vscale x 2 x i1> %p) {
 ; CHECK-LABEL: sel_64_illegal_shifted:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mov w8, #513 // =0x201
-; CHECK-NEXT:    mov z1.d, #0 // =0x0
-; CHECK-NEXT:    mov z0.d, x8
-; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
   %sel = select <vscale x 2 x i1> %p, <vscale x 2 x i64> splat (i64 513), <vscale x 2 x i64> zeroinitializer
   ret <vscale x 2 x i64> %sel

diff  --git a/llvm/test/CodeGen/AArch64/sve-zeroinit.ll b/llvm/test/CodeGen/AArch64/sve-zeroinit.ll
index eab39d0ef4025..3d40fd920cfee 100644
--- a/llvm/test/CodeGen/AArch64/sve-zeroinit.ll
+++ b/llvm/test/CodeGen/AArch64/sve-zeroinit.ll
@@ -1,95 +1,145 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve < %s                  | FileCheck %s --check-prefixes=CHECK,SVE
+; RUN: llc -mattr=+sme -force-streaming < %s | FileCheck %s --check-prefixes=CHECK,STREAMING-SVE
 
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-none-linux-gnu"
 
 define <vscale x 2 x i64> @test_zeroinit_2xi64() {
-; CHECK-LABEL: test_zeroinit_2xi64
-; CHECK:       mov z0.d, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_2xi64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_2xi64:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 2 x i64> zeroinitializer
 }
 
 define <vscale x 4 x i32> @test_zeroinit_4xi32() {
-; CHECK-LABEL: test_zeroinit_4xi32
-; CHECK:       mov z0.s, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_4xi32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_4xi32:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 4 x i32> zeroinitializer
 }
 
 define <vscale x 8 x i16> @test_zeroinit_8xi16() {
-; CHECK-LABEL: test_zeroinit_8xi16
-; CHECK:       mov z0.h, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_8xi16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_8xi16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 8 x i16> zeroinitializer
 }
 
 define <vscale x 16 x i8> @test_zeroinit_16xi8() {
-; CHECK-LABEL: test_zeroinit_16xi8
-; CHECK:       mov z0.b, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_16xi8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_16xi8:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 16 x i8> zeroinitializer
 }
 
 define <vscale x 2 x double> @test_zeroinit_2xf64() {
-; CHECK-LABEL: test_zeroinit_2xf64
-; CHECK:       mov z0.d, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_2xf64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_2xf64:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 2 x double> zeroinitializer
 }
 
 define <vscale x 4 x float> @test_zeroinit_4xf32() {
-; CHECK-LABEL: test_zeroinit_4xf32
-; CHECK:       mov z0.s, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_4xf32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_4xf32:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 4 x float> zeroinitializer
 }
 
 define <vscale x 8 x half> @test_zeroinit_8xf16() {
-; CHECK-LABEL: test_zeroinit_8xf16
-; CHECK:       mov z0.h, #0
-; CHECK-NEXT:  ret
+; SVE-LABEL: test_zeroinit_8xf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_zeroinit_8xf16:
+; STREAMING-SVE:       // %bb.0:
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    ret
   ret <vscale x 8 x half> zeroinitializer
 }
 
 define <vscale x 1 x i1> @test_zeroinit_1xi1() {
-; CHECK-LABEL: test_zeroinit_1xi1
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_1xi1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret <vscale x 1 x i1> zeroinitializer
 }
 
 define <vscale x 2 x i1> @test_zeroinit_2xi1() {
-; CHECK-LABEL: test_zeroinit_2xi1
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_2xi1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret <vscale x 2 x i1> zeroinitializer
 }
 
 define <vscale x 4 x i1> @test_zeroinit_4xi1() {
-; CHECK-LABEL: test_zeroinit_4xi1
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_4xi1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret <vscale x 4 x i1> zeroinitializer
 }
 
 define <vscale x 8 x i1> @test_zeroinit_8xi1() {
-; CHECK-LABEL: test_zeroinit_8xi1
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_8xi1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret <vscale x 8 x i1> zeroinitializer
 }
 
 define <vscale x 16 x i1> @test_zeroinit_16xi1() {
-; CHECK-LABEL: test_zeroinit_16xi1
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_16xi1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret <vscale x 16 x i1> zeroinitializer
 }
 
 define target("aarch64.svcount") @test_zeroinit_svcount() "target-features"="+sme2" {
-; CHECK-LABEL: test_zeroinit_svcount
-; CHECK:       pfalse p0.b
-; CHECK-NEXT:  ret
+; CHECK-LABEL: test_zeroinit_svcount:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    pfalse p0.b
+; CHECK-NEXT:    ret
   ret target("aarch64.svcount") zeroinitializer
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
index 7b921d71cbfb4..0a18ce054bcaf 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
@@ -46,7 +46,7 @@ define <vscale x 8 x bfloat> @bfadd_u_ptrue(<vscale x 8 x bfloat> %a, <vscale x
 define <vscale x 8 x bfloat> @bfadd_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfadd_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfadd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
index 55ef452b60308..dcf6d3c9f6bdf 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmax.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x bfloat> @bfmax_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bf
 define <vscale x 8 x bfloat> @bfmax_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfmax_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfmax z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
index 9b0f7e039f2e5..cff013fa26083 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmaxnm.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x bfloat> @bfmaxnm_u(<vscale x 8 x bfloat> %a, <vscale x 8 x
 define <vscale x 8 x bfloat> @bfmaxnm_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfmaxnm_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfmaxnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
index 8c586fd47f5a8..23c554f65da84 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmin.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x bfloat> @bfmin_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bf
 define <vscale x 8 x bfloat> @bfmin_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfmin_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfmin z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
index 90132224e0223..cbbb372b921a8 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfminnm.ll
@@ -58,7 +58,7 @@ define <vscale x 8 x bfloat> @bfminnm_u(<vscale x 8 x bfloat> %a, <vscale x 8 x
 define <vscale x 8 x bfloat> @bfminnm_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfminnm_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfminnm z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
index eb7e99f332da3..e0d65c1a0dc0a 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmla.ll
@@ -22,7 +22,7 @@ define <vscale x 8 x bfloat> @bfmla_x(<vscale x 8 x i1> %pg, <vscale x 8 x bfloa
 define <vscale x 8 x bfloat> @bfmla_z(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
 ; CHECK-LABEL: bfmla_z:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z3.h, #0 // =0x0
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z3.h
 ; CHECK-NEXT:    bfmla z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
index 8ff1afcc9b4ab..9a90755711882 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmls.ll
@@ -23,7 +23,7 @@ define <vscale x 8 x bfloat> @bfmls_x(<vscale x 8 x i1> %pg, <vscale x 8 x bfloa
 define <vscale x 8 x bfloat> @bfmls_z(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c){
 ; CHECK-LABEL: bfmls_z:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z3.h, #0 // =0x0
+; CHECK-NEXT:    movi v3.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z3.h
 ; CHECK-NEXT:    bfmls z0.h, p0/m, z1.h, z2.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
index 8b6a087578ed8..a5e5bb694679d 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
@@ -46,7 +46,7 @@ define <vscale x 8 x bfloat> @bfmul_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bf
 define <vscale x 8 x bfloat> @bfmul_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfmul_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
index 1b1304312ceb7..0fed30e74f4a8 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
@@ -46,7 +46,7 @@ define <vscale x 8 x bfloat> @bfsub_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bf
 define <vscale x 8 x bfloat> @bfsub_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
 ; CHECK-LABEL: bfsub_u_zeroing:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    bfsub z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-abs-neg.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-abs-neg.ll
index 510d4576646f1..ba8a606b331e0 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-abs-neg.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-abs-neg.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -39,16 +39,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svabs_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svabs_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fabs z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fabs z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fabs z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -86,16 +92,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svabs_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svabs_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fabs z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fabs z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fabs z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -133,16 +145,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svabs_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svabs_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fabs z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fabs z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fabs z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -180,16 +198,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svabs_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svabs_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    abs z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    abs z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    abs z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    abs z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -227,16 +251,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svabs_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svabs_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    abs z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    abs z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    abs z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    abs z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -274,16 +304,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svabs_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svabs_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    abs z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    abs z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    abs z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    abs z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -321,16 +357,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svabs_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svabs_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    abs z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svabs_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    abs z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svabs_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    abs z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svabs_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    abs z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -368,16 +410,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svneg_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svneg_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fneg z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fneg z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fneg z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fneg z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fneg.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -415,16 +463,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svneg_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svneg_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fneg z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fneg z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fneg z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fneg z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fneg.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -462,16 +516,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svneg_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svneg_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fneg z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fneg z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fneg z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fneg z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fneg.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -509,16 +569,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svneg_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svneg_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    neg z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    neg z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    neg z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    neg z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -556,16 +622,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svneg_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svneg_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    neg z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    neg z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    neg z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    neg z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -603,16 +675,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svneg_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svneg_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    neg z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    neg z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    neg z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    neg z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -650,16 +728,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svneg_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svneg_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    neg z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svneg_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    neg z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svneg_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    neg z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svneg_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    neg z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-counts-not.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-counts-not.ll
index f7970ca81f608..b904634d0b76c 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-counts-not.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-counts-not.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+bf16,+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+bf16,+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+bf16,+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+bf16,+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+bf16,+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+bf16,+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -39,16 +39,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svcls_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svcls_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    cls z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcls_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cls z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcls_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cls z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcls_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    cls z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -86,16 +92,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcls_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svcls_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    cls z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcls_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cls z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcls_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cls z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcls_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    cls z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -133,16 +145,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcls_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svcls_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    cls z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcls_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cls z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcls_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cls z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcls_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    cls z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -180,16 +198,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcls_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svcls_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    cls z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcls_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cls z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcls_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cls z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcls_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    cls z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -227,16 +251,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svclz_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svclz_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    clz z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svclz_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    clz z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svclz_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    clz z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svclz_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    clz z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.clz.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -274,16 +304,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svclz_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svclz_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    clz z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svclz_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    clz z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svclz_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    clz z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svclz_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    clz z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.clz.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -321,16 +357,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svclz_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svclz_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    clz z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svclz_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    clz z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svclz_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    clz z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svclz_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    clz z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.clz.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -368,16 +410,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svclz_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svclz_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    clz z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svclz_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    clz z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svclz_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    clz z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svclz_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    clz z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.clz.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -415,16 +463,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svcnt_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svcnt_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    cnt z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cnt.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -462,16 +516,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcnt_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svcnt_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    cnt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -509,16 +569,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcnt_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svcnt_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    cnt z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -556,16 +622,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcnt_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svcnt_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    cnt z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -603,16 +675,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcnt_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcnt_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    cnt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x i16> %0
@@ -650,16 +728,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcnt_bf16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x bfloat> %x) {
-; CHECK-LABEL: test_svcnt_bf16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    cnt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_bf16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_bf16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_bf16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cnt.nxv8bf16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %x)
   ret <vscale x 8 x i16> %0
@@ -697,16 +781,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcnt_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcnt_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    cnt z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cnt.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x i32> %0
@@ -744,16 +834,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcnt_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcnt_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    cnt z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnt_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnt z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnt_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnt z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnt_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnt z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cnt.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x i64> %0
@@ -791,16 +887,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svcnot_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svcnot_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    cnot z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnot_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnot z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnot_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnot z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnot_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnot z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cnot.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -838,16 +940,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcnot_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svcnot_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    cnot z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnot_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnot z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnot_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnot z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnot_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnot z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cnot.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -885,16 +993,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcnot_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svcnot_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    cnot z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnot_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnot z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnot_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnot z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnot_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnot z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cnot.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -932,16 +1046,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcnot_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svcnot_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    cnot z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcnot_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    cnot z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcnot_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    cnot z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcnot_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    cnot z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cnot.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -979,16 +1099,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svnot_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svnot_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    not z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svnot_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    not z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svnot_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    not z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svnot_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    not z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.not.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -1026,16 +1152,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svnot_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svnot_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    not z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svnot_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    not z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svnot_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    not z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svnot_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    not z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.not.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -1073,16 +1205,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svnot_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svnot_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    not z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svnot_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    not z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svnot_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    not z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svnot_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    not z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.not.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -1120,16 +1258,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svnot_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svnot_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    not z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svnot_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    not z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svnot_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    not z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svnot_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    not z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.not.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-ext.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-ext.ll
index b29805c2b8f05..20a5475706c9c 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-ext.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-ext.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -39,16 +39,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svextb_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svextb_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxtb z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxtb z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxtb z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -86,16 +92,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svextb_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svextb_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxtb z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxtb z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxtb z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -133,16 +145,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svextb_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svextb_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxtb z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxtb z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxtb z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -180,16 +198,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svextb_u16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svextb_u16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    uxtb z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_u16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxtb z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_u16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxtb z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_u16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxtb z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -227,16 +251,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svextb_u32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svextb_u32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    uxtb z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_u32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxtb z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_u32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxtb z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_u32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxtb z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -274,16 +304,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svextb_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svextb_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    uxtb z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextb_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxtb z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextb_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxtb z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextb_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxtb z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -321,16 +357,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svexth_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svexth_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svexth_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxth z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svexth_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxth z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svexth_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxth z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -368,16 +410,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svexth_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svexth_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svexth_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxth z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svexth_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxth z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svexth_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxth z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -415,16 +463,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svexth_u32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svexth_u32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    uxth z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svexth_u32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxth z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svexth_u32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxth z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svexth_u32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxth z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxth.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -462,16 +516,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svexth_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svexth_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    uxth z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svexth_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxth z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svexth_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxth z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svexth_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxth z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxth.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -509,16 +569,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svextw_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svextw_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextw_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sxtw z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextw_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sxtw z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextw_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    sxtw z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -556,16 +622,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svextw_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svextw_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    uxtw z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svextw_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    uxtw z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svextw_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    uxtw z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svextw_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    uxtw z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvt-bfcvt.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvt-bfcvt.ll
index 855bf9a3b3c49..3ce8376394b2f 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvt-bfcvt.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvt-bfcvt.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve,+bf16    < %s | FileCheck %s
+; RUN: llc -mattr=+sve,+bf16    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2,+bf16 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme,+bf16    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme,+bf16    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2,+bf16 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svcvt_f16_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvt_f16_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f16_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.h, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f16_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.h, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f16_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.h, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 8 x half> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 8 x bfloat> @test_svcvt_bf16_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvt_bf16_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    bfcvt z0.h, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_bf16_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    bfcvt z0.h, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_bf16_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    bfcvt z0.h, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_bf16_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    bfcvt z0.h, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fcvt.bf16f32.v2(<vscale x 8 x bfloat> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 8 x bfloat> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svcvt_f16_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcvt_f16_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.h, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f16_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.h, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f16_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.h, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f16_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.h, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 8 x half> %0
@@ -176,16 +194,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvt_f32_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcvt_f32_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f32_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f32_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f32_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 4 x float> %0
@@ -222,16 +246,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvt_f32_f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcvt_f32_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.s, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f32_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.s, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f32_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.s, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f32_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.s, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 4 x float> %0
@@ -268,16 +298,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svcvt_f64_f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcvt_f64_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f64_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.d, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f64_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.d, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f64_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.d, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 2 x double> %0
@@ -314,16 +350,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svcvt_f64_f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvt_f64_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f64_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvt z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f64_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvt z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f64_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvt z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 2 x double> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtlt-fcvtx.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtlt-fcvtx.ll
index c7431e11c21ca..114f2163d94fc 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtlt-fcvtx.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtlt-fcvtx.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve2   < %s | FileCheck %s
+; RUN: llc -mattr=+sve2   < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme2   -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme2   -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvtlt_f32_f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcvtlt_f32_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtlt z0.s, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvtlt_f32_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtlt z0.s, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvtlt_f32_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtlt z0.s, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvtlt_f32_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtlt z0.s, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fcvtlt.f32f16(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 4 x float> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svcvtlt_f64_f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvtlt_f64_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtlt z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvtlt_f64_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtlt z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvtlt_f64_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtlt z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvtlt_f64_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtlt z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fcvtlt.f64f32(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 2 x double> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvtx_f32_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcvtx_f32_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtx z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvtx_f32_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtx z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvtx_f32_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtx z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvtx_f32_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtx z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fcvtx.f32f64(<vscale x 4 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 4 x float> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtzsu.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtzsu.ll
index 7259502bf4400..e03e7ca14871a 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtzsu.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-fcvtzsu.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_fcvtzs_s32_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_fcvtzs_s32_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzs_s32_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzs_s32_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzs_s32_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 4 x i32> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_fcvtzs_s64_f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_fcvtzs_s64_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzs_s64_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzs_s64_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzs_s64_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 2 x i64> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_fcvtzs_s32_f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_fcvtzs_s32_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzs_s32_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.s, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzs_s32_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.s, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzs_s32_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.s, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 4 x i32> %0
@@ -176,16 +194,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_fcvtzs_s64_f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_fcvtzs_s64_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzs_s64_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.d, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzs_s64_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.d, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzs_s64_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.d, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 2 x i64> %0
@@ -222,16 +246,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_fcvtzu_u32_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_fcvtzu_u32_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzu_u32_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzu_u32_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzu_u32_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 4 x i32> %0
@@ -268,16 +298,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_fcvtzu_u64_f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_fcvtzu_u64_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzu_u64_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzu_u64_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzu_u64_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 2 x i64> %0
@@ -314,16 +350,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_fcvtzu_u32_f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_fcvtzu_u32_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzu_u32_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.s, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzu_u32_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.s, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzu_u32_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.s, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 4 x i32> %0
@@ -360,16 +402,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_fcvtzu_u64_f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_fcvtzu_u64_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_fcvtzu_u64_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.d, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_fcvtzu_u64_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.d, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_fcvtzu_u64_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.d, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 2 x i64> %0
@@ -408,16 +456,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcvt_s16_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcvt_s16_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_s16_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_s16_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_s16_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x i16> %0
@@ -455,16 +509,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svcvt_u16_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svcvt_u16_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_u16_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_u16_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_u16_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x i16> %0
@@ -502,16 +562,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcvt_s32_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvt_s32_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_s32_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_s32_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_s32_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x i32> %0
@@ -549,16 +615,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svcvt_u32_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svcvt_u32_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_u32_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_u32_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_u32_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x i32> %0
@@ -596,16 +668,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcvt_s64_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcvt_s64_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_s64_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzs z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_s64_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzs z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_s64_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzs z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x i64> %0
@@ -643,16 +721,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svcvt_u64_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svcvt_u64_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_u64_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fcvtzu z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_u64_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fcvtzu z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_u64_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fcvtzu z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-flogb.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-flogb.ll
index 23620a3419b99..79d66b0e04824 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-flogb.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-flogb.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve2   < %s | FileCheck %s
+; RUN: llc -mattr=+sve2   < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme2   -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme2   -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svlogb_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svlogb_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    flogb z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svlogb_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    flogb z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svlogb_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    flogb z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svlogb_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    flogb z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.flogb.nxv8f16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x i16> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svlogb_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svlogb_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    flogb z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svlogb_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    flogb z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svlogb_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    flogb z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svlogb_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    flogb z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.flogb.nxv4f32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x i32> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svlogb_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svlogb_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    flogb z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svlogb_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    flogb z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svlogb_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    flogb z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svlogb_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    flogb z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.flogb.nxv2f64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-frint-frecpx-fsqrt.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-frint-frecpx-fsqrt.ll
index c493ec2dcc95d..25252d222db26 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-frint-frecpx-fsqrt.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-frint-frecpx-fsqrt.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -39,16 +39,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrinta_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrinta_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinta z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frinta.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -86,16 +92,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrinta_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrinta_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinta z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frinta.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -133,16 +145,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrinta_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrinta_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinta z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frinta.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -180,16 +198,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrinta_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrinta_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frinta z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frinta.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -227,16 +251,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrinta_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrinta_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frinta z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frinta.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -274,16 +304,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrinta_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrinta_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frinta z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinta_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinta z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinta_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinta z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinta_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinta z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frinta.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -321,16 +357,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrinti_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrinti_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinti z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frinti.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -368,16 +410,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrinti_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrinti_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinti z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frinti.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -415,16 +463,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrinti_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrinti_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frinti z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frinti.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -462,16 +516,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrinti_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrinti_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frinti z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frinti.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -509,16 +569,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrinti_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrinti_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frinti z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frinti.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -557,16 +623,22 @@ entry:
 
 
 define <vscale x 2 x double> @test_svrinti_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrinti_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frinti z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrinti_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frinti z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrinti_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frinti z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrinti_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frinti z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frinti.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -606,16 +678,22 @@ entry:
 
 
 define <vscale x 8 x half> @test_svrintm_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrintm_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintm z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frintm.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -653,16 +731,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrintm_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrintm_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintm z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frintm.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -700,16 +784,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrintm_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrintm_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintm z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frintm.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -747,16 +837,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrintm_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrintm_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintm z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frintm.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -794,16 +890,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrintm_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrintm_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintm z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frintm.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -841,16 +943,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrintm_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrintm_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frintm z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintm_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintm z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintm_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintm z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintm_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintm z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frintm.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -888,16 +996,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrintn_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrintn_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintn z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frintn.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -935,16 +1049,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrintn_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrintn_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintn z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frintn.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -982,16 +1102,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrintn_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrintn_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintn z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frintn.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -1029,16 +1155,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrintn_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrintn_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintn z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frintn.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -1076,16 +1208,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrintn_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrintn_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintn z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frintn.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -1123,16 +1261,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrintn_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrintn_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frintn z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintn_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintn z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintn_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintn z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintn_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintn z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frintn.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -1170,16 +1314,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrintp_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrintp_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintp z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frintp.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -1217,16 +1367,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrintp_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrintp_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintp z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frintp.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -1264,16 +1420,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrintp_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrintp_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintp z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frintp.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -1311,16 +1473,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrintp_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrintp_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintp z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frintp.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -1358,16 +1526,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrintp_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrintp_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintp z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frintp.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -1406,16 +1580,22 @@ entry:
 
 
 define <vscale x 2 x double> @test_svrintp_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrintp_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frintp z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintp_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintp z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintp_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintp z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintp_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintp z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frintp.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -1453,16 +1633,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrintx_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrintx_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frintx.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -1500,16 +1686,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrintx_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrintx_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frintx.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -1547,16 +1739,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrintx_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrintx_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frintx.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -1594,16 +1792,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrintx_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrintx_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintx z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frintx.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -1641,16 +1845,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrintx_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrintx_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintx z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frintx.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -1688,16 +1898,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrintx_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrintx_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frintx z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintx_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintx z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintx_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintx z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintx_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintx z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frintx.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -1735,16 +1951,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrintz_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrintz_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintz z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frintz.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -1782,16 +2004,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrintz_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrintz_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintz z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frintz.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -1829,16 +2057,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrintz_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrintz_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frintz z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frintz.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -1876,16 +2110,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrintz_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrintz_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintz z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frintz.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -1923,16 +2163,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrintz_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrintz_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frintz z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frintz.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -1970,16 +2216,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrintz_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrintz_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frintz z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrintz_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frintz z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrintz_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frintz z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrintz_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frintz z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frintz.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -2017,16 +2269,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrecpx_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrecpx_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.frecpx.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -2064,16 +2322,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svrecpx_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svrecpx_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.frecpx.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -2111,16 +2375,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svrecpx_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svrecpx_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.frecpx.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -2158,16 +2428,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svrecpx_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svrecpx_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.frecpx.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -2205,16 +2481,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrecpx_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrecpx_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.frecpx.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -2252,16 +2534,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrecpx_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrecpx_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    frecpx z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpx_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    frecpx z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpx_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    frecpx z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpx_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    frecpx z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.frecpx.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0
@@ -2299,16 +2587,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svsqrt_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svsqrt_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -2346,16 +2640,22 @@ entry:
 }
 
 define <vscale x 4 x half> @test_svsqrt_4f16_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x half> %x) {
-; CHECK-LABEL: test_svsqrt_4f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_4f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_4f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_4f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x half> @llvm.aarch64.sve.fsqrt.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x half> %x)
   ret <vscale x 4 x half> %0
@@ -2393,16 +2693,22 @@ entry:
 }
 
 define <vscale x 2 x half> @test_svsqrt_2f16_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x half> %x) {
-; CHECK-LABEL: test_svsqrt_2f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_2f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_2f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_2f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x half> @llvm.aarch64.sve.fsqrt.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x half> %x)
   ret <vscale x 2 x half> %0
@@ -2440,16 +2746,22 @@ entry:
 }
 
 define <vscale x 2 x float> @test_svsqrt_2f32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x float> %x) {
-; CHECK-LABEL: test_svsqrt_2f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_2f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_2f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_2f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x float> @llvm.aarch64.sve.fsqrt.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x float> %x)
   ret <vscale x 2 x float> %0
@@ -2487,16 +2799,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svsqrt_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svsqrt_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -2534,16 +2852,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svsqrt_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svsqrt_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svsqrt_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    fsqrt z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svsqrt_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    fsqrt z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svsqrt_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    fsqrt z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-rev.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-rev.ll
index d7a51c8cf8062..fb95047a41205 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-rev.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-rev.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve2p1 < %s | FileCheck %s
+; RUN: llc -mattr=+sve2p1 < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svrbit_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svrbit_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    rbit z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrbit_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    rbit z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrbit_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    rbit z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrbit_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    rbit z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.rbit.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svrbit_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svrbit_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    rbit z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrbit_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    rbit z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrbit_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    rbit z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrbit_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    rbit z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.rbit.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrbit_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrbit_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    rbit z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrbit_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    rbit z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrbit_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    rbit z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrbit_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    rbit z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.rbit.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -176,16 +194,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svrbit_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svrbit_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    rbit z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrbit_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    rbit z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrbit_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    rbit z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrbit_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    rbit z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.rbit.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -222,16 +246,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svrevb_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svrevb_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    revb z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevb_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revb z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevb_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revb z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevb_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    revb z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -268,16 +298,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrevb_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrevb_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    revb z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevb_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revb z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevb_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revb z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevb_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    revb z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -314,16 +350,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svrevb_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svrevb_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    revb z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevb_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revb z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevb_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revb z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevb_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    revb z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -360,16 +402,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrevh_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrevh_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    revh z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevh_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revh z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevh_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revh z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevh_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    revh z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -406,16 +454,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svrevh_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svrevh_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    revh z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevh_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revh z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevh_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revh z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevh_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    revh z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -452,16 +506,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svrevw_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svrevw_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    revw z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevw_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revw z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevw_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revw z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevw_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    revw z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.revw.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -498,16 +558,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svrevd_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svrevd_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.revd.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -544,16 +610,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svrevd_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svrevd_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.revd.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -590,16 +662,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrevd_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrevd_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.revd.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -636,16 +714,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svrevd_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svrevd_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.revd.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -682,16 +766,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svrevd_f16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x half> %x) {
-; CHECK-LABEL: test_svrevd_f16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_f16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_f16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_f16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.revd.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x half> %x)
   ret <vscale x 8 x half> %0
@@ -728,16 +818,22 @@ entry:
 }
 
 define <vscale x 8 x bfloat> @test_svrevd_bf16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x bfloat> %x) {
-; CHECK-LABEL: test_svrevd_bf16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_bf16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_bf16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_bf16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.revd.nxv8bf16(<vscale x 8 x bfloat> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %x)
   ret <vscale x 8 x bfloat> %0
@@ -774,16 +870,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svrevd_f32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x float> %x) {
-; CHECK-LABEL: test_svrevd_f32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_f32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_f32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_f32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.revd.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x float> %x)
   ret <vscale x 4 x float> %0
@@ -820,16 +922,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svrevd_f64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x double> %x) {
-; CHECK-LABEL: test_svrevd_f64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    revd z0.q, p0/m, z1.q
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrevd_f64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    revd z0.q, p0/m, z1.q
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrevd_f64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    revd z0.q, p0/z, z1.q
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrevd_f64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    revd z0.q, p0/m, z1.q
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.revd.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x double> %x)
   ret <vscale x 2 x double> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-urecpe-ursqrte-sqabs-sqneg.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-urecpe-ursqrte-sqabs-sqneg.ll
index 787ac4458079c..50c73ae198ab2 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-urecpe-ursqrte-sqabs-sqneg.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-urecpe-ursqrte-sqabs-sqneg.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve2   < %s | FileCheck %s
+; RUN: llc -mattr=+sve2   < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    --force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    --force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 --force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -39,16 +39,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrecpe_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrecpe_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    urecpe z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrecpe_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    urecpe z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrecpe_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    urecpe z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrecpe_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    urecpe z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.urecpe.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -86,16 +92,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svrsqrte_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svrsqrte_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    ursqrte z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svrsqrte_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ursqrte z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svrsqrte_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ursqrte z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svrsqrte_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    ursqrte z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ursqrte.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -133,16 +145,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svqabs_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svqabs_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    sqabs z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqabs_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqabs z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqabs_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqabs z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqabs_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqabs z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqabs.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -180,16 +198,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svqabs_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svqabs_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    sqabs z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqabs_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqabs z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqabs_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqabs z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqabs_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqabs z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqabs.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -227,16 +251,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svqabs_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svqabs_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    sqabs z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqabs_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqabs z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqabs_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqabs z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqabs_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqabs z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqabs.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -274,16 +304,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svqabs_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svqabs_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    sqabs z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqabs_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqabs z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqabs_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqabs z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqabs_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqabs z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqabs.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0
@@ -321,16 +357,22 @@ entry:
 }
 
 define <vscale x 16 x i8> @test_svqneg_s8_z(<vscale x 16 x i1> %pg, double %z0, <vscale x 16 x i8> %x) {
-; CHECK-LABEL: test_svqneg_s8_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    sqneg z0.b, p0/m, z1.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqneg_s8_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqneg z0.b, p0/m, z1.b
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqneg_s8_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqneg z0.b, p0/z, z1.b
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqneg_s8_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.b, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqneg z0.b, p0/m, z1.b
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqneg.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %x)
   ret <vscale x 16 x i8> %0
@@ -368,16 +410,22 @@ entry:
 }
 
 define <vscale x 8 x i16> @test_svqneg_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svqneg_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    sqneg z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqneg_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqneg z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqneg_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqneg z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqneg_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqneg z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqneg.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x i16> %0
@@ -415,16 +463,22 @@ entry:
 }
 
 define <vscale x 4 x i32> @test_svqneg_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svqneg_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    sqneg z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqneg_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqneg z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqneg_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqneg z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqneg_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqneg z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqneg.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x i32> %0
@@ -462,16 +516,22 @@ entry:
 }
 
 define <vscale x 2 x i64> @test_svqneg_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svqneg_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    sqneg z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svqneg_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    sqneg z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svqneg_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    sqneg z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svqneg_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    sqneg z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqneg.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x i64> %0

diff  --git a/llvm/test/CodeGen/AArch64/zeroing-forms-uscvtf.ll b/llvm/test/CodeGen/AArch64/zeroing-forms-uscvtf.ll
index fd0126f3166dd..e5447071cbfd2 100644
--- a/llvm/test/CodeGen/AArch64/zeroing-forms-uscvtf.ll
+++ b/llvm/test/CodeGen/AArch64/zeroing-forms-uscvtf.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mattr=+sve    < %s | FileCheck %s
+; RUN: llc -mattr=+sve    < %s | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc -mattr=+sve2p2 < %s | FileCheck %s -check-prefix CHECK-2p2
 
-; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s
+; RUN: llc -mattr=+sme    -force-streaming < %s | FileCheck %s -check-prefixes=CHECK,STREAMING-SVE
 ; RUN: llc -mattr=+sme2p2 -force-streaming < %s | FileCheck %s -check-prefix CHECK-2p2
 
 target triple = "aarch64-linux"
@@ -38,16 +38,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_scvtf_f32_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_scvtf_f32_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_scvtf_f32_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_scvtf_f32_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_scvtf_f32_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 4 x float> %0
@@ -84,16 +90,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_scvtf_f64_s32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_scvtf_f64_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_scvtf_f64_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_scvtf_f64_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_scvtf_f64_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 2 x double> %0
@@ -130,16 +142,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_scvtf_f16_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_scvtf_f16_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.h, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_scvtf_f16_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.h, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_scvtf_f16_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.h, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_scvtf_f16_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.h, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 8 x half> %0
@@ -176,16 +194,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_scvtf_f16_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_scvtf_f16_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.h, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_scvtf_f16_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.h, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_scvtf_f16_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.h, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_scvtf_f16_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.h, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 8 x half> %0
@@ -222,16 +246,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_ucvtf_f32_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_ucvtf_f32_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_ucvtf_f32_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.s, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_ucvtf_f32_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.s, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_ucvtf_f32_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.s, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 4 x float> %0
@@ -268,16 +298,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_ucvtf_f64_u32_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_ucvtf_f64_u32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_ucvtf_f64_u32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.d, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_ucvtf_f64_u32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.d, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_ucvtf_f64_u32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.d, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 2 x double> %0
@@ -314,16 +350,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_ucvtf_f16_u32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_ucvtf_f16_u32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_ucvtf_f16_u32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.h, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_ucvtf_f16_u32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.h, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_ucvtf_f16_u32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.h, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 8 x half> %0
@@ -360,16 +402,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_ucvtf_f16_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_ucvtf_f16_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_ucvtf_f16_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.h, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_ucvtf_f16_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.h, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_ucvtf_f16_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.h, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 8 x half> %0
@@ -407,16 +455,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svcvt_f16_s16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svcvt_f16_s16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f16_s16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f16_s16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f16_s16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x half> %0
@@ -454,16 +508,22 @@ entry:
 }
 
 define <vscale x 8 x half> @test_svcvt_f16_u16_z(<vscale x 8 x i1> %pg, double %z0, <vscale x 8 x i16> %x) {
-; CHECK-LABEL: test_svcvt_f16_u16_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f16_u16_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.h, p0/m, z1.h
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f16_u16_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.h, p0/z, z1.h
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f16_u16_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.h, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.h, p0/m, z1.h
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %x)
   ret <vscale x 8 x half> %0
@@ -501,16 +561,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvt_f32_s32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svcvt_f32_s32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f32_s32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f32_s32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f32_s32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x float> %0
@@ -548,16 +614,22 @@ entry:
 }
 
 define <vscale x 4 x float> @test_svcvt_f32_u32_z(<vscale x 4 x i1> %pg, double %z0, <vscale x 4 x i32> %x) {
-; CHECK-LABEL: test_svcvt_f32_u32_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f32_u32_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.s, p0/m, z1.s
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f32_u32_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.s, p0/z, z1.s
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f32_u32_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.s, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.s, p0/m, z1.s
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %x)
   ret <vscale x 4 x float> %0
@@ -595,16 +667,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svcvt_f64_s64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svcvt_f64_s64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    scvtf z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f64_s64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    scvtf z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f64_s64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    scvtf z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f64_s64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    scvtf z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x double> %0
@@ -642,16 +720,22 @@ entry:
 }
 
 define <vscale x 2 x double> @test_svcvt_f64_u64_z(<vscale x 2 x i1> %pg, double %z0, <vscale x 2 x i64> %x) {
-; CHECK-LABEL: test_svcvt_f64_u64_z:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: test_svcvt_f64_u64_z:
+; SVE:       // %bb.0: // %entry
+; SVE-NEXT:    movi v0.2d, #0000000000000000
+; SVE-NEXT:    ucvtf z0.d, p0/m, z1.d
+; SVE-NEXT:    ret
 ;
 ; CHECK-2p2-LABEL: test_svcvt_f64_u64_z:
 ; CHECK-2p2:       // %bb.0: // %entry
 ; CHECK-2p2-NEXT:    ucvtf z0.d, p0/z, z1.d
 ; CHECK-2p2-NEXT:    ret
+;
+; STREAMING-SVE-LABEL: test_svcvt_f64_u64_z:
+; STREAMING-SVE:       // %bb.0: // %entry
+; STREAMING-SVE-NEXT:    mov z0.d, #0 // =0x0
+; STREAMING-SVE-NEXT:    ucvtf z0.d, p0/m, z1.d
+; STREAMING-SVE-NEXT:    ret
 entry:
   %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %x)
   ret <vscale x 2 x double> %0


        


More information about the llvm-commits mailing list