[llvm] 22eef90 - [AArch64][SVE][NFC] Add streaming mode SVE tests

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 10 05:51:17 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-10T13:51:09Z
New Revision: 22eef90be5822a0b86e4580b6d948ee140e0d2e9

URL: https://github.com/llvm/llvm-project/commit/22eef90be5822a0b86e4580b6d948ee140e0d2e9
DIFF: https://github.com/llvm/llvm-project/commit/22eef90be5822a0b86e4580b6d948ee140e0d2e9.diff

LOG: [AArch64][SVE][NFC] Add streaming mode SVE tests

Add sve-fixed-length test files and enable the streaming-compatible SVE flag for:
and-combine.ll
bitcast.ll
reshuffle.ll
rev.ll
sdiv-pow2.ll
splat-vector.ll
int-extends.ll

Differential Revision: https://reviews.llvm.org/D137093
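
All of the new files share the same scaffolding: a RUN line that pipes the
test through llc with -force-streaming-compatible-sve, an aarch64 Linux
triple, and functions tagged with the +sve target feature. A minimal sketch
of that shared skeleton (the function name and body below are illustrative,
not taken from the patch):

    ; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
    target triple = "aarch64-unknown-linux-gnu"

    ; Any fixed-length vector operation to exercise in streaming mode.
    define <4 x i32> @example(<4 x i32> %a) #0 {
      %r = add <4 x i32> %a, %a
      ret <4 x i32> %r
    }

    attributes #0 = { "target-features"="+sve" }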

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
new file mode 100644
index 000000000000..6527aa419911
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-and-combine.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; i8
+define <4 x i8> @vls_sve_and_4xi8(<4 x i8> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_4xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <4 x i8> %b, <i8 0, i8 255, i8 0, i8 255>
+ ret <4 x i8> %c
+}
+
+define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_8xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <8 x i8> %b, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <8 x i8> %c
+}
+
+define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_16xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <16 x i8> %b, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <16 x i8> %c
+}
+
+define <32 x i8> @vls_sve_and_32xi8(<32 x i8> %ap) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_32xi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ret
+ %b = and <32 x i8> %ap, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
+                         i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <32 x i8> %b
+}
+
+; i16
+define <2 x i16> @vls_sve_and_2xi16(<2 x i16> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_2xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    stp wzr, w8, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+ %c = and <2 x i16> %b, <i16 0, i16 65535>
+ ret <2 x i16> %c
+}
+
+define <4 x i16> @vls_sve_and_4xi16(<4 x i16> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_4xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <4 x i16> %b, <i16 0, i16 65535, i16 0, i16 65535>
+ ret <4 x i16> %c
+}
+
+define <8 x i16> @vls_sve_and_8xi16(<8 x i16> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_8xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <8 x i16> %b, <i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535>
+ ret <8 x i16> %c
+}
+
+define <16 x i16> @vls_sve_and_16xi16(<16 x i16> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_16xi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ret
+ %c = and <16 x i16> %b, <i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535, i16 0, i16 65535>
+ ret <16 x i16> %c
+}
+
+; i32
+define <2 x i32> @vls_sve_and_2xi32(<2 x i32> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_2xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    index z1.s, #0, #-1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <2 x i32> %b, <i32 0, i32 4294967295>
+ ret <2 x i32> %c
+}
+
+define <4 x i32> @vls_sve_and_4xi32(<4 x i32> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_4xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <4 x i32> %b, <i32 0, i32 4294967295, i32 0, i32 4294967295>
+ ret <4 x i32> %c
+}
+
+define <8 x i32> @vls_sve_and_8xi32(<8 x i32> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_8xi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI10_0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ret
+ %c = and <8 x i32> %b, <i32 0, i32 4294967295, i32 0, i32 4294967295, i32 0, i32 4294967295, i32 0, i32 4294967295>
+ ret <8 x i32> %c
+}
+
+; i64
+define <2 x i64> @vls_sve_and_2xi64(<2 x i64> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_2xi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    index z1.d, #0, #-1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+ %c = and <2 x i64> %b, <i64 0, i64 18446744073709551615>
+ ret <2 x i64> %c
+}
+
+define <4 x i64> @vls_sve_and_4xi64(<4 x i64> %b) nounwind #0 {
+; CHECK-LABEL: vls_sve_and_4xi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    index z2.d, #0, #-1
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
+; CHECK-NEXT:    ret
+ %c = and <4 x i64> %b, <i64 0, i64 18446744073709551615, i64 0, i64 18446744073709551615>
+ ret <4 x i64> %c
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
new file mode 100644
index 000000000000..3bed6b6c178b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @bitcast_v4i8(<4 x i8> *%a, <4 x i8>* %b) #0 {
+; CHECK-LABEL: bitcast_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <4 x i8>, <4 x i8>* %a
+  %cast = bitcast <4 x i8> %load to <4 x i8>
+  store volatile <4 x i8> %cast, <4 x i8>* %b
+  ret void
+}
+
+define void @bitcast_v8i8(<8 x i8> *%a, <8 x i8>* %b) #0 {
+; CHECK-LABEL: bitcast_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <8 x i8>, <8 x i8>* %a
+  %cast = bitcast <8 x i8> %load to <8 x i8>
+  store volatile <8 x i8> %cast, <8 x i8>* %b
+  ret void
+}
+
+define void @bitcast_v16i8(<16 x i8> *%a, <16 x i8>* %b) #0 {
+; CHECK-LABEL: bitcast_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <16 x i8>, <16 x i8>* %a
+  %cast = bitcast <16 x i8> %load to <16 x i8>
+  store volatile <16 x i8> %cast, <16 x i8>* %b
+  ret void
+}
+
+define void @bitcast_v32i8(<32 x i8> *%a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: bitcast_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    str q1, [x1, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <32 x i8>, <32 x i8>* %a
+  %cast = bitcast <32 x i8> %load to <32 x i8>
+  store volatile <32 x i8> %cast, <32 x i8>* %b
+  ret void
+}
+
+define void @bitcast_v2i16(<2 x i16> *%a, <2 x half>* %b) #0 {
+; CHECK-LABEL: bitcast_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldrh w8, [x0, #2]
+; CHECK-NEXT:    str w8, [sp, #4]
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    str w8, [sp]
+; CHECK-NEXT:    ldr d0, [sp]
+; CHECK-NEXT:    mov z1.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    strh w8, [sp, #8]
+; CHECK-NEXT:    strh w9, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %load = load volatile <2 x i16>, <2 x i16>* %a
+  %cast = bitcast <2 x i16> %load to <2 x half>
+  store volatile <2 x half> %cast, <2 x half>* %b
+  ret void
+}
+
+define void @bitcast_v4i16(<4 x i16> *%a, <4 x half>* %b) #0 {
+; CHECK-LABEL: bitcast_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <4 x i16>, <4 x i16>* %a
+  %cast = bitcast <4 x i16> %load to <4 x half>
+  store volatile <4 x half> %cast, <4 x half>* %b
+  ret void
+}
+
+define void @bitcast_v8i16(<8 x i16> *%a, <8 x half>* %b) #0 {
+; CHECK-LABEL: bitcast_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <8 x i16>, <8 x i16>* %a
+  %cast = bitcast <8 x i16> %load to <8 x half>
+  store volatile <8 x half> %cast, <8 x half>* %b
+  ret void
+}
+
+define void @bitcast_v16i16(<16 x i16> *%a, <16 x half>* %b) #0 {
+; CHECK-LABEL: bitcast_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    str q1, [x1, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <16 x i16>, <16 x i16>* %a
+  %cast = bitcast <16 x i16> %load to <16 x half>
+  store volatile <16 x half> %cast, <16 x half>* %b
+  ret void
+}
+
+define void @bitcast_v2i32(<2 x i32> *%a, <2 x float>* %b) #0 {
+; CHECK-LABEL: bitcast_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <2 x i32>, <2 x i32>* %a
+  %cast = bitcast <2 x i32> %load to <2 x float>
+  store volatile <2 x float> %cast, <2 x float>* %b
+  ret void
+}
+
+define void @bitcast_v4i32(<4 x i32> *%a, <4 x float>* %b) #0 {
+; CHECK-LABEL: bitcast_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <4 x i32>, <4 x i32>* %a
+  %cast = bitcast <4 x i32> %load to <4 x float>
+  store volatile <4 x float> %cast, <4 x float>* %b
+  ret void
+}
+
+define void @bitcast_v8i32(<8 x i32> *%a, <8 x float>* %b) #0 {
+; CHECK-LABEL: bitcast_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    str q1, [x1, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <8 x i32>, <8 x i32>* %a
+  %cast = bitcast <8 x i32> %load to <8 x float>
+  store volatile <8 x float> %cast, <8 x float>* %b
+  ret void
+}
+
+define void @bitcast_v1i64(<1 x i64> *%a, <1 x double>* %b) #0 {
+; CHECK-LABEL: bitcast_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <1 x i64>, <1 x i64>* %a
+  %cast = bitcast <1 x i64> %load to <1 x double>
+  store volatile <1 x double> %cast, <1 x double>* %b
+  ret void
+}
+
+define void @bitcast_v2i64(<2 x i64> *%a, <2 x double>* %b) #0 {
+; CHECK-LABEL: bitcast_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <2 x i64>, <2 x i64>* %a
+  %cast = bitcast <2 x i64> %load to <2 x double>
+  store volatile <2 x double> %cast, <2 x double>* %b
+  ret void
+}
+
+define void @bitcast_v4i64(<4 x i64> *%a, <4 x double>* %b) #0 {
+; CHECK-LABEL: bitcast_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    str q1, [x1, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %load = load volatile <4 x i64>, <4 x i64>* %a
+  %cast = bitcast <4 x i64> %load to <4 x double>
+  store volatile <4 x double> %cast, <4 x double>* %b
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
new file mode 100644
index 000000000000..8bbd8a359ecd
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
@@ -0,0 +1,906 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; sext i1 -> i32
+;
+
+; NOTE: Covers the scenario where a SIGN_EXTEND_INREG is required, whose inreg
+; type's element type is not byte-based and thus cannot be lowered directly to
+; an SVE instruction.
+define void @sext_v8i1_v8i32(<8 x i1> %a, <8 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v8i1_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z2.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    lsl z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asr z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <8 x i1> %a to <8 x i32>
+  store <8 x i32> %b, <8 x i32>* %out
+  ret void
+}
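+
+; (Editorial note, not part of the original test: the lsl/asr pair above is
+; the generic SIGN_EXTEND_INREG expansion. For an i1 payload in an i32 lane,
+; sext_inreg(x, i1) == ashr(shl(x, 31), 31), with the splatted shift amount
+; 31 taken from the .LCPI0_0 constant pool entry.)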
+
+;
+; sext i3 -> i64
+;
+
+; NOTE: Covers the scenario where a SIGN_EXTEND_INREG is required, whose inreg
+; type's element type is not power-of-two based and thus cannot be lowered
+; directly to an SVE instruction.
+define void @sext_v4i3_v4i64(<4 x i3> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v4i3_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    lsl z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <4 x i3> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
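+
+; (Editorial note: the same shl/ashr expansion as above, here with a shift
+; amount of 64 - 3 = 61 for the i3 payload in each 64-bit lane.)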
+
+;
+; sext i8 -> i16
+;
+
+define void @sext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
+; CHECK-LABEL: sext_v16i8_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <16 x i8> %a to <16 x i16>
+  store <16 x i16> %b, <16 x i16>* %out
+  ret void
+}
+
+; NOTE: The extra 'add' prevents the extend from being combined with the load.
+define void @sext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
+; CHECK-LABEL: sext_v32i8_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    sunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    sunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z1.h, z1.b
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = sext <32 x i8> %b to <32 x i16>
+  store <32 x i16> %c, <32 x i16>* %out
+  ret void
+}
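+
+; (Editorial note: without the 'add', the sext of the freshly loaded value
+; could be folded into a sign-extending load, leaving the extend lowering
+; itself untested.)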
+
+;
+; sext i8 -> i32
+;
+
+define void @sext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v8i8_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <8 x i8> %a to <8 x i32>
+  store <8 x i32> %b, <8 x i32>* %out
+  ret void
+}
+
+define void @sext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v16i8_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    sunpklo z0.s, z1.h
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <16 x i8> %a to <16 x i32>
+  store <16 x i32> %b, <16 x i32>* %out
+  ret void
+}
+
+define void @sext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v32i8_v32i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    sunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    sunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z5.s, z3.h
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z1.h, z1.b
+; CHECK-NEXT:    sunpklo z3.s, z3.h
+; CHECK-NEXT:    sunpklo z4.s, z2.h
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z6.s, z0.h
+; CHECK-NEXT:    stp q5, q3, [x1, #64]
+; CHECK-NEXT:    sunpklo z5.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z2.s, z2.h
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q4, q2, [x1]
+; CHECK-NEXT:    stp q6, q0, [x1, #32]
+; CHECK-NEXT:    stp q5, q1, [x1, #96]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = sext <32 x i8> %b to <32 x i32>
+  store <32 x i32> %c, <32 x i32>* %out
+  ret void
+}
+
+;
+; sext i8 -> i64
+;
+
+; NOTE: v4i8 is an unpacked type stored within a v4i16 container. The sign
+; extend is a two-step process where the container is any_extend'd with the
+; result feeding an inreg sign extend.
+define void @sext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v4i8_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    lsl z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    asr z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <4 x i8> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
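+
+; (Editorial note: the uunpklo chain performs the any_extend of the v4i16
+; container, and the shl/ashr pair then sign-extends the i8 payload
+; in-register, with a shift amount of 64 - 8 = 56 in each 64-bit lane.)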
+
+define void @sext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v8i8_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z2.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z1.d, z1.s
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q2, q1, [x0]
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    ret
+  %b = sext <8 x i8> %a to <8 x i64>
+  store <8 x i64> %b, <8 x i64>* %out
+  ret void
+}
+
+define void @sext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v16i8_v16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z2.s, z1.h
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    sunpklo z6.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z4.d, z2.s
+; CHECK-NEXT:    sunpklo z5.d, z3.s
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z7.d, z1.s
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.d, z3.s
+; CHECK-NEXT:    sunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q5, q3, [x0, #64]
+; CHECK-NEXT:    stp q4, q2, [x0]
+; CHECK-NEXT:    stp q6, q0, [x0, #96]
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    stp q7, q0, [x0, #32]
+; CHECK-NEXT:    ret
+  %b = sext <16 x i8> %a to <16 x i64>
+  store <16 x i64> %b, <16 x i64>* %out
+  ret void
+}
+
+define void @sext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v32i8_v32i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    sunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    sunpklo z0.h, z0.b
+; CHECK-NEXT:    sunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z5.s, z3.h
+; CHECK-NEXT:    sunpklo z1.h, z1.b
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z4.s, z2.h
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z6.s, z0.h
+; CHECK-NEXT:    sunpklo z7.s, z1.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.s, z3.h
+; CHECK-NEXT:    sunpklo z2.s, z2.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    sunpklo z20.d, z3.s
+; CHECK-NEXT:    sunpklo z22.d, z4.s
+; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z16.d, z2.s
+; CHECK-NEXT:    sunpklo z17.d, z6.s
+; CHECK-NEXT:    sunpklo z18.d, z0.s
+; CHECK-NEXT:    sunpklo z19.d, z1.s
+; CHECK-NEXT:    sunpklo z21.d, z7.s
+; CHECK-NEXT:    sunpklo z23.d, z5.s
+; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
+; CHECK-NEXT:    sunpklo z4.d, z4.s
+; CHECK-NEXT:    ext z7.b, z7.b, z7.b, #8
+; CHECK-NEXT:    sunpklo z3.d, z3.s
+; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z5.d, z5.s
+; CHECK-NEXT:    stp q22, q4, [x1]
+; CHECK-NEXT:    sunpklo z4.d, z7.s
+; CHECK-NEXT:    stp q23, q5, [x1, #128]
+; CHECK-NEXT:    sunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q20, q3, [x1, #160]
+; CHECK-NEXT:    sunpklo z3.d, z6.s
+; CHECK-NEXT:    sunpklo z1.d, z1.s
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q16, q2, [x1, #32]
+; CHECK-NEXT:    stp q17, q3, [x1, #64]
+; CHECK-NEXT:    stp q18, q0, [x1, #96]
+; CHECK-NEXT:    stp q21, q4, [x1, #192]
+; CHECK-NEXT:    stp q19, q1, [x1, #224]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = sext <32 x i8> %b to <32 x i64>
+  store <32 x i64> %c, <32 x i64>* %out
+  ret void
+}
+
+;
+; sext i16 -> i32
+;
+
+define void @sext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v8i16_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <8 x i16> %a to <8 x i32>
+  store <8 x i32> %b, <8 x i32>* %out
+  ret void
+}
+
+define void @sext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
+; CHECK-LABEL: sext_v16i16_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    sunpklo z2.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z3.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %in
+  %b = add <16 x i16> %a, %a
+  %c = sext <16 x i16> %b to <16 x i32>
+  store <16 x i32> %c, <16 x i32>* %out
+  ret void
+}
+
+;
+; sext i16 -> i64
+;
+
+define void @sext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v4i16_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z1.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <4 x i16> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
+
+define void @sext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v8i16_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z2.d, z1.s
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z3.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    sunpklo z0.d, z1.s
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <8 x i16> %a to <8 x i64>
+  store <8 x i64> %b, <8 x i64>* %out
+  ret void
+}
+
+define void @sext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v16i16_v16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    sunpklo z2.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    sunpklo z0.s, z0.h
+; CHECK-NEXT:    sunpklo z3.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z5.d, z3.s
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    sunpklo z1.s, z1.h
+; CHECK-NEXT:    sunpklo z3.d, z3.s
+; CHECK-NEXT:    sunpklo z4.d, z2.s
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    sunpklo z6.d, z0.s
+; CHECK-NEXT:    stp q5, q3, [x1, #64]
+; CHECK-NEXT:    sunpklo z5.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z2.d, z2.s
+; CHECK-NEXT:    sunpklo z1.d, z1.s
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q4, q2, [x1]
+; CHECK-NEXT:    stp q6, q0, [x1, #32]
+; CHECK-NEXT:    stp q5, q1, [x1, #96]
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %in
+  %b = add <16 x i16> %a, %a
+  %c = sext <16 x i16> %b to <16 x i64>
+  store <16 x i64> %c, <16 x i64>* %out
+  ret void
+}
+
+;
+; sext i32 -> i64
+;
+
+define void @sext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v4i32_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    sunpklo z1.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = sext <4 x i32> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
+
+define void @sext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
+; CHECK-LABEL: sext_v8i32_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    sunpklo z2.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.s, z1.s, z1.s
+; CHECK-NEXT:    sunpklo z0.d, z0.s
+; CHECK-NEXT:    sunpklo z3.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    sunpklo z1.d, z1.s
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %in
+  %b = add <8 x i32> %a, %a
+  %c = sext <8 x i32> %b to <8 x i64>
+  store <8 x i64> %c, <8 x i64>* %out
+  ret void
+}
+
+;
+; zext i8 -> i16
+;
+
+define void @zext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
+; CHECK-LABEL: zext_v16i8_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <16 x i8> %a to <16 x i16>
+  store <16 x i16> %b, <16 x i16>* %out
+  ret void
+}
+
+; NOTE: The extra 'add' prevents the extend from being combined with the load.
+define void @zext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
+; CHECK-LABEL: zext_v32i8_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    uunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z1.h, z1.b
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = zext <32 x i8> %b to <32 x i16>
+  store <32 x i16> %c, <32 x i16>* %out
+  ret void
+}
+
+;
+; zext i8 -> i32
+;
+
+define void @zext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
+; CHECK-LABEL: zext_v8i8_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <8 x i8> %a to <8 x i32>
+  store <8 x i32> %b, <8 x i32>* %out
+  ret void
+}
+
+define void @zext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
+; CHECK-LABEL: zext_v16i8_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    uunpklo z0.s, z1.h
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <16 x i8> %a to <16 x i32>
+  store <16 x i32> %b, <16 x i32>* %out
+  ret void
+}
+
+define void @zext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
+; CHECK-LABEL: zext_v32i8_v32i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    uunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z5.s, z3.h
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z1.h, z1.b
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z4.s, z2.h
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z6.s, z0.h
+; CHECK-NEXT:    stp q5, q3, [x1, #64]
+; CHECK-NEXT:    uunpklo z5.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q4, q2, [x1]
+; CHECK-NEXT:    stp q6, q0, [x1, #32]
+; CHECK-NEXT:    stp q5, q1, [x1, #96]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = zext <32 x i8> %b to <32 x i32>
+  store <32 x i32> %c, <32 x i32>* %out
+  ret void
+}
+
+;
+; zext i8 -> i64
+;
+
+; NOTE: v4i8 is an unpacked type stored within a v4i16 container. The zero
+; extend is a two-step process where the container is zero_extend_inreg'd with
+; the result feeding a normal zero extend from halfwords to doublewords.
+define void @zext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v4i8_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI23_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI23_0]
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <4 x i8> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
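+
+; (Editorial note: here the in-register zero extend is the AND with a splat
+; of 0xff loaded from .LCPI23_0, after which the uunpklo chain performs the
+; ordinary halfword-to-doubleword widening.)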
+
+define void @zext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v8i8_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q2, q1, [x0]
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    ret
+  %b = zext <8 x i8> %a to <8 x i64>
+  store <8 x i64> %b, <8 x i64>* %out
+  ret void
+}
+
+define void @zext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v16i8_v16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z2.s, z1.h
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z6.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    uunpklo z5.d, z3.s
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z7.d, z1.s
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q5, q3, [x0, #64]
+; CHECK-NEXT:    stp q4, q2, [x0]
+; CHECK-NEXT:    stp q6, q0, [x0, #96]
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    stp q7, q0, [x0, #32]
+; CHECK-NEXT:    ret
+  %b = zext <16 x i8> %a to <16 x i64>
+  store <16 x i64> %b, <16 x i64>* %out
+  ret void
+}
+
+define void @zext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v32i8_v32i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    uunpklo z2.h, z0.b
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.b, z1.b, z1.b
+; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    uunpklo z3.h, z1.b
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z5.s, z3.h
+; CHECK-NEXT:    uunpklo z1.h, z1.b
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z4.s, z2.h
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z6.s, z0.h
+; CHECK-NEXT:    uunpklo z7.s, z1.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.s, z3.h
+; CHECK-NEXT:    uunpklo z2.s, z2.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z20.d, z3.s
+; CHECK-NEXT:    uunpklo z22.d, z4.s
+; CHECK-NEXT:    ext z4.b, z4.b, z4.b, #8
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z16.d, z2.s
+; CHECK-NEXT:    uunpklo z17.d, z6.s
+; CHECK-NEXT:    uunpklo z18.d, z0.s
+; CHECK-NEXT:    uunpklo z19.d, z1.s
+; CHECK-NEXT:    uunpklo z21.d, z7.s
+; CHECK-NEXT:    uunpklo z23.d, z5.s
+; CHECK-NEXT:    ext z5.b, z5.b, z5.b, #8
+; CHECK-NEXT:    uunpklo z4.d, z4.s
+; CHECK-NEXT:    ext z7.b, z7.b, z7.b, #8
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    ext z6.b, z6.b, z6.b, #8
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z5.d, z5.s
+; CHECK-NEXT:    stp q22, q4, [x1]
+; CHECK-NEXT:    uunpklo z4.d, z7.s
+; CHECK-NEXT:    stp q23, q5, [x1, #128]
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    stp q20, q3, [x1, #160]
+; CHECK-NEXT:    uunpklo z3.d, z6.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q16, q2, [x1, #32]
+; CHECK-NEXT:    stp q17, q3, [x1, #64]
+; CHECK-NEXT:    stp q18, q0, [x1, #96]
+; CHECK-NEXT:    stp q21, q4, [x1, #192]
+; CHECK-NEXT:    stp q19, q1, [x1, #224]
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %in
+  %b = add <32 x i8> %a, %a
+  %c = zext <32 x i8> %b to <32 x i64>
+  store <32 x i64> %c, <32 x i64>* %out
+  ret void
+}
+
+;
+; zext i16 -> i32
+;
+
+define void @zext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
+; CHECK-LABEL: zext_v8i16_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <8 x i16> %a to <8 x i32>
+  store <8 x i32> %b, <8 x i32>* %out
+  ret void
+}
+
+define void @zext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
+; CHECK-LABEL: zext_v16i16_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    uunpklo z2.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z3.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %in
+  %b = add <16 x i16> %a, %a
+  %c = zext <16 x i16> %b to <16 x i32>
+  store <16 x i32> %c, <16 x i32>* %out
+  ret void
+}
+
+;
+; zext i16 -> i64
+;
+
+define void @zext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v4i16_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <4 x i16> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
+
+define void @zext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v8i16_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q3, q0, [x0, #32]
+; CHECK-NEXT:    uunpklo z0.d, z1.s
+; CHECK-NEXT:    stp q2, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <8 x i16> %a to <8 x i64>
+  store <8 x i64> %b, <8 x i64>* %out
+  ret void
+}
+
+define void @zext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v16i16_v16i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    uunpklo z2.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.h, z1.h, z1.h
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    uunpklo z3.s, z1.h
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z5.d, z3.s
+; CHECK-NEXT:    ext z3.b, z3.b, z3.b, #8
+; CHECK-NEXT:    uunpklo z1.s, z1.h
+; CHECK-NEXT:    uunpklo z3.d, z3.s
+; CHECK-NEXT:    uunpklo z4.d, z2.s
+; CHECK-NEXT:    ext z2.b, z2.b, z2.b, #8
+; CHECK-NEXT:    uunpklo z6.d, z0.s
+; CHECK-NEXT:    stp q5, q3, [x1, #64]
+; CHECK-NEXT:    uunpklo z5.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z2.d, z2.s
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q4, q2, [x1]
+; CHECK-NEXT:    stp q6, q0, [x1, #32]
+; CHECK-NEXT:    stp q5, q1, [x1, #96]
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %in
+  %b = add <16 x i16> %a, %a
+  %c = zext <16 x i16> %b to <16 x i64>
+  store <16 x i64> %c, <16 x i64>* %out
+  ret void
+}
+
+;
+; zext i32 -> i64
+;
+
+define void @zext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v4i32_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %b = zext <4 x i32> %a to <4 x i64>
+  store <4 x i64> %b, <4 x i64>* %out
+  ret void
+}
+
+define void @zext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
+; CHECK-LABEL: zext_v8i32_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    uunpklo z2.d, z0.s
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    add z1.s, z1.s, z1.s
+; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    uunpklo z3.d, z1.s
+; CHECK-NEXT:    ext z1.b, z1.b, z1.b, #8
+; CHECK-NEXT:    uunpklo z1.d, z1.s
+; CHECK-NEXT:    stp q2, q0, [x1]
+; CHECK-NEXT:    stp q3, q1, [x1, #32]
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %in
+  %b = add <8 x i32> %a, %a
+  %c = zext <8 x i32> %b to <8 x i64>
+  store <8 x i64> %c, <8 x i64>* %out
+  ret void
+}
+
+attributes #0 = { nounwind "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
new file mode 100644
index 000000000000..546d08e6bad3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-reshuffle.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; == Matching first N elements ==
+
+define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) #0 {
+; CHECK-LABEL: reshuffle_v4i1_nxv4i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    mov z1.s, z0.s[3]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    mov z2.s, z0.s[2]
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w9, s1
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    fmov w11, s0
+; CHECK-NEXT:    strh w8, [sp, #8]
+; CHECK-NEXT:    strh w9, [sp, #14]
+; CHECK-NEXT:    strh w10, [sp, #12]
+; CHECK-NEXT:    strh w11, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %el0 = extractelement <vscale x 4 x i1> %a, i32 0
+  %el1 = extractelement <vscale x 4 x i1> %a, i32 1
+  %el2 = extractelement <vscale x 4 x i1> %a, i32 2
+  %el3 = extractelement <vscale x 4 x i1> %a, i32 3
+  %v0 = insertelement <4 x i1> undef, i1 %el0, i32 0
+  %v1 = insertelement <4 x i1> %v0, i1 %el1, i32 1
+  %v2 = insertelement <4 x i1> %v1, i1 %el2, i32 2
+  %v3 = insertelement <4 x i1> %v2, i1 %el3, i32 3
+  ret <4 x i1> %v3
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
new file mode 100644
index 000000000000..62d18bd92b0a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
@@ -0,0 +1,618 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; RBIT
+;
+
+define <4 x i8> @bitreverse_v4i8(<4 x i8> %op) #0 {
+; CHECK-LABEL: bitreverse_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.bitreverse.v4i8(<4 x i8> %op)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @bitreverse_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: bitreverse_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %op)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @bitreverse_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: bitreverse_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %op)
+  ret <16 x i8> %res
+}
+
+define void @bitreverse_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: bitreverse_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    rbit z1.b, p0/m, z1.b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %op)
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define <2 x i16> @bitreverse_v2i16(<2 x i16> %op) #0 {
+; CHECK-LABEL: bitreverse_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %op)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @bitreverse_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: bitreverse_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @bitreverse_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: bitreverse_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @bitreverse_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: bitreverse_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    rbit z1.h, p0/m, z1.h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @bitreverse_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: bitreverse_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @bitreverse_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: bitreverse_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @bitreverse_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: bitreverse_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    rbit z1.s, p0/m, z1.s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @bitreverse_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: bitreverse_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <1 x i64> @llvm.bitreverse.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @bitreverse_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: bitreverse_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @bitreverse_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: bitreverse_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    rbit z1.d, p0/m, z1.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+;
+; REVB
+;
+
+define <2 x i16> @bswap_v2i16(<2 x i16> %op) #0 {
+; CHECK-LABEL: bswap_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NEXT:    adrp x9, .LCPI14_1
+; CHECK-NEXT:    adrp x10, .LCPI14_2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT:    adrp x8, .LCPI14_3
+; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI14_1]
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT:    ldr d3, [x10, :lo12:.LCPI14_2]
+; CHECK-NEXT:    movprfx z6, z0
+; CHECK-NEXT:    lsr z6.s, p0/m, z6.s, z2.s
+; CHECK-NEXT:    ldr d4, [x8, :lo12:.LCPI14_3]
+; CHECK-NEXT:    adrp x8, .LCPI14_4
+; CHECK-NEXT:    lslr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    and z2.d, z6.d, z3.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    ldr d3, [x8, :lo12:.LCPI14_4]
+; CHECK-NEXT:    orr z2.d, z2.d, z5.d
+; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z3.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %op)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @bswap_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: bswap_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI15_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    lsr z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @bswap_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: bswap_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI16_0
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_0]
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    lsr z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @bswap_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: bswap_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI17_0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q0, [x0]
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI17_0]
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    lsr z3.h, p0/m, z3.h, z1.h
+; CHECK-NEXT:    movprfx z4, z2
+; CHECK-NEXT:    lsr z4.h, p0/m, z4.h, z1.h
+; CHECK-NEXT:    lsl z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    orr z1.d, z2.d, z4.d
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @bswap_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: bswap_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI18_1
+; CHECK-NEXT:    adrp x9, .LCPI18_2
+; CHECK-NEXT:    adrp x10, .LCPI18_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI18_1]
+; CHECK-NEXT:    adrp x8, .LCPI18_3
+; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI18_2]
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT:    ldr d3, [x10, :lo12:.LCPI18_0]
+; CHECK-NEXT:    ldr d4, [x8, :lo12:.LCPI18_3]
+; CHECK-NEXT:    and z2.d, z5.d, z2.d
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z3.s
+; CHECK-NEXT:    lslr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z3.s
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    orr z2.d, z2.d, z5.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @bswap_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: bswap_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI19_1
+; CHECK-NEXT:    adrp x9, .LCPI19_2
+; CHECK-NEXT:    adrp x10, .LCPI19_0
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI19_1]
+; CHECK-NEXT:    adrp x8, .LCPI19_3
+; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI19_2]
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT:    ldr q3, [x10, :lo12:.LCPI19_0]
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI19_3]
+; CHECK-NEXT:    and z2.d, z5.d, z2.d
+; CHECK-NEXT:    movprfx z5, z0
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z3.s
+; CHECK-NEXT:    lslr z1.s, p0/m, z1.s, z0.s
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z3.s
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    orr z2.d, z2.d, z5.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @bswap_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: bswap_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI20_0
+; CHECK-NEXT:    adrp x9, .LCPI20_1
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI20_0]
+; CHECK-NEXT:    adrp x8, .LCPI20_2
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI20_1]
+; CHECK-NEXT:    movprfx z5, z2
+; CHECK-NEXT:    lsr z5.s, p0/m, z5.s, z0.s
+; CHECK-NEXT:    movprfx z6, z2
+; CHECK-NEXT:    lsr z6.s, p0/m, z6.s, z1.s
+; CHECK-NEXT:    movprfx z7, z2
+; CHECK-NEXT:    lsl z7.s, p0/m, z7.s, z0.s
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI20_2]
+; CHECK-NEXT:    adrp x8, .LCPI20_3
+; CHECK-NEXT:    lsl z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT:    and z6.d, z6.d, z4.d
+; CHECK-NEXT:    ldr q16, [x8, :lo12:.LCPI20_3]
+; CHECK-NEXT:    orr z5.d, z6.d, z5.d
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    lsr z6.s, p0/m, z6.s, z1.s
+; CHECK-NEXT:    and z4.d, z6.d, z4.d
+; CHECK-NEXT:    movprfx z6, z3
+; CHECK-NEXT:    lsr z6.s, p0/m, z6.s, z0.s
+; CHECK-NEXT:    lslr z0.s, p0/m, z0.s, z3.s
+; CHECK-NEXT:    lslr z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    and z2.d, z2.d, z16.d
+; CHECK-NEXT:    and z1.d, z1.d, z16.d
+; CHECK-NEXT:    orr z3.d, z4.d, z6.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z1.d, z7.d, z2.d
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
+; CHECK-NEXT:    orr z1.d, z1.d, z5.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @bswap_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: bswap_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #56
+; CHECK-NEXT:    mov w9, #40
+; CHECK-NEXT:    mov w10, #65280
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov w8, #24
+; CHECK-NEXT:    fmov d2, x9
+; CHECK-NEXT:    mov w9, #16711680
+; CHECK-NEXT:    fmov d3, x10
+; CHECK-NEXT:    mov w10, #8
+; CHECK-NEXT:    fmov d4, x8
+; CHECK-NEXT:    mov w8, #-16777216
+; CHECK-NEXT:    fmov d5, x9
+; CHECK-NEXT:    mov x9, #1095216660480
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z2.d
+; CHECK-NEXT:    and z3.d, z16.d, z3.d
+; CHECK-NEXT:    fmov d7, x8
+; CHECK-NEXT:    mov x8, #280375465082880
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z4.d
+; CHECK-NEXT:    fmov d6, x10
+; CHECK-NEXT:    and z5.d, z16.d, z5.d
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z6.d
+; CHECK-NEXT:    fmov d18, x8
+; CHECK-NEXT:    mov x8, #71776119061217280
+; CHECK-NEXT:    and z7.d, z16.d, z7.d
+; CHECK-NEXT:    fmov d17, x9
+; CHECK-NEXT:    orr z5.d, z7.d, z5.d
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z1.d
+; CHECK-NEXT:    fmov d7, x8
+; CHECK-NEXT:    lslr z6.d, p0/m, z6.d, z0.d
+; CHECK-NEXT:    lslr z4.d, p0/m, z4.d, z0.d
+; CHECK-NEXT:    lslr z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    and z6.d, z6.d, z17.d
+; CHECK-NEXT:    and z4.d, z4.d, z18.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    and z1.d, z2.d, z7.d
+; CHECK-NEXT:    orr z3.d, z3.d, z16.d
+; CHECK-NEXT:    orr z2.d, z4.d, z6.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z1.d, z5.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <1 x i64> @llvm.bswap.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @bswap_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: bswap_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI22_0
+; CHECK-NEXT:    adrp x9, .LCPI22_1
+; CHECK-NEXT:    adrp x10, .LCPI22_2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI22_0]
+; CHECK-NEXT:    adrp x8, .LCPI22_3
+; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI22_1]
+; CHECK-NEXT:    adrp x9, .LCPI22_4
+; CHECK-NEXT:    ldr q3, [x10, :lo12:.LCPI22_2]
+; CHECK-NEXT:    adrp x10, .LCPI22_5
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI22_3]
+; CHECK-NEXT:    adrp x8, .LCPI22_6
+; CHECK-NEXT:    ldr q5, [x9, :lo12:.LCPI22_4]
+; CHECK-NEXT:    adrp x9, .LCPI22_7
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z2.d
+; CHECK-NEXT:    and z3.d, z16.d, z3.d
+; CHECK-NEXT:    ldr q7, [x8, :lo12:.LCPI22_6]
+; CHECK-NEXT:    adrp x8, .LCPI22_8
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z4.d
+; CHECK-NEXT:    ldr q6, [x10, :lo12:.LCPI22_5]
+; CHECK-NEXT:    and z5.d, z16.d, z5.d
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z6.d
+; CHECK-NEXT:    ldr q18, [x8, :lo12:.LCPI22_8]
+; CHECK-NEXT:    adrp x8, .LCPI22_9
+; CHECK-NEXT:    and z7.d, z16.d, z7.d
+; CHECK-NEXT:    ldr q17, [x9, :lo12:.LCPI22_7]
+; CHECK-NEXT:    orr z5.d, z7.d, z5.d
+; CHECK-NEXT:    movprfx z16, z0
+; CHECK-NEXT:    lsr z16.d, p0/m, z16.d, z1.d
+; CHECK-NEXT:    ldr q7, [x8, :lo12:.LCPI22_9]
+; CHECK-NEXT:    lslr z6.d, p0/m, z6.d, z0.d
+; CHECK-NEXT:    lslr z4.d, p0/m, z4.d, z0.d
+; CHECK-NEXT:    lslr z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    and z6.d, z6.d, z17.d
+; CHECK-NEXT:    and z4.d, z4.d, z18.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    and z1.d, z2.d, z7.d
+; CHECK-NEXT:    orr z3.d, z3.d, z16.d
+; CHECK-NEXT:    orr z2.d, z4.d, z6.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    orr z1.d, z5.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @bswap_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: bswap_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI23_0
+; CHECK-NEXT:    adrp x9, .LCPI23_1
+; CHECK-NEXT:    adrp x10, .LCPI23_2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI23_0]
+; CHECK-NEXT:    adrp x8, .LCPI23_4
+; CHECK-NEXT:    ldr q2, [x9, :lo12:.LCPI23_1]
+; CHECK-NEXT:    adrp x9, .LCPI23_3
+; CHECK-NEXT:    ldr q4, [x10, :lo12:.LCPI23_2]
+; CHECK-NEXT:    adrp x10, .LCPI23_5
+; CHECK-NEXT:    ldr q7, [x8, :lo12:.LCPI23_4]
+; CHECK-NEXT:    adrp x8, .LCPI23_6
+; CHECK-NEXT:    ldr q5, [x9, :lo12:.LCPI23_3]
+; CHECK-NEXT:    adrp x9, .LCPI23_7
+; CHECK-NEXT:    movprfx z6, z1
+; CHECK-NEXT:    lsr z6.d, p0/m, z6.d, z2.d
+; CHECK-NEXT:    movprfx z17, z1
+; CHECK-NEXT:    lsr z17.d, p0/m, z17.d, z3.d
+; CHECK-NEXT:    ldr q18, [x8, :lo12:.LCPI23_6]
+; CHECK-NEXT:    adrp x8, .LCPI23_8
+; CHECK-NEXT:    and z6.d, z6.d, z4.d
+; CHECK-NEXT:    ldr q16, [x10, :lo12:.LCPI23_5]
+; CHECK-NEXT:    orr z6.d, z6.d, z17.d
+; CHECK-NEXT:    ldr q17, [x9, :lo12:.LCPI23_7]
+; CHECK-NEXT:    ldr q21, [x8, :lo12:.LCPI23_8]
+; CHECK-NEXT:    adrp x8, .LCPI23_9
+; CHECK-NEXT:    movprfx z19, z1
+; CHECK-NEXT:    lsr z19.d, p0/m, z19.d, z5.d
+; CHECK-NEXT:    movprfx z20, z1
+; CHECK-NEXT:    lsr z20.d, p0/m, z20.d, z16.d
+; CHECK-NEXT:    and z19.d, z19.d, z7.d
+; CHECK-NEXT:    and z20.d, z20.d, z18.d
+; CHECK-NEXT:    orr z19.d, z20.d, z19.d
+; CHECK-NEXT:    movprfx z20, z1
+; CHECK-NEXT:    lsl z20.d, p0/m, z20.d, z16.d
+; CHECK-NEXT:    movprfx z22, z1
+; CHECK-NEXT:    lsl z22.d, p0/m, z22.d, z5.d
+; CHECK-NEXT:    ldr q23, [x8, :lo12:.LCPI23_9]
+; CHECK-NEXT:    and z20.d, z20.d, z17.d
+; CHECK-NEXT:    and z22.d, z22.d, z21.d
+; CHECK-NEXT:    orr z6.d, z19.d, z6.d
+; CHECK-NEXT:    orr z19.d, z22.d, z20.d
+; CHECK-NEXT:    movprfx z20, z1
+; CHECK-NEXT:    lsl z20.d, p0/m, z20.d, z3.d
+; CHECK-NEXT:    lsl z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    movprfx z22, z0
+; CHECK-NEXT:    lsr z22.d, p0/m, z22.d, z2.d
+; CHECK-NEXT:    and z1.d, z1.d, z23.d
+; CHECK-NEXT:    and z4.d, z22.d, z4.d
+; CHECK-NEXT:    movprfx z22, z0
+; CHECK-NEXT:    lsr z22.d, p0/m, z22.d, z3.d
+; CHECK-NEXT:    orr z1.d, z20.d, z1.d
+; CHECK-NEXT:    orr z4.d, z4.d, z22.d
+; CHECK-NEXT:    movprfx z20, z0
+; CHECK-NEXT:    lsr z20.d, p0/m, z20.d, z5.d
+; CHECK-NEXT:    movprfx z22, z0
+; CHECK-NEXT:    lsr z22.d, p0/m, z22.d, z16.d
+; CHECK-NEXT:    lslr z16.d, p0/m, z16.d, z0.d
+; CHECK-NEXT:    lslr z5.d, p0/m, z5.d, z0.d
+; CHECK-NEXT:    lslr z2.d, p0/m, z2.d, z0.d
+; CHECK-NEXT:    and z7.d, z20.d, z7.d
+; CHECK-NEXT:    and z18.d, z22.d, z18.d
+; CHECK-NEXT:    and z16.d, z16.d, z17.d
+; CHECK-NEXT:    and z5.d, z5.d, z21.d
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z3.d
+; CHECK-NEXT:    and z2.d, z2.d, z23.d
+; CHECK-NEXT:    orr z7.d, z18.d, z7.d
+; CHECK-NEXT:    orr z3.d, z5.d, z16.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    orr z2.d, z7.d, z4.d
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
+; CHECK-NEXT:    orr z1.d, z1.d, z19.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    orr z1.d, z1.d, z6.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare <4 x i8> @llvm.bitreverse.v4i8(<4 x i8>)
+declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)
+declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>)
+declare <2 x i16> @llvm.bitreverse.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.bitreverse.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
+declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
+declare <1 x i64> @llvm.bitreverse.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
+
+declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
+declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
+declare <1 x i64> @llvm.bswap.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
new file mode 100644
index 000000000000..ac941fb3a162
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
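+; Signed division by the splat constant 32 (2^5) lowers to SVE ASRD (arithmetic
+; shift right for divide) with immediate #5. For the sub-container-width v4i8
+; and v2i16 cases, an LSL/ASR pair first sign-extends each element within its
+; wider lane before the ASRD.
+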
+define <4 x i8> @sdiv_v4i8(<4 x i8> %op1) #0 {
+; CHECK-LABEL: sdiv_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <4 x i8> %op1, shufflevector (<4 x i8> insertelement (<4 x i8> poison, i8 32, i32 0), <4 x i8> poison, <4 x i32> zeroinitializer)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @sdiv_v8i8(<8 x i8> %op1) #0 {
+; CHECK-LABEL: sdiv_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <8 x i8> %op1, shufflevector (<8 x i8> insertelement (<8 x i8> poison, i8 32, i32 0), <8 x i8> poison, <8 x i32> zeroinitializer)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @sdiv_v16i8(<16 x i8> %op1) #0 {
+; CHECK-LABEL: sdiv_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #5
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <16 x i8> %op1, shufflevector (<16 x i8> insertelement (<16 x i8> poison, i8 32, i32 0), <16 x i8> poison, <16 x i32> zeroinitializer)
+  ret <16 x i8> %res
+}
+
+define void @sdiv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: sdiv_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #5
+; CHECK-NEXT:    asrd z1.b, p0/m, z1.b, #5
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %res = sdiv <32 x i8> %op1, shufflevector (<32 x i8> insertelement (<32 x i8> poison, i8 32, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer)
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define <2 x i16> @sdiv_v2i16(<2 x i16> %op1) #0 {
+; CHECK-LABEL: sdiv_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <2 x i16> %op1, shufflevector (<2 x i16> insertelement (<2 x i16> poison, i16 32, i32 0), <2 x i16> poison, <2 x i32> zeroinitializer)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @sdiv_v4i16(<4 x i16> %op1) #0 {
+; CHECK-LABEL: sdiv_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <4 x i16> %op1, shufflevector (<4 x i16> insertelement (<4 x i16> poison, i16 32, i32 0), <4 x i16> poison, <4 x i32> zeroinitializer)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @sdiv_v8i16(<8 x i16> %op1) #0 {
+; CHECK-LABEL: sdiv_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <8 x i16> %op1, shufflevector (<8 x i16> insertelement (<8 x i16> poison, i16 32, i32 0), <8 x i16> poison, <8 x i32> zeroinitializer)
+  ret <8 x i16> %res
+}
+
+define void @sdiv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: sdiv_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #5
+; CHECK-NEXT:    asrd z1.h, p0/m, z1.h, #5
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %res = sdiv <16 x i16> %op1, shufflevector (<16 x i16> insertelement (<16 x i16> poison, i16 32, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer)
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @sdiv_v2i32(<2 x i32> %op1) #0 {
+; CHECK-LABEL: sdiv_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <2 x i32> %op1, shufflevector (<2 x i32> insertelement (<2 x i32> poison, i32 32, i32 0), <2 x i32> poison, <2 x i32> zeroinitializer)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @sdiv_v4i32(<4 x i32> %op1) #0 {
+; CHECK-LABEL: sdiv_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <4 x i32> %op1, shufflevector (<4 x i32> insertelement (<4 x i32> poison, i32 32, i32 0), <4 x i32> poison, <4 x i32> zeroinitializer)
+  ret <4 x i32> %res
+}
+
+define void @sdiv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: sdiv_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #5
+; CHECK-NEXT:    asrd z1.s, p0/m, z1.s, #5
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %res = sdiv <8 x i32> %op1, shufflevector (<8 x i32> insertelement (<8 x i32> poison, i32 32, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer)
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @sdiv_v1i64(<1 x i64> %op1) #0 {
+; CHECK-LABEL: sdiv_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #5
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <1 x i64> %op1, shufflevector (<1 x i64> insertelement (<1 x i64> poison, i64 32, i32 0), <1 x i64> poison, <1 x i32> zeroinitializer)
+  ret <1 x i64> %res
+}
+
+; Vector i64 sdiv is not legal for NEON, so use SVE when available.
+define <2 x i64> @sdiv_v2i64(<2 x i64> %op1) #0 {
+; CHECK-LABEL: sdiv_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #5
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = sdiv <2 x i64> %op1, shufflevector (<2 x i64> insertelement (<2 x i64> poison, i64 32, i32 0), <2 x i64> poison, <2 x i32> zeroinitializer)
+  ret <2 x i64> %res
+}
+
+define void @sdiv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: sdiv_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #5
+; CHECK-NEXT:    asrd z1.d, p0/m, z1.d, #5
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %res = sdiv <4 x i64> %op1, shufflevector (<4 x i64> insertelement (<4 x i64> poison, i64 32, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer)
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
new file mode 100644
index 000000000000..c328e3124ced
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
@@ -0,0 +1,523 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; DUP (integer)
+;
+
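+; Without NEON, a scalar is splatted by storing it to every lane slot on the
+; stack and reloading the result as a single vector.
+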
+define <4 x i8> @splat_v4i8(i8 %a) #0 {
+; CHECK-LABEL: splat_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strh w0, [sp, #14]
+; CHECK-NEXT:    strh w0, [sp, #12]
+; CHECK-NEXT:    strh w0, [sp, #10]
+; CHECK-NEXT:    strh w0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x i8> undef, i8 %a, i64 0
+  %splat = shufflevector <4 x i8> %insert, <4 x i8> undef, <4 x i32> zeroinitializer
+  ret <4 x i8> %splat
+}
+
+define <8 x i8> @splat_v8i8(i8 %a) #0 {
+; CHECK-LABEL: splat_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strb w0, [sp, #15]
+; CHECK-NEXT:    strb w0, [sp, #14]
+; CHECK-NEXT:    strb w0, [sp, #13]
+; CHECK-NEXT:    strb w0, [sp, #12]
+; CHECK-NEXT:    strb w0, [sp, #11]
+; CHECK-NEXT:    strb w0, [sp, #10]
+; CHECK-NEXT:    strb w0, [sp, #9]
+; CHECK-NEXT:    strb w0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x i8> undef, i8 %a, i64 0
+  %splat = shufflevector <8 x i8> %insert, <8 x i8> undef, <8 x i32> zeroinitializer
+  ret <8 x i8> %splat
+}
+
+define <16 x i8> @splat_v16i8(i8 %a) #0 {
+; CHECK-LABEL: splat_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strb w0, [sp, #15]
+; CHECK-NEXT:    strb w0, [sp, #14]
+; CHECK-NEXT:    strb w0, [sp, #13]
+; CHECK-NEXT:    strb w0, [sp, #12]
+; CHECK-NEXT:    strb w0, [sp, #11]
+; CHECK-NEXT:    strb w0, [sp, #10]
+; CHECK-NEXT:    strb w0, [sp, #9]
+; CHECK-NEXT:    strb w0, [sp, #8]
+; CHECK-NEXT:    strb w0, [sp, #7]
+; CHECK-NEXT:    strb w0, [sp, #6]
+; CHECK-NEXT:    strb w0, [sp, #5]
+; CHECK-NEXT:    strb w0, [sp, #4]
+; CHECK-NEXT:    strb w0, [sp, #3]
+; CHECK-NEXT:    strb w0, [sp, #2]
+; CHECK-NEXT:    strb w0, [sp, #1]
+; CHECK-NEXT:    strb w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <16 x i8> undef, i8 %a, i64 0
+  %splat = shufflevector <16 x i8> %insert, <16 x i8> undef, <16 x i32> zeroinitializer
+  ret <16 x i8> %splat
+}
+
+define void @splat_v32i8(i8 %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: splat_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strb w0, [sp, #15]
+; CHECK-NEXT:    strb w0, [sp, #14]
+; CHECK-NEXT:    strb w0, [sp, #13]
+; CHECK-NEXT:    strb w0, [sp, #12]
+; CHECK-NEXT:    strb w0, [sp, #11]
+; CHECK-NEXT:    strb w0, [sp, #10]
+; CHECK-NEXT:    strb w0, [sp, #9]
+; CHECK-NEXT:    strb w0, [sp, #8]
+; CHECK-NEXT:    strb w0, [sp, #7]
+; CHECK-NEXT:    strb w0, [sp, #6]
+; CHECK-NEXT:    strb w0, [sp, #5]
+; CHECK-NEXT:    strb w0, [sp, #4]
+; CHECK-NEXT:    strb w0, [sp, #3]
+; CHECK-NEXT:    strb w0, [sp, #2]
+; CHECK-NEXT:    strb w0, [sp, #1]
+; CHECK-NEXT:    strb w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <32 x i8> undef, i8 %a, i64 0
+  %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
+  store <32 x i8> %splat, <32 x i8>* %b
+  ret void
+}
+
+define <2 x i16> @splat_v2i16(i16 %a) #0 {
+; CHECK-LABEL: splat_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp w0, w0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x i16> undef, i16 %a, i64 0
+  %splat = shufflevector <2 x i16> %insert, <2 x i16> undef, <2 x i32> zeroinitializer
+  ret <2 x i16> %splat
+}
+
+define <4 x i16> @splat_v4i16(i16 %a) #0 {
+; CHECK-LABEL: splat_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strh w0, [sp, #14]
+; CHECK-NEXT:    strh w0, [sp, #12]
+; CHECK-NEXT:    strh w0, [sp, #10]
+; CHECK-NEXT:    strh w0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x i16> undef, i16 %a, i64 0
+  %splat = shufflevector <4 x i16> %insert, <4 x i16> undef, <4 x i32> zeroinitializer
+  ret <4 x i16> %splat
+}
+
+define <8 x i16> @splat_v8i16(i16 %a) #0 {
+; CHECK-LABEL: splat_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strh w0, [sp, #14]
+; CHECK-NEXT:    strh w0, [sp, #12]
+; CHECK-NEXT:    strh w0, [sp, #10]
+; CHECK-NEXT:    strh w0, [sp, #8]
+; CHECK-NEXT:    strh w0, [sp, #6]
+; CHECK-NEXT:    strh w0, [sp, #4]
+; CHECK-NEXT:    strh w0, [sp, #2]
+; CHECK-NEXT:    strh w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x i16> undef, i16 %a, i64 0
+  %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
+  ret <8 x i16> %splat
+}
+
+define void @splat_v16i16(i16 %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: splat_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    strh w0, [sp, #14]
+; CHECK-NEXT:    strh w0, [sp, #12]
+; CHECK-NEXT:    strh w0, [sp, #10]
+; CHECK-NEXT:    strh w0, [sp, #8]
+; CHECK-NEXT:    strh w0, [sp, #6]
+; CHECK-NEXT:    strh w0, [sp, #4]
+; CHECK-NEXT:    strh w0, [sp, #2]
+; CHECK-NEXT:    strh w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <16 x i16> undef, i16 %a, i64 0
+  %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
+  store <16 x i16> %splat, <16 x i16>* %b
+  ret void
+}
+
+define <2 x i32> @splat_v2i32(i32 %a) #0 {
+; CHECK-LABEL: splat_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp w0, w0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x i32> undef, i32 %a, i64 0
+  %splat = shufflevector <2 x i32> %insert, <2 x i32> undef, <2 x i32> zeroinitializer
+  ret <2 x i32> %splat
+}
+
+define <4 x i32> @splat_v4i32(i32 %a) #0 {
+; CHECK-LABEL: splat_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp w0, w0, [sp, #8]
+; CHECK-NEXT:    stp w0, w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x i32> undef, i32 %a, i64 0
+  %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %splat
+}
+
+define void @splat_v8i32(i32 %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: splat_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp w0, w0, [sp, #8]
+; CHECK-NEXT:    stp w0, w0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x i32> undef, i32 %a, i64 0
+  %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
+  store <8 x i32> %splat, <8 x i32>* %b
+  ret void
+}
+
+define <1 x i64> @splat_v1i64(i64 %a) #0 {
+; CHECK-LABEL: splat_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
+  %insert = insertelement <1 x i64> undef, i64 %a, i64 0
+  %splat = shufflevector <1 x i64> %insert, <1 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %splat
+}
+
+define <2 x i64> @splat_v2i64(i64 %a) #0 {
+; CHECK-LABEL: splat_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x0, x0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x i64> undef, i64 %a, i64 0
+  %splat = shufflevector <2 x i64> %insert, <2 x i64> undef, <2 x i32> zeroinitializer
+  ret <2 x i64> %splat
+}
+
+define void @splat_v4i64(i64 %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: splat_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x0, x0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x i64> undef, i64 %a, i64 0
+  %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
+  store <4 x i64> %splat, <4 x i64>* %b
+  ret void
+}
+
+;
+; DUP (floating-point)
+;
+
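+; The same stack-based splat applies to floating point; splat_v1f64 needs no
+; code because the scalar argument already occupies d0.
+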
+define <2 x half> @splat_v2f16(half %a) #0 {
+; CHECK-LABEL: splat_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    str h0, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x half> undef, half %a, i64 0
+  %splat = shufflevector <2 x half> %insert, <2 x half> undef, <2 x i32> zeroinitializer
+  ret <2 x half> %splat
+}
+
+define <4 x half> @splat_v4f16(half %a) #0 {
+; CHECK-LABEL: splat_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    str h0, [sp, #14]
+; CHECK-NEXT:    str h0, [sp, #12]
+; CHECK-NEXT:    str h0, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x half> undef, half %a, i64 0
+  %splat = shufflevector <4 x half> %insert, <4 x half> undef, <4 x i32> zeroinitializer
+  ret <4 x half> %splat
+}
+
+define <8 x half> @splat_v8f16(half %a) #0 {
+; CHECK-LABEL: splat_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    str h0, [sp, #14]
+; CHECK-NEXT:    str h0, [sp, #12]
+; CHECK-NEXT:    str h0, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    str h0, [sp, #6]
+; CHECK-NEXT:    str h0, [sp, #4]
+; CHECK-NEXT:    str h0, [sp, #2]
+; CHECK-NEXT:    str h0, [sp]
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x half> undef, half %a, i64 0
+  %splat = shufflevector <8 x half> %insert, <8 x half> undef, <8 x i32> zeroinitializer
+  ret <8 x half> %splat
+}
+
+define void @splat_v16f16(half %a, <16 x half>* %b) #0 {
+; CHECK-LABEL: splat_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    str h0, [sp, #14]
+; CHECK-NEXT:    str h0, [sp, #12]
+; CHECK-NEXT:    str h0, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    str h0, [sp, #6]
+; CHECK-NEXT:    str h0, [sp, #4]
+; CHECK-NEXT:    str h0, [sp, #2]
+; CHECK-NEXT:    str h0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <16 x half> undef, half %a, i64 0
+  %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
+  store <16 x half> %splat, <16 x half>* %b
+  ret void
+}
+
+define <2 x float> @splat_v2f32(float %a, <2 x float> %op2) #0 {
+; CHECK-LABEL: splat_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp s0, s0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x float> undef, float %a, i64 0
+  %splat = shufflevector <2 x float> %insert, <2 x float> undef, <2 x i32> zeroinitializer
+  ret <2 x float> %splat
+}
+
+define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) #0 {
+; CHECK-LABEL: splat_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp s0, s0, [sp, #8]
+; CHECK-NEXT:    stp s0, s0, [sp]
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x float> undef, float %a, i64 0
+  %splat = shufflevector <4 x float> %insert, <4 x float> undef, <4 x i32> zeroinitializer
+  ret <4 x float> %splat
+}
+
+define void @splat_v8f32(float %a, <8 x float>* %b) #0 {
+; CHECK-LABEL: splat_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stp s0, s0, [sp, #8]
+; CHECK-NEXT:    stp s0, s0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x float> undef, float %a, i64 0
+  %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
+  store <8 x float> %splat, <8 x float>* %b
+  ret void
+}
+
+define <1 x double> @splat_v1f64(double %a, <1 x double> %op2) #0 {
+; CHECK-LABEL: splat_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %insert = insertelement <1 x double> undef, double %a, i64 0
+  %splat = shufflevector <1 x double> %insert, <1 x double> undef, <1 x i32> zeroinitializer
+  ret <1 x double> %splat
+}
+
+define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) #0 {
+; CHECK-LABEL: splat_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp d0, d0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr q0, [sp], #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <2 x double> undef, double %a, i64 0
+  %splat = shufflevector <2 x double> %insert, <2 x double> undef, <2 x i32> zeroinitializer
+  ret <2 x double> %splat
+}
+
+define void @splat_v4f64(double %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: splat_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp d0, d0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x double> undef, double %a, i64 0
+  %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
+  store <4 x double> %splat, <4 x double>* %b
+  ret void
+}
+
+;
+; DUP (integer immediate)
+;
+
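+; Immediate splats, here and in the floating-point section below, are
+; materialized as constant-pool loads (ADRP + LDR) and stored out directly.
+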
+define void @splat_imm_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: splat_imm_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI24_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI24_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <32 x i8> undef, i8 1, i64 0
+  %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
+  store <32 x i8> %splat, <32 x i8>* %a
+  ret void
+}
+
+define void @splat_imm_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: splat_imm_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI25_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI25_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <16 x i16> undef, i16 2, i64 0
+  %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
+  store <16 x i16> %splat, <16 x i16>* %a
+  ret void
+}
+
+define void @splat_imm_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: splat_imm_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI26_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI26_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x i32> undef, i32 3, i64 0
+  %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
+  store <8 x i32> %splat, <8 x i32>* %a
+  ret void
+}
+
+define void @splat_imm_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: splat_imm_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI27_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x i64> undef, i64 4, i64 0
+  %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
+  store <4 x i64> %splat, <4 x i64>* %a
+  ret void
+}
+
+;
+; DUP (floating-point immediate)
+;
+
+define void @splat_imm_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: splat_imm_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI28_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI28_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <16 x half> undef, half 5.0, i64 0
+  %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
+  store <16 x half> %splat, <16 x half>* %a
+  ret void
+}
+
+define void @splat_imm_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: splat_imm_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI29_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI29_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <8 x float> undef, float 6.0, i64 0
+  %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
+  store <8 x float> %splat, <8 x float>* %a
+  ret void
+}
+
+define void @splat_imm_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: splat_imm_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI30_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI30_0]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    ret
+  %insert = insertelement <4 x double> undef, double 7.0, i64 0
+  %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
+  store <4 x double> %splat, <4 x double>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

More information about the llvm-commits mailing list