[llvm] 7122d9c - [RISCV] Add codegen tests for `vector.(de)interleave3/5/7` on FP scalable vectors (#137257)

via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 24 16:51:45 PDT 2025


Author: Min-Yih Hsu
Date: 2025-04-24T16:51:41-07:00
New Revision: 7122d9c5623f69a68817c38ecbe5cabd7ea13cd5

URL: https://github.com/llvm/llvm-project/commit/7122d9c5623f69a68817c38ecbe5cabd7ea13cd5
DIFF: https://github.com/llvm/llvm-project/commit/7122d9c5623f69a68817c38ecbe5cabd7ea13cd5.diff

LOG: [RISCV] Add codegen tests for `vector.(de)interleave3/5/7` on FP scalable vectors (#137257)

We had `vector.(de)interleave3/5/7` tests for floating-point fixed
vectors but not for scalable vectors. This patch adds those scalable-vector
tests. I also reorganized the test files so that all the integer tests are
grouped at the top.
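For reference, the new FP scalable-vector tests all follow the same shape
as the sketch below (a minimal illustration mirroring the added
`vector_deinterleave_nxv2f16_nxv6f16` test; the function name
`@deinterleave3_f16_example` is purely illustrative):

    ; Split one <vscale x 6 x half> into three <vscale x 2 x half> results,
    ; taking every third element starting at offsets 0, 1, and 2.
    declare {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave3.nxv6f16(<vscale x 6 x half>)

    define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @deinterleave3_f16_example(<vscale x 6 x half> %v) {
      %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave3.nxv6f16(<vscale x 6 x half> %v)
      ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
    }

The interleave3/5/7 counterparts in vector-interleave.ll are exercised the
same way, just with the operands and results swapped.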

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
    llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 8a71cd0826672..e316c022727ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -347,6 +347,629 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
 ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
 }
 
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv48i1(<vscale x 48 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vmerge.vim v16, v10, 1, v0
+; CHECK-NEXT:    srli a1, a0, 2
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v0, a1
+; CHECK-NEXT:    srli a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v18, v10, 1, v0
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v8, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v20, v10, 1, v0
+; CHECK-NEXT:    vs8r.v v16, (a0)
+; CHECK-NEXT:    vlseg3e8.v v8, (a0)
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    vmsne.vi v8, v10, 0
+; CHECK-NEXT:    vmsne.vi v9, v12, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave3.nxv48i1(<vscale x 48 x i1> %vec)
+  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv48i8(<vscale x 48 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vlseg3e8.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave3.nxv48i8(<vscale x 48 x i8> %vec)
+  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv24i16(<vscale x 24 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave3.nxv24i16(<vscale x 24 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv12i32(<vscale x 12 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vlseg3e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> %vec)
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv6i64(<vscale x 6 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vlseg3e64.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> %vec)
+  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
+
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv80i1(<vscale x 80 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    srli a1, a0, 2
+; CHECK-NEXT:    srli a2, a0, 1
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v0, a1
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v9, a2
+; CHECK-NEXT:    sub a0, a0, a1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v20, v12, 1, v0
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v10, v15
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v11, v12
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v8, v21
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v9, v14
+; CHECK-NEXT:    vs8r.v v16, (a0)
+; CHECK-NEXT:    vmv1r.v v12, v13
+; CHECK-NEXT:    vs8r.v v8, (a1)
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vlseg5e8.v v8, (a0)
+; CHECK-NEXT:    vlseg5e8.v v14, (a1)
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v10
+; CHECK-NEXT:    vmv1r.v v21, v14
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v20, 0
+; CHECK-NEXT:    vmv1r.v v14, v9
+; CHECK-NEXT:    vmsne.vi v8, v14, 0
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmsne.vi v9, v22, 0
+; CHECK-NEXT:    vmv1r.v v16, v11
+; CHECK-NEXT:    vmsne.vi v10, v16, 0
+; CHECK-NEXT:    vmv1r.v v13, v18
+; CHECK-NEXT:    vmsne.vi v11, v12, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave5.nxv80i1(<vscale x 80 x i1> %vec)
+  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv80i8(<vscale x 80 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v26, v15
+; CHECK-NEXT:    vmv1r.v v27, v16
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v24, v13
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v25, v14
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v28, v17
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg5e8.v v12, (a0)
+; CHECK-NEXT:    vlseg5e8.v v18, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vmv1r.v v9, v18
+; CHECK-NEXT:    vmv1r.v v18, v13
+; CHECK-NEXT:    vmv2r.v v12, v14
+; CHECK-NEXT:    vmv1r.v v13, v20
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v17, v22
+; CHECK-NEXT:    vmv2r.v v10, v18
+; CHECK-NEXT:    vmv2r.v v14, v20
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave5.nxv80i8(<vscale x 80 x i8> %vec)
+  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv40i16(<vscale x 40 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v26, v15
+; CHECK-NEXT:    vmv1r.v v27, v16
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v24, v13
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v25, v14
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v28, v17
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg5e16.v v12, (a0)
+; CHECK-NEXT:    vlseg5e16.v v18, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vmv1r.v v9, v18
+; CHECK-NEXT:    vmv1r.v v18, v13
+; CHECK-NEXT:    vmv2r.v v12, v14
+; CHECK-NEXT:    vmv1r.v v13, v20
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v17, v22
+; CHECK-NEXT:    vmv2r.v v10, v18
+; CHECK-NEXT:    vmv2r.v v14, v20
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave5.nxv40i16(<vscale x 40 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv20i32(<vscale x 20 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v26, v15
+; CHECK-NEXT:    vmv1r.v v27, v16
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v24, v13
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v25, v14
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v28, v17
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg5e32.v v12, (a0)
+; CHECK-NEXT:    vlseg5e32.v v18, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vmv1r.v v9, v18
+; CHECK-NEXT:    vmv1r.v v18, v13
+; CHECK-NEXT:    vmv2r.v v12, v14
+; CHECK-NEXT:    vmv1r.v v13, v20
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v17, v22
+; CHECK-NEXT:    vmv2r.v v10, v18
+; CHECK-NEXT:    vmv2r.v v14, v20
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave5.nxv20i32(<vscale x 20 x i32> %vec)
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv10i64(<vscale x 10 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv10i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v26, v15
+; CHECK-NEXT:    vmv1r.v v27, v16
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v24, v13
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v25, v14
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v28, v17
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg5e64.v v12, (a0)
+; CHECK-NEXT:    vlseg5e64.v v18, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vmv1r.v v9, v18
+; CHECK-NEXT:    vmv1r.v v18, v13
+; CHECK-NEXT:    vmv2r.v v12, v14
+; CHECK-NEXT:    vmv1r.v v13, v20
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v17, v22
+; CHECK-NEXT:    vmv2r.v v10, v18
+; CHECK-NEXT:    vmv2r.v v14, v20
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave5.nxv10i64(<vscale x 10 x i64> %vec)
+  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
+
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv112i1(<vscale x 112 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    srli a1, a0, 2
+; CHECK-NEXT:    srli a2, a0, 1
+; CHECK-NEXT:    srli a3, a0, 3
+; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v0, a1
+; CHECK-NEXT:    slli a3, a3, 1
+; CHECK-NEXT:    vsetvli a4, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v9, a2
+; CHECK-NEXT:    sub a0, a0, a3
+; CHECK-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v20, v12, 1, v0
+; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v22, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v8, a1
+; CHECK-NEXT:    vmv1r.v v10, v15
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v24, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v11, v24
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v8, a2
+; CHECK-NEXT:    vmv1r.v v8, v23
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v9, v14
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v12, v25
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v13, v14
+; CHECK-NEXT:    vs8r.v v16, (a0)
+; CHECK-NEXT:    vmv1r.v v14, v15
+; CHECK-NEXT:    vs8r.v v8, (a1)
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vlseg7e8.v v8, (a0)
+; CHECK-NEXT:    vlseg7e8.v v16, (a1)
+; CHECK-NEXT:    vmv2r.v v24, v8
+; CHECK-NEXT:    vmv2r.v v26, v10
+; CHECK-NEXT:    vmv2r.v v28, v12
+; CHECK-NEXT:    vmv1r.v v25, v16
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v24, 0
+; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmsne.vi v8, v16, 0
+; CHECK-NEXT:    vmv1r.v v27, v18
+; CHECK-NEXT:    vmsne.vi v9, v26, 0
+; CHECK-NEXT:    vmv1r.v v18, v11
+; CHECK-NEXT:    vmsne.vi v10, v18, 0
+; CHECK-NEXT:    vmv1r.v v29, v20
+; CHECK-NEXT:    vmsne.vi v11, v28, 0
+; CHECK-NEXT:    vmv1r.v v20, v13
+; CHECK-NEXT:    vmsne.vi v12, v20, 0
+; CHECK-NEXT:    vmv1r.v v15, v22
+; CHECK-NEXT:    vmsne.vi v13, v14, 0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave7.nxv112i1(<vscale x 112 x i1> %vec)
+  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv112i8(<vscale x 112 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v30, v21
+; CHECK-NEXT:    vmv1r.v v28, v19
+; CHECK-NEXT:    vmv1r.v v29, v20
+; CHECK-NEXT:    vmv1r.v v26, v17
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v27, v18
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v24, v15
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v25, v16
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg7e8.v v14, (a0)
+; CHECK-NEXT:    vlseg7e8.v v22, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vmv1r.v v9, v22
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vmv2r.v v12, v16
+; CHECK-NEXT:    vmv1r.v v13, v24
+; CHECK-NEXT:    vmv1r.v v24, v17
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    vmv1r.v v17, v26
+; CHECK-NEXT:    vmv1r.v v26, v19
+; CHECK-NEXT:    vmv1r.v v21, v28
+; CHECK-NEXT:    vmv2r.v v10, v22
+; CHECK-NEXT:    vmv2r.v v14, v24
+; CHECK-NEXT:    vmv2r.v v18, v26
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave7.nxv112i8(<vscale x 112 x i8> %vec)
+  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv56i16(<vscale x 56 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v30, v21
+; CHECK-NEXT:    vmv1r.v v28, v19
+; CHECK-NEXT:    vmv1r.v v29, v20
+; CHECK-NEXT:    vmv1r.v v26, v17
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v27, v18
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v24, v15
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v25, v16
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg7e16.v v14, (a0)
+; CHECK-NEXT:    vlseg7e16.v v22, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vmv1r.v v9, v22
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vmv2r.v v12, v16
+; CHECK-NEXT:    vmv1r.v v13, v24
+; CHECK-NEXT:    vmv1r.v v24, v17
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    vmv1r.v v17, v26
+; CHECK-NEXT:    vmv1r.v v26, v19
+; CHECK-NEXT:    vmv1r.v v21, v28
+; CHECK-NEXT:    vmv2r.v v10, v22
+; CHECK-NEXT:    vmv2r.v v14, v24
+; CHECK-NEXT:    vmv2r.v v18, v26
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave7.nxv56i16(<vscale x 56 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv28i32(<vscale x 28 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v30, v21
+; CHECK-NEXT:    vmv1r.v v28, v19
+; CHECK-NEXT:    vmv1r.v v29, v20
+; CHECK-NEXT:    vmv1r.v v26, v17
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v27, v18
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v24, v15
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v25, v16
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg7e32.v v14, (a0)
+; CHECK-NEXT:    vlseg7e32.v v22, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vmv1r.v v9, v22
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vmv2r.v v12, v16
+; CHECK-NEXT:    vmv1r.v v13, v24
+; CHECK-NEXT:    vmv1r.v v24, v17
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    vmv1r.v v17, v26
+; CHECK-NEXT:    vmv1r.v v26, v19
+; CHECK-NEXT:    vmv1r.v v21, v28
+; CHECK-NEXT:    vmv2r.v v10, v22
+; CHECK-NEXT:    vmv2r.v v14, v24
+; CHECK-NEXT:    vmv2r.v v18, v26
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave7.nxv28i32(<vscale x 28 x i32> %vec)
+  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv14i64(<vscale x 14 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v30, v21
+; CHECK-NEXT:    vmv1r.v v28, v19
+; CHECK-NEXT:    vmv1r.v v29, v20
+; CHECK-NEXT:    vmv1r.v v26, v17
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vmv1r.v v27, v18
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vmv1r.v v24, v15
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vmv1r.v v25, v16
+; CHECK-NEXT:    vs8r.v v24, (a1)
+; CHECK-NEXT:    vlseg7e64.v v14, (a0)
+; CHECK-NEXT:    vlseg7e64.v v22, (a1)
+; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vmv1r.v v9, v22
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vmv2r.v v12, v16
+; CHECK-NEXT:    vmv1r.v v13, v24
+; CHECK-NEXT:    vmv1r.v v24, v17
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    vmv1r.v v17, v26
+; CHECK-NEXT:    vmv1r.v v26, v19
+; CHECK-NEXT:    vmv1r.v v21, v28
+; CHECK-NEXT:    vmv2r.v v10, v22
+; CHECK-NEXT:    vmv2r.v v14, v24
+; CHECK-NEXT:    vmv2r.v v18, v26
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave7.nxv14i64(<vscale x 14 x i64> %vec)
+  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
 
 ; Floats
 
@@ -692,46 +1315,53 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
 ret {<vscale x 8 x double>, <vscale x 8 x double>} %retval
 }
 
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv48i1(<vscale x 48 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv6f16(<vscale x 6 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv6f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v8, v0
-; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v16, v10, 1, v0
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v0, a1
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v18, v10, 1, v0
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v8, a0
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v10, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v20, v10, 1, v0
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    vlseg3e8.v v8, (a0)
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    vmsne.vi v8, v10, 0
-; CHECK-NEXT:    vmsne.vi v9, v12, 0
+; CHECK-NEXT:    vs2r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave3.nxv48i1(<vscale x 48 x i1> %vec)
-  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+  %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave3.nxv6f16(<vscale x 6 x half> %arg)
+  ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
 }
 
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv12f16(<vscale x 12 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv12f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave3.nxv12f16(<vscale x 12 x half> %arg)
+  ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
+}
 
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv48i8(<vscale x 48 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv24f16(<vscale x 24 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv24f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -739,20 +1369,64 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_dein
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vlseg3e8.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave3.nxv48i8(<vscale x 48 x i8> %vec)
-  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+  %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave3.nxv24f16(<vscale x 24 x half> %arg)
+  ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv6bf16(<vscale x 6 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv6bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v10, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs2r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave3.nxv6bf16(<vscale x 6 x bfloat> %arg)
+  ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
 }
 
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv12bf16(<vscale x 12 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv12bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg3e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave3.nxv12bf16(<vscale x 12 x bfloat> %arg)
+  ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
 
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv24i16(<vscale x 24 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16:
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv24bf16(<vscale x 24 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv24bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -767,13 +1441,57 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_dein
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave3.nxv24i16(<vscale x 24 x i16> %vec)
-  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+  %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave3.nxv24bf16(<vscale x 24 x bfloat> %arg)
+  ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
 }
 
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv3f32(<vscale x 3 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv3f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v10, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs2r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vlseg3e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave3.nxv3f32(<vscale x 3 x float> %arg)
+  ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
 
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv12i32(<vscale x 12 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv6f32(<vscale x 6 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv6f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vlseg3e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave3.nxv6f32(<vscale x 6 x float> %arg)
+  ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv12f32(<vscale x 12 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv12f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -788,13 +1506,32 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_dein
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> %vec)
-  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave3.nxv12f32(<vscale x 12 x float> %arg)
+  ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
 }
 
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv3f64(<vscale x 3 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv3f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg3e64.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave3.nxv3f64(<vscale x 3 x double> %arg)
+  ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
 
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv6i64(<vscale x 6 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv6f64(<vscale x 6 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv6f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -809,87 +1546,65 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_dein
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> %vec)
-  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+  %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave3.nxv6f64(<vscale x 6 x double> %arg)
+  ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
 }
 
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv80i1(<vscale x 80 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv10f16(<vscale x 10 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv10f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    srli a2, a0, 1
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v0, a1
-; CHECK-NEXT:    srli a1, a0, 3
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v9, a2
-; CHECK-NEXT:    sub a0, a0, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v20, v12, 1, v0
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v10, v15
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v11, v12
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v11, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v11, a0
+; CHECK-NEXT:    vslidedown.vx v11, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v11, a0
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vmv1r.v v8, v21
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vmv1r.v v9, v14
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    vmv1r.v v12, v13
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vlseg5e8.v v8, (a0)
-; CHECK-NEXT:    vlseg5e8.v v14, (a1)
-; CHECK-NEXT:    vmv2r.v v20, v8
-; CHECK-NEXT:    vmv2r.v v22, v10
-; CHECK-NEXT:    vmv1r.v v21, v14
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v20, 0
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmsne.vi v8, v14, 0
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vmsne.vi v9, v22, 0
-; CHECK-NEXT:    vmv1r.v v16, v11
-; CHECK-NEXT:    vmsne.vi v10, v16, 0
-; CHECK-NEXT:    vmv1r.v v13, v18
-; CHECK-NEXT:    vmsne.vi v11, v12, 0
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg5e16.v v8, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave5.nxv80i1(<vscale x 80 x i1> %vec)
-  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+  %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave5.nxv10f16(<vscale x 10 x half> %arg)
+  ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
+}
+
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv20f16(<vscale x 20 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv20f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg5e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave5.nxv20f16(<vscale x 20 x half> %arg)
+  ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
 }
 
-
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv80i8(<vscale x 80 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv40f16(<vscale x 40 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv40f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv1r.v v26, v15
 ; CHECK-NEXT:    vmv1r.v v27, v16
 ; CHECK-NEXT:    addi a0, sp, 16
@@ -902,8 +1617,8 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    vmv1r.v v28, v17
 ; CHECK-NEXT:    vs8r.v v24, (a1)
-; CHECK-NEXT:    vlseg5e8.v v12, (a0)
-; CHECK-NEXT:    vlseg5e8.v v18, (a1)
+; CHECK-NEXT:    vlseg5e16.v v12, (a0)
+; CHECK-NEXT:    vlseg5e16.v v18, (a1)
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    vmv1r.v v9, v18
 ; CHECK-NEXT:    vmv1r.v v18, v13
@@ -918,13 +1633,59 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave5.nxv80i8(<vscale x 80 x i8> %vec)
-  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+  %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave5.nxv40f16(<vscale x 40 x half> %arg)
+  ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
 }
 
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv10bf16(<vscale x 10 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv10bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v11, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v11, a0
+; CHECK-NEXT:    vslidedown.vx v11, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v11, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg5e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave5.nxv10bf16(<vscale x 10 x bfloat> %arg)
+  ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
+}
 
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv40i16(<vscale x 40 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16:
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv20bf16(<vscale x 20 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv20bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg5e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave5.nxv20bf16(<vscale x 20 x bfloat> %arg)
+  ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
+
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv40bf16(<vscale x 40 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv40bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -959,13 +1720,59 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave5.nxv40i16(<vscale x 40 x i16> %vec)
-  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+  %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave5.nxv40bf16(<vscale x 40 x bfloat> %arg)
+  ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
 }
 
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv5f32(<vscale x 5 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv5f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v11, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v11, a0
+; CHECK-NEXT:    vslidedown.vx v11, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v11, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vlseg5e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave5.nxv5f32(<vscale x 5 x float> %arg)
+  ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
 
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv20i32(<vscale x 20 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv10f32(<vscale x 10 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv10f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vlseg5e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave5.nxv10f32(<vscale x 10 x float> %arg)
+  ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv20f32(<vscale x 20 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv20f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1000,13 +1807,32 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave5.nxv20i32(<vscale x 20 x i32> %vec)
-  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave5.nxv20f32(<vscale x 20 x float> %arg)
+  ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
 }
 
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv5f64(<vscale x 5 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv5f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg5e64.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave5.nxv5f64(<vscale x 5 x double> %arg)
+  ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
 
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv10i64(<vscale x 10 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv10i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv10f64(<vscale x 10 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv10f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1041,102 +1867,67 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave5.nxv10i64(<vscale x 10 x i64> %vec)
-  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+  %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave5.nxv10f64(<vscale x 10 x double> %arg)
+  ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
 }
 
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv112i1(<vscale x 112 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv14f16(<vscale x 14 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv14f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT:    srli a1, a0, 2
-; CHECK-NEXT:    srli a2, a0, 1
-; CHECK-NEXT:    srli a3, a0, 3
-; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v0, a1
-; CHECK-NEXT:    slli a3, a3, 1
-; CHECK-NEXT:    vsetvli a4, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v9, a2
-; CHECK-NEXT:    sub a0, a0, a3
-; CHECK-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v20, v12, 1, v0
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v22, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v8, a1
-; CHECK-NEXT:    vmv1r.v v10, v15
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v24, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v11, v24
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v0, v8, a2
-; CHECK-NEXT:    vmv1r.v v8, v23
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v12, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v10, a0
+; CHECK-NEXT:    vslideup.vx v10, v12, a0
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vmv1r.v v9, v14
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v12, v25
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vmv1r.v v13, v14
-; CHECK-NEXT:    vs8r.v v16, (a0)
-; CHECK-NEXT:    vmv1r.v v14, v15
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vlseg7e8.v v8, (a0)
-; CHECK-NEXT:    vlseg7e8.v v16, (a1)
-; CHECK-NEXT:    vmv2r.v v24, v8
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v28, v12
-; CHECK-NEXT:    vmv1r.v v25, v16
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v24, 0
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vmsne.vi v8, v16, 0
-; CHECK-NEXT:    vmv1r.v v27, v18
-; CHECK-NEXT:    vmsne.vi v9, v26, 0
-; CHECK-NEXT:    vmv1r.v v18, v11
-; CHECK-NEXT:    vmsne.vi v10, v18, 0
-; CHECK-NEXT:    vmv1r.v v29, v20
-; CHECK-NEXT:    vmsne.vi v11, v28, 0
-; CHECK-NEXT:    vmv1r.v v20, v13
-; CHECK-NEXT:    vmsne.vi v12, v20, 0
-; CHECK-NEXT:    vmv1r.v v15, v22
-; CHECK-NEXT:    vmsne.vi v13, v14, 0
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg7e16.v v8, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave7.nxv112i1(<vscale x 112 x i1> %vec)
-  ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+  %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave7.nxv14f16(<vscale x 14 x half> %arg)
+  ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
 }
 
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv28f16(<vscale x 28 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv28f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg7e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave7.nxv28f16(<vscale x 28 x half> %arg)
+  ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
+}
 
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv112i8(<vscale x 112 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv56f16(<vscale x 56 x half> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv56f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv1r.v v30, v21
 ; CHECK-NEXT:    vmv1r.v v28, v19
 ; CHECK-NEXT:    vmv1r.v v29, v20
@@ -1151,8 +1942,8 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    vmv1r.v v25, v16
 ; CHECK-NEXT:    vs8r.v v24, (a1)
-; CHECK-NEXT:    vlseg7e8.v v14, (a0)
-; CHECK-NEXT:    vlseg7e8.v v22, (a1)
+; CHECK-NEXT:    vlseg7e16.v v14, (a0)
+; CHECK-NEXT:    vlseg7e16.v v22, (a1)
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    vmv1r.v v9, v22
 ; CHECK-NEXT:    vmv1r.v v22, v15
@@ -1171,13 +1962,61 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave7.nxv112i8(<vscale x 112 x i8> %vec)
-  ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+  %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave7.nxv56f16(<vscale x 56 x half> %arg)
+  ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv14bf16(<vscale x 14 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv14bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v12, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v10, a0
+; CHECK-NEXT:    vslideup.vx v10, v12, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vlseg7e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave7.nxv14bf16(<vscale x 14 x bfloat> %arg)
+  ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
 }
 
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv28bf16(<vscale x 28 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv28bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg7e16.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave7.nxv28bf16(<vscale x 28 x bfloat> %arg)
+  ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
 
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv56i16(<vscale x 56 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16:
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv56bf16(<vscale x 56 x bfloat> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv56bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1218,13 +2057,61 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave7.nxv56i16(<vscale x 56 x i16> %vec)
-  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+  %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave7.nxv56bf16(<vscale x 56 x bfloat> %arg)
+  ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
 }
 
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv7f32(<vscale x 7 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv7f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 3
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v12, v9, a0
+; CHECK-NEXT:    vslideup.vx v9, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v8, a0
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    vslidedown.vx v12, v10, a0
+; CHECK-NEXT:    vslideup.vx v10, v12, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs4r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vlseg7e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave7.nxv7f32(<vscale x 7 x float> %arg)
+  ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
 
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv28i32(<vscale x 28 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv14f32(<vscale x 14 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv14f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vlseg7e32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave7.nxv14f32(<vscale x 14 x float> %arg)
+  ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv28f32(<vscale x 28 x float> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv28f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1265,13 +2152,32 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave7.nxv28i32(<vscale x 28 x i32> %vec)
-  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave7.nxv28f32(<vscale x 28 x float> %arg)
+  ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
 }
 
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv7f64(<vscale x 7 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vlseg7e64.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave7.nxv7f64(<vscale x 7 x double> %arg)
+  ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
 
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv14i64(<vscale x 14 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv14f64(<vscale x 14 x double> %arg) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv14f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1312,6 +2218,6 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave7.nxv14i64(<vscale x 14 x i64> %vec)
-  ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+  %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave7.nxv14f64(<vscale x 14 x double> %arg)
+  ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 469263a3247ce..7347000bf5c71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -446,587 +446,312 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
   ret <vscale x 16 x i64> %res
 }
 
-
-; Floats
-
-define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; V-NEXT:    vwaddu.vv v10, v8, v9
-; V-NEXT:    li a0, -1
-; V-NEXT:    csrr a1, vlenb
-; V-NEXT:    vwmaccu.vx v10, a0, v9
-; V-NEXT:    srli a1, a1, 2
-; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT:    vslidedown.vx v8, v10, a1
-; V-NEXT:    add a0, a1, a1
-; V-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; V-NEXT:    vslideup.vx v10, v8, a1
-; V-NEXT:    vmv.v.v v8, v10
-; V-NEXT:    ret
+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vzext.vf2 v8, v12
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
 ; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVBB-NEXT:    vwsll.vi v10, v9, 16
-; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
-; ZVBB-NEXT:    srli a0, a0, 2
-; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
-; ZVBB-NEXT:    add a1, a0, a0
-; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; ZVBB-NEXT:    vslideup.vx v10, v8, a0
-; ZVBB-NEXT:    vmv.v.v v8, v10
+; ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZVBB-NEXT:    vmv2r.v v12, v8
+; ZVBB-NEXT:    vzext.vf2 v8, v12
 ; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v11, v8, v9
-; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
-; ZIP-NEXT:    csrr a0, vlenb
-; ZIP-NEXT:    srli a0, a0, 2
-; ZIP-NEXT:    add a1, a0, a0
-; ZIP-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; ZIP-NEXT:    vslideup.vx v10, v11, a0
-; ZIP-NEXT:    vmv.v.v v8, v10
-; ZIP-NEXT:    ret
-  %res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
-  ret <vscale x 4 x bfloat> %res
+  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
+  ret <vscale x 8 x i32> %res
 }
 
-define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT:    vmv1r.v v10, v9
-; V-NEXT:    vmv1r.v v11, v8
-; V-NEXT:    vwaddu.vv v8, v11, v10
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v10
-; V-NEXT:    ret
+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsll.vx v8, v12, a0
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
 ; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZVBB-NEXT:    vmv1r.v v10, v9
-; ZVBB-NEXT:    vmv1r.v v11, v8
-; ZVBB-NEXT:    vwsll.vi v8, v10, 16
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
+; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVBB-NEXT:    vmv2r.v v12, v8
+; ZVBB-NEXT:    li a0, 32
+; ZVBB-NEXT:    vwsll.vx v8, v12, a0
 ; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZIP-NEXT:    vmv1r.v v10, v9
-; ZIP-NEXT:    vmv1r.v v11, v8
-; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT:    ret
-  %res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
-  ret <vscale x 8 x bfloat> %res
+  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
+  ret <vscale x 8 x i32> %res
 }
 
-define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
-; V-LABEL: vector_interleave_nxv4f16_nxv2f16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; V-NEXT:    vwaddu.vv v10, v8, v9
-; V-NEXT:    li a0, -1
-; V-NEXT:    csrr a1, vlenb
-; V-NEXT:    vwmaccu.vx v10, a0, v9
-; V-NEXT:    srli a1, a1, 2
-; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT:    vslidedown.vx v8, v10, a1
-; V-NEXT:    add a0, a1, a1
-; V-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; V-NEXT:    vslideup.vx v10, v8, a1
-; V-NEXT:    vmv.v.v v8, v10
-; V-NEXT:    ret
+define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    vsseg3e8.v v14, (a0)
+; CHECK-NEXT:    vl2r.v v8, (a2)
+; CHECK-NEXT:    srli a2, a1, 2
+; CHECK-NEXT:    srli a1, a1, 1
+; CHECK-NEXT:    vl2r.v v10, (a3)
+; CHECK-NEXT:    vl2r.v v12, (a0)
+; CHECK-NEXT:    add a0, a2, a2
+; CHECK-NEXT:    vmsne.vi v14, v8, 0
+; CHECK-NEXT:    vmsne.vi v8, v10, 0
+; CHECK-NEXT:    vmsne.vi v0, v12, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v8, a2
+; CHECK-NEXT:    add a0, a1, a1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v14, a1
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1:
 ; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVBB-NEXT:    vwsll.vi v10, v9, 16
+; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
-; ZVBB-NEXT:    srli a0, a0, 2
-; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
-; ZVBB-NEXT:    add a1, a0, a0
-; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; ZVBB-NEXT:    vslideup.vx v10, v8, a0
-; ZVBB-NEXT:    vmv.v.v v8, v10
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmv1r.v v10, v0
+; ZVBB-NEXT:    vmv1r.v v0, v8
+; ZVBB-NEXT:    vmv.v.i v12, 0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
+; ZVBB-NEXT:    slli a2, a1, 1
+; ZVBB-NEXT:    vmv1r.v v0, v10
+; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    vmv1r.v v0, v9
+; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
+; ZVBB-NEXT:    add a2, a3, a2
+; ZVBB-NEXT:    vsseg3e8.v v14, (a0)
+; ZVBB-NEXT:    vl2r.v v8, (a2)
+; ZVBB-NEXT:    srli a2, a1, 2
+; ZVBB-NEXT:    srli a1, a1, 1
+; ZVBB-NEXT:    vl2r.v v10, (a3)
+; ZVBB-NEXT:    vl2r.v v12, (a0)
+; ZVBB-NEXT:    add a0, a2, a2
+; ZVBB-NEXT:    vmsne.vi v14, v8, 0
+; ZVBB-NEXT:    vmsne.vi v8, v10, 0
+; ZVBB-NEXT:    vmsne.vi v0, v12, 0
+; ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v8, a2
+; ZVBB-NEXT:    add a0, a1, a1
+; ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v14, a1
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v11, v8, v9
-; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
-; ZIP-NEXT:    csrr a0, vlenb
-; ZIP-NEXT:    srli a0, a0, 2
-; ZIP-NEXT:    add a1, a0, a0
-; ZIP-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; ZIP-NEXT:    vslideup.vx v10, v11, a0
-; ZIP-NEXT:    vmv.v.v v8, v10
-; ZIP-NEXT:    ret
-  %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
-  ret <vscale x 4 x half> %res
+  %res = call <vscale x 48 x i1> @llvm.vector.interleave3.nxv48i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c)
+  ret <vscale x 48 x i1> %res
 }
 
-define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
-; V-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT:    vmv1r.v v10, v9
-; V-NEXT:    vmv1r.v v11, v8
-; V-NEXT:    vwaddu.vv v8, v11, v10
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v10
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZVBB-NEXT:    vmv1r.v v10, v9
-; ZVBB-NEXT:    vmv1r.v v11, v8
-; ZVBB-NEXT:    vwsll.vi v8, v10, 16
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZIP-NEXT:    vmv1r.v v10, v9
-; ZIP-NEXT:    vmv1r.v v11, v8
-; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
-  ret <vscale x 8 x half> %res
-}
 
-define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
-; V-LABEL: vector_interleave_nxv4f32_nxv2f32:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; V-NEXT:    vmv1r.v v10, v9
-; V-NEXT:    vmv1r.v v11, v8
-; V-NEXT:    vwaddu.vv v8, v11, v10
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v10
-; V-NEXT:    ret
+define <vscale x 48 x i8> @vector_interleave_nxv48i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vsseg3e8.v v8, (a0)
+; CHECK-NEXT:    vl2r.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2r.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2r.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8:
 ; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; ZVBB-NEXT:    vmv1r.v v10, v9
-; ZVBB-NEXT:    vmv1r.v v11, v8
-; ZVBB-NEXT:    li a0, 32
-; ZVBB-NEXT:    vwsll.vx v8, v10, a0
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e8.v v8, (a0)
+; ZVBB-NEXT:    vl2r.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2r.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2r.v v12, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; ZIP-NEXT:    vmv1r.v v10, v9
-; ZIP-NEXT:    vmv1r.v v11, v8
-; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
-  ret <vscale x 4 x float> %res
+  %res = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
+  ret <vscale x 48 x i8> %res
 }
 
-define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; V-NEXT:    vmv2r.v v12, v10
-; V-NEXT:    vmv2r.v v14, v8
-; V-NEXT:    vwaddu.vv v8, v14, v12
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v12
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZVBB-NEXT:    vmv2r.v v12, v10
-; ZVBB-NEXT:    vmv2r.v v14, v8
-; ZVBB-NEXT:    vwsll.vi v8, v12, 16
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZIP-NEXT:    vmv2r.v v12, v10
-; ZIP-NEXT:    vmv2r.v v14, v8
-; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT:    ret
-  %res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
-  ret <vscale x 16 x bfloat> %res
-}
 
-define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; V-LABEL: vector_interleave_nxv16f16_nxv8f16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; V-NEXT:    vmv2r.v v12, v10
-; V-NEXT:    vmv2r.v v14, v8
-; V-NEXT:    vwaddu.vv v8, v14, v12
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v12
-; V-NEXT:    ret
+define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    vl2re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16:
 ; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZVBB-NEXT:    vmv2r.v v12, v10
-; ZVBB-NEXT:    vmv2r.v v14, v8
-; ZVBB-NEXT:    vwsll.vi v8, v12, 16
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    vl2re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v12, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZIP-NEXT:    vmv2r.v v12, v10
-; ZIP-NEXT:    vmv2r.v v14, v8
-; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT:    ret
-  %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
-  ret <vscale x 16 x half> %res
+  %res = call <vscale x 24 x i16> @llvm.vector.interleave3.nxv24i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
+  ret <vscale x 24 x i16> %res
 }
 
-define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; V-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; V-NEXT:    vmv2r.v v12, v10
-; V-NEXT:    vmv2r.v v14, v8
-; V-NEXT:    vwaddu.vv v8, v14, v12
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwmaccu.vx v8, a0, v12
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; ZVBB-NEXT:    vmv2r.v v12, v10
-; ZVBB-NEXT:    vmv2r.v v14, v8
-; ZVBB-NEXT:    li a0, 32
-; ZVBB-NEXT:    vwsll.vx v8, v12, a0
-; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; ZIP-NEXT:    vmv2r.v v12, v10
-; ZIP-NEXT:    vmv2r.v v14, v8
-; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT:    ret
-  %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
-  ret <vscale x 8 x float> %res
-}
 
-define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; V-LABEL: vector_interleave_nxv4f64_nxv2f64:
-; V:       # %bb.0:
-; V-NEXT:    csrr a0, vlenb
-; V-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; V-NEXT:    vid.v v12
-; V-NEXT:    srli a0, a0, 2
-; V-NEXT:    vand.vi v13, v12, 1
-; V-NEXT:    vmsne.vi v0, v13, 0
-; V-NEXT:    vsrl.vi v16, v12, 1
-; V-NEXT:    vadd.vx v16, v16, a0, v0.t
-; V-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; V-NEXT:    vrgatherei16.vv v12, v8, v16
-; V-NEXT:    vmv.v.v v8, v12
-; V-NEXT:    ret
+define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsseg3e32.v v8, (a0)
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re32.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re32.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32:
 ; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
-; ZVBB-NEXT:    vid.v v12
-; ZVBB-NEXT:    srli a0, a0, 2
-; ZVBB-NEXT:    vand.vi v13, v12, 1
-; ZVBB-NEXT:    vmsne.vi v0, v13, 0
-; ZVBB-NEXT:    vsrl.vi v16, v12, 1
-; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
-; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
-; ZVBB-NEXT:    vmv.v.v v8, v12
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; ZIP-NEXT:    vmv2r.v v12, v10
-; ZIP-NEXT:    vmv2r.v v14, v8
-; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT:    ret
-  %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
-  ret <vscale x 4 x double> %res
-}
-
-
-
-define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; V-NEXT:    vmv8r.v v24, v8
-; V-NEXT:    vwaddu.vv v8, v24, v16
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwaddu.vv v0, v28, v20
-; V-NEXT:    vwmaccu.vx v8, a0, v16
-; V-NEXT:    vwmaccu.vx v0, a0, v20
-; V-NEXT:    vmv8r.v v16, v0
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZVBB-NEXT:    vwsll.vi v24, v16, 16
-; ZVBB-NEXT:    vwsll.vi v0, v20, 16
-; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
-; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
-; ZVBB-NEXT:    vmv8r.v v8, v24
-; ZVBB-NEXT:    vmv8r.v v16, v0
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT:    vmv8r.v v8, v24
-; ZIP-NEXT:    vmv8r.v v16, v0
-; ZIP-NEXT:    ret
-  %res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
-  ret <vscale x 64 x bfloat> %res
-}
-
-define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
-; V-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; V-NEXT:    vmv8r.v v24, v8
-; V-NEXT:    vwaddu.vv v8, v24, v16
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwaddu.vv v0, v28, v20
-; V-NEXT:    vwmaccu.vx v8, a0, v16
-; V-NEXT:    vwmaccu.vx v0, a0, v20
-; V-NEXT:    vmv8r.v v16, v0
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZVBB-NEXT:    vwsll.vi v24, v16, 16
-; ZVBB-NEXT:    vwsll.vi v0, v20, 16
-; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
-; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
-; ZVBB-NEXT:    vmv8r.v v8, v24
-; ZVBB-NEXT:    vmv8r.v v16, v0
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT:    vmv8r.v v8, v24
-; ZIP-NEXT:    vmv8r.v v16, v0
-; ZIP-NEXT:    ret
-  %res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
-  ret <vscale x 64 x half> %res
-}
-
-define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
-; V-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; V:       # %bb.0:
-; V-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; V-NEXT:    vmv8r.v v24, v8
-; V-NEXT:    vwaddu.vv v8, v24, v16
-; V-NEXT:    li a0, -1
-; V-NEXT:    vwaddu.vv v0, v28, v20
-; V-NEXT:    vwmaccu.vx v8, a0, v16
-; V-NEXT:    vwmaccu.vx v0, a0, v20
-; V-NEXT:    vmv8r.v v16, v0
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    li a0, 32
-; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT:    vwsll.vx v24, v16, a0
-; ZVBB-NEXT:    vwsll.vx v0, v20, a0
-; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
-; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
-; ZVBB-NEXT:    vmv8r.v v8, v24
-; ZVBB-NEXT:    vmv8r.v v16, v0
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT:    vmv8r.v v8, v24
-; ZIP-NEXT:    vmv8r.v v16, v0
-; ZIP-NEXT:    ret
-  %res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
-  ret <vscale x 32 x float> %res
-}
-
-define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
-; V-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; V:       # %bb.0:
-; V-NEXT:    csrr a0, vlenb
-; V-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; V-NEXT:    vid.v v6
-; V-NEXT:    vmv8r.v v24, v8
-; V-NEXT:    srli a0, a0, 1
-; V-NEXT:    vmv4r.v v28, v16
-; V-NEXT:    vmv4r.v v16, v12
-; V-NEXT:    vand.vi v8, v6, 1
-; V-NEXT:    vmsne.vi v0, v8, 0
-; V-NEXT:    vsrl.vi v6, v6, 1
-; V-NEXT:    vadd.vx v6, v6, a0, v0.t
-; V-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; V-NEXT:    vrgatherei16.vv v8, v24, v6
-; V-NEXT:    vrgatherei16.vv v24, v16, v6
-; V-NEXT:    vmv.v.v v16, v24
-; V-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
+; ZVBB-NEXT:    vl2re32.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re32.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re32.v v12, (a0)
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
-; ZVBB-NEXT:    vid.v v6
-; ZVBB-NEXT:    vmv8r.v v24, v8
-; ZVBB-NEXT:    srli a0, a0, 1
-; ZVBB-NEXT:    vmv4r.v v28, v16
-; ZVBB-NEXT:    vmv4r.v v16, v12
-; ZVBB-NEXT:    vand.vi v8, v6, 1
-; ZVBB-NEXT:    vmsne.vi v0, v8, 0
-; ZVBB-NEXT:    vsrl.vi v6, v6, 1
-; ZVBB-NEXT:    vadd.vx v6, v6, a0, v0.t
-; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT:    vrgatherei16.vv v8, v24, v6
-; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT:    vmv.v.v v16, v24
-; ZVBB-NEXT:    ret
-;
-; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; ZIP:       # %bb.0:
-; ZIP-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT:    vmv8r.v v8, v24
-; ZIP-NEXT:    vmv8r.v v16, v0
-; ZIP-NEXT:    ret
-  %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
-  ret <vscale x 16 x double> %res
-}
-
-define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vzext.vf2 v8, v12
-; CHECK-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; ZVBB-NEXT:    vmv2r.v v12, v8
-; ZVBB-NEXT:    vzext.vf2 v8, v12
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
-  ret <vscale x 8 x i32> %res
+  %res = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+  ret <vscale x 12 x i32> %res
 }
 
-define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsll.vx v8, v12, a0
-; CHECK-NEXT:    ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; ZVBB-NEXT:    vmv2r.v v12, v8
-; ZVBB-NEXT:    li a0, 32
-; ZVBB-NEXT:    vwsll.vx v8, v12, a0
-; ZVBB-NEXT:    ret
-  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
-  ret <vscale x 8 x i32> %res
-}
 
-define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1:
+define <vscale x 6 x i64> @vector_interleave_nxv6i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT:    slli a2, a1, 1
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT:    add a3, a0, a2
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT:    add a2, a3, a2
-; CHECK-NEXT:    vsseg3e8.v v14, (a0)
-; CHECK-NEXT:    vl2r.v v8, (a2)
-; CHECK-NEXT:    srli a2, a1, 2
-; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    vl2r.v v10, (a3)
-; CHECK-NEXT:    vl2r.v v12, (a0)
-; CHECK-NEXT:    add a0, a2, a2
-; CHECK-NEXT:    vmsne.vi v14, v8, 0
-; CHECK-NEXT:    vmsne.vi v8, v10, 0
-; CHECK-NEXT:    vmsne.vi v0, v12, 0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v8, a2
-; CHECK-NEXT:    add a0, a1, a1
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v14, a1
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsseg3e64.v v8, (a0)
+; CHECK-NEXT:    vl2re64.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re64.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re64.v v12, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
@@ -1034,133 +759,4071 @@ define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1>
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    li a1, 6
 ; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    sub sp, sp, a0
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmv1r.v v10, v0
-; ZVBB-NEXT:    vmv1r.v v0, v8
-; ZVBB-NEXT:    vmv.v.i v12, 0
 ; ZVBB-NEXT:    addi a0, sp, 16
 ; ZVBB-NEXT:    csrr a1, vlenb
-; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
-; ZVBB-NEXT:    slli a2, a1, 1
-; ZVBB-NEXT:    vmv1r.v v0, v10
-; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
-; ZVBB-NEXT:    add a3, a0, a2
-; ZVBB-NEXT:    vmv1r.v v0, v9
-; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
-; ZVBB-NEXT:    add a2, a3, a2
-; ZVBB-NEXT:    vsseg3e8.v v14, (a0)
-; ZVBB-NEXT:    vl2r.v v8, (a2)
-; ZVBB-NEXT:    srli a2, a1, 2
-; ZVBB-NEXT:    srli a1, a1, 1
-; ZVBB-NEXT:    vl2r.v v10, (a3)
-; ZVBB-NEXT:    vl2r.v v12, (a0)
-; ZVBB-NEXT:    add a0, a2, a2
-; ZVBB-NEXT:    vmsne.vi v14, v8, 0
-; ZVBB-NEXT:    vmsne.vi v8, v10, 0
-; ZVBB-NEXT:    vmsne.vi v0, v12, 0
-; ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v8, a2
-; ZVBB-NEXT:    add a0, a1, a1
-; ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v14, a1
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e64.v v8, (a0)
+; ZVBB-NEXT:    vl2re64.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re64.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re64.v v12, (a0)
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    li a1, 6
 ; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 48 x i1> @llvm.vector.interleave3.nxv48i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c)
-  ret <vscale x 48 x i1> %res
+  %res = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+  ret <vscale x 6 x i64> %res
 }
 
-
-define <vscale x 48 x i8> @vector_interleave_nxv48i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8:
+define <vscale x 80 x i1> @vector_interleave_nxv80i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e) nounwind {
+; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    addi a4, sp, 16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vsseg3e8.v v8, (a0)
-; CHECK-NEXT:    vl2r.v v8, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2r.v v10, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2r.v v12, (a0)
+; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT:    add a2, a4, a1
+; CHECK-NEXT:    srli a3, a1, 2
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v21, v18
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v16, v19
+; CHECK-NEXT:    add a5, a2, a1
+; CHECK-NEXT:    vmv1r.v v23, v8
+; CHECK-NEXT:    vmv1r.v v18, v9
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    vmerge.vim v24, v12, 1, v0
+; CHECK-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsseg5e8.v v20, (a4)
+; CHECK-NEXT:    vmv1r.v v19, v25
+; CHECK-NEXT:    vsseg5e8.v v15, (a0)
+; CHECK-NEXT:    vl1r.v v8, (a5)
+; CHECK-NEXT:    add a5, a5, a1
+; CHECK-NEXT:    vl1r.v v10, (a4)
+; CHECK-NEXT:    add a4, a5, a1
+; CHECK-NEXT:    vl1r.v v12, (a4)
+; CHECK-NEXT:    add a4, a0, a1
+; CHECK-NEXT:    vl1r.v v14, (a4)
+; CHECK-NEXT:    add a4, a4, a1
+; CHECK-NEXT:    vl1r.v v9, (a5)
+; CHECK-NEXT:    add a5, a4, a1
+; CHECK-NEXT:    vl1r.v v16, (a5)
+; CHECK-NEXT:    add a5, a5, a1
+; CHECK-NEXT:    srli a1, a1, 1
+; CHECK-NEXT:    vl1r.v v11, (a2)
+; CHECK-NEXT:    add a2, a3, a3
+; CHECK-NEXT:    vl1r.v v15, (a4)
+; CHECK-NEXT:    add a4, a1, a1
+; CHECK-NEXT:    vl1r.v v13, (a0)
+; CHECK-NEXT:    vl1r.v v17, (a5)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmsne.vi v18, v8, 0
+; CHECK-NEXT:    vmsne.vi v0, v10, 0
+; CHECK-NEXT:    vmsne.vi v8, v14, 0
+; CHECK-NEXT:    vmsne.vi v9, v12, 0
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v18, a3
+; CHECK-NEXT:    vslideup.vx v9, v8, a3
+; CHECK-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v9, a1
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmsne.vi v8, v16, 0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8:
+; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    li a1, 10
 ; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    sub sp, sp, a0
-; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmv.v.i v12, 0
+; ZVBB-NEXT:    addi a4, sp, 16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add a0, sp, a0
+; ZVBB-NEXT:    addi a0, a0, 16
 ; ZVBB-NEXT:    csrr a1, vlenb
-; ZVBB-NEXT:    slli a1, a1, 1
-; ZVBB-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vsseg3e8.v v8, (a0)
-; ZVBB-NEXT:    vl2r.v v8, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2r.v v10, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2r.v v12, (a0)
+; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
+; ZVBB-NEXT:    vmv1r.v v0, v8
+; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
+; ZVBB-NEXT:    add a2, a4, a1
+; ZVBB-NEXT:    srli a3, a1, 2
+; ZVBB-NEXT:    vmv2r.v v20, v14
+; ZVBB-NEXT:    vmv1r.v v0, v9
+; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
+; ZVBB-NEXT:    vmv1r.v v21, v18
+; ZVBB-NEXT:    vmv1r.v v0, v10
+; ZVBB-NEXT:    vmerge.vim v8, v12, 1, v0
+; ZVBB-NEXT:    vmv1r.v v22, v16
+; ZVBB-NEXT:    vmv1r.v v16, v19
+; ZVBB-NEXT:    add a5, a2, a1
+; ZVBB-NEXT:    vmv1r.v v23, v8
+; ZVBB-NEXT:    vmv1r.v v18, v9
+; ZVBB-NEXT:    vmv1r.v v0, v11
+; ZVBB-NEXT:    vmerge.vim v24, v12, 1, v0
+; ZVBB-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e8.v v20, (a4)
+; ZVBB-NEXT:    vmv1r.v v19, v25
+; ZVBB-NEXT:    vsseg5e8.v v15, (a0)
+; ZVBB-NEXT:    vl1r.v v8, (a5)
+; ZVBB-NEXT:    add a5, a5, a1
+; ZVBB-NEXT:    vl1r.v v10, (a4)
+; ZVBB-NEXT:    add a4, a5, a1
+; ZVBB-NEXT:    vl1r.v v12, (a4)
+; ZVBB-NEXT:    add a4, a0, a1
+; ZVBB-NEXT:    vl1r.v v14, (a4)
+; ZVBB-NEXT:    add a4, a4, a1
+; ZVBB-NEXT:    vl1r.v v9, (a5)
+; ZVBB-NEXT:    add a5, a4, a1
+; ZVBB-NEXT:    vl1r.v v16, (a5)
+; ZVBB-NEXT:    add a5, a5, a1
+; ZVBB-NEXT:    srli a1, a1, 1
+; ZVBB-NEXT:    vl1r.v v11, (a2)
+; ZVBB-NEXT:    add a2, a3, a3
+; ZVBB-NEXT:    vl1r.v v15, (a4)
+; ZVBB-NEXT:    add a4, a1, a1
+; ZVBB-NEXT:    vl1r.v v13, (a0)
+; ZVBB-NEXT:    vl1r.v v17, (a5)
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmsne.vi v18, v8, 0
+; ZVBB-NEXT:    vmsne.vi v0, v10, 0
+; ZVBB-NEXT:    vmsne.vi v8, v14, 0
+; ZVBB-NEXT:    vmsne.vi v9, v12, 0
+; ZVBB-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v18, a3
+; ZVBB-NEXT:    vslideup.vx v9, v8, a3
+; ZVBB-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v9, a1
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmsne.vi v8, v16, 0
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    li a1, 10
 ; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
-  ret <vscale x 48 x i8> %res
+  %res = call <vscale x 80 x i1> @llvm.vector.interleave5.nxv80i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e)
+  ret <vscale x 80 x i1> %res
 }
 
 
-define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
-; CHECK-NEXT:    mul a0, a0, a1
-; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vsseg3e16.v v8, (a0)
-; CHECK-NEXT:    vl2re16.v v8, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re16.v v10, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re16.v v12, (a0)
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
-; CHECK-NEXT:    mul a0, a0, a1
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
+define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e) nounwind {
 ;
-; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16:
-; ZVBB:       # %bb.0:
+; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 28
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v20, v16
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v18, v12
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 2
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v16, v8
+; RV32-NEXT:    vmv2r.v v22, v16
+; RV32-NEXT:    vmv2r.v v24, v18
+; RV32-NEXT:    vmv1r.v v26, v20
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v23, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    add a5, a4, a2
+; RV32-NEXT:    vmv1r.v v25, v14
+; RV32-NEXT:    add a6, a5, a2
+; RV32-NEXT:    vmv1r.v v18, v11
+; RV32-NEXT:    vsseg5e8.v v22, (a0)
+; RV32-NEXT:    vmv1r.v v20, v15
+; RV32-NEXT:    vsseg5e8.v v17, (a1)
+; RV32-NEXT:    vl1r.v v16, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v17, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1r.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v11, (a6)
+; RV32-NEXT:    vl1r.v v8, (a0)
+; RV32-NEXT:    vl1r.v v9, (a3)
+; RV32-NEXT:    vl1r.v v14, (a4)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 10
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v15, (a5)
+; RV32-NEXT:    vl1r.v v12, (a6)
+; RV32-NEXT:    vl1r.v v13, (a1)
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vs2r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8r.v v16, (a2)
+; RV32-NEXT:    vl8r.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a1, 28
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v20, v16
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v18, v12
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v16, v8
+; RV64-NEXT:    vmv2r.v v22, v16
+; RV64-NEXT:    vmv2r.v v24, v18
+; RV64-NEXT:    vmv1r.v v26, v20
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v23, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    add a5, a4, a2
+; RV64-NEXT:    vmv1r.v v25, v14
+; RV64-NEXT:    add a6, a5, a2
+; RV64-NEXT:    vmv1r.v v18, v11
+; RV64-NEXT:    vsseg5e8.v v22, (a0)
+; RV64-NEXT:    vmv1r.v v20, v15
+; RV64-NEXT:    vsseg5e8.v v17, (a1)
+; RV64-NEXT:    vl1r.v v16, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v17, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1r.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v11, (a6)
+; RV64-NEXT:    vl1r.v v8, (a0)
+; RV64-NEXT:    vl1r.v v9, (a3)
+; RV64-NEXT:    vl1r.v v14, (a4)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 10
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v15, (a5)
+; RV64-NEXT:    vl1r.v v12, (a6)
+; RV64-NEXT:    vl1r.v v13, (a1)
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vs2r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8r.v v16, (a2)
+; RV64-NEXT:    vl8r.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a1, 28
+; ZVBB-RV32-NEXT:    mul a0, a0, a1
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 2
+; ZVBB-RV32-NEXT:    add a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    add a5, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV32-NEXT:    add a6, a5, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV32-NEXT:    vsseg5e8.v v22, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV32-NEXT:    vsseg5e8.v v17, (a1)
+; ZVBB-RV32-NEXT:    vl1r.v v16, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v17, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1r.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1r.v v14, (a4)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 10
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v15, (a5)
+; ZVBB-RV32-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1r.v v13, (a1)
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a1, 28
+; ZVBB-RV64-NEXT:    mul a0, a0, a1
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 2
+; ZVBB-RV64-NEXT:    add a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    add a5, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV64-NEXT:    add a6, a5, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV64-NEXT:    vsseg5e8.v v22, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV64-NEXT:    vsseg5e8.v v17, (a1)
+; ZVBB-RV64-NEXT:    vl1r.v v16, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v17, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1r.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1r.v v14, (a4)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 10
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v15, (a5)
+; ZVBB-RV64-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1r.v v13, (a1)
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a1, 28
+; ZIP-NEXT:    mul a0, a0, a1
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v20, v16
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v18, v12
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 2
+; ZIP-NEXT:    add a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v16, v8
+; ZIP-NEXT:    vmv2r.v v22, v16
+; ZIP-NEXT:    vmv2r.v v24, v18
+; ZIP-NEXT:    vmv1r.v v26, v20
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v23, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    add a5, a4, a2
+; ZIP-NEXT:    vmv1r.v v25, v14
+; ZIP-NEXT:    add a6, a5, a2
+; ZIP-NEXT:    vmv1r.v v18, v11
+; ZIP-NEXT:    vsseg5e8.v v22, (a0)
+; ZIP-NEXT:    vmv1r.v v20, v15
+; ZIP-NEXT:    vsseg5e8.v v17, (a1)
+; ZIP-NEXT:    vl1r.v v16, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v17, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1r.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v11, (a6)
+; ZIP-NEXT:    vl1r.v v8, (a0)
+; ZIP-NEXT:    vl1r.v v9, (a3)
+; ZIP-NEXT:    vl1r.v v14, (a4)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 10
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v15, (a5)
+; ZIP-NEXT:    vl1r.v v12, (a6)
+; ZIP-NEXT:    vl1r.v v13, (a1)
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vs2r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8r.v v16, (a2)
+; ZIP-NEXT:    vl8r.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
+  ret <vscale x 80 x i8> %res
+}
+
+
+define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) nounwind {
+; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsseg5e8.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1r.v v11, (a3)
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v9, (a2)
+; CHECK-NEXT:    add a1, a3, a1
+; CHECK-NEXT:    vl1r.v v12, (a1)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e8.v v8, (a0)
+; ZVBB-NEXT:    vl1r.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1r.v v11, (a3)
+; ZVBB-NEXT:    vl1r.v v8, (a0)
+; ZVBB-NEXT:    vl1r.v v9, (a2)
+; ZVBB-NEXT:    add a1, a3, a1
+; ZVBB-NEXT:    vl1r.v v12, (a1)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 40 x i8> @llvm.vector.interleave5.nxv40i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e)
+  ret <vscale x 40 x i8> %res
+}
+
+
+define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 28
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v20, v16
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v18, v12
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 2
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v16, v8
+; RV32-NEXT:    vmv2r.v v22, v16
+; RV32-NEXT:    vmv2r.v v24, v18
+; RV32-NEXT:    vmv1r.v v26, v20
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v23, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    add a5, a4, a2
+; RV32-NEXT:    vmv1r.v v25, v14
+; RV32-NEXT:    add a6, a5, a2
+; RV32-NEXT:    vmv1r.v v18, v11
+; RV32-NEXT:    vsseg5e32.v v22, (a0)
+; RV32-NEXT:    vmv1r.v v20, v15
+; RV32-NEXT:    vsseg5e32.v v17, (a1)
+; RV32-NEXT:    vl1re32.v v16, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v17, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re32.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v11, (a6)
+; RV32-NEXT:    vl1re32.v v8, (a0)
+; RV32-NEXT:    vl1re32.v v9, (a3)
+; RV32-NEXT:    vl1re32.v v14, (a4)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 10
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v15, (a5)
+; RV32-NEXT:    vl1re32.v v12, (a6)
+; RV32-NEXT:    vl1re32.v v13, (a1)
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vs2r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re32.v v16, (a2)
+; RV32-NEXT:    vl8re32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a1, 28
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v20, v16
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v18, v12
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v16, v8
+; RV64-NEXT:    vmv2r.v v22, v16
+; RV64-NEXT:    vmv2r.v v24, v18
+; RV64-NEXT:    vmv1r.v v26, v20
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v23, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    add a5, a4, a2
+; RV64-NEXT:    vmv1r.v v25, v14
+; RV64-NEXT:    add a6, a5, a2
+; RV64-NEXT:    vmv1r.v v18, v11
+; RV64-NEXT:    vsseg5e32.v v22, (a0)
+; RV64-NEXT:    vmv1r.v v20, v15
+; RV64-NEXT:    vsseg5e32.v v17, (a1)
+; RV64-NEXT:    vl1re32.v v16, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v17, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re32.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v11, (a6)
+; RV64-NEXT:    vl1re32.v v8, (a0)
+; RV64-NEXT:    vl1re32.v v9, (a3)
+; RV64-NEXT:    vl1re32.v v14, (a4)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 10
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v15, (a5)
+; RV64-NEXT:    vl1re32.v v12, (a6)
+; RV64-NEXT:    vl1re32.v v13, (a1)
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vs2r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re32.v v16, (a2)
+; RV64-NEXT:    vl8re32.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a1, 28
+; ZVBB-RV32-NEXT:    mul a0, a0, a1
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 2
+; ZVBB-RV32-NEXT:    add a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    add a5, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV32-NEXT:    add a6, a5, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV32-NEXT:    vsseg5e32.v v22, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV32-NEXT:    vsseg5e32.v v17, (a1)
+; ZVBB-RV32-NEXT:    vl1re32.v v16, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v17, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re32.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re32.v v14, (a4)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 10
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v15, (a5)
+; ZVBB-RV32-NEXT:    vl1re32.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1re32.v v13, (a1)
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re32.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re32.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a1, 28
+; ZVBB-RV64-NEXT:    mul a0, a0, a1
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 2
+; ZVBB-RV64-NEXT:    add a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    add a5, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV64-NEXT:    add a6, a5, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV64-NEXT:    vsseg5e32.v v22, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV64-NEXT:    vsseg5e32.v v17, (a1)
+; ZVBB-RV64-NEXT:    vl1re32.v v16, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v17, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re32.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re32.v v14, (a4)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 10
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v15, (a5)
+; ZVBB-RV64-NEXT:    vl1re32.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1re32.v v13, (a1)
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re32.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re32.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a1, 28
+; ZIP-NEXT:    mul a0, a0, a1
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v20, v16
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v18, v12
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 2
+; ZIP-NEXT:    add a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v16, v8
+; ZIP-NEXT:    vmv2r.v v22, v16
+; ZIP-NEXT:    vmv2r.v v24, v18
+; ZIP-NEXT:    vmv1r.v v26, v20
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v23, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    add a5, a4, a2
+; ZIP-NEXT:    vmv1r.v v25, v14
+; ZIP-NEXT:    add a6, a5, a2
+; ZIP-NEXT:    vmv1r.v v18, v11
+; ZIP-NEXT:    vsseg5e32.v v22, (a0)
+; ZIP-NEXT:    vmv1r.v v20, v15
+; ZIP-NEXT:    vsseg5e32.v v17, (a1)
+; ZIP-NEXT:    vl1re32.v v16, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v17, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re32.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v11, (a6)
+; ZIP-NEXT:    vl1re32.v v8, (a0)
+; ZIP-NEXT:    vl1re32.v v9, (a3)
+; ZIP-NEXT:    vl1re32.v v14, (a4)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 10
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v15, (a5)
+; ZIP-NEXT:    vl1re32.v v12, (a6)
+; ZIP-NEXT:    vl1re32.v v13, (a1)
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vs2r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re32.v v16, (a2)
+; ZIP-NEXT:    vl8re32.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e)
+  ret <vscale x 20 x i32> %res
+}
+
+
+define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 28
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v20, v16
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v18, v12
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 2
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v16, v8
+; RV32-NEXT:    vmv2r.v v22, v16
+; RV32-NEXT:    vmv2r.v v24, v18
+; RV32-NEXT:    vmv1r.v v26, v20
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v23, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    add a5, a4, a2
+; RV32-NEXT:    vmv1r.v v25, v14
+; RV32-NEXT:    add a6, a5, a2
+; RV32-NEXT:    vmv1r.v v18, v11
+; RV32-NEXT:    vsseg5e64.v v22, (a0)
+; RV32-NEXT:    vmv1r.v v20, v15
+; RV32-NEXT:    vsseg5e64.v v17, (a1)
+; RV32-NEXT:    vl1re64.v v16, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v17, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re64.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v11, (a6)
+; RV32-NEXT:    vl1re64.v v8, (a0)
+; RV32-NEXT:    vl1re64.v v9, (a3)
+; RV32-NEXT:    vl1re64.v v14, (a4)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 10
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v15, (a5)
+; RV32-NEXT:    vl1re64.v v12, (a6)
+; RV32-NEXT:    vl1re64.v v13, (a1)
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vs2r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re64.v v16, (a2)
+; RV32-NEXT:    vl8re64.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a1, 28
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v20, v16
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v18, v12
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v16, v8
+; RV64-NEXT:    vmv2r.v v22, v16
+; RV64-NEXT:    vmv2r.v v24, v18
+; RV64-NEXT:    vmv1r.v v26, v20
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v23, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    add a5, a4, a2
+; RV64-NEXT:    vmv1r.v v25, v14
+; RV64-NEXT:    add a6, a5, a2
+; RV64-NEXT:    vmv1r.v v18, v11
+; RV64-NEXT:    vsseg5e64.v v22, (a0)
+; RV64-NEXT:    vmv1r.v v20, v15
+; RV64-NEXT:    vsseg5e64.v v17, (a1)
+; RV64-NEXT:    vl1re64.v v16, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v17, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re64.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v11, (a6)
+; RV64-NEXT:    vl1re64.v v8, (a0)
+; RV64-NEXT:    vl1re64.v v9, (a3)
+; RV64-NEXT:    vl1re64.v v14, (a4)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 10
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v15, (a5)
+; RV64-NEXT:    vl1re64.v v12, (a6)
+; RV64-NEXT:    vl1re64.v v13, (a1)
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vs2r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re64.v v16, (a2)
+; RV64-NEXT:    vl8re64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a1, 28
+; ZVBB-RV32-NEXT:    mul a0, a0, a1
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 2
+; ZVBB-RV32-NEXT:    add a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    add a5, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV32-NEXT:    add a6, a5, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV32-NEXT:    vsseg5e64.v v22, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV32-NEXT:    vsseg5e64.v v17, (a1)
+; ZVBB-RV32-NEXT:    vl1re64.v v16, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v17, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re64.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re64.v v14, (a4)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 10
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v15, (a5)
+; ZVBB-RV32-NEXT:    vl1re64.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1re64.v v13, (a1)
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re64.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re64.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a1, 28
+; ZVBB-RV64-NEXT:    mul a0, a0, a1
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 2
+; ZVBB-RV64-NEXT:    add a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    add a5, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV64-NEXT:    add a6, a5, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV64-NEXT:    vsseg5e64.v v22, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV64-NEXT:    vsseg5e64.v v17, (a1)
+; ZVBB-RV64-NEXT:    vl1re64.v v16, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v17, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re64.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re64.v v14, (a4)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 10
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v15, (a5)
+; ZVBB-RV64-NEXT:    vl1re64.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1re64.v v13, (a1)
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re64.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re64.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a1, 28
+; ZIP-NEXT:    mul a0, a0, a1
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v20, v16
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v18, v12
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 2
+; ZIP-NEXT:    add a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v16, v8
+; ZIP-NEXT:    vmv2r.v v22, v16
+; ZIP-NEXT:    vmv2r.v v24, v18
+; ZIP-NEXT:    vmv1r.v v26, v20
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v23, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    add a5, a4, a2
+; ZIP-NEXT:    vmv1r.v v25, v14
+; ZIP-NEXT:    add a6, a5, a2
+; ZIP-NEXT:    vmv1r.v v18, v11
+; ZIP-NEXT:    vsseg5e64.v v22, (a0)
+; ZIP-NEXT:    vmv1r.v v20, v15
+; ZIP-NEXT:    vsseg5e64.v v17, (a1)
+; ZIP-NEXT:    vl1re64.v v16, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v17, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re64.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v11, (a6)
+; ZIP-NEXT:    vl1re64.v v8, (a0)
+; ZIP-NEXT:    vl1re64.v v9, (a3)
+; ZIP-NEXT:    vl1re64.v v14, (a4)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 10
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v15, (a5)
+; ZIP-NEXT:    vl1re64.v v12, (a6)
+; ZIP-NEXT:    vl1re64.v v13, (a1)
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vs2r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re64.v v16, (a2)
+; ZIP-NEXT:    vl8re64.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e)
+  ret <vscale x 10 x i64> %res
+}
+
+define <vscale x 112 x i1> @vector_interleave_nxv112i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) nounwind {
+; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 14
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v14, 0
+; CHECK-NEXT:    addi a4, sp, 16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    vmerge.vim v16, v14, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v22, v14, 1, v0
+; CHECK-NEXT:    add a3, a4, a2
+; CHECK-NEXT:    srli a1, a2, 2
+; CHECK-NEXT:    add a5, a0, a2
+; CHECK-NEXT:    vmv4r.v v24, v16
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v18, v14, 1, v0
+; CHECK-NEXT:    add a6, a3, a2
+; CHECK-NEXT:    vmv1r.v v25, v22
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmerge.vim v8, v14, 1, v0
+; CHECK-NEXT:    vmv1r.v v26, v18
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    vmerge.vim v20, v14, 1, v0
+; CHECK-NEXT:    vmv1r.v v27, v8
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmerge.vim v10, v14, 1, v0
+; CHECK-NEXT:    vmv1r.v v28, v20
+; CHECK-NEXT:    vmv1r.v v18, v23
+; CHECK-NEXT:    add a7, a6, a2
+; CHECK-NEXT:    vmv1r.v v29, v10
+; CHECK-NEXT:    vmv1r.v v20, v9
+; CHECK-NEXT:    vmv1r.v v0, v13
+; CHECK-NEXT:    vmerge.vim v30, v14, 1, v0
+; CHECK-NEXT:    vmv1r.v v22, v11
+; CHECK-NEXT:    vsetvli t0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsseg7e8.v v24, (a4)
+; CHECK-NEXT:    vmv1r.v v23, v31
+; CHECK-NEXT:    vsseg7e8.v v17, (a0)
+; CHECK-NEXT:    vl1r.v v8, (a6)
+; CHECK-NEXT:    add a6, a7, a2
+; CHECK-NEXT:    vl1r.v v10, (a4)
+; CHECK-NEXT:    add a4, a6, a2
+; CHECK-NEXT:    vl1r.v v12, (a6)
+; CHECK-NEXT:    add a6, a4, a2
+; CHECK-NEXT:    vl1r.v v14, (a6)
+; CHECK-NEXT:    add a6, a5, a2
+; CHECK-NEXT:    vl1r.v v16, (a5)
+; CHECK-NEXT:    add a5, a6, a2
+; CHECK-NEXT:    vl1r.v v18, (a5)
+; CHECK-NEXT:    add a5, a5, a2
+; CHECK-NEXT:    vl1r.v v9, (a7)
+; CHECK-NEXT:    add a7, a5, a2
+; CHECK-NEXT:    vl1r.v v20, (a7)
+; CHECK-NEXT:    add a7, a7, a2
+; CHECK-NEXT:    srli a2, a2, 1
+; CHECK-NEXT:    vl1r.v v11, (a3)
+; CHECK-NEXT:    add a3, a1, a1
+; CHECK-NEXT:    vl1r.v v13, (a4)
+; CHECK-NEXT:    add a4, a2, a2
+; CHECK-NEXT:    vl1r.v v15, (a0)
+; CHECK-NEXT:    vl1r.v v19, (a5)
+; CHECK-NEXT:    vl1r.v v17, (a6)
+; CHECK-NEXT:    vl1r.v v21, (a7)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmsne.vi v22, v8, 0
+; CHECK-NEXT:    vmsne.vi v0, v10, 0
+; CHECK-NEXT:    vmsne.vi v9, v12, 0
+; CHECK-NEXT:    vmsne.vi v10, v14, 0
+; CHECK-NEXT:    vmsne.vi v11, v18, 0
+; CHECK-NEXT:    vmsne.vi v8, v16, 0
+; CHECK-NEXT:    vmsne.vi v12, v20, 0
+; CHECK-NEXT:    vsetvli zero, a3, e8, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v22, a1
+; CHECK-NEXT:    vslideup.vx v9, v10, a1
+; CHECK-NEXT:    vslideup.vx v8, v11, a1
+; CHECK-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v9, a2
+; CHECK-NEXT:    vslideup.vx v8, v12, a2
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 14
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 14
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmv.v.i v14, 0
+; ZVBB-NEXT:    addi a4, sp, 16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    add a0, sp, a0
+; ZVBB-NEXT:    addi a0, a0, 16
+; ZVBB-NEXT:    csrr a2, vlenb
+; ZVBB-NEXT:    vmerge.vim v16, v14, 1, v0
+; ZVBB-NEXT:    vmv1r.v v0, v8
+; ZVBB-NEXT:    vmerge.vim v22, v14, 1, v0
+; ZVBB-NEXT:    add a3, a4, a2
+; ZVBB-NEXT:    srli a1, a2, 2
+; ZVBB-NEXT:    add a5, a0, a2
+; ZVBB-NEXT:    vmv4r.v v24, v16
+; ZVBB-NEXT:    vmv1r.v v0, v9
+; ZVBB-NEXT:    vmerge.vim v18, v14, 1, v0
+; ZVBB-NEXT:    add a6, a3, a2
+; ZVBB-NEXT:    vmv1r.v v25, v22
+; ZVBB-NEXT:    vmv1r.v v0, v10
+; ZVBB-NEXT:    vmerge.vim v8, v14, 1, v0
+; ZVBB-NEXT:    vmv1r.v v26, v18
+; ZVBB-NEXT:    vmv1r.v v0, v11
+; ZVBB-NEXT:    vmerge.vim v20, v14, 1, v0
+; ZVBB-NEXT:    vmv1r.v v27, v8
+; ZVBB-NEXT:    vmv1r.v v0, v12
+; ZVBB-NEXT:    vmerge.vim v10, v14, 1, v0
+; ZVBB-NEXT:    vmv1r.v v28, v20
+; ZVBB-NEXT:    vmv1r.v v18, v23
+; ZVBB-NEXT:    add a7, a6, a2
+; ZVBB-NEXT:    vmv1r.v v29, v10
+; ZVBB-NEXT:    vmv1r.v v20, v9
+; ZVBB-NEXT:    vmv1r.v v0, v13
+; ZVBB-NEXT:    vmerge.vim v30, v14, 1, v0
+; ZVBB-NEXT:    vmv1r.v v22, v11
+; ZVBB-NEXT:    vsetvli t0, zero, e8, m1, ta, ma
+; ZVBB-NEXT:    vsseg7e8.v v24, (a4)
+; ZVBB-NEXT:    vmv1r.v v23, v31
+; ZVBB-NEXT:    vsseg7e8.v v17, (a0)
+; ZVBB-NEXT:    vl1r.v v8, (a6)
+; ZVBB-NEXT:    add a6, a7, a2
+; ZVBB-NEXT:    vl1r.v v10, (a4)
+; ZVBB-NEXT:    add a4, a6, a2
+; ZVBB-NEXT:    vl1r.v v12, (a6)
+; ZVBB-NEXT:    add a6, a4, a2
+; ZVBB-NEXT:    vl1r.v v14, (a6)
+; ZVBB-NEXT:    add a6, a5, a2
+; ZVBB-NEXT:    vl1r.v v16, (a5)
+; ZVBB-NEXT:    add a5, a6, a2
+; ZVBB-NEXT:    vl1r.v v18, (a5)
+; ZVBB-NEXT:    add a5, a5, a2
+; ZVBB-NEXT:    vl1r.v v9, (a7)
+; ZVBB-NEXT:    add a7, a5, a2
+; ZVBB-NEXT:    vl1r.v v20, (a7)
+; ZVBB-NEXT:    add a7, a7, a2
+; ZVBB-NEXT:    srli a2, a2, 1
+; ZVBB-NEXT:    vl1r.v v11, (a3)
+; ZVBB-NEXT:    add a3, a1, a1
+; ZVBB-NEXT:    vl1r.v v13, (a4)
+; ZVBB-NEXT:    add a4, a2, a2
+; ZVBB-NEXT:    vl1r.v v15, (a0)
+; ZVBB-NEXT:    vl1r.v v19, (a5)
+; ZVBB-NEXT:    vl1r.v v17, (a6)
+; ZVBB-NEXT:    vl1r.v v21, (a7)
+; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT:    vmsne.vi v22, v8, 0
+; ZVBB-NEXT:    vmsne.vi v0, v10, 0
+; ZVBB-NEXT:    vmsne.vi v9, v12, 0
+; ZVBB-NEXT:    vmsne.vi v10, v14, 0
+; ZVBB-NEXT:    vmsne.vi v11, v18, 0
+; ZVBB-NEXT:    vmsne.vi v8, v16, 0
+; ZVBB-NEXT:    vmsne.vi v12, v20, 0
+; ZVBB-NEXT:    vsetvli zero, a3, e8, mf2, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v22, a1
+; ZVBB-NEXT:    vslideup.vx v9, v10, a1
+; ZVBB-NEXT:    vslideup.vx v8, v11, a1
+; ZVBB-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v0, v9, a2
+; ZVBB-NEXT:    vslideup.vx v8, v12, a2
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 14
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 112 x i1> @llvm.vector.interleave7.nxv112i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g)
+  ret <vscale x 112 x i1> %res
+}
+
+
+define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v26, v20
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 3
+; RV32-NEXT:    sub a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    vmv2r.v v22, v12
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v20, v8
+; RV32-NEXT:    vmv1r.v v1, v20
+; RV32-NEXT:    vmv1r.v v3, v22
+; RV32-NEXT:    vmv1r.v v5, v24
+; RV32-NEXT:    vmv1r.v v7, v26
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v2, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    slli a5, a2, 2
+; RV32-NEXT:    vmv1r.v v4, v14
+; RV32-NEXT:    slli a6, a2, 4
+; RV32-NEXT:    add a7, a4, a2
+; RV32-NEXT:    vmv1r.v v6, v18
+; RV32-NEXT:    sub a5, a6, a5
+; RV32-NEXT:    vmv1r.v v22, v11
+; RV32-NEXT:    add a6, a7, a2
+; RV32-NEXT:    vmv1r.v v24, v15
+; RV32-NEXT:    vsseg7e8.v v1, (a0)
+; RV32-NEXT:    vmv1r.v v26, v19
+; RV32-NEXT:    vsseg7e8.v v21, (a1)
+; RV32-NEXT:    vl1r.v v18, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v19, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v20, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v21, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1r.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v11, (a6)
+; RV32-NEXT:    vl1r.v v8, (a0)
+; RV32-NEXT:    vl1r.v v16, (a4)
+; RV32-NEXT:    vl1r.v v9, (a3)
+; RV32-NEXT:    vl1r.v v17, (a7)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 14
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v12, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1r.v v13, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vl1r.v v14, (a6)
+; RV32-NEXT:    vl1r.v v15, (a1)
+; RV32-NEXT:    add a5, a0, a5
+; RV32-NEXT:    vs2r.v v20, (a5)
+; RV32-NEXT:    vs4r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8r.v v16, (a2)
+; RV32-NEXT:    vl8r.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 5
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v26, v20
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v24, v16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    sub a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    vmv2r.v v22, v12
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v20, v8
+; RV64-NEXT:    vmv1r.v v1, v20
+; RV64-NEXT:    vmv1r.v v3, v22
+; RV64-NEXT:    vmv1r.v v5, v24
+; RV64-NEXT:    vmv1r.v v7, v26
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v2, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    slli a5, a2, 2
+; RV64-NEXT:    vmv1r.v v4, v14
+; RV64-NEXT:    slli a6, a2, 4
+; RV64-NEXT:    add a7, a4, a2
+; RV64-NEXT:    vmv1r.v v6, v18
+; RV64-NEXT:    sub a5, a6, a5
+; RV64-NEXT:    vmv1r.v v22, v11
+; RV64-NEXT:    add a6, a7, a2
+; RV64-NEXT:    vmv1r.v v24, v15
+; RV64-NEXT:    vsseg7e8.v v1, (a0)
+; RV64-NEXT:    vmv1r.v v26, v19
+; RV64-NEXT:    vsseg7e8.v v21, (a1)
+; RV64-NEXT:    vl1r.v v18, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v19, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v20, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v21, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1r.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v11, (a6)
+; RV64-NEXT:    vl1r.v v8, (a0)
+; RV64-NEXT:    vl1r.v v16, (a4)
+; RV64-NEXT:    vl1r.v v9, (a3)
+; RV64-NEXT:    vl1r.v v17, (a7)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 14
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v12, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1r.v v13, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vl1r.v v14, (a6)
+; RV64-NEXT:    vl1r.v v15, (a1)
+; RV64-NEXT:    add a5, a0, a5
+; RV64-NEXT:    vs2r.v v20, (a5)
+; RV64-NEXT:    vs4r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8r.v v16, (a2)
+; RV64-NEXT:    vl8r.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    slli a0, a0, 5
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 3
+; ZVBB-RV32-NEXT:    sub a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV32-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV32-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV32-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV32-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    slli a5, a2, 2
+; ZVBB-RV32-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV32-NEXT:    slli a6, a2, 4
+; ZVBB-RV32-NEXT:    add a7, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV32-NEXT:    sub a5, a6, a5
+; ZVBB-RV32-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV32-NEXT:    add a6, a7, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV32-NEXT:    vsseg7e8.v v1, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV32-NEXT:    vsseg7e8.v v21, (a1)
+; ZVBB-RV32-NEXT:    vl1r.v v18, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v19, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v20, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v21, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1r.v v16, (a4)
+; ZVBB-RV32-NEXT:    vl1r.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1r.v v17, (a7)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 14
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1r.v v13, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vl1r.v v14, (a6)
+; ZVBB-RV32-NEXT:    vl1r.v v15, (a1)
+; ZVBB-RV32-NEXT:    add a5, a0, a5
+; ZVBB-RV32-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    slli a0, a0, 5
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 3
+; ZVBB-RV64-NEXT:    sub a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV64-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV64-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV64-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV64-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    slli a5, a2, 2
+; ZVBB-RV64-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV64-NEXT:    slli a6, a2, 4
+; ZVBB-RV64-NEXT:    add a7, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV64-NEXT:    sub a5, a6, a5
+; ZVBB-RV64-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV64-NEXT:    add a6, a7, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV64-NEXT:    vsseg7e8.v v1, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV64-NEXT:    vsseg7e8.v v21, (a1)
+; ZVBB-RV64-NEXT:    vl1r.v v18, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v19, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v20, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v21, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1r.v v16, (a4)
+; ZVBB-RV64-NEXT:    vl1r.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1r.v v17, (a7)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 14
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1r.v v13, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vl1r.v v14, (a6)
+; ZVBB-RV64-NEXT:    vl1r.v v15, (a1)
+; ZVBB-RV64-NEXT:    add a5, a0, a5
+; ZVBB-RV64-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    slli a0, a0, 5
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v26, v20
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v24, v16
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 3
+; ZIP-NEXT:    sub a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    vmv2r.v v22, v12
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v20, v8
+; ZIP-NEXT:    vmv1r.v v1, v20
+; ZIP-NEXT:    vmv1r.v v3, v22
+; ZIP-NEXT:    vmv1r.v v5, v24
+; ZIP-NEXT:    vmv1r.v v7, v26
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v2, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    slli a5, a2, 2
+; ZIP-NEXT:    vmv1r.v v4, v14
+; ZIP-NEXT:    slli a6, a2, 4
+; ZIP-NEXT:    add a7, a4, a2
+; ZIP-NEXT:    vmv1r.v v6, v18
+; ZIP-NEXT:    sub a5, a6, a5
+; ZIP-NEXT:    vmv1r.v v22, v11
+; ZIP-NEXT:    add a6, a7, a2
+; ZIP-NEXT:    vmv1r.v v24, v15
+; ZIP-NEXT:    vsseg7e8.v v1, (a0)
+; ZIP-NEXT:    vmv1r.v v26, v19
+; ZIP-NEXT:    vsseg7e8.v v21, (a1)
+; ZIP-NEXT:    vl1r.v v18, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v19, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v20, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v21, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1r.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v11, (a6)
+; ZIP-NEXT:    vl1r.v v8, (a0)
+; ZIP-NEXT:    vl1r.v v16, (a4)
+; ZIP-NEXT:    vl1r.v v9, (a3)
+; ZIP-NEXT:    vl1r.v v17, (a7)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 14
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v12, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1r.v v13, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vl1r.v v14, (a6)
+; ZIP-NEXT:    vl1r.v v15, (a1)
+; ZIP-NEXT:    add a5, a0, a5
+; ZIP-NEXT:    vs2r.v v20, (a5)
+; ZIP-NEXT:    vs4r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8r.v v16, (a2)
+; ZIP-NEXT:    vl8r.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g)
+  ret <vscale x 112 x i8> %res
+}
+
+
+define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v26, v20
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 3
+; RV32-NEXT:    sub a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    vmv2r.v v22, v12
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v20, v8
+; RV32-NEXT:    vmv1r.v v1, v20
+; RV32-NEXT:    vmv1r.v v3, v22
+; RV32-NEXT:    vmv1r.v v5, v24
+; RV32-NEXT:    vmv1r.v v7, v26
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v2, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    slli a5, a2, 2
+; RV32-NEXT:    vmv1r.v v4, v14
+; RV32-NEXT:    slli a6, a2, 4
+; RV32-NEXT:    add a7, a4, a2
+; RV32-NEXT:    vmv1r.v v6, v18
+; RV32-NEXT:    sub a5, a6, a5
+; RV32-NEXT:    vmv1r.v v22, v11
+; RV32-NEXT:    add a6, a7, a2
+; RV32-NEXT:    vmv1r.v v24, v15
+; RV32-NEXT:    vsseg7e16.v v1, (a0)
+; RV32-NEXT:    vmv1r.v v26, v19
+; RV32-NEXT:    vsseg7e16.v v21, (a1)
+; RV32-NEXT:    vl1re16.v v18, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v19, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v20, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v21, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re16.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v11, (a6)
+; RV32-NEXT:    vl1re16.v v8, (a0)
+; RV32-NEXT:    vl1re16.v v16, (a4)
+; RV32-NEXT:    vl1re16.v v9, (a3)
+; RV32-NEXT:    vl1re16.v v17, (a7)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 14
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v12, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v13, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vl1re16.v v14, (a6)
+; RV32-NEXT:    vl1re16.v v15, (a1)
+; RV32-NEXT:    add a5, a0, a5
+; RV32-NEXT:    vs2r.v v20, (a5)
+; RV32-NEXT:    vs4r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re16.v v16, (a2)
+; RV32-NEXT:    vl8re16.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 5
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v26, v20
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v24, v16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    sub a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    vmv2r.v v22, v12
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v20, v8
+; RV64-NEXT:    vmv1r.v v1, v20
+; RV64-NEXT:    vmv1r.v v3, v22
+; RV64-NEXT:    vmv1r.v v5, v24
+; RV64-NEXT:    vmv1r.v v7, v26
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v2, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    slli a5, a2, 2
+; RV64-NEXT:    vmv1r.v v4, v14
+; RV64-NEXT:    slli a6, a2, 4
+; RV64-NEXT:    add a7, a4, a2
+; RV64-NEXT:    vmv1r.v v6, v18
+; RV64-NEXT:    sub a5, a6, a5
+; RV64-NEXT:    vmv1r.v v22, v11
+; RV64-NEXT:    add a6, a7, a2
+; RV64-NEXT:    vmv1r.v v24, v15
+; RV64-NEXT:    vsseg7e16.v v1, (a0)
+; RV64-NEXT:    vmv1r.v v26, v19
+; RV64-NEXT:    vsseg7e16.v v21, (a1)
+; RV64-NEXT:    vl1re16.v v18, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v19, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v20, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v21, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re16.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v11, (a6)
+; RV64-NEXT:    vl1re16.v v8, (a0)
+; RV64-NEXT:    vl1re16.v v16, (a4)
+; RV64-NEXT:    vl1re16.v v9, (a3)
+; RV64-NEXT:    vl1re16.v v17, (a7)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 14
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v12, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v13, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vl1re16.v v14, (a6)
+; RV64-NEXT:    vl1re16.v v15, (a1)
+; RV64-NEXT:    add a5, a0, a5
+; RV64-NEXT:    vs2r.v v20, (a5)
+; RV64-NEXT:    vs4r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re16.v v16, (a2)
+; RV64-NEXT:    vl8re16.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    slli a0, a0, 5
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 3
+; ZVBB-RV32-NEXT:    sub a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV32-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV32-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV32-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV32-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    slli a5, a2, 2
+; ZVBB-RV32-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV32-NEXT:    slli a6, a2, 4
+; ZVBB-RV32-NEXT:    add a7, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV32-NEXT:    sub a5, a6, a5
+; ZVBB-RV32-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV32-NEXT:    add a6, a7, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV32-NEXT:    vsseg7e16.v v1, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV32-NEXT:    vsseg7e16.v v21, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v18, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v19, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v20, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v21, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re16.v v16, (a4)
+; ZVBB-RV32-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re16.v v17, (a7)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 14
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v13, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v14, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v15, (a1)
+; ZVBB-RV32-NEXT:    add a5, a0, a5
+; ZVBB-RV32-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    slli a0, a0, 5
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 3
+; ZVBB-RV64-NEXT:    sub a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV64-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV64-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV64-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV64-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    slli a5, a2, 2
+; ZVBB-RV64-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV64-NEXT:    slli a6, a2, 4
+; ZVBB-RV64-NEXT:    add a7, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV64-NEXT:    sub a5, a6, a5
+; ZVBB-RV64-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV64-NEXT:    add a6, a7, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV64-NEXT:    vsseg7e16.v v1, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV64-NEXT:    vsseg7e16.v v21, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v18, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v19, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v20, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v21, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re16.v v16, (a4)
+; ZVBB-RV64-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re16.v v17, (a7)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 14
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v13, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v14, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v15, (a1)
+; ZVBB-RV64-NEXT:    add a5, a0, a5
+; ZVBB-RV64-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    slli a0, a0, 5
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v26, v20
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v24, v16
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 3
+; ZIP-NEXT:    sub a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    vmv2r.v v22, v12
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v20, v8
+; ZIP-NEXT:    vmv1r.v v1, v20
+; ZIP-NEXT:    vmv1r.v v3, v22
+; ZIP-NEXT:    vmv1r.v v5, v24
+; ZIP-NEXT:    vmv1r.v v7, v26
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v2, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    slli a5, a2, 2
+; ZIP-NEXT:    vmv1r.v v4, v14
+; ZIP-NEXT:    slli a6, a2, 4
+; ZIP-NEXT:    add a7, a4, a2
+; ZIP-NEXT:    vmv1r.v v6, v18
+; ZIP-NEXT:    sub a5, a6, a5
+; ZIP-NEXT:    vmv1r.v v22, v11
+; ZIP-NEXT:    add a6, a7, a2
+; ZIP-NEXT:    vmv1r.v v24, v15
+; ZIP-NEXT:    vsseg7e16.v v1, (a0)
+; ZIP-NEXT:    vmv1r.v v26, v19
+; ZIP-NEXT:    vsseg7e16.v v21, (a1)
+; ZIP-NEXT:    vl1re16.v v18, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v19, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v20, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v21, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re16.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v11, (a6)
+; ZIP-NEXT:    vl1re16.v v8, (a0)
+; ZIP-NEXT:    vl1re16.v v16, (a4)
+; ZIP-NEXT:    vl1re16.v v9, (a3)
+; ZIP-NEXT:    vl1re16.v v17, (a7)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 14
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v12, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v13, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vl1re16.v v14, (a6)
+; ZIP-NEXT:    vl1re16.v v15, (a1)
+; ZIP-NEXT:    add a5, a0, a5
+; ZIP-NEXT:    vs2r.v v20, (a5)
+; ZIP-NEXT:    vs4r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re16.v v16, (a2)
+; ZIP-NEXT:    vl8re16.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g)
+  ret <vscale x 56 x i16> %res
+}
+
+
+define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v26, v20
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 3
+; RV32-NEXT:    sub a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    vmv2r.v v22, v12
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v20, v8
+; RV32-NEXT:    vmv1r.v v1, v20
+; RV32-NEXT:    vmv1r.v v3, v22
+; RV32-NEXT:    vmv1r.v v5, v24
+; RV32-NEXT:    vmv1r.v v7, v26
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v2, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    slli a5, a2, 2
+; RV32-NEXT:    vmv1r.v v4, v14
+; RV32-NEXT:    slli a6, a2, 4
+; RV32-NEXT:    add a7, a4, a2
+; RV32-NEXT:    vmv1r.v v6, v18
+; RV32-NEXT:    sub a5, a6, a5
+; RV32-NEXT:    vmv1r.v v22, v11
+; RV32-NEXT:    add a6, a7, a2
+; RV32-NEXT:    vmv1r.v v24, v15
+; RV32-NEXT:    vsseg7e32.v v1, (a0)
+; RV32-NEXT:    vmv1r.v v26, v19
+; RV32-NEXT:    vsseg7e32.v v21, (a1)
+; RV32-NEXT:    vl1re32.v v18, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v19, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v20, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v21, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re32.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v11, (a6)
+; RV32-NEXT:    vl1re32.v v8, (a0)
+; RV32-NEXT:    vl1re32.v v16, (a4)
+; RV32-NEXT:    vl1re32.v v9, (a3)
+; RV32-NEXT:    vl1re32.v v17, (a7)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 14
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v12, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re32.v v13, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vl1re32.v v14, (a6)
+; RV32-NEXT:    vl1re32.v v15, (a1)
+; RV32-NEXT:    add a5, a0, a5
+; RV32-NEXT:    vs2r.v v20, (a5)
+; RV32-NEXT:    vs4r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re32.v v16, (a2)
+; RV32-NEXT:    vl8re32.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 5
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v26, v20
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v24, v16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    sub a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    vmv2r.v v22, v12
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v20, v8
+; RV64-NEXT:    vmv1r.v v1, v20
+; RV64-NEXT:    vmv1r.v v3, v22
+; RV64-NEXT:    vmv1r.v v5, v24
+; RV64-NEXT:    vmv1r.v v7, v26
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v2, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    slli a5, a2, 2
+; RV64-NEXT:    vmv1r.v v4, v14
+; RV64-NEXT:    slli a6, a2, 4
+; RV64-NEXT:    add a7, a4, a2
+; RV64-NEXT:    vmv1r.v v6, v18
+; RV64-NEXT:    sub a5, a6, a5
+; RV64-NEXT:    vmv1r.v v22, v11
+; RV64-NEXT:    add a6, a7, a2
+; RV64-NEXT:    vmv1r.v v24, v15
+; RV64-NEXT:    vsseg7e32.v v1, (a0)
+; RV64-NEXT:    vmv1r.v v26, v19
+; RV64-NEXT:    vsseg7e32.v v21, (a1)
+; RV64-NEXT:    vl1re32.v v18, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v19, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v20, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v21, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re32.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v11, (a6)
+; RV64-NEXT:    vl1re32.v v8, (a0)
+; RV64-NEXT:    vl1re32.v v16, (a4)
+; RV64-NEXT:    vl1re32.v v9, (a3)
+; RV64-NEXT:    vl1re32.v v17, (a7)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 14
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v12, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re32.v v13, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vl1re32.v v14, (a6)
+; RV64-NEXT:    vl1re32.v v15, (a1)
+; RV64-NEXT:    add a5, a0, a5
+; RV64-NEXT:    vs2r.v v20, (a5)
+; RV64-NEXT:    vs4r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re32.v v16, (a2)
+; RV64-NEXT:    vl8re32.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    slli a0, a0, 5
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 3
+; ZVBB-RV32-NEXT:    sub a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV32-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV32-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV32-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV32-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    slli a5, a2, 2
+; ZVBB-RV32-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV32-NEXT:    slli a6, a2, 4
+; ZVBB-RV32-NEXT:    add a7, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV32-NEXT:    sub a5, a6, a5
+; ZVBB-RV32-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV32-NEXT:    add a6, a7, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV32-NEXT:    vsseg7e32.v v1, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV32-NEXT:    vsseg7e32.v v21, (a1)
+; ZVBB-RV32-NEXT:    vl1re32.v v18, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v19, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v20, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v21, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re32.v v16, (a4)
+; ZVBB-RV32-NEXT:    vl1re32.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re32.v v17, (a7)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 14
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v12, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v13, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vl1re32.v v14, (a6)
+; ZVBB-RV32-NEXT:    vl1re32.v v15, (a1)
+; ZVBB-RV32-NEXT:    add a5, a0, a5
+; ZVBB-RV32-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re32.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re32.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    slli a0, a0, 5
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 3
+; ZVBB-RV64-NEXT:    sub a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV64-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV64-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV64-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV64-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    slli a5, a2, 2
+; ZVBB-RV64-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV64-NEXT:    slli a6, a2, 4
+; ZVBB-RV64-NEXT:    add a7, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV64-NEXT:    sub a5, a6, a5
+; ZVBB-RV64-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV64-NEXT:    add a6, a7, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV64-NEXT:    vsseg7e32.v v1, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV64-NEXT:    vsseg7e32.v v21, (a1)
+; ZVBB-RV64-NEXT:    vl1re32.v v18, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v19, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v20, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v21, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re32.v v16, (a4)
+; ZVBB-RV64-NEXT:    vl1re32.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re32.v v17, (a7)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 14
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v12, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v13, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vl1re32.v v14, (a6)
+; ZVBB-RV64-NEXT:    vl1re32.v v15, (a1)
+; ZVBB-RV64-NEXT:    add a5, a0, a5
+; ZVBB-RV64-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re32.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re32.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    slli a0, a0, 5
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v26, v20
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v24, v16
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 3
+; ZIP-NEXT:    sub a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    vmv2r.v v22, v12
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v20, v8
+; ZIP-NEXT:    vmv1r.v v1, v20
+; ZIP-NEXT:    vmv1r.v v3, v22
+; ZIP-NEXT:    vmv1r.v v5, v24
+; ZIP-NEXT:    vmv1r.v v7, v26
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v2, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    slli a5, a2, 2
+; ZIP-NEXT:    vmv1r.v v4, v14
+; ZIP-NEXT:    slli a6, a2, 4
+; ZIP-NEXT:    add a7, a4, a2
+; ZIP-NEXT:    vmv1r.v v6, v18
+; ZIP-NEXT:    sub a5, a6, a5
+; ZIP-NEXT:    vmv1r.v v22, v11
+; ZIP-NEXT:    add a6, a7, a2
+; ZIP-NEXT:    vmv1r.v v24, v15
+; ZIP-NEXT:    vsseg7e32.v v1, (a0)
+; ZIP-NEXT:    vmv1r.v v26, v19
+; ZIP-NEXT:    vsseg7e32.v v21, (a1)
+; ZIP-NEXT:    vl1re32.v v18, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v19, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v20, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v21, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re32.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v11, (a6)
+; ZIP-NEXT:    vl1re32.v v8, (a0)
+; ZIP-NEXT:    vl1re32.v v16, (a4)
+; ZIP-NEXT:    vl1re32.v v9, (a3)
+; ZIP-NEXT:    vl1re32.v v17, (a7)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 14
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v12, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re32.v v13, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vl1re32.v v14, (a6)
+; ZIP-NEXT:    vl1re32.v v15, (a1)
+; ZIP-NEXT:    add a5, a0, a5
+; ZIP-NEXT:    vs2r.v v20, (a5)
+; ZIP-NEXT:    vs4r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re32.v v16, (a2)
+; ZIP-NEXT:    vl8re32.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g)
+  ret <vscale x 28 x i32> %res
+}
+
+define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v26, v20
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v24, v16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 3
+; RV32-NEXT:    sub a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    vmv2r.v v22, v12
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v20, v8
+; RV32-NEXT:    vmv1r.v v1, v20
+; RV32-NEXT:    vmv1r.v v3, v22
+; RV32-NEXT:    vmv1r.v v5, v24
+; RV32-NEXT:    vmv1r.v v7, v26
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v2, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    slli a5, a2, 2
+; RV32-NEXT:    vmv1r.v v4, v14
+; RV32-NEXT:    slli a6, a2, 4
+; RV32-NEXT:    add a7, a4, a2
+; RV32-NEXT:    vmv1r.v v6, v18
+; RV32-NEXT:    sub a5, a6, a5
+; RV32-NEXT:    vmv1r.v v22, v11
+; RV32-NEXT:    add a6, a7, a2
+; RV32-NEXT:    vmv1r.v v24, v15
+; RV32-NEXT:    vsseg7e64.v v1, (a0)
+; RV32-NEXT:    vmv1r.v v26, v19
+; RV32-NEXT:    vsseg7e64.v v21, (a1)
+; RV32-NEXT:    vl1re64.v v18, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v19, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v20, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v21, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re64.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v11, (a6)
+; RV32-NEXT:    vl1re64.v v8, (a0)
+; RV32-NEXT:    vl1re64.v v16, (a4)
+; RV32-NEXT:    vl1re64.v v9, (a3)
+; RV32-NEXT:    vl1re64.v v17, (a7)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 14
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v12, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re64.v v13, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vl1re64.v v14, (a6)
+; RV32-NEXT:    vl1re64.v v15, (a1)
+; RV32-NEXT:    add a5, a0, a5
+; RV32-NEXT:    vs2r.v v20, (a5)
+; RV32-NEXT:    vs4r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re64.v v16, (a2)
+; RV32-NEXT:    vl8re64.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 5
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v26, v20
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v24, v16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    sub a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    vmv2r.v v22, v12
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v20, v8
+; RV64-NEXT:    vmv1r.v v1, v20
+; RV64-NEXT:    vmv1r.v v3, v22
+; RV64-NEXT:    vmv1r.v v5, v24
+; RV64-NEXT:    vmv1r.v v7, v26
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v2, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    slli a5, a2, 2
+; RV64-NEXT:    vmv1r.v v4, v14
+; RV64-NEXT:    slli a6, a2, 4
+; RV64-NEXT:    add a7, a4, a2
+; RV64-NEXT:    vmv1r.v v6, v18
+; RV64-NEXT:    sub a5, a6, a5
+; RV64-NEXT:    vmv1r.v v22, v11
+; RV64-NEXT:    add a6, a7, a2
+; RV64-NEXT:    vmv1r.v v24, v15
+; RV64-NEXT:    vsseg7e64.v v1, (a0)
+; RV64-NEXT:    vmv1r.v v26, v19
+; RV64-NEXT:    vsseg7e64.v v21, (a1)
+; RV64-NEXT:    vl1re64.v v18, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v19, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v20, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v21, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re64.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v11, (a6)
+; RV64-NEXT:    vl1re64.v v8, (a0)
+; RV64-NEXT:    vl1re64.v v16, (a4)
+; RV64-NEXT:    vl1re64.v v9, (a3)
+; RV64-NEXT:    vl1re64.v v17, (a7)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 14
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v12, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re64.v v13, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vl1re64.v v14, (a6)
+; RV64-NEXT:    vl1re64.v v15, (a1)
+; RV64-NEXT:    add a5, a0, a5
+; RV64-NEXT:    vs2r.v v20, (a5)
+; RV64-NEXT:    vs4r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re64.v v16, (a2)
+; RV64-NEXT:    vl8re64.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    slli a0, a0, 5
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 3
+; ZVBB-RV32-NEXT:    sub a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV32-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV32-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV32-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV32-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    slli a5, a2, 2
+; ZVBB-RV32-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV32-NEXT:    slli a6, a2, 4
+; ZVBB-RV32-NEXT:    add a7, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV32-NEXT:    sub a5, a6, a5
+; ZVBB-RV32-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV32-NEXT:    add a6, a7, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV32-NEXT:    vsseg7e64.v v1, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV32-NEXT:    vsseg7e64.v v21, (a1)
+; ZVBB-RV32-NEXT:    vl1re64.v v18, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v19, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v20, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v21, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re64.v v16, (a4)
+; ZVBB-RV32-NEXT:    vl1re64.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re64.v v17, (a7)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 14
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v12, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v13, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vl1re64.v v14, (a6)
+; ZVBB-RV32-NEXT:    vl1re64.v v15, (a1)
+; ZVBB-RV32-NEXT:    add a5, a0, a5
+; ZVBB-RV32-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re64.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re64.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    slli a0, a0, 5
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v26, v20
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v16
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 3
+; ZVBB-RV64-NEXT:    sub a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v12
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v8
+; ZVBB-RV64-NEXT:    vmv1r.v v1, v20
+; ZVBB-RV64-NEXT:    vmv1r.v v3, v22
+; ZVBB-RV64-NEXT:    vmv1r.v v5, v24
+; ZVBB-RV64-NEXT:    vmv1r.v v7, v26
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v2, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    slli a5, a2, 2
+; ZVBB-RV64-NEXT:    vmv1r.v v4, v14
+; ZVBB-RV64-NEXT:    slli a6, a2, 4
+; ZVBB-RV64-NEXT:    add a7, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v6, v18
+; ZVBB-RV64-NEXT:    sub a5, a6, a5
+; ZVBB-RV64-NEXT:    vmv1r.v v22, v11
+; ZVBB-RV64-NEXT:    add a6, a7, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v24, v15
+; ZVBB-RV64-NEXT:    vsseg7e64.v v1, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v19
+; ZVBB-RV64-NEXT:    vsseg7e64.v v21, (a1)
+; ZVBB-RV64-NEXT:    vl1re64.v v18, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v19, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v20, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v21, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re64.v v16, (a4)
+; ZVBB-RV64-NEXT:    vl1re64.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re64.v v17, (a7)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 14
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v12, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v13, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vl1re64.v v14, (a6)
+; ZVBB-RV64-NEXT:    vl1re64.v v15, (a1)
+; ZVBB-RV64-NEXT:    add a5, a0, a5
+; ZVBB-RV64-NEXT:    vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT:    vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re64.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re64.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    slli a0, a0, 5
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v26, v20
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v24, v16
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 3
+; ZIP-NEXT:    sub a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    vmv2r.v v22, v12
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v20, v8
+; ZIP-NEXT:    vmv1r.v v1, v20
+; ZIP-NEXT:    vmv1r.v v3, v22
+; ZIP-NEXT:    vmv1r.v v5, v24
+; ZIP-NEXT:    vmv1r.v v7, v26
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v2, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    slli a5, a2, 2
+; ZIP-NEXT:    vmv1r.v v4, v14
+; ZIP-NEXT:    slli a6, a2, 4
+; ZIP-NEXT:    add a7, a4, a2
+; ZIP-NEXT:    vmv1r.v v6, v18
+; ZIP-NEXT:    sub a5, a6, a5
+; ZIP-NEXT:    vmv1r.v v22, v11
+; ZIP-NEXT:    add a6, a7, a2
+; ZIP-NEXT:    vmv1r.v v24, v15
+; ZIP-NEXT:    vsseg7e64.v v1, (a0)
+; ZIP-NEXT:    vmv1r.v v26, v19
+; ZIP-NEXT:    vsseg7e64.v v21, (a1)
+; ZIP-NEXT:    vl1re64.v v18, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v19, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v20, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v21, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re64.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v11, (a6)
+; ZIP-NEXT:    vl1re64.v v8, (a0)
+; ZIP-NEXT:    vl1re64.v v16, (a4)
+; ZIP-NEXT:    vl1re64.v v9, (a3)
+; ZIP-NEXT:    vl1re64.v v17, (a7)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 14
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v12, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re64.v v13, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vl1re64.v v14, (a6)
+; ZIP-NEXT:    vl1re64.v v15, (a1)
+; ZIP-NEXT:    add a5, a0, a5
+; ZIP-NEXT:    vs2r.v v20, (a5)
+; ZIP-NEXT:    vs4r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re64.v v16, (a2)
+; ZIP-NEXT:    vl8re64.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g)
+  ret <vscale x 14 x i64> %res
+}
+
+; Floats
+
+define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT:    vwaddu.vv v10, v8, v9
+; V-NEXT:    li a0, -1
+; V-NEXT:    csrr a1, vlenb
+; V-NEXT:    vwmaccu.vx v10, a0, v9
+; V-NEXT:    srli a1, a1, 2
+; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT:    vslidedown.vx v8, v10, a1
+; V-NEXT:    add a0, a1, a1
+; V-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT:    vslideup.vx v10, v8, a1
+; V-NEXT:    vmv.v.v v8, v10
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vwsll.vi v10, v9, 16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
+; ZVBB-NEXT:    srli a0, a0, 2
+; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
+; ZVBB-NEXT:    add a1, a0, a0
+; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v10, v8, a0
+; ZVBB-NEXT:    vmv.v.v v8, v10
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    srli a0, a0, 2
+; ZIP-NEXT:    add a1, a0, a0
+; ZIP-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT:    vslideup.vx v10, v11, a0
+; ZIP-NEXT:    vmv.v.v v8, v10
+; ZIP-NEXT:    ret
+  %res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+  ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT:    vmv1r.v v10, v9
+; V-NEXT:    vmv1r.v v11, v8
+; V-NEXT:    vwaddu.vv v8, v11, v10
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v10
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vmv1r.v v10, v9
+; ZVBB-NEXT:    vmv1r.v v11, v8
+; ZVBB-NEXT:    vwsll.vi v8, v10, 16
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT:    vmv1r.v v10, v9
+; ZIP-NEXT:    vmv1r.v v11, v8
+; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT:    ret
+  %res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %res
+}
+
+define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; V-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT:    vwaddu.vv v10, v8, v9
+; V-NEXT:    li a0, -1
+; V-NEXT:    csrr a1, vlenb
+; V-NEXT:    vwmaccu.vx v10, a0, v9
+; V-NEXT:    srli a1, a1, 2
+; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT:    vslidedown.vx v8, v10, a1
+; V-NEXT:    add a0, a1, a1
+; V-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT:    vslideup.vx v10, v8, a1
+; V-NEXT:    vmv.v.v v8, v10
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vwsll.vi v10, v9, 16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
+; ZVBB-NEXT:    srli a0, a0, 2
+; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
+; ZVBB-NEXT:    add a1, a0, a0
+; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v10, v8, a0
+; ZVBB-NEXT:    vmv.v.v v8, v10
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    srli a0, a0, 2
+; ZIP-NEXT:    add a1, a0, a0
+; ZIP-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT:    vslideup.vx v10, v11, a0
+; ZIP-NEXT:    vmv.v.v v8, v10
+; ZIP-NEXT:    ret
+  %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
+  ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; V-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT:    vmv1r.v v10, v9
+; V-NEXT:    vmv1r.v v11, v8
+; V-NEXT:    vwaddu.vv v8, v11, v10
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v10
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vmv1r.v v10, v9
+; ZVBB-NEXT:    vmv1r.v v11, v8
+; ZVBB-NEXT:    vwsll.vi v8, v10, 16
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT:    vmv1r.v v10, v9
+; ZIP-NEXT:    vmv1r.v v11, v8
+; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT:    ret
+  %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
+  ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; V-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; V-NEXT:    vmv1r.v v10, v9
+; V-NEXT:    vmv1r.v v11, v8
+; V-NEXT:    vwaddu.vv v8, v11, v10
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v10
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-NEXT:    vmv1r.v v10, v9
+; ZVBB-NEXT:    vmv1r.v v11, v8
+; ZVBB-NEXT:    li a0, 32
+; ZVBB-NEXT:    vwsll.vx v8, v10, a0
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v11
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT:    vmv1r.v v10, v9
+; ZIP-NEXT:    vmv1r.v v11, v8
+; ZIP-NEXT:    ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT:    ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT:    ret
+  %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT:    vmv2r.v v12, v10
+; V-NEXT:    vmv2r.v v14, v8
+; V-NEXT:    vwaddu.vv v8, v14, v12
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v12
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT:    vmv2r.v v12, v10
+; ZVBB-NEXT:    vmv2r.v v14, v8
+; ZVBB-NEXT:    vwsll.vi v8, v12, 16
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT:    vmv2r.v v12, v10
+; ZIP-NEXT:    vmv2r.v v14, v8
+; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT:    ret
+  %res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  ret <vscale x 16 x bfloat> %res
+}
+
+define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; V-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT:    vmv2r.v v12, v10
+; V-NEXT:    vmv2r.v v14, v8
+; V-NEXT:    vwaddu.vv v8, v14, v12
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v12
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT:    vmv2r.v v12, v10
+; ZVBB-NEXT:    vmv2r.v v14, v8
+; ZVBB-NEXT:    vwsll.vi v8, v12, 16
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT:    vmv2r.v v12, v10
+; ZIP-NEXT:    vmv2r.v v14, v8
+; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT:    ret
+  %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  ret <vscale x 16 x half> %res
+}
+
+define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; V-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; V-NEXT:    vmv2r.v v12, v10
+; V-NEXT:    vmv2r.v v14, v8
+; V-NEXT:    vwaddu.vv v8, v14, v12
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwmaccu.vx v8, a0, v12
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVBB-NEXT:    vmv2r.v v12, v10
+; ZVBB-NEXT:    vmv2r.v v14, v8
+; ZVBB-NEXT:    li a0, 32
+; ZVBB-NEXT:    vwsll.vx v8, v12, a0
+; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZIP-NEXT:    vmv2r.v v12, v10
+; ZIP-NEXT:    vmv2r.v v14, v8
+; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT:    ret
+  %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  ret <vscale x 8 x float> %res
+}
+
+define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; V-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; V:       # %bb.0:
+; V-NEXT:    csrr a0, vlenb
+; V-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; V-NEXT:    vid.v v12
+; V-NEXT:    srli a0, a0, 2
+; V-NEXT:    vand.vi v13, v12, 1
+; V-NEXT:    vmsne.vi v0, v13, 0
+; V-NEXT:    vsrl.vi v16, v12, 1
+; V-NEXT:    vadd.vx v16, v16, a0, v0.t
+; V-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; V-NEXT:    vrgatherei16.vv v12, v8, v16
+; V-NEXT:    vmv.v.v v8, v12
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; ZVBB-NEXT:    vid.v v12
+; ZVBB-NEXT:    srli a0, a0, 2
+; ZVBB-NEXT:    vand.vi v13, v12, 1
+; ZVBB-NEXT:    vmsne.vi v0, v13, 0
+; ZVBB-NEXT:    vsrl.vi v16, v12, 1
+; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
+; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
+; ZVBB-NEXT:    vmv.v.v v8, v12
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZIP-NEXT:    vmv2r.v v12, v10
+; ZIP-NEXT:    vmv2r.v v14, v8
+; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT:    ret
+  %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  ret <vscale x 4 x double> %res
+}
+
+
+
+define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT:    vmv8r.v v24, v8
+; V-NEXT:    vwaddu.vv v8, v24, v16
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwaddu.vv v0, v28, v20
+; V-NEXT:    vwmaccu.vx v8, a0, v16
+; V-NEXT:    vwmaccu.vx v0, a0, v20
+; V-NEXT:    vmv8r.v v16, v0
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVBB-NEXT:    vwsll.vi v24, v16, 16
+; ZVBB-NEXT:    vwsll.vi v0, v20, 16
+; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
+; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
+; ZVBB-NEXT:    vmv8r.v v8, v24
+; ZVBB-NEXT:    vmv8r.v v16, v0
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT:    vmv8r.v v8, v24
+; ZIP-NEXT:    vmv8r.v v16, v0
+; ZIP-NEXT:    ret
+  %res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
+  ret <vscale x 64 x bfloat> %res
+}
+
+define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
+; V-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT:    vmv8r.v v24, v8
+; V-NEXT:    vwaddu.vv v8, v24, v16
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwaddu.vv v0, v28, v20
+; V-NEXT:    vwmaccu.vx v8, a0, v16
+; V-NEXT:    vwmaccu.vx v0, a0, v20
+; V-NEXT:    vmv8r.v v16, v0
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVBB-NEXT:    vwsll.vi v24, v16, 16
+; ZVBB-NEXT:    vwsll.vi v0, v20, 16
+; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
+; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
+; ZVBB-NEXT:    vmv8r.v v8, v24
+; ZVBB-NEXT:    vmv8r.v v16, v0
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT:    vmv8r.v v8, v24
+; ZIP-NEXT:    vmv8r.v v16, v0
+; ZIP-NEXT:    ret
+  %res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
+  ret <vscale x 64 x half> %res
+}
+
+define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
+; V-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; V:       # %bb.0:
+; V-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; V-NEXT:    vmv8r.v v24, v8
+; V-NEXT:    vwaddu.vv v8, v24, v16
+; V-NEXT:    li a0, -1
+; V-NEXT:    vwaddu.vv v0, v28, v20
+; V-NEXT:    vwmaccu.vx v8, a0, v16
+; V-NEXT:    vwmaccu.vx v0, a0, v20
+; V-NEXT:    vmv8r.v v16, v0
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    li a0, 32
+; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; ZVBB-NEXT:    vwsll.vx v24, v16, a0
+; ZVBB-NEXT:    vwsll.vx v0, v20, a0
+; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
+; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
+; ZVBB-NEXT:    vmv8r.v v8, v24
+; ZVBB-NEXT:    vmv8r.v v16, v0
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT:    vmv8r.v v8, v24
+; ZIP-NEXT:    vmv8r.v v16, v0
+; ZIP-NEXT:    ret
+  %res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
+  ret <vscale x 32 x float> %res
+}
+
+define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
+; V-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; V:       # %bb.0:
+; V-NEXT:    csrr a0, vlenb
+; V-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; V-NEXT:    vid.v v6
+; V-NEXT:    vmv8r.v v24, v8
+; V-NEXT:    srli a0, a0, 1
+; V-NEXT:    vmv4r.v v28, v16
+; V-NEXT:    vmv4r.v v16, v12
+; V-NEXT:    vand.vi v8, v6, 1
+; V-NEXT:    vmsne.vi v0, v8, 0
+; V-NEXT:    vsrl.vi v6, v6, 1
+; V-NEXT:    vadd.vx v6, v6, a0, v0.t
+; V-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; V-NEXT:    vrgatherei16.vv v8, v24, v6
+; V-NEXT:    vrgatherei16.vv v24, v16, v6
+; V-NEXT:    vmv.v.v v16, v24
+; V-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; ZVBB-NEXT:    vid.v v6
+; ZVBB-NEXT:    vmv8r.v v24, v8
+; ZVBB-NEXT:    srli a0, a0, 1
+; ZVBB-NEXT:    vmv4r.v v28, v16
+; ZVBB-NEXT:    vmv4r.v v16, v12
+; ZVBB-NEXT:    vand.vi v8, v6, 1
+; ZVBB-NEXT:    vmsne.vi v0, v8, 0
+; ZVBB-NEXT:    vsrl.vi v6, v6, 1
+; ZVBB-NEXT:    vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; ZVBB-NEXT:    vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v6
+; ZVBB-NEXT:    vmv.v.v v16, v24
+; ZVBB-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT:    vmv8r.v v8, v24
+; ZIP-NEXT:    vmv8r.v v16, v0
+; ZIP-NEXT:    ret
+  %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
+  ret <vscale x 16 x double> %res
+}
+
+define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6f16_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    vle16.v v9, (a3)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a0, a1, a1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a1
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v9, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6f16_nxv2f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    vle16.v v9, (a3)
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a0, a1, a1
+; ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v9, a1
+; ZVBB-NEXT:    add a2, a3, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v9, (a2)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 6 x half> @llvm.vector.interleave3.nxv6f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2)
+  ret <vscale x 6 x half> %res
+}
+
+define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv12f16_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v9, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v10, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12f16_nxv4f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v9, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v10, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 12 x half> @llvm.vector.interleave3.nxv12f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2)
+  ret <vscale x 12 x half> %res
+}
+
+define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv24f16_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    vl2re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv24f16_nxv8f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    vl2re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v12, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 24 x half> @llvm.vector.interleave3.nxv24f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2)
+  ret <vscale x 24 x half> %res
+}
+
+define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    vle16.v v9, (a3)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a0, a1, a1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a1
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v9, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    vle16.v v9, (a3)
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a0, a1, a1
+; ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v9, a1
+; ZVBB-NEXT:    add a2, a3, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v9, (a2)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 6 x bfloat> @llvm.vector.interleave3.nxv6bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2)
+  ret <vscale x 6 x bfloat> %res
+}
+
+define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v9, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v10, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v9, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v10, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 12 x bfloat> @llvm.vector.interleave3.nxv12bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2)
+  ret <vscale x 12 x bfloat> %res
+}
+
+define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg3e16.v v8, (a0)
+; CHECK-NEXT:    vl2re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re16.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
+; ZVBB-NEXT:    vl2re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re16.v v12, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 24 x bfloat> @llvm.vector.interleave3.nxv24bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2)
+  ret <vscale x 24 x bfloat> %res
+}
+
+define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv3f32_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsseg3e32.v v8, (a0)
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    vle32.v v9, (a3)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    srli a1, a1, 3
+; CHECK-NEXT:    add a0, a1, a1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a1
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv3f32_nxv1f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    vle32.v v9, (a3)
+; ZVBB-NEXT:    vle32.v v8, (a0)
+; ZVBB-NEXT:    srli a1, a1, 3
+; ZVBB-NEXT:    add a0, a1, a1
+; ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v9, a1
+; ZVBB-NEXT:    add a2, a3, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vle32.v v9, (a2)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 3 x float> @llvm.vector.interleave3.nxv3f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2)
+  ret <vscale x 3 x float> %res
+}
+
+define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6f32_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsseg3e32.v v8, (a0)
+; CHECK-NEXT:    vl1re32.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re32.v v9, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re32.v v10, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6f32_nxv2f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
+; ZVBB-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re32.v v9, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re32.v v10, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 6 x float> @llvm.vector.interleave3.nxv6f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2)
+  ret <vscale x 6 x float> %res
+}
+
+define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv12f32_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsseg3e32.v v8, (a0)
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re32.v v10, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl2re32.v v12, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12f32_nxv4f32:
+; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    li a1, 6
@@ -1169,26 +4832,75 @@ define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16
 ; ZVBB-NEXT:    addi a0, sp, 16
 ; ZVBB-NEXT:    csrr a1, vlenb
 ; ZVBB-NEXT:    slli a1, a1, 1
-; ZVBB-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
-; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
-; ZVBB-NEXT:    vl2re16.v v8, (a0)
+; ZVBB-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
+; ZVBB-NEXT:    vl2re32.v v8, (a0)
 ; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re16.v v10, (a0)
+; ZVBB-NEXT:    vl2re32.v v10, (a0)
 ; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re16.v v12, (a0)
+; ZVBB-NEXT:    vl2re32.v v12, (a0)
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    li a1, 6
 ; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 24 x i16> @llvm.vector.interleave3.nxv24i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
-  ret <vscale x 24 x i16> %res
+  %res = call <vscale x 12 x float> @llvm.vector.interleave3.nxv12f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2)
+  ret <vscale x 12 x float> %res
 }
 
+define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv3f64_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg3e64.v v8, (a0)
+; CHECK-NEXT:    vl1re64.v v8, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re64.v v9, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re64.v v10, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv3f64_nxv1f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; ZVBB-NEXT:    vsseg3e64.v v8, (a0)
+; ZVBB-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re64.v v9, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re64.v v10, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 3 x double> @llvm.vector.interleave3.nxv3f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2)
+  ret <vscale x 3 x double> %res
+}
 
-define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32:
+define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6f64_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1198,13 +4910,13 @@ define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vsseg3e32.v v8, (a0)
-; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsseg3e64.v v8, (a0)
+; CHECK-NEXT:    vl2re64.v v8, (a0)
 ; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re32.v v10, (a0)
+; CHECK-NEXT:    vl2re64.v v10, (a0)
 ; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re32.v v12, (a0)
+; CHECK-NEXT:    vl2re64.v v12, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
@@ -1212,246 +4924,637 @@ define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32:
-; ZVBB:       # %bb.0:
-; ZVBB-NEXT:    addi sp, sp, -16
-; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
-; ZVBB-NEXT:    mul a0, a0, a1
-; ZVBB-NEXT:    sub sp, sp, a0
-; ZVBB-NEXT:    addi a0, sp, 16
-; ZVBB-NEXT:    csrr a1, vlenb
-; ZVBB-NEXT:    slli a1, a1, 1
-; ZVBB-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
-; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
-; ZVBB-NEXT:    vl2re32.v v8, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re32.v v10, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re32.v v12, (a0)
-; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
-; ZVBB-NEXT:    mul a0, a0, a1
-; ZVBB-NEXT:    add sp, sp, a0
-; ZVBB-NEXT:    addi sp, sp, 16
-; ZVBB-NEXT:    ret
-  %res = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-  ret <vscale x 12 x i32> %res
+; ZVBB-LABEL: vector_interleave_nxv6f64_nxv2f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    slli a1, a1, 1
+; ZVBB-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; ZVBB-NEXT:    vsseg3e64.v v8, (a0)
+; ZVBB-NEXT:    vl2re64.v v8, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re64.v v10, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl2re64.v v12, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    li a1, 6
+; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 6 x double> @llvm.vector.interleave3.nxv6f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2)
+  ret <vscale x 6 x double> %res
+}
+
+define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv10f16_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg5e16.v v8, (a0)
+; CHECK-NEXT:    add a5, a4, a2
+; CHECK-NEXT:    vle16.v v8, (a5)
+; CHECK-NEXT:    vle16.v v9, (a4)
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a4, a1, a1
+; CHECK-NEXT:    vle16.v v10, (a3)
+; CHECK-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v8, a1
+; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v10, a1
+; CHECK-NEXT:    add a2, a5, a2
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv10f16_nxv2f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg5e16.v v8, (a0)
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    vle16.v v8, (a5)
+; ZVBB-NEXT:    vle16.v v9, (a4)
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a4, a1, a1
+; ZVBB-NEXT:    vle16.v v10, (a3)
+; ZVBB-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v8, a1
+; ZVBB-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v10, a1
+; ZVBB-NEXT:    add a2, a5, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v10, (a2)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 10 x half> @llvm.vector.interleave5.nxv10f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4)
+  ret <vscale x 10 x half> %res
+}
+
+define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv20f16_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg5e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v11, (a3)
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v9, (a2)
+; CHECK-NEXT:    add a1, a3, a1
+; CHECK-NEXT:    vl1re16.v v12, (a1)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv20f16_nxv4f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v11, (a3)
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v9, (a2)
+; ZVBB-NEXT:    add a1, a3, a1
+; ZVBB-NEXT:    vl1re16.v v12, (a1)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 20 x half> @llvm.vector.interleave5.nxv20f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4)
+  ret <vscale x 20 x half> %res
+}
+
+define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) nounwind {
+; RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 28
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    sub sp, sp, a0
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vmv2r.v v20, v16
+; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    vmv2r.v v18, v12
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 2
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 64
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    vmv2r.v v16, v8
+; RV32-NEXT:    vmv2r.v v22, v16
+; RV32-NEXT:    vmv2r.v v24, v18
+; RV32-NEXT:    vmv1r.v v26, v20
+; RV32-NEXT:    add a3, a0, a2
+; RV32-NEXT:    vmv1r.v v23, v10
+; RV32-NEXT:    add a4, a1, a2
+; RV32-NEXT:    add a5, a4, a2
+; RV32-NEXT:    vmv1r.v v25, v14
+; RV32-NEXT:    add a6, a5, a2
+; RV32-NEXT:    vmv1r.v v18, v11
+; RV32-NEXT:    vsseg5e16.v v22, (a0)
+; RV32-NEXT:    vmv1r.v v20, v15
+; RV32-NEXT:    vsseg5e16.v v17, (a1)
+; RV32-NEXT:    vl1re16.v v16, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v17, (a6)
+; RV32-NEXT:    add a6, a3, a2
+; RV32-NEXT:    vl1re16.v v10, (a6)
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v11, (a6)
+; RV32-NEXT:    vl1re16.v v8, (a0)
+; RV32-NEXT:    vl1re16.v v9, (a3)
+; RV32-NEXT:    vl1re16.v v14, (a4)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a3, 10
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 64
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    vl1re16.v v15, (a5)
+; RV32-NEXT:    vl1re16.v v12, (a6)
+; RV32-NEXT:    vl1re16.v v13, (a1)
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    vs2r.v v16, (a2)
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vl8re16.v v16, (a2)
+; RV32-NEXT:    vl8re16.v v8, (a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a1, 28
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    sub sp, sp, a0
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vmv2r.v v20, v16
+; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    vmv2r.v v18, v12
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 64
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    vmv2r.v v16, v8
+; RV64-NEXT:    vmv2r.v v22, v16
+; RV64-NEXT:    vmv2r.v v24, v18
+; RV64-NEXT:    vmv1r.v v26, v20
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    vmv1r.v v23, v10
+; RV64-NEXT:    add a4, a1, a2
+; RV64-NEXT:    add a5, a4, a2
+; RV64-NEXT:    vmv1r.v v25, v14
+; RV64-NEXT:    add a6, a5, a2
+; RV64-NEXT:    vmv1r.v v18, v11
+; RV64-NEXT:    vsseg5e16.v v22, (a0)
+; RV64-NEXT:    vmv1r.v v20, v15
+; RV64-NEXT:    vsseg5e16.v v17, (a1)
+; RV64-NEXT:    vl1re16.v v16, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v17, (a6)
+; RV64-NEXT:    add a6, a3, a2
+; RV64-NEXT:    vl1re16.v v10, (a6)
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v11, (a6)
+; RV64-NEXT:    vl1re16.v v8, (a0)
+; RV64-NEXT:    vl1re16.v v9, (a3)
+; RV64-NEXT:    vl1re16.v v14, (a4)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a3, 10
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 64
+; RV64-NEXT:    add a6, a6, a2
+; RV64-NEXT:    vl1re16.v v15, (a5)
+; RV64-NEXT:    vl1re16.v v12, (a6)
+; RV64-NEXT:    vl1re16.v v13, (a1)
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    vs2r.v v16, (a2)
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vl8re16.v v16, (a2)
+; RV64-NEXT:    vl8re16.v v8, (a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZVBB-RV32:       # %bb.0:
+; ZVBB-RV32-NEXT:    addi sp, sp, -80
+; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT:    addi s0, sp, 80
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a1, 28
+; ZVBB-RV32-NEXT:    mul a0, a0, a1
+; ZVBB-RV32-NEXT:    sub sp, sp, a0
+; ZVBB-RV32-NEXT:    andi sp, sp, -64
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV32-NEXT:    addi a0, sp, 64
+; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV32-NEXT:    csrr a1, vlenb
+; ZVBB-RV32-NEXT:    slli a2, a1, 2
+; ZVBB-RV32-NEXT:    add a1, a2, a1
+; ZVBB-RV32-NEXT:    add a1, sp, a1
+; ZVBB-RV32-NEXT:    addi a1, a1, 64
+; ZVBB-RV32-NEXT:    csrr a2, vlenb
+; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV32-NEXT:    add a3, a0, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV32-NEXT:    add a4, a1, a2
+; ZVBB-RV32-NEXT:    add a5, a4, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV32-NEXT:    add a6, a5, a2
+; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV32-NEXT:    vsseg5e16.v v22, (a0)
+; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV32-NEXT:    vsseg5e16.v v17, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v16, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v17, (a6)
+; ZVBB-RV32-NEXT:    add a6, a3, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v10, (a6)
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re16.v v14, (a4)
+; ZVBB-RV32-NEXT:    csrr a0, vlenb
+; ZVBB-RV32-NEXT:    li a3, 10
+; ZVBB-RV32-NEXT:    mul a0, a0, a3
+; ZVBB-RV32-NEXT:    add a0, sp, a0
+; ZVBB-RV32-NEXT:    addi a0, a0, 64
+; ZVBB-RV32-NEXT:    add a6, a6, a2
+; ZVBB-RV32-NEXT:    vl1re16.v v15, (a5)
+; ZVBB-RV32-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v13, (a1)
+; ZVBB-RV32-NEXT:    slli a2, a2, 3
+; ZVBB-RV32-NEXT:    add a2, a0, a2
+; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    addi sp, s0, -80
+; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT:    addi sp, sp, 80
+; ZVBB-RV32-NEXT:    ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZVBB-RV64:       # %bb.0:
+; ZVBB-RV64-NEXT:    addi sp, sp, -80
+; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT:    addi s0, sp, 80
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a1, 28
+; ZVBB-RV64-NEXT:    mul a0, a0, a1
+; ZVBB-RV64-NEXT:    sub sp, sp, a0
+; ZVBB-RV64-NEXT:    andi sp, sp, -64
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
+; ZVBB-RV64-NEXT:    addi a0, sp, 64
+; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
+; ZVBB-RV64-NEXT:    csrr a1, vlenb
+; ZVBB-RV64-NEXT:    slli a2, a1, 2
+; ZVBB-RV64-NEXT:    add a1, a2, a1
+; ZVBB-RV64-NEXT:    add a1, sp, a1
+; ZVBB-RV64-NEXT:    addi a1, a1, 64
+; ZVBB-RV64-NEXT:    csrr a2, vlenb
+; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
+; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
+; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
+; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
+; ZVBB-RV64-NEXT:    add a3, a0, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
+; ZVBB-RV64-NEXT:    add a4, a1, a2
+; ZVBB-RV64-NEXT:    add a5, a4, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
+; ZVBB-RV64-NEXT:    add a6, a5, a2
+; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
+; ZVBB-RV64-NEXT:    vsseg5e16.v v22, (a0)
+; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
+; ZVBB-RV64-NEXT:    vsseg5e16.v v17, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v16, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v17, (a6)
+; ZVBB-RV64-NEXT:    add a6, a3, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v10, (a6)
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re16.v v14, (a4)
+; ZVBB-RV64-NEXT:    csrr a0, vlenb
+; ZVBB-RV64-NEXT:    li a3, 10
+; ZVBB-RV64-NEXT:    mul a0, a0, a3
+; ZVBB-RV64-NEXT:    add a0, sp, a0
+; ZVBB-RV64-NEXT:    addi a0, a0, 64
+; ZVBB-RV64-NEXT:    add a6, a6, a2
+; ZVBB-RV64-NEXT:    vl1re16.v v15, (a5)
+; ZVBB-RV64-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v13, (a1)
+; ZVBB-RV64-NEXT:    slli a2, a2, 3
+; ZVBB-RV64-NEXT:    add a2, a0, a2
+; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    addi sp, s0, -80
+; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT:    addi sp, sp, 80
+; ZVBB-RV64-NEXT:    ret
+;
+; ZIP-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZIP:       # %bb.0:
+; ZIP-NEXT:    addi sp, sp, -80
+; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT:    addi s0, sp, 80
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a1, 28
+; ZIP-NEXT:    mul a0, a0, a1
+; ZIP-NEXT:    sub sp, sp, a0
+; ZIP-NEXT:    andi sp, sp, -64
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT:    vmv2r.v v20, v16
+; ZIP-NEXT:    addi a0, sp, 64
+; ZIP-NEXT:    vmv2r.v v18, v12
+; ZIP-NEXT:    csrr a1, vlenb
+; ZIP-NEXT:    slli a2, a1, 2
+; ZIP-NEXT:    add a1, a2, a1
+; ZIP-NEXT:    add a1, sp, a1
+; ZIP-NEXT:    addi a1, a1, 64
+; ZIP-NEXT:    csrr a2, vlenb
+; ZIP-NEXT:    vmv2r.v v16, v8
+; ZIP-NEXT:    vmv2r.v v22, v16
+; ZIP-NEXT:    vmv2r.v v24, v18
+; ZIP-NEXT:    vmv1r.v v26, v20
+; ZIP-NEXT:    add a3, a0, a2
+; ZIP-NEXT:    vmv1r.v v23, v10
+; ZIP-NEXT:    add a4, a1, a2
+; ZIP-NEXT:    add a5, a4, a2
+; ZIP-NEXT:    vmv1r.v v25, v14
+; ZIP-NEXT:    add a6, a5, a2
+; ZIP-NEXT:    vmv1r.v v18, v11
+; ZIP-NEXT:    vsseg5e16.v v22, (a0)
+; ZIP-NEXT:    vmv1r.v v20, v15
+; ZIP-NEXT:    vsseg5e16.v v17, (a1)
+; ZIP-NEXT:    vl1re16.v v16, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v17, (a6)
+; ZIP-NEXT:    add a6, a3, a2
+; ZIP-NEXT:    vl1re16.v v10, (a6)
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v11, (a6)
+; ZIP-NEXT:    vl1re16.v v8, (a0)
+; ZIP-NEXT:    vl1re16.v v9, (a3)
+; ZIP-NEXT:    vl1re16.v v14, (a4)
+; ZIP-NEXT:    csrr a0, vlenb
+; ZIP-NEXT:    li a3, 10
+; ZIP-NEXT:    mul a0, a0, a3
+; ZIP-NEXT:    add a0, sp, a0
+; ZIP-NEXT:    addi a0, a0, 64
+; ZIP-NEXT:    add a6, a6, a2
+; ZIP-NEXT:    vl1re16.v v15, (a5)
+; ZIP-NEXT:    vl1re16.v v12, (a6)
+; ZIP-NEXT:    vl1re16.v v13, (a1)
+; ZIP-NEXT:    slli a2, a2, 3
+; ZIP-NEXT:    add a2, a0, a2
+; ZIP-NEXT:    vs2r.v v16, (a2)
+; ZIP-NEXT:    vs8r.v v8, (a0)
+; ZIP-NEXT:    vl8re16.v v16, (a2)
+; ZIP-NEXT:    vl8re16.v v8, (a0)
+; ZIP-NEXT:    addi sp, s0, -80
+; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT:    addi sp, sp, 80
+; ZIP-NEXT:    ret
+  %res = call <vscale x 40 x half> @llvm.vector.interleave5.nxv40f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4)
+  ret <vscale x 40 x half> %res
 }
 
-
-define <vscale x 6 x i64> @vector_interleave_nxv6i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64:
+define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
-; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsseg3e64.v v8, (a0)
-; CHECK-NEXT:    vl2re64.v v8, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re64.v v10, (a0)
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    vl2re64.v v12, (a0)
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg5e16.v v8, (a0)
+; CHECK-NEXT:    add a5, a4, a2
+; CHECK-NEXT:    vle16.v v8, (a5)
+; CHECK-NEXT:    vle16.v v9, (a4)
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a4, a1, a1
+; CHECK-NEXT:    vle16.v v10, (a3)
+; CHECK-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v8, a1
+; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v10, a1
+; CHECK-NEXT:    add a2, a5, a2
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a2)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 6
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
-; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
 ; ZVBB-NEXT:    sub sp, sp, a0
 ; ZVBB-NEXT:    addi a0, sp, 16
 ; ZVBB-NEXT:    csrr a1, vlenb
-; ZVBB-NEXT:    slli a1, a1, 1
-; ZVBB-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
-; ZVBB-NEXT:    vsseg3e64.v v8, (a0)
-; ZVBB-NEXT:    vl2re64.v v8, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re64.v v10, (a0)
-; ZVBB-NEXT:    add a0, a0, a1
-; ZVBB-NEXT:    vl2re64.v v12, (a0)
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg5e16.v v8, (a0)
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    vle16.v v8, (a5)
+; ZVBB-NEXT:    vle16.v v9, (a4)
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a4, a1, a1
+; ZVBB-NEXT:    vle16.v v10, (a3)
+; ZVBB-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v8, a1
+; ZVBB-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v10, a1
+; ZVBB-NEXT:    add a2, a5, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v10, (a2)
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 6
-; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
-  ret <vscale x 6 x i64> %res
+  %res = call <vscale x 10 x bfloat> @llvm.vector.interleave5.nxv10bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4)
+  ret <vscale x 10 x bfloat> %res
 }
 
-define <vscale x 80 x i1> @vector_interleave_nxv80i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e) nounwind {
-; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1:
+define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 10
-; CHECK-NEXT:    mul a0, a0, a1
-; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    addi a4, sp, 16
-; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a1, a0, 2
 ; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT:    add a2, a4, a1
-; CHECK-NEXT:    srli a3, a1, 2
-; CHECK-NEXT:    vmv2r.v v20, v14
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v21, v18
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v16, v19
-; CHECK-NEXT:    add a5, a2, a1
-; CHECK-NEXT:    vmv1r.v v23, v8
-; CHECK-NEXT:    vmv1r.v v18, v9
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    vmerge.vim v24, v12, 1, v0
-; CHECK-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vsseg5e8.v v20, (a4)
-; CHECK-NEXT:    vmv1r.v v19, v25
-; CHECK-NEXT:    vsseg5e8.v v15, (a0)
-; CHECK-NEXT:    vl1r.v v8, (a5)
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    vl1r.v v10, (a4)
-; CHECK-NEXT:    add a4, a5, a1
-; CHECK-NEXT:    vl1r.v v12, (a4)
-; CHECK-NEXT:    add a4, a0, a1
-; CHECK-NEXT:    vl1r.v v14, (a4)
-; CHECK-NEXT:    add a4, a4, a1
-; CHECK-NEXT:    vl1r.v v9, (a5)
-; CHECK-NEXT:    add a5, a4, a1
-; CHECK-NEXT:    vl1r.v v16, (a5)
-; CHECK-NEXT:    add a5, a5, a1
-; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    vl1r.v v11, (a2)
-; CHECK-NEXT:    add a2, a3, a3
-; CHECK-NEXT:    vl1r.v v15, (a4)
-; CHECK-NEXT:    add a4, a1, a1
-; CHECK-NEXT:    vl1r.v v13, (a0)
-; CHECK-NEXT:    vl1r.v v17, (a5)
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v18, v8, 0
-; CHECK-NEXT:    vmsne.vi v0, v10, 0
-; CHECK-NEXT:    vmsne.vi v8, v14, 0
-; CHECK-NEXT:    vmsne.vi v9, v12, 0
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v18, a3
-; CHECK-NEXT:    vslideup.vx v9, v8, a3
-; CHECK-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v9, a1
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v8, v16, 0
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg5e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v11, (a3)
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v9, (a2)
+; CHECK-NEXT:    add a1, a3, a1
+; CHECK-NEXT:    vl1re16.v v12, (a1)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 10
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 10
-; ZVBB-NEXT:    mul a0, a0, a1
-; ZVBB-NEXT:    sub sp, sp, a0
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmv.v.i v12, 0
-; ZVBB-NEXT:    addi a4, sp, 16
-; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    slli a1, a0, 2
 ; ZVBB-NEXT:    add a0, a1, a0
-; ZVBB-NEXT:    add a0, sp, a0
-; ZVBB-NEXT:    addi a0, a0, 16
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
 ; ZVBB-NEXT:    csrr a1, vlenb
-; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
-; ZVBB-NEXT:    vmv1r.v v0, v8
-; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
-; ZVBB-NEXT:    add a2, a4, a1
-; ZVBB-NEXT:    srli a3, a1, 2
-; ZVBB-NEXT:    vmv2r.v v20, v14
-; ZVBB-NEXT:    vmv1r.v v0, v9
-; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
-; ZVBB-NEXT:    vmv1r.v v21, v18
-; ZVBB-NEXT:    vmv1r.v v0, v10
-; ZVBB-NEXT:    vmerge.vim v8, v12, 1, v0
-; ZVBB-NEXT:    vmv1r.v v22, v16
-; ZVBB-NEXT:    vmv1r.v v16, v19
-; ZVBB-NEXT:    add a5, a2, a1
-; ZVBB-NEXT:    vmv1r.v v23, v8
-; ZVBB-NEXT:    vmv1r.v v18, v9
-; ZVBB-NEXT:    vmv1r.v v0, v11
-; ZVBB-NEXT:    vmerge.vim v24, v12, 1, v0
-; ZVBB-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
-; ZVBB-NEXT:    vsseg5e8.v v20, (a4)
-; ZVBB-NEXT:    vmv1r.v v19, v25
-; ZVBB-NEXT:    vsseg5e8.v v15, (a0)
-; ZVBB-NEXT:    vl1r.v v8, (a5)
-; ZVBB-NEXT:    add a5, a5, a1
-; ZVBB-NEXT:    vl1r.v v10, (a4)
-; ZVBB-NEXT:    add a4, a5, a1
-; ZVBB-NEXT:    vl1r.v v12, (a4)
-; ZVBB-NEXT:    add a4, a0, a1
-; ZVBB-NEXT:    vl1r.v v14, (a4)
-; ZVBB-NEXT:    add a4, a4, a1
-; ZVBB-NEXT:    vl1r.v v9, (a5)
-; ZVBB-NEXT:    add a5, a4, a1
-; ZVBB-NEXT:    vl1r.v v16, (a5)
-; ZVBB-NEXT:    add a5, a5, a1
-; ZVBB-NEXT:    srli a1, a1, 1
-; ZVBB-NEXT:    vl1r.v v11, (a2)
-; ZVBB-NEXT:    add a2, a3, a3
-; ZVBB-NEXT:    vl1r.v v15, (a4)
-; ZVBB-NEXT:    add a4, a1, a1
-; ZVBB-NEXT:    vl1r.v v13, (a0)
-; ZVBB-NEXT:    vl1r.v v17, (a5)
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmsne.vi v18, v8, 0
-; ZVBB-NEXT:    vmsne.vi v0, v10, 0
-; ZVBB-NEXT:    vmsne.vi v8, v14, 0
-; ZVBB-NEXT:    vmsne.vi v9, v12, 0
-; ZVBB-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v18, a3
-; ZVBB-NEXT:    vslideup.vx v9, v8, a3
-; ZVBB-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v9, a1
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmsne.vi v8, v16, 0
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v11, (a3)
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v9, (a2)
+; ZVBB-NEXT:    add a1, a3, a1
+; ZVBB-NEXT:    vl1re16.v v12, (a1)
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 10
-; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 80 x i1> @llvm.vector.interleave5.nxv80i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e)
-  ret <vscale x 80 x i1> %res
+  %res = call <vscale x 20 x bfloat> @llvm.vector.interleave5.nxv20bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4)
+  ret <vscale x 20 x bfloat> %res
 }
 
-
-define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) nounwind {
+; RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -1462,7 +5565,7 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    sub sp, sp, a0
 ; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vmv2r.v v20, v16
 ; RV32-NEXT:    addi a0, sp, 64
 ; RV32-NEXT:    vmv2r.v v18, v12
@@ -1483,41 +5586,41 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; RV32-NEXT:    vmv1r.v v25, v14
 ; RV32-NEXT:    add a6, a5, a2
 ; RV32-NEXT:    vmv1r.v v18, v11
-; RV32-NEXT:    vsseg5e8.v v22, (a0)
+; RV32-NEXT:    vsseg5e16.v v22, (a0)
 ; RV32-NEXT:    vmv1r.v v20, v15
-; RV32-NEXT:    vsseg5e8.v v17, (a1)
-; RV32-NEXT:    vl1r.v v16, (a6)
+; RV32-NEXT:    vsseg5e16.v v17, (a1)
+; RV32-NEXT:    vl1re16.v v16, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v17, (a6)
+; RV32-NEXT:    vl1re16.v v17, (a6)
 ; RV32-NEXT:    add a6, a3, a2
-; RV32-NEXT:    vl1r.v v10, (a6)
+; RV32-NEXT:    vl1re16.v v10, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v11, (a6)
-; RV32-NEXT:    vl1r.v v8, (a0)
-; RV32-NEXT:    vl1r.v v9, (a3)
-; RV32-NEXT:    vl1r.v v14, (a4)
+; RV32-NEXT:    vl1re16.v v11, (a6)
+; RV32-NEXT:    vl1re16.v v8, (a0)
+; RV32-NEXT:    vl1re16.v v9, (a3)
+; RV32-NEXT:    vl1re16.v v14, (a4)
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    li a3, 10
 ; RV32-NEXT:    mul a0, a0, a3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 64
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v15, (a5)
-; RV32-NEXT:    vl1r.v v12, (a6)
-; RV32-NEXT:    vl1r.v v13, (a1)
+; RV32-NEXT:    vl1re16.v v15, (a5)
+; RV32-NEXT:    vl1re16.v v12, (a6)
+; RV32-NEXT:    vl1re16.v v13, (a1)
 ; RV32-NEXT:    slli a2, a2, 3
 ; RV32-NEXT:    add a2, a0, a2
 ; RV32-NEXT:    vs2r.v v16, (a2)
 ; RV32-NEXT:    vs8r.v v8, (a0)
-; RV32-NEXT:    vl8r.v v16, (a2)
-; RV32-NEXT:    vl8r.v v8, (a0)
+; RV32-NEXT:    vl8re16.v v16, (a2)
+; RV32-NEXT:    vl8re16.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -1528,7 +5631,7 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    sub sp, sp, a0
 ; RV64-NEXT:    andi sp, sp, -64
-; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vmv2r.v v20, v16
 ; RV64-NEXT:    addi a0, sp, 64
 ; RV64-NEXT:    vmv2r.v v18, v12
@@ -1549,41 +5652,41 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; RV64-NEXT:    vmv1r.v v25, v14
 ; RV64-NEXT:    add a6, a5, a2
 ; RV64-NEXT:    vmv1r.v v18, v11
-; RV64-NEXT:    vsseg5e8.v v22, (a0)
+; RV64-NEXT:    vsseg5e16.v v22, (a0)
 ; RV64-NEXT:    vmv1r.v v20, v15
-; RV64-NEXT:    vsseg5e8.v v17, (a1)
-; RV64-NEXT:    vl1r.v v16, (a6)
+; RV64-NEXT:    vsseg5e16.v v17, (a1)
+; RV64-NEXT:    vl1re16.v v16, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v17, (a6)
+; RV64-NEXT:    vl1re16.v v17, (a6)
 ; RV64-NEXT:    add a6, a3, a2
-; RV64-NEXT:    vl1r.v v10, (a6)
+; RV64-NEXT:    vl1re16.v v10, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v11, (a6)
-; RV64-NEXT:    vl1r.v v8, (a0)
-; RV64-NEXT:    vl1r.v v9, (a3)
-; RV64-NEXT:    vl1r.v v14, (a4)
+; RV64-NEXT:    vl1re16.v v11, (a6)
+; RV64-NEXT:    vl1re16.v v8, (a0)
+; RV64-NEXT:    vl1re16.v v9, (a3)
+; RV64-NEXT:    vl1re16.v v14, (a4)
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    li a3, 10
 ; RV64-NEXT:    mul a0, a0, a3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 64
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v15, (a5)
-; RV64-NEXT:    vl1r.v v12, (a6)
-; RV64-NEXT:    vl1r.v v13, (a1)
+; RV64-NEXT:    vl1re16.v v15, (a5)
+; RV64-NEXT:    vl1re16.v v12, (a6)
+; RV64-NEXT:    vl1re16.v v13, (a1)
 ; RV64-NEXT:    slli a2, a2, 3
 ; RV64-NEXT:    add a2, a0, a2
 ; RV64-NEXT:    vs2r.v v16, (a2)
 ; RV64-NEXT:    vs8r.v v8, (a0)
-; RV64-NEXT:    vl8r.v v16, (a2)
-; RV64-NEXT:    vl8r.v v8, (a0)
+; RV64-NEXT:    vl8re16.v v16, (a2)
+; RV64-NEXT:    vl8re16.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -1594,7 +5697,7 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZVBB-RV32-NEXT:    mul a0, a0, a1
 ; ZVBB-RV32-NEXT:    sub sp, sp, a0
 ; ZVBB-RV32-NEXT:    andi sp, sp, -64
-; ZVBB-RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
 ; ZVBB-RV32-NEXT:    addi a0, sp, 64
 ; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
@@ -1615,41 +5718,41 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
 ; ZVBB-RV32-NEXT:    add a6, a5, a2
 ; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
-; ZVBB-RV32-NEXT:    vsseg5e8.v v22, (a0)
+; ZVBB-RV32-NEXT:    vsseg5e16.v v22, (a0)
 ; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
-; ZVBB-RV32-NEXT:    vsseg5e8.v v17, (a1)
-; ZVBB-RV32-NEXT:    vl1r.v v16, (a6)
+; ZVBB-RV32-NEXT:    vsseg5e16.v v17, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v16, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v17, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v17, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a3, a2
-; ZVBB-RV32-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v10, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v11, (a6)
-; ZVBB-RV32-NEXT:    vl1r.v v8, (a0)
-; ZVBB-RV32-NEXT:    vl1r.v v9, (a3)
-; ZVBB-RV32-NEXT:    vl1r.v v14, (a4)
+; ZVBB-RV32-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re16.v v14, (a4)
 ; ZVBB-RV32-NEXT:    csrr a0, vlenb
 ; ZVBB-RV32-NEXT:    li a3, 10
 ; ZVBB-RV32-NEXT:    mul a0, a0, a3
 ; ZVBB-RV32-NEXT:    add a0, sp, a0
 ; ZVBB-RV32-NEXT:    addi a0, a0, 64
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v15, (a5)
-; ZVBB-RV32-NEXT:    vl1r.v v12, (a6)
-; ZVBB-RV32-NEXT:    vl1r.v v13, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v15, (a5)
+; ZVBB-RV32-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v13, (a1)
 ; ZVBB-RV32-NEXT:    slli a2, a2, 3
 ; ZVBB-RV32-NEXT:    add a2, a0, a2
 ; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
 ; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
-; ZVBB-RV32-NEXT:    vl8r.v v16, (a2)
-; ZVBB-RV32-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re16.v v8, (a0)
 ; ZVBB-RV32-NEXT:    addi sp, s0, -80
 ; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -1660,7 +5763,7 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZVBB-RV64-NEXT:    mul a0, a0, a1
 ; ZVBB-RV64-NEXT:    sub sp, sp, a0
 ; ZVBB-RV64-NEXT:    andi sp, sp, -64
-; ZVBB-RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
 ; ZVBB-RV64-NEXT:    addi a0, sp, 64
 ; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
@@ -1681,41 +5784,41 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
 ; ZVBB-RV64-NEXT:    add a6, a5, a2
 ; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
-; ZVBB-RV64-NEXT:    vsseg5e8.v v22, (a0)
+; ZVBB-RV64-NEXT:    vsseg5e16.v v22, (a0)
 ; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
-; ZVBB-RV64-NEXT:    vsseg5e8.v v17, (a1)
-; ZVBB-RV64-NEXT:    vl1r.v v16, (a6)
+; ZVBB-RV64-NEXT:    vsseg5e16.v v17, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v16, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v17, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v17, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a3, a2
-; ZVBB-RV64-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v10, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v11, (a6)
-; ZVBB-RV64-NEXT:    vl1r.v v8, (a0)
-; ZVBB-RV64-NEXT:    vl1r.v v9, (a3)
-; ZVBB-RV64-NEXT:    vl1r.v v14, (a4)
+; ZVBB-RV64-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re16.v v14, (a4)
 ; ZVBB-RV64-NEXT:    csrr a0, vlenb
 ; ZVBB-RV64-NEXT:    li a3, 10
 ; ZVBB-RV64-NEXT:    mul a0, a0, a3
 ; ZVBB-RV64-NEXT:    add a0, sp, a0
 ; ZVBB-RV64-NEXT:    addi a0, a0, 64
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v15, (a5)
-; ZVBB-RV64-NEXT:    vl1r.v v12, (a6)
-; ZVBB-RV64-NEXT:    vl1r.v v13, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v15, (a5)
+; ZVBB-RV64-NEXT:    vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v13, (a1)
 ; ZVBB-RV64-NEXT:    slli a2, a2, 3
 ; ZVBB-RV64-NEXT:    add a2, a0, a2
 ; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
 ; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
-; ZVBB-RV64-NEXT:    vl8r.v v16, (a2)
-; ZVBB-RV64-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re16.v v8, (a0)
 ; ZVBB-RV64-NEXT:    addi sp, s0, -80
 ; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZIP-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -1726,7 +5829,7 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZIP-NEXT:    mul a0, a0, a1
 ; ZIP-NEXT:    sub sp, sp, a0
 ; ZIP-NEXT:    andi sp, sp, -64
-; ZIP-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZIP-NEXT:    vmv2r.v v20, v16
 ; ZIP-NEXT:    addi a0, sp, 64
 ; ZIP-NEXT:    vmv2r.v v18, v12
@@ -1747,46 +5850,121 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
 ; ZIP-NEXT:    vmv1r.v v25, v14
 ; ZIP-NEXT:    add a6, a5, a2
 ; ZIP-NEXT:    vmv1r.v v18, v11
-; ZIP-NEXT:    vsseg5e8.v v22, (a0)
+; ZIP-NEXT:    vsseg5e16.v v22, (a0)
 ; ZIP-NEXT:    vmv1r.v v20, v15
-; ZIP-NEXT:    vsseg5e8.v v17, (a1)
-; ZIP-NEXT:    vl1r.v v16, (a6)
+; ZIP-NEXT:    vsseg5e16.v v17, (a1)
+; ZIP-NEXT:    vl1re16.v v16, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v17, (a6)
+; ZIP-NEXT:    vl1re16.v v17, (a6)
 ; ZIP-NEXT:    add a6, a3, a2
-; ZIP-NEXT:    vl1r.v v10, (a6)
+; ZIP-NEXT:    vl1re16.v v10, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v11, (a6)
-; ZIP-NEXT:    vl1r.v v8, (a0)
-; ZIP-NEXT:    vl1r.v v9, (a3)
-; ZIP-NEXT:    vl1r.v v14, (a4)
+; ZIP-NEXT:    vl1re16.v v11, (a6)
+; ZIP-NEXT:    vl1re16.v v8, (a0)
+; ZIP-NEXT:    vl1re16.v v9, (a3)
+; ZIP-NEXT:    vl1re16.v v14, (a4)
 ; ZIP-NEXT:    csrr a0, vlenb
 ; ZIP-NEXT:    li a3, 10
 ; ZIP-NEXT:    mul a0, a0, a3
 ; ZIP-NEXT:    add a0, sp, a0
 ; ZIP-NEXT:    addi a0, a0, 64
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v15, (a5)
-; ZIP-NEXT:    vl1r.v v12, (a6)
-; ZIP-NEXT:    vl1r.v v13, (a1)
+; ZIP-NEXT:    vl1re16.v v15, (a5)
+; ZIP-NEXT:    vl1re16.v v12, (a6)
+; ZIP-NEXT:    vl1re16.v v13, (a1)
 ; ZIP-NEXT:    slli a2, a2, 3
 ; ZIP-NEXT:    add a2, a0, a2
 ; ZIP-NEXT:    vs2r.v v16, (a2)
 ; ZIP-NEXT:    vs8r.v v8, (a0)
-; ZIP-NEXT:    vl8r.v v16, (a2)
-; ZIP-NEXT:    vl8r.v v8, (a0)
+; ZIP-NEXT:    vl8re16.v v16, (a2)
+; ZIP-NEXT:    vl8re16.v v8, (a0)
 ; ZIP-NEXT:    addi sp, s0, -80
 ; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
-  ret <vscale x 80 x i8> %res
+  %res = call <vscale x 40 x bfloat> @llvm.vector.interleave5.nxv40bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4)
+  ret <vscale x 40 x bfloat> %res
 }
 
+define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv5f32_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    vsetvli a5, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsseg5e32.v v8, (a0)
+; CHECK-NEXT:    add a5, a4, a2
+; CHECK-NEXT:    vle32.v v8, (a5)
+; CHECK-NEXT:    vle32.v v9, (a4)
+; CHECK-NEXT:    srli a1, a1, 3
+; CHECK-NEXT:    add a4, a1, a1
+; CHECK-NEXT:    vle32.v v10, (a3)
+; CHECK-NEXT:    vsetvli zero, a4, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v8, a1
+; CHECK-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a4, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v10, a1
+; CHECK-NEXT:    add a2, a5, a2
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v10, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv5f32_nxv1f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    vsetvli a5, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vsseg5e32.v v8, (a0)
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    vle32.v v8, (a5)
+; ZVBB-NEXT:    vle32.v v9, (a4)
+; ZVBB-NEXT:    srli a1, a1, 3
+; ZVBB-NEXT:    add a4, a1, a1
+; ZVBB-NEXT:    vle32.v v10, (a3)
+; ZVBB-NEXT:    vsetvli zero, a4, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v8, a1
+; ZVBB-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vle32.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a4, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v10, a1
+; ZVBB-NEXT:    add a2, a5, a2
+; ZVBB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vle32.v v10, (a2)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 1
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 5 x float> @llvm.vector.interleave5.nxv5f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4)
+  ret <vscale x 5 x float> %res
+}
 
-define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) nounwind {
-; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8:
+define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv10f32_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -1797,15 +5975,15 @@ define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    add a2, a0, a1
 ; CHECK-NEXT:    add a3, a2, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vsseg5e8.v v8, (a0)
-; CHECK-NEXT:    vl1r.v v10, (a3)
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsseg5e32.v v8, (a0)
+; CHECK-NEXT:    vl1re32.v v10, (a3)
 ; CHECK-NEXT:    add a3, a3, a1
-; CHECK-NEXT:    vl1r.v v11, (a3)
-; CHECK-NEXT:    vl1r.v v8, (a0)
-; CHECK-NEXT:    vl1r.v v9, (a2)
+; CHECK-NEXT:    vl1re32.v v11, (a3)
+; CHECK-NEXT:    vl1re32.v v8, (a0)
+; CHECK-NEXT:    vl1re32.v v9, (a2)
 ; CHECK-NEXT:    add a1, a3, a1
-; CHECK-NEXT:    vl1r.v v12, (a1)
+; CHECK-NEXT:    vl1re32.v v12, (a1)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a1, a0, 2
 ; CHECK-NEXT:    add a0, a1, a0
@@ -1813,7 +5991,7 @@ define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; ZVBB-LABEL: vector_interleave_nxv10f32_nxv2f32:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
@@ -1824,29 +6002,27 @@ define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a
 ; ZVBB-NEXT:    csrr a1, vlenb
 ; ZVBB-NEXT:    add a2, a0, a1
 ; ZVBB-NEXT:    add a3, a2, a1
-; ZVBB-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
-; ZVBB-NEXT:    vsseg5e8.v v8, (a0)
-; ZVBB-NEXT:    vl1r.v v10, (a3)
+; ZVBB-NEXT:    vsetvli a4, zero, e32, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e32.v v8, (a0)
+; ZVBB-NEXT:    vl1re32.v v10, (a3)
 ; ZVBB-NEXT:    add a3, a3, a1
-; ZVBB-NEXT:    vl1r.v v11, (a3)
-; ZVBB-NEXT:    vl1r.v v8, (a0)
-; ZVBB-NEXT:    vl1r.v v9, (a2)
+; ZVBB-NEXT:    vl1re32.v v11, (a3)
+; ZVBB-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-NEXT:    vl1re32.v v9, (a2)
 ; ZVBB-NEXT:    add a1, a3, a1
-; ZVBB-NEXT:    vl1r.v v12, (a1)
+; ZVBB-NEXT:    vl1re32.v v12, (a1)
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    slli a1, a0, 2
 ; ZVBB-NEXT:    add a0, a1, a0
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 40 x i8> @llvm.vector.interleave5.nxv40i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e)
-  ret <vscale x 40 x i8> %res
+  %res = call <vscale x 10 x float> @llvm.vector.interleave5.nxv10f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4)
+  ret <vscale x 10 x float> %res
 }
 
-
-define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) nounwind {
+; RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -1912,7 +6088,7 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -1978,7 +6154,7 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -2044,7 +6220,7 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2110,7 +6286,7 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZIP-LABEL: vector_interleave_nxv20f32_nxv4f32:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2175,14 +6351,70 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e)
-  ret <vscale x 20 x i32> %res
+  %res = call <vscale x 20 x float> @llvm.vector.interleave5.nxv20f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4)
+  ret <vscale x 20 x float> %res
 }
 
-
-define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e) nounwind {
+define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) nounwind {
+; CHECK-LABEL: vector_interleave_nxv5f64_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg5e64.v v8, (a0)
+; CHECK-NEXT:    vl1re64.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re64.v v11, (a3)
+; CHECK-NEXT:    vl1re64.v v8, (a0)
+; CHECK-NEXT:    vl1re64.v v9, (a2)
+; CHECK-NEXT:    add a1, a3, a1
+; CHECK-NEXT:    vl1re64.v v12, (a1)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv5f64_nxv1f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
+; ZVBB-NEXT:    vsseg5e64.v v8, (a0)
+; ZVBB-NEXT:    vl1re64.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re64.v v11, (a3)
+; ZVBB-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-NEXT:    vl1re64.v v9, (a2)
+; ZVBB-NEXT:    add a1, a3, a1
+; ZVBB-NEXT:    vl1re64.v v12, (a1)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 2
+; ZVBB-NEXT:    add a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 5 x double> @llvm.vector.interleave5.nxv5f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4)
+  ret <vscale x 5 x double> %res
+}
+
+define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4) nounwind {
+; RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -2248,7 +6480,7 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2314,7 +6546,7 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -2380,7 +6612,7 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2446,7 +6678,7 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZIP-LABEL: vector_interleave_nxv10f64_nxv2f64:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2511,206 +6743,162 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e)
-  ret <vscale x 10 x i64> %res
+  %res = call <vscale x 10 x double> @llvm.vector.interleave5.nxv10f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4)
+  ret <vscale x 10 x double> %res
 }
 
-define <vscale x 112 x i1> @vector_interleave_nxv112i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) nounwind {
-; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1:
+define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv14f16_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 14
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v14, 0
-; CHECK-NEXT:    addi a4, sp, 16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 3
-; CHECK-NEXT:    sub a0, a1, a0
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vmerge.vim v16, v14, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v22, v14, 1, v0
-; CHECK-NEXT:    add a3, a4, a2
-; CHECK-NEXT:    srli a1, a2, 2
-; CHECK-NEXT:    add a5, a0, a2
-; CHECK-NEXT:    vmv4r.v v24, v16
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v18, v14, 1, v0
-; CHECK-NEXT:    add a6, a3, a2
-; CHECK-NEXT:    vmv1r.v v25, v22
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v8, v14, 1, v0
-; CHECK-NEXT:    vmv1r.v v26, v18
-; CHECK-NEXT:    vmv1r.v v0, v11
-; CHECK-NEXT:    vmerge.vim v20, v14, 1, v0
-; CHECK-NEXT:    vmv1r.v v27, v8
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vmerge.vim v10, v14, 1, v0
-; CHECK-NEXT:    vmv1r.v v28, v20
-; CHECK-NEXT:    vmv1r.v v18, v23
-; CHECK-NEXT:    add a7, a6, a2
-; CHECK-NEXT:    vmv1r.v v29, v10
-; CHECK-NEXT:    vmv1r.v v20, v9
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vmerge.vim v30, v14, 1, v0
-; CHECK-NEXT:    vmv1r.v v22, v11
-; CHECK-NEXT:    vsetvli t0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vsseg7e8.v v24, (a4)
-; CHECK-NEXT:    vmv1r.v v23, v31
-; CHECK-NEXT:    vsseg7e8.v v17, (a0)
-; CHECK-NEXT:    vl1r.v v8, (a6)
-; CHECK-NEXT:    add a6, a7, a2
-; CHECK-NEXT:    vl1r.v v10, (a4)
-; CHECK-NEXT:    add a4, a6, a2
-; CHECK-NEXT:    vl1r.v v12, (a6)
-; CHECK-NEXT:    add a6, a4, a2
-; CHECK-NEXT:    vl1r.v v14, (a6)
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    add a5, a4, a2
 ; CHECK-NEXT:    add a6, a5, a2
-; CHECK-NEXT:    vl1r.v v16, (a5)
-; CHECK-NEXT:    add a5, a6, a2
-; CHECK-NEXT:    vl1r.v v18, (a5)
-; CHECK-NEXT:    add a5, a5, a2
-; CHECK-NEXT:    vl1r.v v9, (a7)
-; CHECK-NEXT:    add a7, a5, a2
-; CHECK-NEXT:    vl1r.v v20, (a7)
-; CHECK-NEXT:    add a7, a7, a2
-; CHECK-NEXT:    srli a2, a2, 1
-; CHECK-NEXT:    vl1r.v v11, (a3)
-; CHECK-NEXT:    add a3, a1, a1
-; CHECK-NEXT:    vl1r.v v13, (a4)
-; CHECK-NEXT:    add a4, a2, a2
-; CHECK-NEXT:    vl1r.v v15, (a0)
-; CHECK-NEXT:    vl1r.v v19, (a5)
-; CHECK-NEXT:    vl1r.v v17, (a6)
-; CHECK-NEXT:    vl1r.v v21, (a7)
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v22, v8, 0
-; CHECK-NEXT:    vmsne.vi v0, v10, 0
-; CHECK-NEXT:    vmsne.vi v9, v12, 0
-; CHECK-NEXT:    vmsne.vi v10, v14, 0
-; CHECK-NEXT:    vmsne.vi v11, v18, 0
-; CHECK-NEXT:    vmsne.vi v8, v16, 0
-; CHECK-NEXT:    vmsne.vi v12, v20, 0
-; CHECK-NEXT:    vsetvli zero, a3, e8, mf2, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v22, a1
-; CHECK-NEXT:    vslideup.vx v9, v10, a1
-; CHECK-NEXT:    vslideup.vx v8, v11, a1
-; CHECK-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v0, v9, a2
-; CHECK-NEXT:    vslideup.vx v8, v12, a2
+; CHECK-NEXT:    vsetvli a7, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg7e16.v v8, (a0)
+; CHECK-NEXT:    add a7, a6, a2
+; CHECK-NEXT:    vle16.v v8, (a7)
+; CHECK-NEXT:    vle16.v v10, (a6)
+; CHECK-NEXT:    add a6, a1, a1
+; CHECK-NEXT:    add a2, a7, a2
+; CHECK-NEXT:    vle16.v v12, (a5)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v10, v8, a1
+; CHECK-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v11, (a2)
+; CHECK-NEXT:    vle16.v v9, (a4)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v12, a1
+; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v12, (a3)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv14f16_nxv2f16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    add a6, a5, a2
+; ZVBB-NEXT:    vsetvli a7, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg7e16.v v8, (a0)
+; ZVBB-NEXT:    add a7, a6, a2
+; ZVBB-NEXT:    vle16.v v8, (a7)
+; ZVBB-NEXT:    vle16.v v10, (a6)
+; ZVBB-NEXT:    add a6, a1, a1
+; ZVBB-NEXT:    add a2, a7, a2
+; ZVBB-NEXT:    vle16.v v12, (a5)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v10, v8, a1
+; ZVBB-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v11, (a2)
+; ZVBB-NEXT:    vle16.v v9, (a4)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v12, a1
+; ZVBB-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v12, (a3)
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v12, a1
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 14 x half> @llvm.vector.interleave7.nxv14f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6)
+  ret <vscale x 14 x half> %res
+}
+
+define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv28f16_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 14
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg7e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v11, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vl1re16.v v9, (a2)
+; CHECK-NEXT:    vl1re16.v v12, (a3)
+; CHECK-NEXT:    vl1re16.v v13, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v14, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
 ;
-; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv28f16_nxv4f16:
 ; ZVBB:       # %bb.0:
 ; ZVBB-NEXT:    addi sp, sp, -16
 ; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 14
-; ZVBB-NEXT:    mul a0, a0, a1
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
 ; ZVBB-NEXT:    sub sp, sp, a0
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmv.v.i v14, 0
-; ZVBB-NEXT:    addi a4, sp, 16
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg7e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v11, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a3, a1
+; ZVBB-NEXT:    vl1re16.v v9, (a2)
+; ZVBB-NEXT:    vl1re16.v v12, (a3)
+; ZVBB-NEXT:    vl1re16.v v13, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v14, (a0)
 ; ZVBB-NEXT:    csrr a0, vlenb
 ; ZVBB-NEXT:    slli a1, a0, 3
 ; ZVBB-NEXT:    sub a0, a1, a0
-; ZVBB-NEXT:    add a0, sp, a0
-; ZVBB-NEXT:    addi a0, a0, 16
-; ZVBB-NEXT:    csrr a2, vlenb
-; ZVBB-NEXT:    vmerge.vim v16, v14, 1, v0
-; ZVBB-NEXT:    vmv1r.v v0, v8
-; ZVBB-NEXT:    vmerge.vim v22, v14, 1, v0
-; ZVBB-NEXT:    add a3, a4, a2
-; ZVBB-NEXT:    srli a1, a2, 2
-; ZVBB-NEXT:    add a5, a0, a2
-; ZVBB-NEXT:    vmv4r.v v24, v16
-; ZVBB-NEXT:    vmv1r.v v0, v9
-; ZVBB-NEXT:    vmerge.vim v18, v14, 1, v0
-; ZVBB-NEXT:    add a6, a3, a2
-; ZVBB-NEXT:    vmv1r.v v25, v22
-; ZVBB-NEXT:    vmv1r.v v0, v10
-; ZVBB-NEXT:    vmerge.vim v8, v14, 1, v0
-; ZVBB-NEXT:    vmv1r.v v26, v18
-; ZVBB-NEXT:    vmv1r.v v0, v11
-; ZVBB-NEXT:    vmerge.vim v20, v14, 1, v0
-; ZVBB-NEXT:    vmv1r.v v27, v8
-; ZVBB-NEXT:    vmv1r.v v0, v12
-; ZVBB-NEXT:    vmerge.vim v10, v14, 1, v0
-; ZVBB-NEXT:    vmv1r.v v28, v20
-; ZVBB-NEXT:    vmv1r.v v18, v23
-; ZVBB-NEXT:    add a7, a6, a2
-; ZVBB-NEXT:    vmv1r.v v29, v10
-; ZVBB-NEXT:    vmv1r.v v20, v9
-; ZVBB-NEXT:    vmv1r.v v0, v13
-; ZVBB-NEXT:    vmerge.vim v30, v14, 1, v0
-; ZVBB-NEXT:    vmv1r.v v22, v11
-; ZVBB-NEXT:    vsetvli t0, zero, e8, m1, ta, ma
-; ZVBB-NEXT:    vsseg7e8.v v24, (a4)
-; ZVBB-NEXT:    vmv1r.v v23, v31
-; ZVBB-NEXT:    vsseg7e8.v v17, (a0)
-; ZVBB-NEXT:    vl1r.v v8, (a6)
-; ZVBB-NEXT:    add a6, a7, a2
-; ZVBB-NEXT:    vl1r.v v10, (a4)
-; ZVBB-NEXT:    add a4, a6, a2
-; ZVBB-NEXT:    vl1r.v v12, (a6)
-; ZVBB-NEXT:    add a6, a4, a2
-; ZVBB-NEXT:    vl1r.v v14, (a6)
-; ZVBB-NEXT:    add a6, a5, a2
-; ZVBB-NEXT:    vl1r.v v16, (a5)
-; ZVBB-NEXT:    add a5, a6, a2
-; ZVBB-NEXT:    vl1r.v v18, (a5)
-; ZVBB-NEXT:    add a5, a5, a2
-; ZVBB-NEXT:    vl1r.v v9, (a7)
-; ZVBB-NEXT:    add a7, a5, a2
-; ZVBB-NEXT:    vl1r.v v20, (a7)
-; ZVBB-NEXT:    add a7, a7, a2
-; ZVBB-NEXT:    srli a2, a2, 1
-; ZVBB-NEXT:    vl1r.v v11, (a3)
-; ZVBB-NEXT:    add a3, a1, a1
-; ZVBB-NEXT:    vl1r.v v13, (a4)
-; ZVBB-NEXT:    add a4, a2, a2
-; ZVBB-NEXT:    vl1r.v v15, (a0)
-; ZVBB-NEXT:    vl1r.v v19, (a5)
-; ZVBB-NEXT:    vl1r.v v17, (a6)
-; ZVBB-NEXT:    vl1r.v v21, (a7)
-; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT:    vmsne.vi v22, v8, 0
-; ZVBB-NEXT:    vmsne.vi v0, v10, 0
-; ZVBB-NEXT:    vmsne.vi v9, v12, 0
-; ZVBB-NEXT:    vmsne.vi v10, v14, 0
-; ZVBB-NEXT:    vmsne.vi v11, v18, 0
-; ZVBB-NEXT:    vmsne.vi v8, v16, 0
-; ZVBB-NEXT:    vmsne.vi v12, v20, 0
-; ZVBB-NEXT:    vsetvli zero, a3, e8, mf2, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v22, a1
-; ZVBB-NEXT:    vslideup.vx v9, v10, a1
-; ZVBB-NEXT:    vslideup.vx v8, v11, a1
-; ZVBB-NEXT:    vsetvli zero, a4, e8, m1, ta, ma
-; ZVBB-NEXT:    vslideup.vx v0, v9, a2
-; ZVBB-NEXT:    vslideup.vx v8, v12, a2
-; ZVBB-NEXT:    csrr a0, vlenb
-; ZVBB-NEXT:    li a1, 14
-; ZVBB-NEXT:    mul a0, a0, a1
 ; ZVBB-NEXT:    add sp, sp, a0
 ; ZVBB-NEXT:    addi sp, sp, 16
 ; ZVBB-NEXT:    ret
-  %res = call <vscale x 112 x i1> @llvm.vector.interleave7.nxv112i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g)
-  ret <vscale x 112 x i1> %res
+  %res = call <vscale x 28 x half> @llvm.vector.interleave7.nxv28f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6)
+  ret <vscale x 28 x half> %res
 }
 
-
-define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) nounwind {
+; RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -2720,7 +6908,7 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    sub sp, sp, a0
 ; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV32-NEXT:    vmv2r.v v26, v20
 ; RV32-NEXT:    addi a0, sp, 64
 ; RV32-NEXT:    vmv2r.v v24, v16
@@ -2748,51 +6936,51 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; RV32-NEXT:    vmv1r.v v22, v11
 ; RV32-NEXT:    add a6, a7, a2
 ; RV32-NEXT:    vmv1r.v v24, v15
-; RV32-NEXT:    vsseg7e8.v v1, (a0)
+; RV32-NEXT:    vsseg7e16.v v1, (a0)
 ; RV32-NEXT:    vmv1r.v v26, v19
-; RV32-NEXT:    vsseg7e8.v v21, (a1)
-; RV32-NEXT:    vl1r.v v18, (a6)
+; RV32-NEXT:    vsseg7e16.v v21, (a1)
+; RV32-NEXT:    vl1re16.v v18, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v19, (a6)
+; RV32-NEXT:    vl1re16.v v19, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v20, (a6)
+; RV32-NEXT:    vl1re16.v v20, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v21, (a6)
+; RV32-NEXT:    vl1re16.v v21, (a6)
 ; RV32-NEXT:    add a6, a3, a2
-; RV32-NEXT:    vl1r.v v10, (a6)
+; RV32-NEXT:    vl1re16.v v10, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v11, (a6)
-; RV32-NEXT:    vl1r.v v8, (a0)
-; RV32-NEXT:    vl1r.v v16, (a4)
-; RV32-NEXT:    vl1r.v v9, (a3)
-; RV32-NEXT:    vl1r.v v17, (a7)
+; RV32-NEXT:    vl1re16.v v11, (a6)
+; RV32-NEXT:    vl1re16.v v8, (a0)
+; RV32-NEXT:    vl1re16.v v16, (a4)
+; RV32-NEXT:    vl1re16.v v9, (a3)
+; RV32-NEXT:    vl1re16.v v17, (a7)
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    li a3, 14
 ; RV32-NEXT:    mul a0, a0, a3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 64
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v12, (a6)
+; RV32-NEXT:    vl1re16.v v12, (a6)
 ; RV32-NEXT:    add a6, a6, a2
-; RV32-NEXT:    vl1r.v v13, (a6)
+; RV32-NEXT:    vl1re16.v v13, (a6)
 ; RV32-NEXT:    add a6, a6, a2
 ; RV32-NEXT:    slli a2, a2, 3
 ; RV32-NEXT:    add a2, a0, a2
-; RV32-NEXT:    vl1r.v v14, (a6)
-; RV32-NEXT:    vl1r.v v15, (a1)
+; RV32-NEXT:    vl1re16.v v14, (a6)
+; RV32-NEXT:    vl1re16.v v15, (a1)
 ; RV32-NEXT:    add a5, a0, a5
 ; RV32-NEXT:    vs2r.v v20, (a5)
 ; RV32-NEXT:    vs4r.v v16, (a2)
 ; RV32-NEXT:    vs8r.v v8, (a0)
-; RV32-NEXT:    vl8r.v v16, (a2)
-; RV32-NEXT:    vl8r.v v8, (a0)
+; RV32-NEXT:    vl8re16.v v16, (a2)
+; RV32-NEXT:    vl8re16.v v8, (a0)
 ; RV32-NEXT:    addi sp, s0, -80
 ; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2802,7 +6990,7 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; RV64-NEXT:    slli a0, a0, 5
 ; RV64-NEXT:    sub sp, sp, a0
 ; RV64-NEXT:    andi sp, sp, -64
-; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vmv2r.v v26, v20
 ; RV64-NEXT:    addi a0, sp, 64
 ; RV64-NEXT:    vmv2r.v v24, v16
@@ -2830,51 +7018,51 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; RV64-NEXT:    vmv1r.v v22, v11
 ; RV64-NEXT:    add a6, a7, a2
 ; RV64-NEXT:    vmv1r.v v24, v15
-; RV64-NEXT:    vsseg7e8.v v1, (a0)
+; RV64-NEXT:    vsseg7e16.v v1, (a0)
 ; RV64-NEXT:    vmv1r.v v26, v19
-; RV64-NEXT:    vsseg7e8.v v21, (a1)
-; RV64-NEXT:    vl1r.v v18, (a6)
+; RV64-NEXT:    vsseg7e16.v v21, (a1)
+; RV64-NEXT:    vl1re16.v v18, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v19, (a6)
+; RV64-NEXT:    vl1re16.v v19, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v20, (a6)
+; RV64-NEXT:    vl1re16.v v20, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v21, (a6)
+; RV64-NEXT:    vl1re16.v v21, (a6)
 ; RV64-NEXT:    add a6, a3, a2
-; RV64-NEXT:    vl1r.v v10, (a6)
+; RV64-NEXT:    vl1re16.v v10, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v11, (a6)
-; RV64-NEXT:    vl1r.v v8, (a0)
-; RV64-NEXT:    vl1r.v v16, (a4)
-; RV64-NEXT:    vl1r.v v9, (a3)
-; RV64-NEXT:    vl1r.v v17, (a7)
+; RV64-NEXT:    vl1re16.v v11, (a6)
+; RV64-NEXT:    vl1re16.v v8, (a0)
+; RV64-NEXT:    vl1re16.v v16, (a4)
+; RV64-NEXT:    vl1re16.v v9, (a3)
+; RV64-NEXT:    vl1re16.v v17, (a7)
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    li a3, 14
 ; RV64-NEXT:    mul a0, a0, a3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 64
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v12, (a6)
+; RV64-NEXT:    vl1re16.v v12, (a6)
 ; RV64-NEXT:    add a6, a6, a2
-; RV64-NEXT:    vl1r.v v13, (a6)
+; RV64-NEXT:    vl1re16.v v13, (a6)
 ; RV64-NEXT:    add a6, a6, a2
 ; RV64-NEXT:    slli a2, a2, 3
 ; RV64-NEXT:    add a2, a0, a2
-; RV64-NEXT:    vl1r.v v14, (a6)
-; RV64-NEXT:    vl1r.v v15, (a1)
+; RV64-NEXT:    vl1re16.v v14, (a6)
+; RV64-NEXT:    vl1re16.v v15, (a1)
 ; RV64-NEXT:    add a5, a0, a5
 ; RV64-NEXT:    vs2r.v v20, (a5)
 ; RV64-NEXT:    vs4r.v v16, (a2)
 ; RV64-NEXT:    vs8r.v v8, (a0)
-; RV64-NEXT:    vl8r.v v16, (a2)
-; RV64-NEXT:    vl8r.v v8, (a0)
+; RV64-NEXT:    vl8re16.v v16, (a2)
+; RV64-NEXT:    vl8re16.v v8, (a0)
 ; RV64-NEXT:    addi sp, s0, -80
 ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -2884,7 +7072,7 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZVBB-RV32-NEXT:    slli a0, a0, 5
 ; ZVBB-RV32-NEXT:    sub sp, sp, a0
 ; ZVBB-RV32-NEXT:    andi sp, sp, -64
-; ZVBB-RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVBB-RV32-NEXT:    vmv2r.v v26, v20
 ; ZVBB-RV32-NEXT:    addi a0, sp, 64
 ; ZVBB-RV32-NEXT:    vmv2r.v v24, v16
@@ -2912,51 +7100,51 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZVBB-RV32-NEXT:    vmv1r.v v22, v11
 ; ZVBB-RV32-NEXT:    add a6, a7, a2
 ; ZVBB-RV32-NEXT:    vmv1r.v v24, v15
-; ZVBB-RV32-NEXT:    vsseg7e8.v v1, (a0)
+; ZVBB-RV32-NEXT:    vsseg7e16.v v1, (a0)
 ; ZVBB-RV32-NEXT:    vmv1r.v v26, v19
-; ZVBB-RV32-NEXT:    vsseg7e8.v v21, (a1)
-; ZVBB-RV32-NEXT:    vl1r.v v18, (a6)
+; ZVBB-RV32-NEXT:    vsseg7e16.v v21, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v18, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v19, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v19, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v20, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v20, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v21, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v21, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a3, a2
-; ZVBB-RV32-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v10, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v11, (a6)
-; ZVBB-RV32-NEXT:    vl1r.v v8, (a0)
-; ZVBB-RV32-NEXT:    vl1r.v v16, (a4)
-; ZVBB-RV32-NEXT:    vl1r.v v9, (a3)
-; ZVBB-RV32-NEXT:    vl1r.v v17, (a7)
+; ZVBB-RV32-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl1re16.v v16, (a4)
+; ZVBB-RV32-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT:    vl1re16.v v17, (a7)
 ; ZVBB-RV32-NEXT:    csrr a0, vlenb
 ; ZVBB-RV32-NEXT:    li a3, 14
 ; ZVBB-RV32-NEXT:    mul a0, a0, a3
 ; ZVBB-RV32-NEXT:    add a0, sp, a0
 ; ZVBB-RV32-NEXT:    addi a0, a0, 64
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v12, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
-; ZVBB-RV32-NEXT:    vl1r.v v13, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v13, (a6)
 ; ZVBB-RV32-NEXT:    add a6, a6, a2
 ; ZVBB-RV32-NEXT:    slli a2, a2, 3
 ; ZVBB-RV32-NEXT:    add a2, a0, a2
-; ZVBB-RV32-NEXT:    vl1r.v v14, (a6)
-; ZVBB-RV32-NEXT:    vl1r.v v15, (a1)
+; ZVBB-RV32-NEXT:    vl1re16.v v14, (a6)
+; ZVBB-RV32-NEXT:    vl1re16.v v15, (a1)
 ; ZVBB-RV32-NEXT:    add a5, a0, a5
 ; ZVBB-RV32-NEXT:    vs2r.v v20, (a5)
 ; ZVBB-RV32-NEXT:    vs4r.v v16, (a2)
 ; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
-; ZVBB-RV32-NEXT:    vl8r.v v16, (a2)
-; ZVBB-RV32-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT:    vl8re16.v v8, (a0)
 ; ZVBB-RV32-NEXT:    addi sp, s0, -80
 ; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
 ; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -2966,7 +7154,7 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZVBB-RV64-NEXT:    slli a0, a0, 5
 ; ZVBB-RV64-NEXT:    sub sp, sp, a0
 ; ZVBB-RV64-NEXT:    andi sp, sp, -64
-; ZVBB-RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVBB-RV64-NEXT:    vmv2r.v v26, v20
 ; ZVBB-RV64-NEXT:    addi a0, sp, 64
 ; ZVBB-RV64-NEXT:    vmv2r.v v24, v16
@@ -2994,51 +7182,51 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZVBB-RV64-NEXT:    vmv1r.v v22, v11
 ; ZVBB-RV64-NEXT:    add a6, a7, a2
 ; ZVBB-RV64-NEXT:    vmv1r.v v24, v15
-; ZVBB-RV64-NEXT:    vsseg7e8.v v1, (a0)
+; ZVBB-RV64-NEXT:    vsseg7e16.v v1, (a0)
 ; ZVBB-RV64-NEXT:    vmv1r.v v26, v19
-; ZVBB-RV64-NEXT:    vsseg7e8.v v21, (a1)
-; ZVBB-RV64-NEXT:    vl1r.v v18, (a6)
+; ZVBB-RV64-NEXT:    vsseg7e16.v v21, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v18, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v19, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v19, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v20, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v20, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v21, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v21, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a3, a2
-; ZVBB-RV64-NEXT:    vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v10, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v11, (a6)
-; ZVBB-RV64-NEXT:    vl1r.v v8, (a0)
-; ZVBB-RV64-NEXT:    vl1r.v v16, (a4)
-; ZVBB-RV64-NEXT:    vl1r.v v9, (a3)
-; ZVBB-RV64-NEXT:    vl1r.v v17, (a7)
+; ZVBB-RV64-NEXT:    vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl1re16.v v16, (a4)
+; ZVBB-RV64-NEXT:    vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT:    vl1re16.v v17, (a7)
 ; ZVBB-RV64-NEXT:    csrr a0, vlenb
 ; ZVBB-RV64-NEXT:    li a3, 14
 ; ZVBB-RV64-NEXT:    mul a0, a0, a3
 ; ZVBB-RV64-NEXT:    add a0, sp, a0
 ; ZVBB-RV64-NEXT:    addi a0, a0, 64
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v12, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
-; ZVBB-RV64-NEXT:    vl1r.v v13, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v13, (a6)
 ; ZVBB-RV64-NEXT:    add a6, a6, a2
 ; ZVBB-RV64-NEXT:    slli a2, a2, 3
 ; ZVBB-RV64-NEXT:    add a2, a0, a2
-; ZVBB-RV64-NEXT:    vl1r.v v14, (a6)
-; ZVBB-RV64-NEXT:    vl1r.v v15, (a1)
+; ZVBB-RV64-NEXT:    vl1re16.v v14, (a6)
+; ZVBB-RV64-NEXT:    vl1re16.v v15, (a1)
 ; ZVBB-RV64-NEXT:    add a5, a0, a5
 ; ZVBB-RV64-NEXT:    vs2r.v v20, (a5)
 ; ZVBB-RV64-NEXT:    vs4r.v v16, (a2)
 ; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
-; ZVBB-RV64-NEXT:    vl8r.v v16, (a2)
-; ZVBB-RV64-NEXT:    vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT:    vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT:    vl8re16.v v8, (a0)
 ; ZVBB-RV64-NEXT:    addi sp, s0, -80
 ; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZIP-LABEL: vector_interleave_nxv56f16_nxv8f16:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3048,7 +7236,7 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZIP-NEXT:    slli a0, a0, 5
 ; ZIP-NEXT:    sub sp, sp, a0
 ; ZIP-NEXT:    andi sp, sp, -64
-; ZIP-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZIP-NEXT:    vmv2r.v v26, v20
 ; ZIP-NEXT:    addi a0, sp, 64
 ; ZIP-NEXT:    vmv2r.v v24, v16
@@ -3076,57 +7264,205 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
 ; ZIP-NEXT:    vmv1r.v v22, v11
 ; ZIP-NEXT:    add a6, a7, a2
 ; ZIP-NEXT:    vmv1r.v v24, v15
-; ZIP-NEXT:    vsseg7e8.v v1, (a0)
+; ZIP-NEXT:    vsseg7e16.v v1, (a0)
 ; ZIP-NEXT:    vmv1r.v v26, v19
-; ZIP-NEXT:    vsseg7e8.v v21, (a1)
-; ZIP-NEXT:    vl1r.v v18, (a6)
+; ZIP-NEXT:    vsseg7e16.v v21, (a1)
+; ZIP-NEXT:    vl1re16.v v18, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v19, (a6)
+; ZIP-NEXT:    vl1re16.v v19, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v20, (a6)
+; ZIP-NEXT:    vl1re16.v v20, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v21, (a6)
+; ZIP-NEXT:    vl1re16.v v21, (a6)
 ; ZIP-NEXT:    add a6, a3, a2
-; ZIP-NEXT:    vl1r.v v10, (a6)
+; ZIP-NEXT:    vl1re16.v v10, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v11, (a6)
-; ZIP-NEXT:    vl1r.v v8, (a0)
-; ZIP-NEXT:    vl1r.v v16, (a4)
-; ZIP-NEXT:    vl1r.v v9, (a3)
-; ZIP-NEXT:    vl1r.v v17, (a7)
+; ZIP-NEXT:    vl1re16.v v11, (a6)
+; ZIP-NEXT:    vl1re16.v v8, (a0)
+; ZIP-NEXT:    vl1re16.v v16, (a4)
+; ZIP-NEXT:    vl1re16.v v9, (a3)
+; ZIP-NEXT:    vl1re16.v v17, (a7)
 ; ZIP-NEXT:    csrr a0, vlenb
 ; ZIP-NEXT:    li a3, 14
 ; ZIP-NEXT:    mul a0, a0, a3
 ; ZIP-NEXT:    add a0, sp, a0
 ; ZIP-NEXT:    addi a0, a0, 64
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v12, (a6)
+; ZIP-NEXT:    vl1re16.v v12, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
-; ZIP-NEXT:    vl1r.v v13, (a6)
+; ZIP-NEXT:    vl1re16.v v13, (a6)
 ; ZIP-NEXT:    add a6, a6, a2
 ; ZIP-NEXT:    slli a2, a2, 3
 ; ZIP-NEXT:    add a2, a0, a2
-; ZIP-NEXT:    vl1r.v v14, (a6)
-; ZIP-NEXT:    vl1r.v v15, (a1)
+; ZIP-NEXT:    vl1re16.v v14, (a6)
+; ZIP-NEXT:    vl1re16.v v15, (a1)
 ; ZIP-NEXT:    add a5, a0, a5
 ; ZIP-NEXT:    vs2r.v v20, (a5)
 ; ZIP-NEXT:    vs4r.v v16, (a2)
 ; ZIP-NEXT:    vs8r.v v8, (a0)
-; ZIP-NEXT:    vl8r.v v16, (a2)
-; ZIP-NEXT:    vl8r.v v8, (a0)
+; ZIP-NEXT:    vl8re16.v v16, (a2)
+; ZIP-NEXT:    vl8re16.v v8, (a0)
 ; ZIP-NEXT:    addi sp, s0, -80
 ; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g)
-  ret <vscale x 112 x i8> %res
+  %res = call <vscale x 56 x half> @llvm.vector.interleave7.nxv56f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6)
+  ret <vscale x 56 x half> %res
 }
 
+define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    srli a1, a1, 2
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    add a5, a4, a2
+; CHECK-NEXT:    add a6, a5, a2
+; CHECK-NEXT:    vsetvli a7, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsseg7e16.v v8, (a0)
+; CHECK-NEXT:    add a7, a6, a2
+; CHECK-NEXT:    vle16.v v8, (a7)
+; CHECK-NEXT:    vle16.v v10, (a6)
+; CHECK-NEXT:    add a6, a1, a1
+; CHECK-NEXT:    add a2, a7, a2
+; CHECK-NEXT:    vle16.v v12, (a5)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v10, v8, a1
+; CHECK-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v11, (a2)
+; CHECK-NEXT:    vle16.v v9, (a4)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v12, a1
+; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v12, (a3)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    srli a1, a1, 2
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    add a6, a5, a2
+; ZVBB-NEXT:    vsetvli a7, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vsseg7e16.v v8, (a0)
+; ZVBB-NEXT:    add a7, a6, a2
+; ZVBB-NEXT:    vle16.v v8, (a7)
+; ZVBB-NEXT:    vle16.v v10, (a6)
+; ZVBB-NEXT:    add a6, a1, a1
+; ZVBB-NEXT:    add a2, a7, a2
+; ZVBB-NEXT:    vle16.v v12, (a5)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v10, v8, a1
+; ZVBB-NEXT:    vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v11, (a2)
+; ZVBB-NEXT:    vle16.v v9, (a4)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v12, a1
+; ZVBB-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT:    vle16.v v12, (a3)
+; ZVBB-NEXT:    vle16.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v12, a1
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 14 x bfloat> @llvm.vector.interleave7.nxv14bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6)
+  ret <vscale x 14 x bfloat> %res
+}
 
-define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind {
+define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg7e16.v v8, (a0)
+; CHECK-NEXT:    vl1re16.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v11, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vl1re16.v v9, (a2)
+; CHECK-NEXT:    vl1re16.v v12, (a3)
+; CHECK-NEXT:    vl1re16.v v13, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re16.v v14, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT:    vsseg7e16.v v8, (a0)
+; ZVBB-NEXT:    vl1re16.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v11, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re16.v v8, (a0)
+; ZVBB-NEXT:    add a0, a3, a1
+; ZVBB-NEXT:    vl1re16.v v9, (a2)
+; ZVBB-NEXT:    vl1re16.v v12, (a3)
+; ZVBB-NEXT:    vl1re16.v v13, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re16.v v14, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 28 x bfloat> @llvm.vector.interleave7.nxv28bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6)
+  ret <vscale x 28 x bfloat> %res
+}
+
+define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) nounwind {
+; RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -3208,7 +7544,7 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3290,7 +7626,7 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -3372,7 +7708,7 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3454,7 +7790,7 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZIP-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3535,14 +7871,162 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g)
-  ret <vscale x 56 x i16> %res
+  %res = call <vscale x 56 x bfloat> @llvm.vector.interleave7.nxv56bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6)
+  ret <vscale x 56 x bfloat> %res
 }
 
+define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv7f32_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a2, a1, 1
+; CHECK-NEXT:    srli a1, a1, 3
+; CHECK-NEXT:    add a3, a0, a2
+; CHECK-NEXT:    add a4, a3, a2
+; CHECK-NEXT:    add a5, a4, a2
+; CHECK-NEXT:    add a6, a5, a2
+; CHECK-NEXT:    vsetvli a7, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsseg7e32.v v8, (a0)
+; CHECK-NEXT:    add a7, a6, a2
+; CHECK-NEXT:    vle32.v v8, (a7)
+; CHECK-NEXT:    vle32.v v10, (a6)
+; CHECK-NEXT:    add a6, a1, a1
+; CHECK-NEXT:    add a2, a7, a2
+; CHECK-NEXT:    vle32.v v12, (a5)
+; CHECK-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v10, v8, a1
+; CHECK-NEXT:    vsetvli a5, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v11, (a2)
+; CHECK-NEXT:    vle32.v v9, (a4)
+; CHECK-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v9, v12, a1
+; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vle32.v v12, (a3)
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+;
+; ZVBB-LABEL: vector_interleave_nxv7f32_nxv1f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    srli a2, a1, 1
+; ZVBB-NEXT:    srli a1, a1, 3
+; ZVBB-NEXT:    add a3, a0, a2
+; ZVBB-NEXT:    add a4, a3, a2
+; ZVBB-NEXT:    add a5, a4, a2
+; ZVBB-NEXT:    add a6, a5, a2
+; ZVBB-NEXT:    vsetvli a7, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vsseg7e32.v v8, (a0)
+; ZVBB-NEXT:    add a7, a6, a2
+; ZVBB-NEXT:    vle32.v v8, (a7)
+; ZVBB-NEXT:    vle32.v v10, (a6)
+; ZVBB-NEXT:    add a6, a1, a1
+; ZVBB-NEXT:    add a2, a7, a2
+; ZVBB-NEXT:    vle32.v v12, (a5)
+; ZVBB-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v10, v8, a1
+; ZVBB-NEXT:    vsetvli a5, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vle32.v v11, (a2)
+; ZVBB-NEXT:    vle32.v v9, (a4)
+; ZVBB-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v9, v12, a1
+; ZVBB-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
+; ZVBB-NEXT:    vle32.v v12, (a3)
+; ZVBB-NEXT:    vle32.v v8, (a0)
+; ZVBB-NEXT:    vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT:    vslideup.vx v8, v12, a1
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a0, a0, 2
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 7 x float> @llvm.vector.interleave7.nxv7f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6)
+  ret <vscale x 7 x float> %res
+}
 
-define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind {
+define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv14f32_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsseg7e32.v v8, (a0)
+; CHECK-NEXT:    vl1re32.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re32.v v11, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re32.v v8, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vl1re32.v v9, (a2)
+; CHECK-NEXT:    vl1re32.v v12, (a3)
+; CHECK-NEXT:    vl1re32.v v13, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re32.v v14, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-LABEL: vector_interleave_nxv14f32_nxv2f32:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e32, m1, ta, ma
+; ZVBB-NEXT:    vsseg7e32.v v8, (a0)
+; ZVBB-NEXT:    vl1re32.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re32.v v11, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re32.v v8, (a0)
+; ZVBB-NEXT:    add a0, a3, a1
+; ZVBB-NEXT:    vl1re32.v v9, (a2)
+; ZVBB-NEXT:    vl1re32.v v12, (a3)
+; ZVBB-NEXT:    vl1re32.v v13, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re32.v v14, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 14 x float> @llvm.vector.interleave7.nxv14f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6)
+  ret <vscale x 14 x float> %res
+}
+
+define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) nounwind {
+; RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -3624,7 +8108,7 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3706,7 +8190,7 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -3788,7 +8272,7 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3870,7 +8354,7 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZIP-LABEL: vector_interleave_nxv28f32_nxv4f32:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -3951,13 +8435,78 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g)
-  ret <vscale x 28 x i32> %res
+  %res = call <vscale x 28 x float> @llvm.vector.interleave7.nxv28f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6)
+  ret <vscale x 28 x float> %res
 }
 
-define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind {
+define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6) nounwind {
+; CHECK-LABEL: vector_interleave_nxv7f64_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    sub sp, sp, a0
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a2, a0, a1
+; CHECK-NEXT:    add a3, a2, a1
+; CHECK-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsseg7e64.v v8, (a0)
+; CHECK-NEXT:    vl1re64.v v10, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re64.v v11, (a3)
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    vl1re64.v v8, (a0)
+; CHECK-NEXT:    add a0, a3, a1
+; CHECK-NEXT:    vl1re64.v v9, (a2)
+; CHECK-NEXT:    vl1re64.v v12, (a3)
+; CHECK-NEXT:    vl1re64.v v13, (a0)
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    vl1re64.v v14, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 3
+; CHECK-NEXT:    sub a0, a1, a0
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
 ;
-; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv7f64_nxv1f64:
+; ZVBB:       # %bb.0:
+; ZVBB-NEXT:    addi sp, sp, -16
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    sub sp, sp, a0
+; ZVBB-NEXT:    addi a0, sp, 16
+; ZVBB-NEXT:    csrr a1, vlenb
+; ZVBB-NEXT:    add a2, a0, a1
+; ZVBB-NEXT:    add a3, a2, a1
+; ZVBB-NEXT:    vsetvli a4, zero, e64, m1, ta, ma
+; ZVBB-NEXT:    vsseg7e64.v v8, (a0)
+; ZVBB-NEXT:    vl1re64.v v10, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re64.v v11, (a3)
+; ZVBB-NEXT:    add a3, a3, a1
+; ZVBB-NEXT:    vl1re64.v v8, (a0)
+; ZVBB-NEXT:    add a0, a3, a1
+; ZVBB-NEXT:    vl1re64.v v9, (a2)
+; ZVBB-NEXT:    vl1re64.v v12, (a3)
+; ZVBB-NEXT:    vl1re64.v v13, (a0)
+; ZVBB-NEXT:    add a0, a0, a1
+; ZVBB-NEXT:    vl1re64.v v14, (a0)
+; ZVBB-NEXT:    csrr a0, vlenb
+; ZVBB-NEXT:    slli a1, a0, 3
+; ZVBB-NEXT:    sub a0, a1, a0
+; ZVBB-NEXT:    add sp, sp, a0
+; ZVBB-NEXT:    addi sp, sp, 16
+; ZVBB-NEXT:    ret
+  %res = call <vscale x 7 x double> @llvm.vector.interleave7.nxv7f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6)
+  ret <vscale x 7 x double> %res
+}
+
+define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6) nounwind {
+; RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -80
 ; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -4039,7 +8588,7 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
 ; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -80
 ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -4121,7 +8670,7 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
 ; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
 ;
-; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
 ; ZVBB-RV32:       # %bb.0:
 ; ZVBB-RV32-NEXT:    addi sp, sp, -80
 ; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
@@ -4203,7 +8752,7 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
 ; ZVBB-RV32-NEXT:    addi sp, sp, 80
 ; ZVBB-RV32-NEXT:    ret
 ;
-; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
 ; ZVBB-RV64:       # %bb.0:
 ; ZVBB-RV64-NEXT:    addi sp, sp, -80
 ; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -4285,7 +8834,7 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
 ; ZVBB-RV64-NEXT:    addi sp, sp, 80
 ; ZVBB-RV64-NEXT:    ret
 ;
-; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZIP-LABEL: vector_interleave_nxv14f64_nxv2f64:
 ; ZIP:       # %bb.0:
 ; ZIP-NEXT:    addi sp, sp, -80
 ; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
@@ -4366,6 +8915,6 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
 ; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; ZIP-NEXT:    addi sp, sp, 80
 ; ZIP-NEXT:    ret
-  %res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g)
-  ret <vscale x 14 x i64> %res
+  %res = call <vscale x 14 x double> @llvm.vector.interleave7.nxv14f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6)
+  ret <vscale x 14 x double> %res
 }
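
For reference, here is a minimal standalone sketch (function names hypothetical) of the IR shape the new scalable floating-point tests exercise; it simply mirrors the interleave7 call already checked in the diff above, plus the matching deinterleave7 from the same intrinsic family:

define <vscale x 28 x half> @sketch_interleave7(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) {
  ; Interleave seven nxv4f16 operands element-wise into one nxv28f16 result.
  %res = call <vscale x 28 x half> @llvm.vector.interleave7.nxv28f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6)
  ret <vscale x 28 x half> %res
}

define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @sketch_deinterleave7(<vscale x 28 x half> %vec) {
  ; Split the interleaved nxv28f16 vector back into its seven nxv4f16 fields.
  %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave7.nxv28f16(<vscale x 28 x half> %vec)
  ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
}

As the generated checks above show, the interleave cases are currently lowered through unit-stride segment stores into a stack slot (vsseg7e16/e32/e64) followed by whole-register reloads (vl1re16/vl1re32/vl1re64).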


        

