[llvm] [RISCV] Add codegen tests for `vector.(de)interleave3/5/7` on FP scalable vectors (PR #137257)
Min-Yih Hsu via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 24 15:41:54 PDT 2025
https://github.com/mshockwave updated https://github.com/llvm/llvm-project/pull/137257
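For context on what the new tests exercise: `llvm.vector.deinterleave3` splits a vector whose element count is a multiple of three into three result vectors, with lane i of result field k taking input element 3*i + k (deinterleave5/7 generalize this to factors 5 and 7). Below is a minimal sketch of such a call on an FP scalable vector; the function name @sketch_deinterleave3 is made up for illustration and is not part of the patch, which adds tests of this shape across half/bfloat/float/double. As the CHECK lines in the diff show, the current lowering stores the source to the stack and reloads it with a segmented load (vlseg3eXX).

; Minimal sketch (not from the patch): deinterleave <vscale x 6 x float>
; into three <vscale x 2 x float> results; result field k gets input
; elements k, k+3, k+6, ...
define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @sketch_deinterleave3(<vscale x 6 x float> %v) {
  %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave3.nxv6f32(<vscale x 6 x float> %v)
  ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
}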
From 55c5f13378c487bf465f72298d24438854b5e1ed Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Thu, 24 Apr 2025 14:51:42 -0700
Subject: [PATCH 1/2] [RISCV] Add codegen tests for floating-point
`vector.(de)interleave3/5/7`
---
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 1456 +++-
.../CodeGen/RISCV/rvv/vector-interleave.ll | 7557 ++++++++++++++---
2 files changed, 7560 insertions(+), 1453 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 8a71cd0826672..bb4e1f58588f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -347,6 +347,629 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
}
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv48i1(<vscale x 48 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v16, v10, 1, v0
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: srli a0, a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v18, v10, 1, v0
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v8, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v20, v10, 1, v0
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vlseg3e8.v v8, (a0)
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vmsne.vi v9, v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave3.nxv48i1(<vscale x 48 x i1> %vec)
+ ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv48i8(<vscale x 48 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vlseg3e8.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave3.nxv48i8(<vscale x 48 x i8> %vec)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv24i16(<vscale x 24 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave3.nxv24i16(<vscale x 24 x i16> %vec)
+ ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv12i32(<vscale x 12 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vlseg3e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> %vec)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv6i64(<vscale x 6 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vlseg3e64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> %vec)
+ ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
+
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv80i1(<vscale x 80 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: srli a2, a0, 1
+; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v9, a2
+; CHECK-NEXT: sub a0, a0, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v20, v12, 1, v0
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v10, v15
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v11, v12
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v8, v21
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v9, v14
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vmv1r.v v12, v13
+; CHECK-NEXT: vs8r.v v8, (a1)
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg5e8.v v8, (a0)
+; CHECK-NEXT: vlseg5e8.v v14, (a1)
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v10
+; CHECK-NEXT: vmv1r.v v21, v14
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v20, 0
+; CHECK-NEXT: vmv1r.v v14, v9
+; CHECK-NEXT: vmsne.vi v8, v14, 0
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmsne.vi v9, v22, 0
+; CHECK-NEXT: vmv1r.v v16, v11
+; CHECK-NEXT: vmsne.vi v10, v16, 0
+; CHECK-NEXT: vmv1r.v v13, v18
+; CHECK-NEXT: vmsne.vi v11, v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave5.nxv80i1(<vscale x 80 x i1> %vec)
+ ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv80i8(<vscale x 80 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v26, v15
+; CHECK-NEXT: vmv1r.v v27, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v24, v13
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v25, v14
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v28, v17
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg5e8.v v12, (a0)
+; CHECK-NEXT: vlseg5e8.v v18, (a1)
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vmv1r.v v9, v18
+; CHECK-NEXT: vmv1r.v v18, v13
+; CHECK-NEXT: vmv2r.v v12, v14
+; CHECK-NEXT: vmv1r.v v13, v20
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v17, v22
+; CHECK-NEXT: vmv2r.v v10, v18
+; CHECK-NEXT: vmv2r.v v14, v20
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave5.nxv80i8(<vscale x 80 x i8> %vec)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv40i16(<vscale x 40 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v26, v15
+; CHECK-NEXT: vmv1r.v v27, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v24, v13
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v25, v14
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v28, v17
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg5e16.v v12, (a0)
+; CHECK-NEXT: vlseg5e16.v v18, (a1)
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vmv1r.v v9, v18
+; CHECK-NEXT: vmv1r.v v18, v13
+; CHECK-NEXT: vmv2r.v v12, v14
+; CHECK-NEXT: vmv1r.v v13, v20
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v17, v22
+; CHECK-NEXT: vmv2r.v v10, v18
+; CHECK-NEXT: vmv2r.v v14, v20
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave5.nxv40i16(<vscale x 40 x i16> %vec)
+ ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv20i32(<vscale x 20 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v26, v15
+; CHECK-NEXT: vmv1r.v v27, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v24, v13
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v25, v14
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v28, v17
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg5e32.v v12, (a0)
+; CHECK-NEXT: vlseg5e32.v v18, (a1)
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vmv1r.v v9, v18
+; CHECK-NEXT: vmv1r.v v18, v13
+; CHECK-NEXT: vmv2r.v v12, v14
+; CHECK-NEXT: vmv1r.v v13, v20
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v17, v22
+; CHECK-NEXT: vmv2r.v v10, v18
+; CHECK-NEXT: vmv2r.v v14, v20
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave5.nxv20i32(<vscale x 20 x i32> %vec)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv10i64(<vscale x 10 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv10i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v26, v15
+; CHECK-NEXT: vmv1r.v v27, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v24, v13
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v25, v14
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v28, v17
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg5e64.v v12, (a0)
+; CHECK-NEXT: vlseg5e64.v v18, (a1)
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vmv1r.v v9, v18
+; CHECK-NEXT: vmv1r.v v18, v13
+; CHECK-NEXT: vmv2r.v v12, v14
+; CHECK-NEXT: vmv1r.v v13, v20
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v17, v22
+; CHECK-NEXT: vmv2r.v v10, v18
+; CHECK-NEXT: vmv2r.v v14, v20
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave5.nxv10i64(<vscale x 10 x i64> %vec)
+ ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
+
+define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv112i1(<vscale x 112 x i1> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT: srli a1, a0, 2
+; CHECK-NEXT: srli a2, a0, 1
+; CHECK-NEXT: srli a3, a0, 3
+; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: slli a3, a3, 1
+; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v9, a2
+; CHECK-NEXT: sub a0, a0, a3
+; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v20, v12, 1, v0
+; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v22, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v8, a1
+; CHECK-NEXT: vmv1r.v v10, v15
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v24, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v11, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v8, a2
+; CHECK-NEXT: vmv1r.v v8, v23
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v9, v14
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v12, v25
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v13, v14
+; CHECK-NEXT: vs8r.v v16, (a0)
+; CHECK-NEXT: vmv1r.v v14, v15
+; CHECK-NEXT: vs8r.v v8, (a1)
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vlseg7e8.v v8, (a0)
+; CHECK-NEXT: vlseg7e8.v v16, (a1)
+; CHECK-NEXT: vmv2r.v v24, v8
+; CHECK-NEXT: vmv2r.v v26, v10
+; CHECK-NEXT: vmv2r.v v28, v12
+; CHECK-NEXT: vmv1r.v v25, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vmv1r.v v16, v9
+; CHECK-NEXT: vmsne.vi v8, v16, 0
+; CHECK-NEXT: vmv1r.v v27, v18
+; CHECK-NEXT: vmsne.vi v9, v26, 0
+; CHECK-NEXT: vmv1r.v v18, v11
+; CHECK-NEXT: vmsne.vi v10, v18, 0
+; CHECK-NEXT: vmv1r.v v29, v20
+; CHECK-NEXT: vmsne.vi v11, v28, 0
+; CHECK-NEXT: vmv1r.v v20, v13
+; CHECK-NEXT: vmsne.vi v12, v20, 0
+; CHECK-NEXT: vmv1r.v v15, v22
+; CHECK-NEXT: vmsne.vi v13, v14, 0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave7.nxv112i1(<vscale x 112 x i1> %vec)
+ ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+}
+
+
+define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv112i8(<vscale x 112 x i8> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v30, v21
+; CHECK-NEXT: vmv1r.v v28, v19
+; CHECK-NEXT: vmv1r.v v29, v20
+; CHECK-NEXT: vmv1r.v v26, v17
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v27, v18
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v24, v15
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v25, v16
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg7e8.v v14, (a0)
+; CHECK-NEXT: vlseg7e8.v v22, (a1)
+; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vmv1r.v v9, v22
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vmv2r.v v12, v16
+; CHECK-NEXT: vmv1r.v v13, v24
+; CHECK-NEXT: vmv1r.v v24, v17
+; CHECK-NEXT: vmv2r.v v16, v18
+; CHECK-NEXT: vmv1r.v v17, v26
+; CHECK-NEXT: vmv1r.v v26, v19
+; CHECK-NEXT: vmv1r.v v21, v28
+; CHECK-NEXT: vmv2r.v v10, v22
+; CHECK-NEXT: vmv2r.v v14, v24
+; CHECK-NEXT: vmv2r.v v18, v26
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave7.nxv112i8(<vscale x 112 x i8> %vec)
+ ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+}
+
+
+define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv56i16(<vscale x 56 x i16> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v30, v21
+; CHECK-NEXT: vmv1r.v v28, v19
+; CHECK-NEXT: vmv1r.v v29, v20
+; CHECK-NEXT: vmv1r.v v26, v17
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v27, v18
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v24, v15
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v25, v16
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg7e16.v v14, (a0)
+; CHECK-NEXT: vlseg7e16.v v22, (a1)
+; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vmv1r.v v9, v22
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vmv2r.v v12, v16
+; CHECK-NEXT: vmv1r.v v13, v24
+; CHECK-NEXT: vmv1r.v v24, v17
+; CHECK-NEXT: vmv2r.v v16, v18
+; CHECK-NEXT: vmv1r.v v17, v26
+; CHECK-NEXT: vmv1r.v v26, v19
+; CHECK-NEXT: vmv1r.v v21, v28
+; CHECK-NEXT: vmv2r.v v10, v22
+; CHECK-NEXT: vmv2r.v v14, v24
+; CHECK-NEXT: vmv2r.v v18, v26
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave7.nxv56i16(<vscale x 56 x i16> %vec)
+ ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv28i32(<vscale x 28 x i32> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v30, v21
+; CHECK-NEXT: vmv1r.v v28, v19
+; CHECK-NEXT: vmv1r.v v29, v20
+; CHECK-NEXT: vmv1r.v v26, v17
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v27, v18
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v24, v15
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v25, v16
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg7e32.v v14, (a0)
+; CHECK-NEXT: vlseg7e32.v v22, (a1)
+; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vmv1r.v v9, v22
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vmv2r.v v12, v16
+; CHECK-NEXT: vmv1r.v v13, v24
+; CHECK-NEXT: vmv1r.v v24, v17
+; CHECK-NEXT: vmv2r.v v16, v18
+; CHECK-NEXT: vmv1r.v v17, v26
+; CHECK-NEXT: vmv1r.v v26, v19
+; CHECK-NEXT: vmv1r.v v21, v28
+; CHECK-NEXT: vmv2r.v v10, v22
+; CHECK-NEXT: vmv2r.v v14, v24
+; CHECK-NEXT: vmv2r.v v18, v26
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave7.nxv28i32(<vscale x 28 x i32> %vec)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+}
+
+
+define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv14i64(<vscale x 14 x i64> %vec) nounwind {
+; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v30, v21
+; CHECK-NEXT: vmv1r.v v28, v19
+; CHECK-NEXT: vmv1r.v v29, v20
+; CHECK-NEXT: vmv1r.v v26, v17
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vmv1r.v v27, v18
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vmv1r.v v24, v15
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vmv1r.v v25, v16
+; CHECK-NEXT: vs8r.v v24, (a1)
+; CHECK-NEXT: vlseg7e64.v v14, (a0)
+; CHECK-NEXT: vlseg7e64.v v22, (a1)
+; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vmv1r.v v9, v22
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vmv2r.v v12, v16
+; CHECK-NEXT: vmv1r.v v13, v24
+; CHECK-NEXT: vmv1r.v v24, v17
+; CHECK-NEXT: vmv2r.v v16, v18
+; CHECK-NEXT: vmv1r.v v17, v26
+; CHECK-NEXT: vmv1r.v v26, v19
+; CHECK-NEXT: vmv1r.v v21, v28
+; CHECK-NEXT: vmv2r.v v10, v22
+; CHECK-NEXT: vmv2r.v v14, v24
+; CHECK-NEXT: vmv2r.v v18, v26
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave7.nxv14i64(<vscale x 14 x i64> %vec)
+ ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+}
; Floats
@@ -692,72 +1315,145 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
ret {<vscale x 8 x double>, <vscale x 8 x double>} %retval
}
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv48i1(<vscale x 48 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv48i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv6f16(<vscale x 6 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv6f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v8, v0
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vmerge.vim v16, v10, 1, v0
-; CHECK-NEXT: srli a1, a0, 2
-; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v18, v10, 1, v0
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v8, a0
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v20, v10, 1, v0
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vlseg3e8.v v8, (a0)
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vmsne.vi v8, v10, 0
-; CHECK-NEXT: vmsne.vi v9, v12, 0
+; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave3.nxv48i1(<vscale x 48 x i1> %vec)
- ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+ %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave3.nxv6f16(<vscale x 6 x half> %arg)
+ ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
}
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv12f16(<vscale x 12 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv12f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave3.nxv12f16(<vscale x 12 x half> %arg)
+ ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
+}
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv48i8(<vscale x 48 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv48i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv24f16(<vscale x 24 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv24f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vlseg3e8.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave3.nxv48i8(<vscale x 48 x i8> %vec)
- ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+ %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave3.nxv24f16(<vscale x 24 x half> %arg)
+ ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv6bf16(<vscale x 6 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v10, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave3.nxv6bf16(<vscale x 6 x bfloat> %arg)
+ ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
}
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv12bf16(<vscale x 12 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv12bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg3e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave3.nxv12bf16(<vscale x 12 x bfloat> %arg)
+ ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv24i16(<vscale x 24 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv24i16:
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv24bf16(<vscale x 24 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv24bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -765,20 +1461,76 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_dein
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave3.nxv24i16(<vscale x 24 x i16> %vec)
- ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+ %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave3.nxv24bf16(<vscale x 24 x bfloat> %arg)
+ ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv3f32(<vscale x 3 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 3
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v10, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v10, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vlseg3e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave3.nxv3f32(<vscale x 3 x float> %arg)
+ ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv12i32(<vscale x 12 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv12i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv6f32(<vscale x 6 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv6f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vlseg3e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave3.nxv6f32(<vscale x 6 x float> %arg)
+ ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv12f32(<vscale x 12 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv12f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
@@ -786,20 +1538,47 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_dein
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> %vec)
- ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+ %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave3.nxv12f32(<vscale x 12 x float> %arg)
+ ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv3f64(<vscale x 3 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv3f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT: vlseg3e64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave3.nxv3f64(<vscale x 3 x double> %arg)
+ ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv6i64(<vscale x 6 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv6i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv6f64(<vscale x 6 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv6f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
@@ -807,89 +1586,79 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_dein
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> %vec)
- ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+ %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave3.nxv6f64(<vscale x 6 x double> %arg)
+ ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
}
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv80i1(<vscale x 80 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv80i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv10f16(<vscale x 10 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv10f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT: srli a1, a0, 2
-; CHECK-NEXT: srli a2, a0, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: srli a1, a0, 3
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v9, a2
-; CHECK-NEXT: sub a0, a0, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v20, v12, 1, v0
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v9, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v10, v15
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v11, v12
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v11, a0
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vmv1r.v v8, v21
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vmv1r.v v9, v14
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vmv1r.v v12, v13
-; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT: vlseg5e8.v v8, (a0)
-; CHECK-NEXT: vlseg5e8.v v14, (a1)
-; CHECK-NEXT: vmv2r.v v20, v8
-; CHECK-NEXT: vmv2r.v v22, v10
-; CHECK-NEXT: vmv1r.v v21, v14
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v20, 0
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmsne.vi v8, v14, 0
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vmsne.vi v9, v22, 0
-; CHECK-NEXT: vmv1r.v v16, v11
-; CHECK-NEXT: vmsne.vi v10, v16, 0
-; CHECK-NEXT: vmv1r.v v13, v18
-; CHECK-NEXT: vmsne.vi v11, v12, 0
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg5e16.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave5.nxv80i1(<vscale x 80 x i1> %vec)
- ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+ %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave5.nxv10f16(<vscale x 10 x half> %arg)
+ ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
+}
+
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv20f16(<vscale x 20 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv20f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg5e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave5.nxv20f16(<vscale x 20 x half> %arg)
+ ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
}
-
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv80i8(<vscale x 80 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv80i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv40f16(<vscale x 40 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv40f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
; CHECK-NEXT: addi a0, sp, 16
@@ -902,8 +1671,8 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vmv1r.v v28, v17
; CHECK-NEXT: vs8r.v v24, (a1)
-; CHECK-NEXT: vlseg5e8.v v12, (a0)
-; CHECK-NEXT: vlseg5e8.v v18, (a1)
+; CHECK-NEXT: vlseg5e16.v v12, (a0)
+; CHECK-NEXT: vlseg5e16.v v18, (a1)
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: vmv1r.v v9, v18
; CHECK-NEXT: vmv1r.v v18, v13
@@ -916,20 +1685,78 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave5.nxv80i8(<vscale x 80 x i8> %vec)
- ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+ %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave5.nxv40f16(<vscale x 40 x half> %arg)
+ ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
}
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv10bf16(<vscale x 10 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv10bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v11, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg5e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave5.nxv10bf16(<vscale x 10 x bfloat> %arg)
+ ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
+}
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv40i16(<vscale x 40 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv40i16:
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv20bf16(<vscale x 20 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv20bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg5e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave5.nxv20bf16(<vscale x 20 x bfloat> %arg)
+ ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
+
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv40bf16(<vscale x 40 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv40bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -957,20 +1784,78 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave5.nxv40i16(<vscale x 40 x i16> %vec)
- ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+ %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave5.nxv40bf16(<vscale x 40 x bfloat> %arg)
+ ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv5f32(<vscale x 5 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv5f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 3
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v11, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v11, a0
+; CHECK-NEXT: vslidedown.vx v11, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v11, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vlseg5e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave5.nxv5f32(<vscale x 5 x float> %arg)
+ ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv20i32(<vscale x 20 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv20i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv10f32(<vscale x 10 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv10f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vlseg5e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave5.nxv10f32(<vscale x 10 x float> %arg)
+ ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv20f32(<vscale x 20 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv20f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -998,20 +1883,47 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave5.nxv20i32(<vscale x 20 x i32> %vec)
- ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+ %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave5.nxv20f32(<vscale x 20 x float> %arg)
+ ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv5f64(<vscale x 5 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv5f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT: vlseg5e64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave5.nxv5f64(<vscale x 5 x double> %arg)
+ ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv10i64(<vscale x 10 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv10i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv10f64(<vscale x 10 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv10f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -1039,104 +1951,81 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave5.nxv10i64(<vscale x 10 x i64> %vec)
- ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+ %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave5.nxv10f64(<vscale x 10 x double> %arg)
+ ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
}
-define {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv112i1(<vscale x 112 x i1> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv112i1:
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv14f16(<vscale x 14 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv14f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT: srli a1, a0, 2
-; CHECK-NEXT: srli a2, a0, 1
-; CHECK-NEXT: srli a3, a0, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a3, a3, 1
-; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v9, a2
-; CHECK-NEXT: sub a0, a0, a3
-; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v20, v12, 1, v0
-; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v9, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v22, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v8, a1
-; CHECK-NEXT: vmv1r.v v10, v15
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v24, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v11, v24
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v8, a2
-; CHECK-NEXT: vmv1r.v v8, v23
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vmv1r.v v9, v14
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v12, v25
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vmv1r.v v13, v14
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vmv1r.v v14, v15
-; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT: vlseg7e8.v v8, (a0)
-; CHECK-NEXT: vlseg7e8.v v16, (a1)
-; CHECK-NEXT: vmv2r.v v24, v8
-; CHECK-NEXT: vmv2r.v v26, v10
-; CHECK-NEXT: vmv2r.v v28, v12
-; CHECK-NEXT: vmv1r.v v25, v16
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v24, 0
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vmsne.vi v8, v16, 0
-; CHECK-NEXT: vmv1r.v v27, v18
-; CHECK-NEXT: vmsne.vi v9, v26, 0
-; CHECK-NEXT: vmv1r.v v18, v11
-; CHECK-NEXT: vmsne.vi v10, v18, 0
-; CHECK-NEXT: vmv1r.v v29, v20
-; CHECK-NEXT: vmsne.vi v11, v28, 0
-; CHECK-NEXT: vmv1r.v v20, v13
-; CHECK-NEXT: vmsne.vi v12, v20, 0
-; CHECK-NEXT: vmv1r.v v15, v22
-; CHECK-NEXT: vmsne.vi v13, v14, 0
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg7e16.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.vector.deinterleave7.nxv112i1(<vscale x 112 x i1> %vec)
- ret {<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>} %retval
+ %res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave7.nxv14f16(<vscale x 14 x half> %arg)
+ ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
}
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv28f16(<vscale x 28 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv28f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg7e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave7.nxv28f16(<vscale x 28 x half> %arg)
+ ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
+}
-define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv112i8(<vscale x 112 x i8> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv112i8:
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv56f16(<vscale x 56 x half> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv56f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
; CHECK-NEXT: vmv1r.v v29, v20
@@ -1151,8 +2040,8 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vmv1r.v v25, v16
; CHECK-NEXT: vs8r.v v24, (a1)
-; CHECK-NEXT: vlseg7e8.v v14, (a0)
-; CHECK-NEXT: vlseg7e8.v v22, (a1)
+; CHECK-NEXT: vlseg7e16.v v14, (a0)
+; CHECK-NEXT: vlseg7e16.v v22, (a1)
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v9, v22
; CHECK-NEXT: vmv1r.v v22, v15
@@ -1169,20 +2058,80 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave7.nxv112i8(<vscale x 112 x i8> %vec)
- ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
+ %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave7.nxv56f16(<vscale x 56 x half> %arg)
+ ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
+}
+
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv14bf16(<vscale x 14 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv14bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vslideup.vx v10, v12, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vlseg7e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave7.nxv14bf16(<vscale x 14 x bfloat> %arg)
+ ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
}
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv28bf16(<vscale x 28 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv28bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlseg7e16.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave7.nxv28bf16(<vscale x 28 x bfloat> %arg)
+ ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
+}
-define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv56i16(<vscale x 56 x i16> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv56i16:
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv56bf16(<vscale x 56 x bfloat> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv56bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -1216,20 +2165,80 @@ define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave7.nxv56i16(<vscale x 56 x i16> %vec)
- ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+ %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave7.nxv56bf16(<vscale x 56 x bfloat> %arg)
+ ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv7f32(<vscale x 7 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv7f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 3
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: vslideup.vx v9, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v8, a0
+; CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: vslidedown.vx v12, v10, a0
+; CHECK-NEXT: vslideup.vx v10, v12, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vlseg7e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave7.nxv7f32(<vscale x 7 x float> %arg)
+ ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
+}
-define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv28i32(<vscale x 28 x i32> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv28i32:
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv14f32(<vscale x 14 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv14f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vlseg7e32.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave7.nxv14f32(<vscale x 14 x float> %arg)
+ ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
+}
+
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv28f32(<vscale x 28 x float> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv28f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -1263,20 +2272,47 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave7.nxv28i32(<vscale x 28 x i32> %vec)
- ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
+ %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave7.nxv28f32(<vscale x 28 x float> %arg)
+ ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv7f64(<vscale x 7 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT: vlseg7e64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+ %res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave7.nxv7f64(<vscale x 7 x double> %arg)
+ ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
+}
-define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv14i64(<vscale x 14 x i64> %vec) nounwind {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv14i64:
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv14f64(<vscale x 14 x double> %arg) {
+; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv14f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -1310,8 +2346,10 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
- %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave7.nxv14i64(<vscale x 14 x i64> %vec)
- ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
+ %res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave7.nxv14f64(<vscale x 14 x double> %arg)
+ ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 469263a3247ce..5f73855c71453 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -446,587 +446,312 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
ret <vscale x 16 x i64> %res
}
-
-; Floats
-
-define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; V-NEXT: vwaddu.vv v10, v8, v9
-; V-NEXT: li a0, -1
-; V-NEXT: csrr a1, vlenb
-; V-NEXT: vwmaccu.vx v10, a0, v9
-; V-NEXT: srli a1, a1, 2
-; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT: vslidedown.vx v8, v10, a1
-; V-NEXT: add a0, a1, a1
-; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; V-NEXT: vslideup.vx v10, v8, a1
-; V-NEXT: vmv.v.v v8, v10
-; V-NEXT: ret
+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; ZVBB-NEXT: vwsll.vi v10, v9, 16
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: vwaddu.wv v10, v10, v8
-; ZVBB-NEXT: srli a0, a0, 2
-; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; ZVBB-NEXT: vslidedown.vx v8, v10, a0
-; ZVBB-NEXT: add a1, a0, a0
-; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; ZVBB-NEXT: vslideup.vx v10, v8, a0
-; ZVBB-NEXT: vmv.v.v v8, v10
+; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZVBB-NEXT: vmv2r.v v12, v8
+; ZVBB-NEXT: vzext.vf2 v8, v12
; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
-; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
-; ZIP-NEXT: csrr a0, vlenb
-; ZIP-NEXT: srli a0, a0, 2
-; ZIP-NEXT: add a1, a0, a0
-; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; ZIP-NEXT: vslideup.vx v10, v11, a0
-; ZIP-NEXT: vmv.v.v v8, v10
-; ZIP-NEXT: ret
- %res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
- ret <vscale x 4 x bfloat> %res
+ %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
+ ret <vscale x 8 x i32> %res
}
-define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT: vmv1r.v v10, v9
-; V-NEXT: vmv1r.v v11, v8
-; V-NEXT: vwaddu.vv v8, v11, v10
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v10
-; V-NEXT: ret
+define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsll.vx v8, v12, a0
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; ZVBB-NEXT: vmv1r.v v10, v9
-; ZVBB-NEXT: vmv1r.v v11, v8
-; ZVBB-NEXT: vwsll.vi v8, v10, 16
-; ZVBB-NEXT: vwaddu.wv v8, v8, v11
+; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVBB-NEXT: vmv2r.v v12, v8
+; ZVBB-NEXT: li a0, 32
+; ZVBB-NEXT: vwsll.vx v8, v12, a0
; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; ZIP-NEXT: vmv1r.v v10, v9
-; ZIP-NEXT: vmv1r.v v11, v8
-; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT: ret
- %res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
- ret <vscale x 8 x bfloat> %res
+ %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
+ ret <vscale x 8 x i32> %res
}
-define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
-; V-LABEL: vector_interleave_nxv4f16_nxv2f16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; V-NEXT: vwaddu.vv v10, v8, v9
-; V-NEXT: li a0, -1
-; V-NEXT: csrr a1, vlenb
-; V-NEXT: vwmaccu.vx v10, a0, v9
-; V-NEXT: srli a1, a1, 2
-; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT: vslidedown.vx v8, v10, a1
-; V-NEXT: add a0, a1, a1
-; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; V-NEXT: vslideup.vx v10, v8, a1
-; V-NEXT: vmv.v.v v8, v10
-; V-NEXT: ret
+define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT: slli a2, a1, 1
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vsseg3e8.v v14, (a0)
+; CHECK-NEXT: vl2r.v v8, (a2)
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: srli a1, a1, 1
+; CHECK-NEXT: vl2r.v v10, (a3)
+; CHECK-NEXT: vl2r.v v12, (a0)
+; CHECK-NEXT: add a0, a2, a2
+; CHECK-NEXT: vmsne.vi v14, v8, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v8, a2
+; CHECK-NEXT: add a0, a1, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v14, a1
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; ZVBB-NEXT: vwsll.vi v10, v9, 16
+; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: vwaddu.wv v10, v10, v8
-; ZVBB-NEXT: srli a0, a0, 2
-; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; ZVBB-NEXT: vslidedown.vx v8, v10, a0
-; ZVBB-NEXT: add a1, a0, a0
-; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; ZVBB-NEXT: vslideup.vx v10, v8, a0
-; ZVBB-NEXT: vmv.v.v v8, v10
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmv1r.v v10, v0
+; ZVBB-NEXT: vmv1r.v v0, v8
+; ZVBB-NEXT: vmv.v.i v12, 0
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0
+; ZVBB-NEXT: slli a2, a1, 1
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: vmv1r.v v0, v9
+; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0
+; ZVBB-NEXT: add a2, a3, a2
+; ZVBB-NEXT: vsseg3e8.v v14, (a0)
+; ZVBB-NEXT: vl2r.v v8, (a2)
+; ZVBB-NEXT: srli a2, a1, 2
+; ZVBB-NEXT: srli a1, a1, 1
+; ZVBB-NEXT: vl2r.v v10, (a3)
+; ZVBB-NEXT: vl2r.v v12, (a0)
+; ZVBB-NEXT: add a0, a2, a2
+; ZVBB-NEXT: vmsne.vi v14, v8, 0
+; ZVBB-NEXT: vmsne.vi v8, v10, 0
+; ZVBB-NEXT: vmsne.vi v0, v12, 0
+; ZVBB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v8, a2
+; ZVBB-NEXT: add a0, a1, a1
+; ZVBB-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v14, a1
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
-; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
-; ZIP-NEXT: csrr a0, vlenb
-; ZIP-NEXT: srli a0, a0, 2
-; ZIP-NEXT: add a1, a0, a0
-; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; ZIP-NEXT: vslideup.vx v10, v11, a0
-; ZIP-NEXT: vmv.v.v v8, v10
-; ZIP-NEXT: ret
- %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
- ret <vscale x 4 x half> %res
+ %res = call <vscale x 48 x i1> @llvm.vector.interleave3.nxv48i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c)
+ ret <vscale x 48 x i1> %res
}
-define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
-; V-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; V-NEXT: vmv1r.v v10, v9
-; V-NEXT: vmv1r.v v11, v8
-; V-NEXT: vwaddu.vv v8, v11, v10
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v10
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; ZVBB-NEXT: vmv1r.v v10, v9
-; ZVBB-NEXT: vmv1r.v v11, v8
-; ZVBB-NEXT: vwsll.vi v8, v10, 16
-; ZVBB-NEXT: vwaddu.wv v8, v8, v11
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; ZIP-NEXT: vmv1r.v v10, v9
-; ZIP-NEXT: vmv1r.v v11, v8
-; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT: ret
- %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
- ret <vscale x 8 x half> %res
-}
-define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
-; V-LABEL: vector_interleave_nxv4f32_nxv2f32:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; V-NEXT: vmv1r.v v10, v9
-; V-NEXT: vmv1r.v v11, v8
-; V-NEXT: vwaddu.vv v8, v11, v10
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v10
-; V-NEXT: ret
+define <vscale x 48 x i8> @vector_interleave_nxv48i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; CHECK-NEXT: vsseg3e8.v v8, (a0)
+; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2r.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2r.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; ZVBB-NEXT: vmv1r.v v10, v9
-; ZVBB-NEXT: vmv1r.v v11, v8
-; ZVBB-NEXT: li a0, 32
-; ZVBB-NEXT: vwsll.vx v8, v10, a0
-; ZVBB-NEXT: vwaddu.wv v8, v8, v11
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vsseg3e8.v v8, (a0)
+; ZVBB-NEXT: vl2r.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2r.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2r.v v12, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; ZIP-NEXT: vmv1r.v v10, v9
-; ZIP-NEXT: vmv1r.v v11, v8
-; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
-; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
-; ZIP-NEXT: ret
- %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
- ret <vscale x 4 x float> %res
+ %res = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
+ ret <vscale x 48 x i8> %res
}
-define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; V-NEXT: vmv2r.v v12, v10
-; V-NEXT: vmv2r.v v14, v8
-; V-NEXT: vwaddu.vv v8, v14, v12
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v12
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; ZVBB-NEXT: vmv2r.v v12, v10
-; ZVBB-NEXT: vmv2r.v v14, v8
-; ZVBB-NEXT: vwsll.vi v8, v12, 16
-; ZVBB-NEXT: vwaddu.wv v8, v8, v14
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; ZIP-NEXT: vmv2r.v v12, v10
-; ZIP-NEXT: vmv2r.v v14, v8
-; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT: ret
- %res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
- ret <vscale x 16 x bfloat> %res
-}
-define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; V-LABEL: vector_interleave_nxv16f16_nxv8f16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; V-NEXT: vmv2r.v v12, v10
-; V-NEXT: vmv2r.v v14, v8
-; V-NEXT: vwaddu.vv v8, v14, v12
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v12
-; V-NEXT: ret
+define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: vl2re16.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; ZVBB-NEXT: vmv2r.v v12, v10
-; ZVBB-NEXT: vmv2r.v v14, v8
-; ZVBB-NEXT: vwsll.vi v8, v12, 16
-; ZVBB-NEXT: vwaddu.wv v8, v8, v14
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: vl2re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v12, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; ZIP-NEXT: vmv2r.v v12, v10
-; ZIP-NEXT: vmv2r.v v14, v8
-; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT: ret
- %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
- ret <vscale x 16 x half> %res
+ %res = call <vscale x 24 x i16> @llvm.vector.interleave3.nxv24i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
+ ret <vscale x 24 x i16> %res
}
-define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; V-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; V-NEXT: vmv2r.v v12, v10
-; V-NEXT: vmv2r.v v14, v8
-; V-NEXT: vwaddu.vv v8, v14, v12
-; V-NEXT: li a0, -1
-; V-NEXT: vwmaccu.vx v8, a0, v12
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; ZVBB-NEXT: vmv2r.v v12, v10
-; ZVBB-NEXT: vmv2r.v v14, v8
-; ZVBB-NEXT: li a0, 32
-; ZVBB-NEXT: vwsll.vx v8, v12, a0
-; ZVBB-NEXT: vwaddu.wv v8, v8, v14
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; ZIP-NEXT: vmv2r.v v12, v10
-; ZIP-NEXT: vmv2r.v v14, v8
-; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT: ret
- %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
- ret <vscale x 8 x float> %res
-}
-define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; V-LABEL: vector_interleave_nxv4f64_nxv2f64:
-; V: # %bb.0:
-; V-NEXT: csrr a0, vlenb
-; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; V-NEXT: vid.v v12
-; V-NEXT: srli a0, a0, 2
-; V-NEXT: vand.vi v13, v12, 1
-; V-NEXT: vmsne.vi v0, v13, 0
-; V-NEXT: vsrl.vi v16, v12, 1
-; V-NEXT: vadd.vx v16, v16, a0, v0.t
-; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; V-NEXT: vrgatherei16.vv v12, v8, v16
-; V-NEXT: vmv.v.v v8, v12
-; V-NEXT: ret
+define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsseg3e32.v v8, (a0)
+; CHECK-NEXT: vl2re32.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re32.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re32.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; ZVBB-NEXT: vid.v v12
-; ZVBB-NEXT: srli a0, a0, 2
-; ZVBB-NEXT: vand.vi v13, v12, 1
-; ZVBB-NEXT: vmsne.vi v0, v13, 0
-; ZVBB-NEXT: vsrl.vi v16, v12, 1
-; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
-; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
-; ZVBB-NEXT: vmv.v.v v8, v12
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; ZIP-NEXT: vmv2r.v v12, v10
-; ZIP-NEXT: vmv2r.v v14, v8
-; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
-; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
-; ZIP-NEXT: ret
- %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
- ret <vscale x 4 x double> %res
-}
-
-
-
-define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
-; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; V-NEXT: vmv8r.v v24, v8
-; V-NEXT: vwaddu.vv v8, v24, v16
-; V-NEXT: li a0, -1
-; V-NEXT: vwaddu.vv v0, v28, v20
-; V-NEXT: vwmaccu.vx v8, a0, v16
-; V-NEXT: vwmaccu.vx v0, a0, v20
-; V-NEXT: vmv8r.v v16, v0
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVBB-NEXT: vwsll.vi v24, v16, 16
-; ZVBB-NEXT: vwsll.vi v0, v20, 16
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
-; ZVBB-NEXT: vmv8r.v v16, v0
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT: vmv8r.v v8, v24
-; ZIP-NEXT: vmv8r.v v16, v0
-; ZIP-NEXT: ret
- %res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
- ret <vscale x 64 x bfloat> %res
-}
-
-define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
-; V-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; V-NEXT: vmv8r.v v24, v8
-; V-NEXT: vwaddu.vv v8, v24, v16
-; V-NEXT: li a0, -1
-; V-NEXT: vwaddu.vv v0, v28, v20
-; V-NEXT: vwmaccu.vx v8, a0, v16
-; V-NEXT: vwmaccu.vx v0, a0, v20
-; V-NEXT: vmv8r.v v16, v0
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVBB-NEXT: vwsll.vi v24, v16, 16
-; ZVBB-NEXT: vwsll.vi v0, v20, 16
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
-; ZVBB-NEXT: vmv8r.v v16, v0
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT: vmv8r.v v8, v24
-; ZIP-NEXT: vmv8r.v v16, v0
-; ZIP-NEXT: ret
- %res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
- ret <vscale x 64 x half> %res
-}
-
-define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
-; V-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; V: # %bb.0:
-; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; V-NEXT: vmv8r.v v24, v8
-; V-NEXT: vwaddu.vv v8, v24, v16
-; V-NEXT: li a0, -1
-; V-NEXT: vwaddu.vv v0, v28, v20
-; V-NEXT: vwmaccu.vx v8, a0, v16
-; V-NEXT: vwmaccu.vx v0, a0, v20
-; V-NEXT: vmv8r.v v16, v0
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: li a0, 32
-; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT: vwsll.vx v24, v16, a0
-; ZVBB-NEXT: vwsll.vx v0, v20, a0
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
-; ZVBB-NEXT: vmv8r.v v16, v0
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT: vmv8r.v v8, v24
-; ZIP-NEXT: vmv8r.v v16, v0
-; ZIP-NEXT: ret
- %res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
- ret <vscale x 32 x float> %res
-}
-
-define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
-; V-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; V: # %bb.0:
-; V-NEXT: csrr a0, vlenb
-; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; V-NEXT: vid.v v6
-; V-NEXT: vmv8r.v v24, v8
-; V-NEXT: srli a0, a0, 1
-; V-NEXT: vmv4r.v v28, v16
-; V-NEXT: vmv4r.v v16, v12
-; V-NEXT: vand.vi v8, v6, 1
-; V-NEXT: vmsne.vi v0, v8, 0
-; V-NEXT: vsrl.vi v6, v6, 1
-; V-NEXT: vadd.vx v6, v6, a0, v0.t
-; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; V-NEXT: vrgatherei16.vv v8, v24, v6
-; V-NEXT: vrgatherei16.vv v24, v16, v6
-; V-NEXT: vmv.v.v v16, v24
-; V-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; ZVBB: # %bb.0:
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; ZVBB-NEXT: vsseg3e32.v v8, (a0)
+; ZVBB-NEXT: vl2re32.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re32.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re32.v v12, (a0)
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; ZVBB-NEXT: vid.v v6
-; ZVBB-NEXT: vmv8r.v v24, v8
-; ZVBB-NEXT: srli a0, a0, 1
-; ZVBB-NEXT: vmv4r.v v28, v16
-; ZVBB-NEXT: vmv4r.v v16, v12
-; ZVBB-NEXT: vand.vi v8, v6, 1
-; ZVBB-NEXT: vmsne.vi v0, v8, 0
-; ZVBB-NEXT: vsrl.vi v6, v6, 1
-; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
-; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
-; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT: vmv.v.v v16, v24
-; ZVBB-NEXT: ret
-;
-; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; ZIP: # %bb.0:
-; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
-; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
-; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
-; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
-; ZIP-NEXT: vmv8r.v v8, v24
-; ZIP-NEXT: vmv8r.v v16, v0
-; ZIP-NEXT: ret
- %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
- ret <vscale x 16 x double> %res
-}
-
-define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vzext.vf2 v8, v12
-; CHECK-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; ZVBB-NEXT: vmv2r.v v12, v8
-; ZVBB-NEXT: vzext.vf2 v8, v12
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
- ret <vscale x 8 x i32> %res
+ %res = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
+ ret <vscale x 12 x i32> %res
}
-define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsll.vx v8, v12, a0
-; CHECK-NEXT: ret
-;
-; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; ZVBB-NEXT: vmv2r.v v12, v8
-; ZVBB-NEXT: li a0, 32
-; ZVBB-NEXT: vwsll.vx v8, v12, a0
-; ZVBB-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
- ret <vscale x 8 x i32> %res
-}
-define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1:
+define <vscale x 6 x i64> @vector_interleave_nxv6i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) nounwind {
+; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT: add a2, a3, a2
-; CHECK-NEXT: vsseg3e8.v v14, (a0)
-; CHECK-NEXT: vl2r.v v8, (a2)
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: vl2r.v v10, (a3)
-; CHECK-NEXT: vl2r.v v12, (a0)
-; CHECK-NEXT: add a0, a2, a2
-; CHECK-NEXT: vmsne.vi v14, v8, 0
-; CHECK-NEXT: vmsne.vi v8, v10, 0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v8, a2
-; CHECK-NEXT: add a0, a1, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v14, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsseg3e64.v v8, (a0)
+; CHECK-NEXT: vl2re64.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re64.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re64.v v12, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
@@ -1034,435 +759,4977 @@ define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1>
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmv1r.v v10, v0
-; ZVBB-NEXT: vmv1r.v v0, v8
-; ZVBB-NEXT: vmv.v.i v12, 0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0
-; ZVBB-NEXT: slli a2, a1, 1
-; ZVBB-NEXT: vmv1r.v v0, v10
-; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0
-; ZVBB-NEXT: add a3, a0, a2
-; ZVBB-NEXT: vmv1r.v v0, v9
-; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0
-; ZVBB-NEXT: add a2, a3, a2
-; ZVBB-NEXT: vsseg3e8.v v14, (a0)
-; ZVBB-NEXT: vl2r.v v8, (a2)
-; ZVBB-NEXT: srli a2, a1, 2
-; ZVBB-NEXT: srli a1, a1, 1
-; ZVBB-NEXT: vl2r.v v10, (a3)
-; ZVBB-NEXT: vl2r.v v12, (a0)
-; ZVBB-NEXT: add a0, a2, a2
-; ZVBB-NEXT: vmsne.vi v14, v8, 0
-; ZVBB-NEXT: vmsne.vi v8, v10, 0
-; ZVBB-NEXT: vmsne.vi v0, v12, 0
-; ZVBB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v8, a2
-; ZVBB-NEXT: add a0, a1, a1
-; ZVBB-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v14, a1
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; ZVBB-NEXT: vsseg3e64.v v8, (a0)
+; ZVBB-NEXT: vl2re64.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re64.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re64.v v12, (a0)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
- %res = call <vscale x 48 x i1> @llvm.vector.interleave3.nxv48i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c)
- ret <vscale x 48 x i1> %res
+ %res = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
+ ret <vscale x 6 x i64> %res
}
-
-define <vscale x 48 x i8> @vector_interleave_nxv48i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8:
+define <vscale x 80 x i1> @vector_interleave_nxv80i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e) nounwind {
+; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: li a1, 10
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: addi a4, sp, 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT: vsseg3e8.v v8, (a0)
-; CHECK-NEXT: vl2r.v v8, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v10, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2r.v v12, (a0)
+; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
+; CHECK-NEXT: add a2, a4, a1
+; CHECK-NEXT: srli a3, a1, 2
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v21, v18
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v16, v19
+; CHECK-NEXT: add a5, a2, a1
+; CHECK-NEXT: vmv1r.v v23, v8
+; CHECK-NEXT: vmv1r.v v18, v9
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: vmerge.vim v24, v12, 1, v0
+; CHECK-NEXT: vsetvli a6, zero, e8, m1, ta, ma
+; CHECK-NEXT: vsseg5e8.v v20, (a4)
+; CHECK-NEXT: vmv1r.v v19, v25
+; CHECK-NEXT: vsseg5e8.v v15, (a0)
+; CHECK-NEXT: vl1r.v v8, (a5)
+; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: vl1r.v v10, (a4)
+; CHECK-NEXT: add a4, a5, a1
+; CHECK-NEXT: vl1r.v v12, (a4)
+; CHECK-NEXT: add a4, a0, a1
+; CHECK-NEXT: vl1r.v v14, (a4)
+; CHECK-NEXT: add a4, a4, a1
+; CHECK-NEXT: vl1r.v v9, (a5)
+; CHECK-NEXT: add a5, a4, a1
+; CHECK-NEXT: vl1r.v v16, (a5)
+; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: srli a1, a1, 1
+; CHECK-NEXT: vl1r.v v11, (a2)
+; CHECK-NEXT: add a2, a3, a3
+; CHECK-NEXT: vl1r.v v15, (a4)
+; CHECK-NEXT: add a4, a1, a1
+; CHECK-NEXT: vl1r.v v13, (a0)
+; CHECK-NEXT: vl1r.v v17, (a5)
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v18, v8, 0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmsne.vi v8, v14, 0
+; CHECK-NEXT: vmsne.vi v9, v12, 0
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v18, a3
+; CHECK-NEXT: vslideup.vx v9, v8, a3
+; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v9, a1
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v8, v16, 0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: li a1, 10
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8:
+; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: li a1, 10
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmv.v.i v12, 0
+; ZVBB-NEXT: addi a4, sp, 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add a0, sp, a0
+; ZVBB-NEXT: addi a0, a0, 16
; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 1
-; ZVBB-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vsseg3e8.v v8, (a0)
-; ZVBB-NEXT: vl2r.v v8, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2r.v v10, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2r.v v12, (a0)
+; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0
+; ZVBB-NEXT: vmv1r.v v0, v8
+; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0
+; ZVBB-NEXT: add a2, a4, a1
+; ZVBB-NEXT: srli a3, a1, 2
+; ZVBB-NEXT: vmv2r.v v20, v14
+; ZVBB-NEXT: vmv1r.v v0, v9
+; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0
+; ZVBB-NEXT: vmv1r.v v21, v18
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vmerge.vim v8, v12, 1, v0
+; ZVBB-NEXT: vmv1r.v v22, v16
+; ZVBB-NEXT: vmv1r.v v16, v19
+; ZVBB-NEXT: add a5, a2, a1
+; ZVBB-NEXT: vmv1r.v v23, v8
+; ZVBB-NEXT: vmv1r.v v18, v9
+; ZVBB-NEXT: vmv1r.v v0, v11
+; ZVBB-NEXT: vmerge.vim v24, v12, 1, v0
+; ZVBB-NEXT: vsetvli a6, zero, e8, m1, ta, ma
+; ZVBB-NEXT: vsseg5e8.v v20, (a4)
+; ZVBB-NEXT: vmv1r.v v19, v25
+; ZVBB-NEXT: vsseg5e8.v v15, (a0)
+; ZVBB-NEXT: vl1r.v v8, (a5)
+; ZVBB-NEXT: add a5, a5, a1
+; ZVBB-NEXT: vl1r.v v10, (a4)
+; ZVBB-NEXT: add a4, a5, a1
+; ZVBB-NEXT: vl1r.v v12, (a4)
+; ZVBB-NEXT: add a4, a0, a1
+; ZVBB-NEXT: vl1r.v v14, (a4)
+; ZVBB-NEXT: add a4, a4, a1
+; ZVBB-NEXT: vl1r.v v9, (a5)
+; ZVBB-NEXT: add a5, a4, a1
+; ZVBB-NEXT: vl1r.v v16, (a5)
+; ZVBB-NEXT: add a5, a5, a1
+; ZVBB-NEXT: srli a1, a1, 1
+; ZVBB-NEXT: vl1r.v v11, (a2)
+; ZVBB-NEXT: add a2, a3, a3
+; ZVBB-NEXT: vl1r.v v15, (a4)
+; ZVBB-NEXT: add a4, a1, a1
+; ZVBB-NEXT: vl1r.v v13, (a0)
+; ZVBB-NEXT: vl1r.v v17, (a5)
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmsne.vi v18, v8, 0
+; ZVBB-NEXT: vmsne.vi v0, v10, 0
+; ZVBB-NEXT: vmsne.vi v8, v14, 0
+; ZVBB-NEXT: vmsne.vi v9, v12, 0
+; ZVBB-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v18, a3
+; ZVBB-NEXT: vslideup.vx v9, v8, a3
+; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v9, a1
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmsne.vi v8, v16, 0
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: li a1, 10
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
- %res = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
- ret <vscale x 48 x i8> %res
+ %res = call <vscale x 80 x i1> @llvm.vector.interleave5.nxv80i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e)
+ ret <vscale x 80 x i1> %res
}
-define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
-; CHECK-NEXT: vsseg3e16.v v8, (a0)
-; CHECK-NEXT: vl2re16.v v8, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re16.v v10, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re16.v v12, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e) nounwind {
;
-; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16:
-; ZVBB: # %bb.0:
+; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 28
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vmv2r.v v20, v16
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v18, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v16, v8
+; RV32-NEXT: vmv2r.v v22, v16
+; RV32-NEXT: vmv2r.v v24, v18
+; RV32-NEXT: vmv1r.v v26, v20
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v23, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: add a5, a4, a2
+; RV32-NEXT: vmv1r.v v25, v14
+; RV32-NEXT: add a6, a5, a2
+; RV32-NEXT: vmv1r.v v18, v11
+; RV32-NEXT: vsseg5e8.v v22, (a0)
+; RV32-NEXT: vmv1r.v v20, v15
+; RV32-NEXT: vsseg5e8.v v17, (a1)
+; RV32-NEXT: vl1r.v v16, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v17, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1r.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v11, (a6)
+; RV32-NEXT: vl1r.v v8, (a0)
+; RV32-NEXT: vl1r.v v9, (a3)
+; RV32-NEXT: vl1r.v v14, (a4)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 10
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v15, (a5)
+; RV32-NEXT: vl1r.v v12, (a6)
+; RV32-NEXT: vl1r.v v13, (a1)
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs2r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8r.v v16, (a2)
+; RV32-NEXT: vl8r.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 28
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vmv2r.v v20, v16
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v18, v12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 2
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v16, v8
+; RV64-NEXT: vmv2r.v v22, v16
+; RV64-NEXT: vmv2r.v v24, v18
+; RV64-NEXT: vmv1r.v v26, v20
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v23, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: add a5, a4, a2
+; RV64-NEXT: vmv1r.v v25, v14
+; RV64-NEXT: add a6, a5, a2
+; RV64-NEXT: vmv1r.v v18, v11
+; RV64-NEXT: vsseg5e8.v v22, (a0)
+; RV64-NEXT: vmv1r.v v20, v15
+; RV64-NEXT: vsseg5e8.v v17, (a1)
+; RV64-NEXT: vl1r.v v16, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v17, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1r.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v11, (a6)
+; RV64-NEXT: vl1r.v v8, (a0)
+; RV64-NEXT: vl1r.v v9, (a3)
+; RV64-NEXT: vl1r.v v14, (a4)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 10
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v15, (a5)
+; RV64-NEXT: vl1r.v v12, (a6)
+; RV64-NEXT: vl1r.v v13, (a1)
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vs2r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8r.v v16, (a2)
+; RV64-NEXT: vl8r.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a1, 28
+; ZVBB-RV32-NEXT: mul a0, a0, a1
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v20, v16
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v18, v12
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 2
+; ZVBB-RV32-NEXT: add a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v16, v8
+; ZVBB-RV32-NEXT: vmv2r.v v22, v16
+; ZVBB-RV32-NEXT: vmv2r.v v24, v18
+; ZVBB-RV32-NEXT: vmv1r.v v26, v20
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v23, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: add a5, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v25, v14
+; ZVBB-RV32-NEXT: add a6, a5, a2
+; ZVBB-RV32-NEXT: vmv1r.v v18, v11
+; ZVBB-RV32-NEXT: vsseg5e8.v v22, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v20, v15
+; ZVBB-RV32-NEXT: vsseg5e8.v v17, (a1)
+; ZVBB-RV32-NEXT: vl1r.v v16, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v17, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1r.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1r.v v14, (a4)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 10
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v15, (a5)
+; ZVBB-RV32-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1r.v v13, (a1)
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8r.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a1, 28
+; ZVBB-RV64-NEXT: mul a0, a0, a1
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v20, v16
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v18, v12
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 2
+; ZVBB-RV64-NEXT: add a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v16, v8
+; ZVBB-RV64-NEXT: vmv2r.v v22, v16
+; ZVBB-RV64-NEXT: vmv2r.v v24, v18
+; ZVBB-RV64-NEXT: vmv1r.v v26, v20
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v23, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: add a5, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v25, v14
+; ZVBB-RV64-NEXT: add a6, a5, a2
+; ZVBB-RV64-NEXT: vmv1r.v v18, v11
+; ZVBB-RV64-NEXT: vsseg5e8.v v22, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v20, v15
+; ZVBB-RV64-NEXT: vsseg5e8.v v17, (a1)
+; ZVBB-RV64-NEXT: vl1r.v v16, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v17, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1r.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1r.v v14, (a4)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 10
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v15, (a5)
+; ZVBB-RV64-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1r.v v13, (a1)
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8r.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e8.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e8.v v17, (a1)
+; ZIP-NEXT: vl1r.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v11, (a6)
+; ZIP-NEXT: vl1r.v v8, (a0)
+; ZIP-NEXT: vl1r.v v9, (a3)
+; ZIP-NEXT: vl1r.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v15, (a5)
+; ZIP-NEXT: vl1r.v v12, (a6)
+; ZIP-NEXT: vl1r.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8r.v v16, (a2)
+; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
+ ret <vscale x 80 x i8> %res
+}
+
+
+define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) nounwind {
+; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
+; CHECK-NEXT: vsseg5e8.v v8, (a0)
+; CHECK-NEXT: vl1r.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1r.v v11, (a3)
+; CHECK-NEXT: vl1r.v v8, (a0)
+; CHECK-NEXT: vl1r.v v9, (a2)
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: vl1r.v v12, (a1)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e8, m1, ta, ma
+; ZVBB-NEXT: vsseg5e8.v v8, (a0)
+; ZVBB-NEXT: vl1r.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1r.v v11, (a3)
+; ZVBB-NEXT: vl1r.v v8, (a0)
+; ZVBB-NEXT: vl1r.v v9, (a2)
+; ZVBB-NEXT: add a1, a3, a1
+; ZVBB-NEXT: vl1r.v v12, (a1)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: ret
+ %res = call <vscale x 40 x i8> @llvm.vector.interleave5.nxv40i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e)
+ ret <vscale x 40 x i8> %res
+}
+
+
+define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 28
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv2r.v v20, v16
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v18, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v16, v8
+; RV32-NEXT: vmv2r.v v22, v16
+; RV32-NEXT: vmv2r.v v24, v18
+; RV32-NEXT: vmv1r.v v26, v20
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v23, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: add a5, a4, a2
+; RV32-NEXT: vmv1r.v v25, v14
+; RV32-NEXT: add a6, a5, a2
+; RV32-NEXT: vmv1r.v v18, v11
+; RV32-NEXT: vsseg5e32.v v22, (a0)
+; RV32-NEXT: vmv1r.v v20, v15
+; RV32-NEXT: vsseg5e32.v v17, (a1)
+; RV32-NEXT: vl1re32.v v16, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v17, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re32.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v11, (a6)
+; RV32-NEXT: vl1re32.v v8, (a0)
+; RV32-NEXT: vl1re32.v v9, (a3)
+; RV32-NEXT: vl1re32.v v14, (a4)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 10
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v15, (a5)
+; RV32-NEXT: vl1re32.v v12, (a6)
+; RV32-NEXT: vl1re32.v v13, (a1)
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs2r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re32.v v16, (a2)
+; RV32-NEXT: vl8re32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 28
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT: vmv2r.v v20, v16
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v18, v12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 2
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v16, v8
+; RV64-NEXT: vmv2r.v v22, v16
+; RV64-NEXT: vmv2r.v v24, v18
+; RV64-NEXT: vmv1r.v v26, v20
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v23, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: add a5, a4, a2
+; RV64-NEXT: vmv1r.v v25, v14
+; RV64-NEXT: add a6, a5, a2
+; RV64-NEXT: vmv1r.v v18, v11
+; RV64-NEXT: vsseg5e32.v v22, (a0)
+; RV64-NEXT: vmv1r.v v20, v15
+; RV64-NEXT: vsseg5e32.v v17, (a1)
+; RV64-NEXT: vl1re32.v v16, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v17, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re32.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v11, (a6)
+; RV64-NEXT: vl1re32.v v8, (a0)
+; RV64-NEXT: vl1re32.v v9, (a3)
+; RV64-NEXT: vl1re32.v v14, (a4)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 10
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v15, (a5)
+; RV64-NEXT: vl1re32.v v12, (a6)
+; RV64-NEXT: vl1re32.v v13, (a1)
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vs2r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re32.v v16, (a2)
+; RV64-NEXT: vl8re32.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a1, 28
+; ZVBB-RV32-NEXT: mul a0, a0, a1
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v20, v16
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v18, v12
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 2
+; ZVBB-RV32-NEXT: add a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v16, v8
+; ZVBB-RV32-NEXT: vmv2r.v v22, v16
+; ZVBB-RV32-NEXT: vmv2r.v v24, v18
+; ZVBB-RV32-NEXT: vmv1r.v v26, v20
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v23, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: add a5, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v25, v14
+; ZVBB-RV32-NEXT: add a6, a5, a2
+; ZVBB-RV32-NEXT: vmv1r.v v18, v11
+; ZVBB-RV32-NEXT: vsseg5e32.v v22, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v20, v15
+; ZVBB-RV32-NEXT: vsseg5e32.v v17, (a1)
+; ZVBB-RV32-NEXT: vl1re32.v v16, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v17, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re32.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re32.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re32.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re32.v v14, (a4)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 10
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v15, (a5)
+; ZVBB-RV32-NEXT: vl1re32.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1re32.v v13, (a1)
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a1, 28
+; ZVBB-RV64-NEXT: mul a0, a0, a1
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v20, v16
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v18, v12
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 2
+; ZVBB-RV64-NEXT: add a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v16, v8
+; ZVBB-RV64-NEXT: vmv2r.v v22, v16
+; ZVBB-RV64-NEXT: vmv2r.v v24, v18
+; ZVBB-RV64-NEXT: vmv1r.v v26, v20
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v23, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: add a5, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v25, v14
+; ZVBB-RV64-NEXT: add a6, a5, a2
+; ZVBB-RV64-NEXT: vmv1r.v v18, v11
+; ZVBB-RV64-NEXT: vsseg5e32.v v22, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v20, v15
+; ZVBB-RV64-NEXT: vsseg5e32.v v17, (a1)
+; ZVBB-RV64-NEXT: vl1re32.v v16, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v17, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re32.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re32.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re32.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re32.v v14, (a4)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 10
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v15, (a5)
+; ZVBB-RV64-NEXT: vl1re32.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1re32.v v13, (a1)
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e32.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e32.v v17, (a1)
+; ZIP-NEXT: vl1re32.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re32.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v11, (a6)
+; ZIP-NEXT: vl1re32.v v8, (a0)
+; ZIP-NEXT: vl1re32.v v9, (a3)
+; ZIP-NEXT: vl1re32.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v15, (a5)
+; ZIP-NEXT: vl1re32.v v12, (a6)
+; ZIP-NEXT: vl1re32.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re32.v v16, (a2)
+; ZIP-NEXT: vl8re32.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e)
+ ret <vscale x 20 x i32> %res
+}
+
+
+define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 28
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vmv2r.v v20, v16
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v18, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v16, v8
+; RV32-NEXT: vmv2r.v v22, v16
+; RV32-NEXT: vmv2r.v v24, v18
+; RV32-NEXT: vmv1r.v v26, v20
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v23, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: add a5, a4, a2
+; RV32-NEXT: vmv1r.v v25, v14
+; RV32-NEXT: add a6, a5, a2
+; RV32-NEXT: vmv1r.v v18, v11
+; RV32-NEXT: vsseg5e64.v v22, (a0)
+; RV32-NEXT: vmv1r.v v20, v15
+; RV32-NEXT: vsseg5e64.v v17, (a1)
+; RV32-NEXT: vl1re64.v v16, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v17, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re64.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v11, (a6)
+; RV32-NEXT: vl1re64.v v8, (a0)
+; RV32-NEXT: vl1re64.v v9, (a3)
+; RV32-NEXT: vl1re64.v v14, (a4)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 10
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v15, (a5)
+; RV32-NEXT: vl1re64.v v12, (a6)
+; RV32-NEXT: vl1re64.v v13, (a1)
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs2r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re64.v v16, (a2)
+; RV32-NEXT: vl8re64.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 28
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vmv2r.v v20, v16
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v18, v12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 2
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v16, v8
+; RV64-NEXT: vmv2r.v v22, v16
+; RV64-NEXT: vmv2r.v v24, v18
+; RV64-NEXT: vmv1r.v v26, v20
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v23, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: add a5, a4, a2
+; RV64-NEXT: vmv1r.v v25, v14
+; RV64-NEXT: add a6, a5, a2
+; RV64-NEXT: vmv1r.v v18, v11
+; RV64-NEXT: vsseg5e64.v v22, (a0)
+; RV64-NEXT: vmv1r.v v20, v15
+; RV64-NEXT: vsseg5e64.v v17, (a1)
+; RV64-NEXT: vl1re64.v v16, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v17, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re64.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v11, (a6)
+; RV64-NEXT: vl1re64.v v8, (a0)
+; RV64-NEXT: vl1re64.v v9, (a3)
+; RV64-NEXT: vl1re64.v v14, (a4)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 10
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v15, (a5)
+; RV64-NEXT: vl1re64.v v12, (a6)
+; RV64-NEXT: vl1re64.v v13, (a1)
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vs2r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re64.v v16, (a2)
+; RV64-NEXT: vl8re64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a1, 28
+; ZVBB-RV32-NEXT: mul a0, a0, a1
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v20, v16
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v18, v12
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 2
+; ZVBB-RV32-NEXT: add a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v16, v8
+; ZVBB-RV32-NEXT: vmv2r.v v22, v16
+; ZVBB-RV32-NEXT: vmv2r.v v24, v18
+; ZVBB-RV32-NEXT: vmv1r.v v26, v20
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v23, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: add a5, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v25, v14
+; ZVBB-RV32-NEXT: add a6, a5, a2
+; ZVBB-RV32-NEXT: vmv1r.v v18, v11
+; ZVBB-RV32-NEXT: vsseg5e64.v v22, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v20, v15
+; ZVBB-RV32-NEXT: vsseg5e64.v v17, (a1)
+; ZVBB-RV32-NEXT: vl1re64.v v16, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v17, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re64.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re64.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re64.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re64.v v14, (a4)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 10
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v15, (a5)
+; ZVBB-RV32-NEXT: vl1re64.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1re64.v v13, (a1)
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a1, 28
+; ZVBB-RV64-NEXT: mul a0, a0, a1
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v20, v16
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v18, v12
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 2
+; ZVBB-RV64-NEXT: add a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v16, v8
+; ZVBB-RV64-NEXT: vmv2r.v v22, v16
+; ZVBB-RV64-NEXT: vmv2r.v v24, v18
+; ZVBB-RV64-NEXT: vmv1r.v v26, v20
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v23, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: add a5, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v25, v14
+; ZVBB-RV64-NEXT: add a6, a5, a2
+; ZVBB-RV64-NEXT: vmv1r.v v18, v11
+; ZVBB-RV64-NEXT: vsseg5e64.v v22, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v20, v15
+; ZVBB-RV64-NEXT: vsseg5e64.v v17, (a1)
+; ZVBB-RV64-NEXT: vl1re64.v v16, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v17, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re64.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re64.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re64.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re64.v v14, (a4)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 10
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v15, (a5)
+; ZVBB-RV64-NEXT: vl1re64.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1re64.v v13, (a1)
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e64.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e64.v v17, (a1)
+; ZIP-NEXT: vl1re64.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re64.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v11, (a6)
+; ZIP-NEXT: vl1re64.v v8, (a0)
+; ZIP-NEXT: vl1re64.v v9, (a3)
+; ZIP-NEXT: vl1re64.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v15, (a5)
+; ZIP-NEXT: vl1re64.v v12, (a6)
+; ZIP-NEXT: vl1re64.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re64.v v16, (a2)
+; ZIP-NEXT: vl8re64.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e)
+ ret <vscale x 10 x i64> %res
+}
+
+define <vscale x 112 x i1> @vector_interleave_nxv112i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) nounwind {
+; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 14
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v14, 0
+; CHECK-NEXT: addi a4, sp, 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: vmerge.vim v16, v14, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v22, v14, 1, v0
+; CHECK-NEXT: add a3, a4, a2
+; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: add a5, a0, a2
+; CHECK-NEXT: vmv4r.v v24, v16
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmerge.vim v18, v14, 1, v0
+; CHECK-NEXT: add a6, a3, a2
+; CHECK-NEXT: vmv1r.v v25, v22
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmerge.vim v8, v14, 1, v0
+; CHECK-NEXT: vmv1r.v v26, v18
+; CHECK-NEXT: vmv1r.v v0, v11
+; CHECK-NEXT: vmerge.vim v20, v14, 1, v0
+; CHECK-NEXT: vmv1r.v v27, v8
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmerge.vim v10, v14, 1, v0
+; CHECK-NEXT: vmv1r.v v28, v20
+; CHECK-NEXT: vmv1r.v v18, v23
+; CHECK-NEXT: add a7, a6, a2
+; CHECK-NEXT: vmv1r.v v29, v10
+; CHECK-NEXT: vmv1r.v v20, v9
+; CHECK-NEXT: vmv1r.v v0, v13
+; CHECK-NEXT: vmerge.vim v30, v14, 1, v0
+; CHECK-NEXT: vmv1r.v v22, v11
+; CHECK-NEXT: vsetvli t0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vsseg7e8.v v24, (a4)
+; CHECK-NEXT: vmv1r.v v23, v31
+; CHECK-NEXT: vsseg7e8.v v17, (a0)
+; CHECK-NEXT: vl1r.v v8, (a6)
+; CHECK-NEXT: add a6, a7, a2
+; CHECK-NEXT: vl1r.v v10, (a4)
+; CHECK-NEXT: add a4, a6, a2
+; CHECK-NEXT: vl1r.v v12, (a6)
+; CHECK-NEXT: add a6, a4, a2
+; CHECK-NEXT: vl1r.v v14, (a6)
+; CHECK-NEXT: add a6, a5, a2
+; CHECK-NEXT: vl1r.v v16, (a5)
+; CHECK-NEXT: add a5, a6, a2
+; CHECK-NEXT: vl1r.v v18, (a5)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: vl1r.v v9, (a7)
+; CHECK-NEXT: add a7, a5, a2
+; CHECK-NEXT: vl1r.v v20, (a7)
+; CHECK-NEXT: add a7, a7, a2
+; CHECK-NEXT: srli a2, a2, 1
+; CHECK-NEXT: vl1r.v v11, (a3)
+; CHECK-NEXT: add a3, a1, a1
+; CHECK-NEXT: vl1r.v v13, (a4)
+; CHECK-NEXT: add a4, a2, a2
+; CHECK-NEXT: vl1r.v v15, (a0)
+; CHECK-NEXT: vl1r.v v19, (a5)
+; CHECK-NEXT: vl1r.v v17, (a6)
+; CHECK-NEXT: vl1r.v v21, (a7)
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmsne.vi v22, v8, 0
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmsne.vi v9, v12, 0
+; CHECK-NEXT: vmsne.vi v10, v14, 0
+; CHECK-NEXT: vmsne.vi v11, v18, 0
+; CHECK-NEXT: vmsne.vi v8, v16, 0
+; CHECK-NEXT: vmsne.vi v12, v20, 0
+; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v22, a1
+; CHECK-NEXT: vslideup.vx v9, v10, a1
+; CHECK-NEXT: vslideup.vx v8, v11, a1
+; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v0, v9, a2
+; CHECK-NEXT: vslideup.vx v8, v12, a2
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 14
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 14
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmv.v.i v14, 0
+; ZVBB-NEXT: addi a4, sp, 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: add a0, sp, a0
+; ZVBB-NEXT: addi a0, a0, 16
+; ZVBB-NEXT: csrr a2, vlenb
+; ZVBB-NEXT: vmerge.vim v16, v14, 1, v0
+; ZVBB-NEXT: vmv1r.v v0, v8
+; ZVBB-NEXT: vmerge.vim v22, v14, 1, v0
+; ZVBB-NEXT: add a3, a4, a2
+; ZVBB-NEXT: srli a1, a2, 2
+; ZVBB-NEXT: add a5, a0, a2
+; ZVBB-NEXT: vmv4r.v v24, v16
+; ZVBB-NEXT: vmv1r.v v0, v9
+; ZVBB-NEXT: vmerge.vim v18, v14, 1, v0
+; ZVBB-NEXT: add a6, a3, a2
+; ZVBB-NEXT: vmv1r.v v25, v22
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vmerge.vim v8, v14, 1, v0
+; ZVBB-NEXT: vmv1r.v v26, v18
+; ZVBB-NEXT: vmv1r.v v0, v11
+; ZVBB-NEXT: vmerge.vim v20, v14, 1, v0
+; ZVBB-NEXT: vmv1r.v v27, v8
+; ZVBB-NEXT: vmv1r.v v0, v12
+; ZVBB-NEXT: vmerge.vim v10, v14, 1, v0
+; ZVBB-NEXT: vmv1r.v v28, v20
+; ZVBB-NEXT: vmv1r.v v18, v23
+; ZVBB-NEXT: add a7, a6, a2
+; ZVBB-NEXT: vmv1r.v v29, v10
+; ZVBB-NEXT: vmv1r.v v20, v9
+; ZVBB-NEXT: vmv1r.v v0, v13
+; ZVBB-NEXT: vmerge.vim v30, v14, 1, v0
+; ZVBB-NEXT: vmv1r.v v22, v11
+; ZVBB-NEXT: vsetvli t0, zero, e8, m1, ta, ma
+; ZVBB-NEXT: vsseg7e8.v v24, (a4)
+; ZVBB-NEXT: vmv1r.v v23, v31
+; ZVBB-NEXT: vsseg7e8.v v17, (a0)
+; ZVBB-NEXT: vl1r.v v8, (a6)
+; ZVBB-NEXT: add a6, a7, a2
+; ZVBB-NEXT: vl1r.v v10, (a4)
+; ZVBB-NEXT: add a4, a6, a2
+; ZVBB-NEXT: vl1r.v v12, (a6)
+; ZVBB-NEXT: add a6, a4, a2
+; ZVBB-NEXT: vl1r.v v14, (a6)
+; ZVBB-NEXT: add a6, a5, a2
+; ZVBB-NEXT: vl1r.v v16, (a5)
+; ZVBB-NEXT: add a5, a6, a2
+; ZVBB-NEXT: vl1r.v v18, (a5)
+; ZVBB-NEXT: add a5, a5, a2
+; ZVBB-NEXT: vl1r.v v9, (a7)
+; ZVBB-NEXT: add a7, a5, a2
+; ZVBB-NEXT: vl1r.v v20, (a7)
+; ZVBB-NEXT: add a7, a7, a2
+; ZVBB-NEXT: srli a2, a2, 1
+; ZVBB-NEXT: vl1r.v v11, (a3)
+; ZVBB-NEXT: add a3, a1, a1
+; ZVBB-NEXT: vl1r.v v13, (a4)
+; ZVBB-NEXT: add a4, a2, a2
+; ZVBB-NEXT: vl1r.v v15, (a0)
+; ZVBB-NEXT: vl1r.v v19, (a5)
+; ZVBB-NEXT: vl1r.v v17, (a6)
+; ZVBB-NEXT: vl1r.v v21, (a7)
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmsne.vi v22, v8, 0
+; ZVBB-NEXT: vmsne.vi v0, v10, 0
+; ZVBB-NEXT: vmsne.vi v9, v12, 0
+; ZVBB-NEXT: vmsne.vi v10, v14, 0
+; ZVBB-NEXT: vmsne.vi v11, v18, 0
+; ZVBB-NEXT: vmsne.vi v8, v16, 0
+; ZVBB-NEXT: vmsne.vi v12, v20, 0
+; ZVBB-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v22, a1
+; ZVBB-NEXT: vslideup.vx v9, v10, a1
+; ZVBB-NEXT: vslideup.vx v8, v11, a1
+; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v0, v9, a2
+; ZVBB-NEXT: vslideup.vx v8, v12, a2
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 14
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: ret
+ %res = call <vscale x 112 x i1> @llvm.vector.interleave7.nxv112i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g)
+ ret <vscale x 112 x i1> %res
+}
+
+
+define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vmv2r.v v26, v20
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v24, v16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: sub a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: vmv2r.v v22, v12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v20, v8
+; RV32-NEXT: vmv1r.v v1, v20
+; RV32-NEXT: vmv1r.v v3, v22
+; RV32-NEXT: vmv1r.v v5, v24
+; RV32-NEXT: vmv1r.v v7, v26
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v2, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: slli a5, a2, 2
+; RV32-NEXT: vmv1r.v v4, v14
+; RV32-NEXT: slli a6, a2, 4
+; RV32-NEXT: add a7, a4, a2
+; RV32-NEXT: vmv1r.v v6, v18
+; RV32-NEXT: sub a5, a6, a5
+; RV32-NEXT: vmv1r.v v22, v11
+; RV32-NEXT: add a6, a7, a2
+; RV32-NEXT: vmv1r.v v24, v15
+; RV32-NEXT: vsseg7e8.v v1, (a0)
+; RV32-NEXT: vmv1r.v v26, v19
+; RV32-NEXT: vsseg7e8.v v21, (a1)
+; RV32-NEXT: vl1r.v v18, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v19, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v20, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v21, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1r.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v11, (a6)
+; RV32-NEXT: vl1r.v v8, (a0)
+; RV32-NEXT: vl1r.v v16, (a4)
+; RV32-NEXT: vl1r.v v9, (a3)
+; RV32-NEXT: vl1r.v v17, (a7)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 14
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v12, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1r.v v13, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vl1r.v v14, (a6)
+; RV32-NEXT: vl1r.v v15, (a1)
+; RV32-NEXT: add a5, a0, a5
+; RV32-NEXT: vs2r.v v20, (a5)
+; RV32-NEXT: vs4r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8r.v v16, (a2)
+; RV32-NEXT: vl8r.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vmv2r.v v26, v20
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v24, v16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: sub a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: vmv2r.v v22, v12
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v20, v8
+; RV64-NEXT: vmv1r.v v1, v20
+; RV64-NEXT: vmv1r.v v3, v22
+; RV64-NEXT: vmv1r.v v5, v24
+; RV64-NEXT: vmv1r.v v7, v26
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v2, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: slli a5, a2, 2
+; RV64-NEXT: vmv1r.v v4, v14
+; RV64-NEXT: slli a6, a2, 4
+; RV64-NEXT: add a7, a4, a2
+; RV64-NEXT: vmv1r.v v6, v18
+; RV64-NEXT: sub a5, a6, a5
+; RV64-NEXT: vmv1r.v v22, v11
+; RV64-NEXT: add a6, a7, a2
+; RV64-NEXT: vmv1r.v v24, v15
+; RV64-NEXT: vsseg7e8.v v1, (a0)
+; RV64-NEXT: vmv1r.v v26, v19
+; RV64-NEXT: vsseg7e8.v v21, (a1)
+; RV64-NEXT: vl1r.v v18, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v19, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v20, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v21, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1r.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v11, (a6)
+; RV64-NEXT: vl1r.v v8, (a0)
+; RV64-NEXT: vl1r.v v16, (a4)
+; RV64-NEXT: vl1r.v v9, (a3)
+; RV64-NEXT: vl1r.v v17, (a7)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 14
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v12, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1r.v v13, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vl1r.v v14, (a6)
+; RV64-NEXT: vl1r.v v15, (a1)
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: vs2r.v v20, (a5)
+; RV64-NEXT: vs4r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8r.v v16, (a2)
+; RV64-NEXT: vl8r.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: slli a0, a0, 5
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v26, v20
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v24, v16
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 3
+; ZVBB-RV32-NEXT: sub a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: vmv2r.v v22, v12
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v20, v8
+; ZVBB-RV32-NEXT: vmv1r.v v1, v20
+; ZVBB-RV32-NEXT: vmv1r.v v3, v22
+; ZVBB-RV32-NEXT: vmv1r.v v5, v24
+; ZVBB-RV32-NEXT: vmv1r.v v7, v26
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v2, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: slli a5, a2, 2
+; ZVBB-RV32-NEXT: vmv1r.v v4, v14
+; ZVBB-RV32-NEXT: slli a6, a2, 4
+; ZVBB-RV32-NEXT: add a7, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v6, v18
+; ZVBB-RV32-NEXT: sub a5, a6, a5
+; ZVBB-RV32-NEXT: vmv1r.v v22, v11
+; ZVBB-RV32-NEXT: add a6, a7, a2
+; ZVBB-RV32-NEXT: vmv1r.v v24, v15
+; ZVBB-RV32-NEXT: vsseg7e8.v v1, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v26, v19
+; ZVBB-RV32-NEXT: vsseg7e8.v v21, (a1)
+; ZVBB-RV32-NEXT: vl1r.v v18, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v19, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v20, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v21, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1r.v v16, (a4)
+; ZVBB-RV32-NEXT: vl1r.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1r.v v17, (a7)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 14
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1r.v v13, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vl1r.v v14, (a6)
+; ZVBB-RV32-NEXT: vl1r.v v15, (a1)
+; ZVBB-RV32-NEXT: add a5, a0, a5
+; ZVBB-RV32-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8r.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: slli a0, a0, 5
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v26, v20
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v24, v16
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 3
+; ZVBB-RV64-NEXT: sub a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: vmv2r.v v22, v12
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v20, v8
+; ZVBB-RV64-NEXT: vmv1r.v v1, v20
+; ZVBB-RV64-NEXT: vmv1r.v v3, v22
+; ZVBB-RV64-NEXT: vmv1r.v v5, v24
+; ZVBB-RV64-NEXT: vmv1r.v v7, v26
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v2, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: slli a5, a2, 2
+; ZVBB-RV64-NEXT: vmv1r.v v4, v14
+; ZVBB-RV64-NEXT: slli a6, a2, 4
+; ZVBB-RV64-NEXT: add a7, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v6, v18
+; ZVBB-RV64-NEXT: sub a5, a6, a5
+; ZVBB-RV64-NEXT: vmv1r.v v22, v11
+; ZVBB-RV64-NEXT: add a6, a7, a2
+; ZVBB-RV64-NEXT: vmv1r.v v24, v15
+; ZVBB-RV64-NEXT: vsseg7e8.v v1, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v26, v19
+; ZVBB-RV64-NEXT: vsseg7e8.v v21, (a1)
+; ZVBB-RV64-NEXT: vl1r.v v18, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v19, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v20, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v21, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1r.v v16, (a4)
+; ZVBB-RV64-NEXT: vl1r.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1r.v v17, (a7)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 14
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1r.v v13, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vl1r.v v14, (a6)
+; ZVBB-RV64-NEXT: vl1r.v v15, (a1)
+; ZVBB-RV64-NEXT: add a5, a0, a5
+; ZVBB-RV64-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8r.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e8.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e8.v v21, (a1)
+; ZIP-NEXT: vl1r.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v11, (a6)
+; ZIP-NEXT: vl1r.v v8, (a0)
+; ZIP-NEXT: vl1r.v v16, (a4)
+; ZIP-NEXT: vl1r.v v9, (a3)
+; ZIP-NEXT: vl1r.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1r.v v14, (a6)
+; ZIP-NEXT: vl1r.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8r.v v16, (a2)
+; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g)
+ ret <vscale x 112 x i8> %res
+}
+
+
+define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vmv2r.v v26, v20
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v24, v16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: sub a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: vmv2r.v v22, v12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v20, v8
+; RV32-NEXT: vmv1r.v v1, v20
+; RV32-NEXT: vmv1r.v v3, v22
+; RV32-NEXT: vmv1r.v v5, v24
+; RV32-NEXT: vmv1r.v v7, v26
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v2, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: slli a5, a2, 2
+; RV32-NEXT: vmv1r.v v4, v14
+; RV32-NEXT: slli a6, a2, 4
+; RV32-NEXT: add a7, a4, a2
+; RV32-NEXT: vmv1r.v v6, v18
+; RV32-NEXT: sub a5, a6, a5
+; RV32-NEXT: vmv1r.v v22, v11
+; RV32-NEXT: add a6, a7, a2
+; RV32-NEXT: vmv1r.v v24, v15
+; RV32-NEXT: vsseg7e16.v v1, (a0)
+; RV32-NEXT: vmv1r.v v26, v19
+; RV32-NEXT: vsseg7e16.v v21, (a1)
+; RV32-NEXT: vl1re16.v v18, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v19, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v20, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v21, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re16.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v11, (a6)
+; RV32-NEXT: vl1re16.v v8, (a0)
+; RV32-NEXT: vl1re16.v v16, (a4)
+; RV32-NEXT: vl1re16.v v9, (a3)
+; RV32-NEXT: vl1re16.v v17, (a7)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 14
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v12, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v13, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vl1re16.v v14, (a6)
+; RV32-NEXT: vl1re16.v v15, (a1)
+; RV32-NEXT: add a5, a0, a5
+; RV32-NEXT: vs2r.v v20, (a5)
+; RV32-NEXT: vs4r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re16.v v16, (a2)
+; RV32-NEXT: vl8re16.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vmv2r.v v26, v20
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v24, v16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: sub a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: vmv2r.v v22, v12
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v20, v8
+; RV64-NEXT: vmv1r.v v1, v20
+; RV64-NEXT: vmv1r.v v3, v22
+; RV64-NEXT: vmv1r.v v5, v24
+; RV64-NEXT: vmv1r.v v7, v26
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v2, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: slli a5, a2, 2
+; RV64-NEXT: vmv1r.v v4, v14
+; RV64-NEXT: slli a6, a2, 4
+; RV64-NEXT: add a7, a4, a2
+; RV64-NEXT: vmv1r.v v6, v18
+; RV64-NEXT: sub a5, a6, a5
+; RV64-NEXT: vmv1r.v v22, v11
+; RV64-NEXT: add a6, a7, a2
+; RV64-NEXT: vmv1r.v v24, v15
+; RV64-NEXT: vsseg7e16.v v1, (a0)
+; RV64-NEXT: vmv1r.v v26, v19
+; RV64-NEXT: vsseg7e16.v v21, (a1)
+; RV64-NEXT: vl1re16.v v18, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v19, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v20, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v21, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re16.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v11, (a6)
+; RV64-NEXT: vl1re16.v v8, (a0)
+; RV64-NEXT: vl1re16.v v16, (a4)
+; RV64-NEXT: vl1re16.v v9, (a3)
+; RV64-NEXT: vl1re16.v v17, (a7)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 14
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v12, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v13, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vl1re16.v v14, (a6)
+; RV64-NEXT: vl1re16.v v15, (a1)
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: vs2r.v v20, (a5)
+; RV64-NEXT: vs4r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re16.v v16, (a2)
+; RV64-NEXT: vl8re16.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: slli a0, a0, 5
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v26, v20
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v24, v16
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 3
+; ZVBB-RV32-NEXT: sub a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: vmv2r.v v22, v12
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v20, v8
+; ZVBB-RV32-NEXT: vmv1r.v v1, v20
+; ZVBB-RV32-NEXT: vmv1r.v v3, v22
+; ZVBB-RV32-NEXT: vmv1r.v v5, v24
+; ZVBB-RV32-NEXT: vmv1r.v v7, v26
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v2, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: slli a5, a2, 2
+; ZVBB-RV32-NEXT: vmv1r.v v4, v14
+; ZVBB-RV32-NEXT: slli a6, a2, 4
+; ZVBB-RV32-NEXT: add a7, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v6, v18
+; ZVBB-RV32-NEXT: sub a5, a6, a5
+; ZVBB-RV32-NEXT: vmv1r.v v22, v11
+; ZVBB-RV32-NEXT: add a6, a7, a2
+; ZVBB-RV32-NEXT: vmv1r.v v24, v15
+; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v26, v19
+; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v18, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v19, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v20, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v21, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re16.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re16.v v16, (a4)
+; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re16.v v17, (a7)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 14
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v13, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vl1re16.v v14, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v15, (a1)
+; ZVBB-RV32-NEXT: add a5, a0, a5
+; ZVBB-RV32-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: slli a0, a0, 5
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v26, v20
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v24, v16
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 3
+; ZVBB-RV64-NEXT: sub a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: vmv2r.v v22, v12
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v20, v8
+; ZVBB-RV64-NEXT: vmv1r.v v1, v20
+; ZVBB-RV64-NEXT: vmv1r.v v3, v22
+; ZVBB-RV64-NEXT: vmv1r.v v5, v24
+; ZVBB-RV64-NEXT: vmv1r.v v7, v26
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v2, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: slli a5, a2, 2
+; ZVBB-RV64-NEXT: vmv1r.v v4, v14
+; ZVBB-RV64-NEXT: slli a6, a2, 4
+; ZVBB-RV64-NEXT: add a7, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v6, v18
+; ZVBB-RV64-NEXT: sub a5, a6, a5
+; ZVBB-RV64-NEXT: vmv1r.v v22, v11
+; ZVBB-RV64-NEXT: add a6, a7, a2
+; ZVBB-RV64-NEXT: vmv1r.v v24, v15
+; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v26, v19
+; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v18, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v19, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v20, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v21, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re16.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re16.v v16, (a4)
+; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re16.v v17, (a7)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 14
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v13, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vl1re16.v v14, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v15, (a1)
+; ZVBB-RV64-NEXT: add a5, a0, a5
+; ZVBB-RV64-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e16.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e16.v v21, (a1)
+; ZIP-NEXT: vl1re16.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re16.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v11, (a6)
+; ZIP-NEXT: vl1re16.v v8, (a0)
+; ZIP-NEXT: vl1re16.v v16, (a4)
+; ZIP-NEXT: vl1re16.v v9, (a3)
+; ZIP-NEXT: vl1re16.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re16.v v14, (a6)
+; ZIP-NEXT: vl1re16.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re16.v v16, (a2)
+; ZIP-NEXT: vl8re16.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g)
+ ret <vscale x 56 x i16> %res
+}
+
+
+define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT: vmv2r.v v26, v20
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v24, v16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: sub a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: vmv2r.v v22, v12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v20, v8
+; RV32-NEXT: vmv1r.v v1, v20
+; RV32-NEXT: vmv1r.v v3, v22
+; RV32-NEXT: vmv1r.v v5, v24
+; RV32-NEXT: vmv1r.v v7, v26
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v2, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: slli a5, a2, 2
+; RV32-NEXT: vmv1r.v v4, v14
+; RV32-NEXT: slli a6, a2, 4
+; RV32-NEXT: add a7, a4, a2
+; RV32-NEXT: vmv1r.v v6, v18
+; RV32-NEXT: sub a5, a6, a5
+; RV32-NEXT: vmv1r.v v22, v11
+; RV32-NEXT: add a6, a7, a2
+; RV32-NEXT: vmv1r.v v24, v15
+; RV32-NEXT: vsseg7e32.v v1, (a0)
+; RV32-NEXT: vmv1r.v v26, v19
+; RV32-NEXT: vsseg7e32.v v21, (a1)
+; RV32-NEXT: vl1re32.v v18, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v19, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v20, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v21, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re32.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v11, (a6)
+; RV32-NEXT: vl1re32.v v8, (a0)
+; RV32-NEXT: vl1re32.v v16, (a4)
+; RV32-NEXT: vl1re32.v v9, (a3)
+; RV32-NEXT: vl1re32.v v17, (a7)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 14
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v12, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re32.v v13, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vl1re32.v v14, (a6)
+; RV32-NEXT: vl1re32.v v15, (a1)
+; RV32-NEXT: add a5, a0, a5
+; RV32-NEXT: vs2r.v v20, (a5)
+; RV32-NEXT: vs4r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re32.v v16, (a2)
+; RV32-NEXT: vl8re32.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT: vmv2r.v v26, v20
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v24, v16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: sub a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: vmv2r.v v22, v12
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v20, v8
+; RV64-NEXT: vmv1r.v v1, v20
+; RV64-NEXT: vmv1r.v v3, v22
+; RV64-NEXT: vmv1r.v v5, v24
+; RV64-NEXT: vmv1r.v v7, v26
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v2, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: slli a5, a2, 2
+; RV64-NEXT: vmv1r.v v4, v14
+; RV64-NEXT: slli a6, a2, 4
+; RV64-NEXT: add a7, a4, a2
+; RV64-NEXT: vmv1r.v v6, v18
+; RV64-NEXT: sub a5, a6, a5
+; RV64-NEXT: vmv1r.v v22, v11
+; RV64-NEXT: add a6, a7, a2
+; RV64-NEXT: vmv1r.v v24, v15
+; RV64-NEXT: vsseg7e32.v v1, (a0)
+; RV64-NEXT: vmv1r.v v26, v19
+; RV64-NEXT: vsseg7e32.v v21, (a1)
+; RV64-NEXT: vl1re32.v v18, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v19, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v20, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v21, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re32.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v11, (a6)
+; RV64-NEXT: vl1re32.v v8, (a0)
+; RV64-NEXT: vl1re32.v v16, (a4)
+; RV64-NEXT: vl1re32.v v9, (a3)
+; RV64-NEXT: vl1re32.v v17, (a7)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 14
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v12, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re32.v v13, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vl1re32.v v14, (a6)
+; RV64-NEXT: vl1re32.v v15, (a1)
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: vs2r.v v20, (a5)
+; RV64-NEXT: vs4r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re32.v v16, (a2)
+; RV64-NEXT: vl8re32.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: slli a0, a0, 5
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v26, v20
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v24, v16
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 3
+; ZVBB-RV32-NEXT: sub a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: vmv2r.v v22, v12
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v20, v8
+; ZVBB-RV32-NEXT: vmv1r.v v1, v20
+; ZVBB-RV32-NEXT: vmv1r.v v3, v22
+; ZVBB-RV32-NEXT: vmv1r.v v5, v24
+; ZVBB-RV32-NEXT: vmv1r.v v7, v26
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v2, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: slli a5, a2, 2
+; ZVBB-RV32-NEXT: vmv1r.v v4, v14
+; ZVBB-RV32-NEXT: slli a6, a2, 4
+; ZVBB-RV32-NEXT: add a7, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v6, v18
+; ZVBB-RV32-NEXT: sub a5, a6, a5
+; ZVBB-RV32-NEXT: vmv1r.v v22, v11
+; ZVBB-RV32-NEXT: add a6, a7, a2
+; ZVBB-RV32-NEXT: vmv1r.v v24, v15
+; ZVBB-RV32-NEXT: vsseg7e32.v v1, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v26, v19
+; ZVBB-RV32-NEXT: vsseg7e32.v v21, (a1)
+; ZVBB-RV32-NEXT: vl1re32.v v18, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v19, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v20, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v21, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re32.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re32.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re32.v v16, (a4)
+; ZVBB-RV32-NEXT: vl1re32.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re32.v v17, (a7)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 14
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v12, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re32.v v13, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vl1re32.v v14, (a6)
+; ZVBB-RV32-NEXT: vl1re32.v v15, (a1)
+; ZVBB-RV32-NEXT: add a5, a0, a5
+; ZVBB-RV32-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: slli a0, a0, 5
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v26, v20
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v24, v16
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 3
+; ZVBB-RV64-NEXT: sub a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: vmv2r.v v22, v12
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v20, v8
+; ZVBB-RV64-NEXT: vmv1r.v v1, v20
+; ZVBB-RV64-NEXT: vmv1r.v v3, v22
+; ZVBB-RV64-NEXT: vmv1r.v v5, v24
+; ZVBB-RV64-NEXT: vmv1r.v v7, v26
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v2, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: slli a5, a2, 2
+; ZVBB-RV64-NEXT: vmv1r.v v4, v14
+; ZVBB-RV64-NEXT: slli a6, a2, 4
+; ZVBB-RV64-NEXT: add a7, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v6, v18
+; ZVBB-RV64-NEXT: sub a5, a6, a5
+; ZVBB-RV64-NEXT: vmv1r.v v22, v11
+; ZVBB-RV64-NEXT: add a6, a7, a2
+; ZVBB-RV64-NEXT: vmv1r.v v24, v15
+; ZVBB-RV64-NEXT: vsseg7e32.v v1, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v26, v19
+; ZVBB-RV64-NEXT: vsseg7e32.v v21, (a1)
+; ZVBB-RV64-NEXT: vl1re32.v v18, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v19, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v20, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v21, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re32.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re32.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re32.v v16, (a4)
+; ZVBB-RV64-NEXT: vl1re32.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re32.v v17, (a7)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 14
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v12, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re32.v v13, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vl1re32.v v14, (a6)
+; ZVBB-RV64-NEXT: vl1re32.v v15, (a1)
+; ZVBB-RV64-NEXT: add a5, a0, a5
+; ZVBB-RV64-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e32.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e32.v v21, (a1)
+; ZIP-NEXT: vl1re32.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re32.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v11, (a6)
+; ZIP-NEXT: vl1re32.v v8, (a0)
+; ZIP-NEXT: vl1re32.v v16, (a4)
+; ZIP-NEXT: vl1re32.v v9, (a3)
+; ZIP-NEXT: vl1re32.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re32.v v14, (a6)
+; ZIP-NEXT: vl1re32.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re32.v v16, (a2)
+; ZIP-NEXT: vl8re32.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g)
+ ret <vscale x 28 x i32> %res
+}
+
+define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind {
+;
+; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vmv2r.v v26, v20
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v24, v16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: sub a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: vmv2r.v v22, v12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v20, v8
+; RV32-NEXT: vmv1r.v v1, v20
+; RV32-NEXT: vmv1r.v v3, v22
+; RV32-NEXT: vmv1r.v v5, v24
+; RV32-NEXT: vmv1r.v v7, v26
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v2, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: slli a5, a2, 2
+; RV32-NEXT: vmv1r.v v4, v14
+; RV32-NEXT: slli a6, a2, 4
+; RV32-NEXT: add a7, a4, a2
+; RV32-NEXT: vmv1r.v v6, v18
+; RV32-NEXT: sub a5, a6, a5
+; RV32-NEXT: vmv1r.v v22, v11
+; RV32-NEXT: add a6, a7, a2
+; RV32-NEXT: vmv1r.v v24, v15
+; RV32-NEXT: vsseg7e64.v v1, (a0)
+; RV32-NEXT: vmv1r.v v26, v19
+; RV32-NEXT: vsseg7e64.v v21, (a1)
+; RV32-NEXT: vl1re64.v v18, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v19, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v20, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v21, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re64.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v11, (a6)
+; RV32-NEXT: vl1re64.v v8, (a0)
+; RV32-NEXT: vl1re64.v v16, (a4)
+; RV32-NEXT: vl1re64.v v9, (a3)
+; RV32-NEXT: vl1re64.v v17, (a7)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 14
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v12, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re64.v v13, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vl1re64.v v14, (a6)
+; RV32-NEXT: vl1re64.v v15, (a1)
+; RV32-NEXT: add a5, a0, a5
+; RV32-NEXT: vs2r.v v20, (a5)
+; RV32-NEXT: vs4r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re64.v v16, (a2)
+; RV32-NEXT: vl8re64.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 5
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vmv2r.v v26, v20
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v24, v16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: sub a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: vmv2r.v v22, v12
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v20, v8
+; RV64-NEXT: vmv1r.v v1, v20
+; RV64-NEXT: vmv1r.v v3, v22
+; RV64-NEXT: vmv1r.v v5, v24
+; RV64-NEXT: vmv1r.v v7, v26
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v2, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: slli a5, a2, 2
+; RV64-NEXT: vmv1r.v v4, v14
+; RV64-NEXT: slli a6, a2, 4
+; RV64-NEXT: add a7, a4, a2
+; RV64-NEXT: vmv1r.v v6, v18
+; RV64-NEXT: sub a5, a6, a5
+; RV64-NEXT: vmv1r.v v22, v11
+; RV64-NEXT: add a6, a7, a2
+; RV64-NEXT: vmv1r.v v24, v15
+; RV64-NEXT: vsseg7e64.v v1, (a0)
+; RV64-NEXT: vmv1r.v v26, v19
+; RV64-NEXT: vsseg7e64.v v21, (a1)
+; RV64-NEXT: vl1re64.v v18, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v19, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v20, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v21, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re64.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v11, (a6)
+; RV64-NEXT: vl1re64.v v8, (a0)
+; RV64-NEXT: vl1re64.v v16, (a4)
+; RV64-NEXT: vl1re64.v v9, (a3)
+; RV64-NEXT: vl1re64.v v17, (a7)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 14
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v12, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re64.v v13, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vl1re64.v v14, (a6)
+; RV64-NEXT: vl1re64.v v15, (a1)
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: vs2r.v v20, (a5)
+; RV64-NEXT: vs4r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re64.v v16, (a2)
+; RV64-NEXT: vl8re64.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: slli a0, a0, 5
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v26, v20
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v24, v16
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 3
+; ZVBB-RV32-NEXT: sub a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: vmv2r.v v22, v12
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v20, v8
+; ZVBB-RV32-NEXT: vmv1r.v v1, v20
+; ZVBB-RV32-NEXT: vmv1r.v v3, v22
+; ZVBB-RV32-NEXT: vmv1r.v v5, v24
+; ZVBB-RV32-NEXT: vmv1r.v v7, v26
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v2, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: slli a5, a2, 2
+; ZVBB-RV32-NEXT: vmv1r.v v4, v14
+; ZVBB-RV32-NEXT: slli a6, a2, 4
+; ZVBB-RV32-NEXT: add a7, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v6, v18
+; ZVBB-RV32-NEXT: sub a5, a6, a5
+; ZVBB-RV32-NEXT: vmv1r.v v22, v11
+; ZVBB-RV32-NEXT: add a6, a7, a2
+; ZVBB-RV32-NEXT: vmv1r.v v24, v15
+; ZVBB-RV32-NEXT: vsseg7e64.v v1, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v26, v19
+; ZVBB-RV32-NEXT: vsseg7e64.v v21, (a1)
+; ZVBB-RV32-NEXT: vl1re64.v v18, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v19, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v20, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v21, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re64.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re64.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re64.v v16, (a4)
+; ZVBB-RV32-NEXT: vl1re64.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re64.v v17, (a7)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 14
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v12, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re64.v v13, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vl1re64.v v14, (a6)
+; ZVBB-RV32-NEXT: vl1re64.v v15, (a1)
+; ZVBB-RV32-NEXT: add a5, a0, a5
+; ZVBB-RV32-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: slli a0, a0, 5
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v26, v20
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v24, v16
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 3
+; ZVBB-RV64-NEXT: sub a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: vmv2r.v v22, v12
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v20, v8
+; ZVBB-RV64-NEXT: vmv1r.v v1, v20
+; ZVBB-RV64-NEXT: vmv1r.v v3, v22
+; ZVBB-RV64-NEXT: vmv1r.v v5, v24
+; ZVBB-RV64-NEXT: vmv1r.v v7, v26
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v2, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: slli a5, a2, 2
+; ZVBB-RV64-NEXT: vmv1r.v v4, v14
+; ZVBB-RV64-NEXT: slli a6, a2, 4
+; ZVBB-RV64-NEXT: add a7, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v6, v18
+; ZVBB-RV64-NEXT: sub a5, a6, a5
+; ZVBB-RV64-NEXT: vmv1r.v v22, v11
+; ZVBB-RV64-NEXT: add a6, a7, a2
+; ZVBB-RV64-NEXT: vmv1r.v v24, v15
+; ZVBB-RV64-NEXT: vsseg7e64.v v1, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v26, v19
+; ZVBB-RV64-NEXT: vsseg7e64.v v21, (a1)
+; ZVBB-RV64-NEXT: vl1re64.v v18, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v19, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v20, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v21, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re64.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re64.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re64.v v16, (a4)
+; ZVBB-RV64-NEXT: vl1re64.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re64.v v17, (a7)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 14
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v12, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re64.v v13, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vl1re64.v v14, (a6)
+; ZVBB-RV64-NEXT: vl1re64.v v15, (a1)
+; ZVBB-RV64-NEXT: add a5, a0, a5
+; ZVBB-RV64-NEXT: vs2r.v v20, (a5)
+; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e64.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e64.v v21, (a1)
+; ZIP-NEXT: vl1re64.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re64.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v11, (a6)
+; ZIP-NEXT: vl1re64.v v8, (a0)
+; ZIP-NEXT: vl1re64.v v16, (a4)
+; ZIP-NEXT: vl1re64.v v9, (a3)
+; ZIP-NEXT: vl1re64.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re64.v v14, (a6)
+; ZIP-NEXT: vl1re64.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re64.v v16, (a2)
+; ZIP-NEXT: vl8re64.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
+ %res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g)
+ ret <vscale x 14 x i64> %res
+}
+
+; Floats
+
+define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vwaddu.vv v10, v8, v9
+; V-NEXT: li a0, -1
+; V-NEXT: csrr a1, vlenb
+; V-NEXT: vwmaccu.vx v10, a0, v9
+; V-NEXT: srli a1, a1, 2
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v8, v10, a1
+; V-NEXT: add a0, a1, a1
+; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT: vslideup.vx v10, v8, a1
+; V-NEXT: vmv.v.v v8, v10
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vwsll.vi v10, v9, 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: vwaddu.wv v10, v10, v8
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v8, v10, a0
+; ZVBB-NEXT: add a1, a0, a0
+; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v10, v8, a0
+; ZVBB-NEXT: vmv.v.v v8, v10
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: add a1, a0, a0
+; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v10, v11, a0
+; ZIP-NEXT: vmv.v.v v8, v10
+; ZIP-NEXT: ret
+ %res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+ ret <vscale x 4 x bfloat> %res
+}
+
+define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv1r.v v10, v9
+; ZVBB-NEXT: vmv1r.v v11, v8
+; ZVBB-NEXT: vwsll.vi v8, v10, 16
+; ZVBB-NEXT: vwaddu.wv v8, v8, v11
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
+ %res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+ ret <vscale x 8 x bfloat> %res
+}
+
+define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
+; V-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vwaddu.vv v10, v8, v9
+; V-NEXT: li a0, -1
+; V-NEXT: csrr a1, vlenb
+; V-NEXT: vwmaccu.vx v10, a0, v9
+; V-NEXT: srli a1, a1, 2
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v8, v10, a1
+; V-NEXT: add a0, a1, a1
+; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT: vslideup.vx v10, v8, a1
+; V-NEXT: vmv.v.v v8, v10
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vwsll.vi v10, v9, 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: vwaddu.wv v10, v10, v8
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v8, v10, a0
+; ZVBB-NEXT: add a1, a0, a0
+; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v10, v8, a0
+; ZVBB-NEXT: vmv.v.v v8, v10
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: add a1, a0, a0
+; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v10, v11, a0
+; ZIP-NEXT: vmv.v.v v8, v10
+; ZIP-NEXT: ret
+ %res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
+ ret <vscale x 4 x half> %res
+}
+
+define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
+; V-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv1r.v v10, v9
+; ZVBB-NEXT: vmv1r.v v11, v8
+; ZVBB-NEXT: vwsll.vi v8, v10, 16
+; ZVBB-NEXT: vwaddu.wv v8, v8, v11
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
+ %res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
+ ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
+; V-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZVBB-NEXT: vmv1r.v v10, v9
+; ZVBB-NEXT: vmv1r.v v11, v8
+; ZVBB-NEXT: li a0, 32
+; ZVBB-NEXT: vwsll.vx v8, v10, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v11
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
+ %res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
+ ret <vscale x 4 x float> %res
+}
+
+define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vmv2r.v v12, v10
+; ZVBB-NEXT: vmv2r.v v14, v8
+; ZVBB-NEXT: vwsll.vi v8, v12, 16
+; ZVBB-NEXT: vwaddu.wv v8, v8, v14
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
+ %res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+ ret <vscale x 16 x bfloat> %res
+}
+
+define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; V-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vmv2r.v v12, v10
+; ZVBB-NEXT: vmv2r.v v14, v8
+; ZVBB-NEXT: vwsll.vi v8, v12, 16
+; ZVBB-NEXT: vwaddu.wv v8, v8, v14
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
+ %res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
+ ret <vscale x 16 x half> %res
+}
+
+define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; V-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZVBB-NEXT: vmv2r.v v12, v10
+; ZVBB-NEXT: vmv2r.v v14, v8
+; ZVBB-NEXT: li a0, 32
+; ZVBB-NEXT: vwsll.vx v8, v12, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v14
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
+ %res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
+ ret <vscale x 8 x float> %res
+}
+
+define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; V-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; V-NEXT: vid.v v12
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vand.vi v13, v12, 1
+; V-NEXT: vmsne.vi v0, v13, 0
+; V-NEXT: vsrl.vi v16, v12, 1
+; V-NEXT: vadd.vx v16, v16, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; V-NEXT: vrgatherei16.vv v12, v8, v16
+; V-NEXT: vmv.v.v v8, v12
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; ZVBB-NEXT: vid.v v12
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vand.vi v13, v12, 1
+; ZVBB-NEXT: vmsne.vi v0, v13, 0
+; ZVBB-NEXT: vsrl.vi v16, v12, 1
+; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
+; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
+; ZVBB-NEXT: vmv.v.v v8, v12
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
+ %res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+ ret <vscale x 4 x double> %res
+}
+
+
+
+define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
+; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVBB-NEXT: vwsll.vi v24, v16, 16
+; ZVBB-NEXT: vwsll.vi v0, v20, 16
+; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwaddu.wv v0, v0, v12
+; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vmv8r.v v16, v0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
+ %res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
+ ret <vscale x 64 x bfloat> %res
+}
+
+define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
+; V-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVBB-NEXT: vwsll.vi v24, v16, 16
+; ZVBB-NEXT: vwsll.vi v0, v20, 16
+; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwaddu.wv v0, v0, v12
+; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vmv8r.v v16, v0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
+ %res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
+ ret <vscale x 64 x half> %res
+}
+
+define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
+; V-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: li a0, 32
+; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; ZVBB-NEXT: vwsll.vx v24, v16, a0
+; ZVBB-NEXT: vwsll.vx v0, v20, a0
+; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwaddu.wv v0, v0, v12
+; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vmv8r.v v16, v0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
+ %res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
+ ret <vscale x 32 x float> %res
+}
+
+define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
+; V-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; V-NEXT: vid.v v6
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: srli a0, a0, 1
+; V-NEXT: vmv4r.v v28, v16
+; V-NEXT: vmv4r.v v16, v12
+; V-NEXT: vand.vi v8, v6, 1
+; V-NEXT: vmsne.vi v0, v8, 0
+; V-NEXT: vsrl.vi v6, v6, 1
+; V-NEXT: vadd.vx v6, v6, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; V-NEXT: vrgatherei16.vv v8, v24, v6
+; V-NEXT: vrgatherei16.vv v24, v16, v6
+; V-NEXT: vmv.v.v v16, v24
+; V-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; ZVBB-NEXT: vid.v v6
+; ZVBB-NEXT: vmv8r.v v24, v8
+; ZVBB-NEXT: srli a0, a0, 1
+; ZVBB-NEXT: vmv4r.v v28, v16
+; ZVBB-NEXT: vmv4r.v v16, v12
+; ZVBB-NEXT: vand.vi v8, v6, 1
+; ZVBB-NEXT: vmsne.vi v0, v8, 0
+; ZVBB-NEXT: vsrl.vi v6, v6, 1
+; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
+; ZVBB-NEXT: vmv.v.v v16, v24
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
+ %res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
+ ret <vscale x 16 x double> %res
+}
+
+define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2) {
+; CHECK-LABEL: vector_interleave_nxv6f16_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vle16.v v9, (a3)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a0, a1, a1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a1
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v9, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6f16_nxv2f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: vle16.v v9, (a3)
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a0, a1, a1
+; ZVBB-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: add a2, a3, a2
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v9, (a2)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 6 x half> @llvm.vector.interleave3.nxv6f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2)
+ ret <vscale x 6 x half> %res
+}
+
+define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2) {
+; CHECK-LABEL: vector_interleave_nxv12f16_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v10, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12f16_nxv4f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v9, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v10, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 12 x half> @llvm.vector.interleave3.nxv12f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2)
+ ret <vscale x 12 x half> %res
+}
+
+define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2) {
+; CHECK-LABEL: vector_interleave_nxv24f16_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: vl2re16.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv24f16_nxv8f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: vl2re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v12, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 24 x half> @llvm.vector.interleave3.nxv24f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2)
+ ret <vscale x 24 x half> %res
+}
+
+define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2) {
+; CHECK-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vle16.v v9, (a3)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a0, a1, a1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a1
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v9, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: vle16.v v9, (a3)
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a0, a1, a1
+; ZVBB-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: add a2, a3, a2
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v9, (a2)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 6 x bfloat> @llvm.vector.interleave3.nxv6bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2)
+ ret <vscale x 6 x bfloat> %res
+}
+
+define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2) {
+; CHECK-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v10, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v9, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v10, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 12 x bfloat> @llvm.vector.interleave3.nxv12bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2)
+ ret <vscale x 12 x bfloat> %res
+}
+
+define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2) {
+; CHECK-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsseg3e16.v v8, (a0)
+; CHECK-NEXT: vl2re16.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re16.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vsseg3e16.v v8, (a0)
+; ZVBB-NEXT: vl2re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re16.v v12, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 24 x bfloat> @llvm.vector.interleave3.nxv24bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2)
+ ret <vscale x 24 x bfloat> %res
+}
+
+define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2) {
+; CHECK-LABEL: vector_interleave_nxv3f32_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsseg3e32.v v8, (a0)
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: vle32.v v9, (a3)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: srli a1, a1, 3
+; CHECK-NEXT: add a0, a1, a1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a1
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v9, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv3f32_nxv1f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vsseg3e32.v v8, (a0)
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: vle32.v v9, (a3)
+; ZVBB-NEXT: vle32.v v8, (a0)
+; ZVBB-NEXT: srli a1, a1, 3
+; ZVBB-NEXT: add a0, a1, a1
+; ZVBB-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: add a2, a3, a2
+; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vle32.v v9, (a2)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 3 x float> @llvm.vector.interleave3.nxv3f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2)
+ ret <vscale x 3 x float> %res
+}
+
+define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2) {
+; CHECK-LABEL: vector_interleave_nxv6f32_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsseg3e32.v v8, (a0)
+; CHECK-NEXT: vl1re32.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re32.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re32.v v10, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6f32_nxv2f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; ZVBB-NEXT: vsseg3e32.v v8, (a0)
+; ZVBB-NEXT: vl1re32.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re32.v v9, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re32.v v10, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 6 x float> @llvm.vector.interleave3.nxv6f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2)
+ ret <vscale x 6 x float> %res
+}
+
+define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2) {
+; CHECK-LABEL: vector_interleave_nxv12f32_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsseg3e32.v v8, (a0)
+; CHECK-NEXT: vl2re32.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re32.v v10, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl2re32.v v12, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv12f32_nxv4f32:
+; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: slli a1, a1, 1
-; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma
-; ZVBB-NEXT: vsseg3e16.v v8, (a0)
-; ZVBB-NEXT: vl2re16.v v8, (a0)
+; ZVBB-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; ZVBB-NEXT: vsseg3e32.v v8, (a0)
+; ZVBB-NEXT: vl2re32.v v8, (a0)
; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re16.v v10, (a0)
+; ZVBB-NEXT: vl2re32.v v10, (a0)
; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re16.v v12, (a0)
+; ZVBB-NEXT: vl2re32.v v12, (a0)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
- %res = call <vscale x 24 x i16> @llvm.vector.interleave3.nxv24i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
- ret <vscale x 24 x i16> %res
+ %res = call <vscale x 12 x float> @llvm.vector.interleave3.nxv12f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2)
+ ret <vscale x 12 x float> %res
}
+define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2) {
+; CHECK-LABEL: vector_interleave_nxv3f64_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; CHECK-NEXT: vsseg3e64.v v8, (a0)
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re64.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re64.v v10, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv3f64_nxv1f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; ZVBB-NEXT: vsseg3e64.v v8, (a0)
+; ZVBB-NEXT: vl1re64.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re64.v v9, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re64.v v10, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 3 x double> @llvm.vector.interleave3.nxv3f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2)
+ ret <vscale x 3 x double> %res
+}
-define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32:
+define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2) {
+; CHECK-LABEL: vector_interleave_nxv6f64_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; CHECK-NEXT: vsseg3e32.v v8, (a0)
-; CHECK-NEXT: vl2re32.v v8, (a0)
+; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsseg3e64.v v8, (a0)
+; CHECK-NEXT: vl2re64.v v8, (a0)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re32.v v10, (a0)
+; CHECK-NEXT: vl2re64.v v10, (a0)
; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re32.v v12, (a0)
+; CHECK-NEXT: vl2re64.v v12, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv6f64_nxv2f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: slli a1, a1, 1
+; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; ZVBB-NEXT: vsseg3e64.v v8, (a0)
+; ZVBB-NEXT: vl2re64.v v8, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re64.v v10, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl2re64.v v12, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: li a1, 6
+; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 6 x double> @llvm.vector.interleave3.nxv6f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2)
+ ret <vscale x 6 x double> %res
+}
+
+define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) {
+; CHECK-LABEL: vector_interleave_nxv10f16_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: add a5, a4, a2
+; CHECK-NEXT: vle16.v v8, (a5)
+; CHECK-NEXT: vle16.v v9, (a4)
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a4, a1, a1
+; CHECK-NEXT: vle16.v v10, (a3)
+; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a1
+; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v10, a1
+; CHECK-NEXT: add a2, a5, a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv10f16_nxv2f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg5e16.v v8, (a0)
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: vle16.v v8, (a5)
+; ZVBB-NEXT: vle16.v v9, (a4)
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a4, a1, a1
+; ZVBB-NEXT: vle16.v v10, (a3)
+; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v8, a1
+; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v10, a1
+; ZVBB-NEXT: add a2, a5, a2
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v10, (a2)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 10 x half> @llvm.vector.interleave5.nxv10f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4)
+ ret <vscale x 10 x half> %res
+}
+
+define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) {
+; CHECK-LABEL: vector_interleave_nxv20f16_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v11, (a3)
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v9, (a2)
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: vl1re16.v v12, (a1)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32:
-; ZVBB: # %bb.0:
-; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
-; ZVBB-NEXT: mul a0, a0, a1
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: addi a0, sp, 16
-; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 1
-; ZVBB-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; ZVBB-NEXT: vsseg3e32.v v8, (a0)
-; ZVBB-NEXT: vl2re32.v v8, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re32.v v10, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re32.v v12, (a0)
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
-; ZVBB-NEXT: mul a0, a0, a1
-; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: ret
- %res = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
- ret <vscale x 12 x i32> %res
+; ZVBB-LABEL: vector_interleave_nxv20f16_nxv4f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg5e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v11, (a3)
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v9, (a2)
+; ZVBB-NEXT: add a1, a3, a1
+; ZVBB-NEXT: vl1re16.v v12, (a1)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 20 x half> @llvm.vector.interleave5.nxv20f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4)
+ ret <vscale x 20 x half> %res
+}
+
+define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) {
+; RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 28
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vmv2r.v v20, v16
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vmv2r.v v18, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 64
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: vmv2r.v v16, v8
+; RV32-NEXT: vmv2r.v v22, v16
+; RV32-NEXT: vmv2r.v v24, v18
+; RV32-NEXT: vmv1r.v v26, v20
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vmv1r.v v23, v10
+; RV32-NEXT: add a4, a1, a2
+; RV32-NEXT: add a5, a4, a2
+; RV32-NEXT: vmv1r.v v25, v14
+; RV32-NEXT: add a6, a5, a2
+; RV32-NEXT: vmv1r.v v18, v11
+; RV32-NEXT: vsseg5e16.v v22, (a0)
+; RV32-NEXT: vmv1r.v v20, v15
+; RV32-NEXT: vsseg5e16.v v17, (a1)
+; RV32-NEXT: vl1re16.v v16, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v17, (a6)
+; RV32-NEXT: add a6, a3, a2
+; RV32-NEXT: vl1re16.v v10, (a6)
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v11, (a6)
+; RV32-NEXT: vl1re16.v v8, (a0)
+; RV32-NEXT: vl1re16.v v9, (a3)
+; RV32-NEXT: vl1re16.v v14, (a4)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a3, 10
+; RV32-NEXT: mul a0, a0, a3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 64
+; RV32-NEXT: add a6, a6, a2
+; RV32-NEXT: vl1re16.v v15, (a5)
+; RV32-NEXT: vl1re16.v v12, (a6)
+; RV32-NEXT: vl1re16.v v13, (a1)
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs2r.v v16, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vl8re16.v v16, (a2)
+; RV32-NEXT: vl8re16.v v8, (a0)
+; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
+; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a1, 28
+; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vmv2r.v v20, v16
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vmv2r.v v18, v12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 2
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 64
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: vmv2r.v v16, v8
+; RV64-NEXT: vmv2r.v v22, v16
+; RV64-NEXT: vmv2r.v v24, v18
+; RV64-NEXT: vmv1r.v v26, v20
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vmv1r.v v23, v10
+; RV64-NEXT: add a4, a1, a2
+; RV64-NEXT: add a5, a4, a2
+; RV64-NEXT: vmv1r.v v25, v14
+; RV64-NEXT: add a6, a5, a2
+; RV64-NEXT: vmv1r.v v18, v11
+; RV64-NEXT: vsseg5e16.v v22, (a0)
+; RV64-NEXT: vmv1r.v v20, v15
+; RV64-NEXT: vsseg5e16.v v17, (a1)
+; RV64-NEXT: vl1re16.v v16, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v17, (a6)
+; RV64-NEXT: add a6, a3, a2
+; RV64-NEXT: vl1re16.v v10, (a6)
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v11, (a6)
+; RV64-NEXT: vl1re16.v v8, (a0)
+; RV64-NEXT: vl1re16.v v9, (a3)
+; RV64-NEXT: vl1re16.v v14, (a4)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: li a3, 10
+; RV64-NEXT: mul a0, a0, a3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 64
+; RV64-NEXT: add a6, a6, a2
+; RV64-NEXT: vl1re16.v v15, (a5)
+; RV64-NEXT: vl1re16.v v12, (a6)
+; RV64-NEXT: vl1re16.v v13, (a1)
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: vs2r.v v16, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vl8re16.v v16, (a2)
+; RV64-NEXT: vl8re16.v v8, (a0)
+; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
+; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+;
+; ZVBB-RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZVBB-RV32: # %bb.0:
+; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
+; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
+; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a1, 28
+; ZVBB-RV32-NEXT: mul a0, a0, a1
+; ZVBB-RV32-NEXT: sub sp, sp, a0
+; ZVBB-RV32-NEXT: andi sp, sp, -64
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV32-NEXT: vmv2r.v v20, v16
+; ZVBB-RV32-NEXT: addi a0, sp, 64
+; ZVBB-RV32-NEXT: vmv2r.v v18, v12
+; ZVBB-RV32-NEXT: csrr a1, vlenb
+; ZVBB-RV32-NEXT: slli a2, a1, 2
+; ZVBB-RV32-NEXT: add a1, a2, a1
+; ZVBB-RV32-NEXT: add a1, sp, a1
+; ZVBB-RV32-NEXT: addi a1, a1, 64
+; ZVBB-RV32-NEXT: csrr a2, vlenb
+; ZVBB-RV32-NEXT: vmv2r.v v16, v8
+; ZVBB-RV32-NEXT: vmv2r.v v22, v16
+; ZVBB-RV32-NEXT: vmv2r.v v24, v18
+; ZVBB-RV32-NEXT: vmv1r.v v26, v20
+; ZVBB-RV32-NEXT: add a3, a0, a2
+; ZVBB-RV32-NEXT: vmv1r.v v23, v10
+; ZVBB-RV32-NEXT: add a4, a1, a2
+; ZVBB-RV32-NEXT: add a5, a4, a2
+; ZVBB-RV32-NEXT: vmv1r.v v25, v14
+; ZVBB-RV32-NEXT: add a6, a5, a2
+; ZVBB-RV32-NEXT: vmv1r.v v18, v11
+; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0)
+; ZVBB-RV32-NEXT: vmv1r.v v20, v15
+; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v16, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v17, (a6)
+; ZVBB-RV32-NEXT: add a6, a3, a2
+; ZVBB-RV32-NEXT: vl1re16.v v10, (a6)
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re16.v v14, (a4)
+; ZVBB-RV32-NEXT: csrr a0, vlenb
+; ZVBB-RV32-NEXT: li a3, 10
+; ZVBB-RV32-NEXT: mul a0, a0, a3
+; ZVBB-RV32-NEXT: add a0, sp, a0
+; ZVBB-RV32-NEXT: addi a0, a0, 64
+; ZVBB-RV32-NEXT: add a6, a6, a2
+; ZVBB-RV32-NEXT: vl1re16.v v15, (a5)
+; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v13, (a1)
+; ZVBB-RV32-NEXT: slli a2, a2, 3
+; ZVBB-RV32-NEXT: add a2, a0, a2
+; ZVBB-RV32-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
+; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
+; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
+; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-RV32-NEXT: ret
+;
+; ZVBB-RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZVBB-RV64: # %bb.0:
+; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
+; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
+; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a1, 28
+; ZVBB-RV64-NEXT: mul a0, a0, a1
+; ZVBB-RV64-NEXT: sub sp, sp, a0
+; ZVBB-RV64-NEXT: andi sp, sp, -64
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-RV64-NEXT: vmv2r.v v20, v16
+; ZVBB-RV64-NEXT: addi a0, sp, 64
+; ZVBB-RV64-NEXT: vmv2r.v v18, v12
+; ZVBB-RV64-NEXT: csrr a1, vlenb
+; ZVBB-RV64-NEXT: slli a2, a1, 2
+; ZVBB-RV64-NEXT: add a1, a2, a1
+; ZVBB-RV64-NEXT: add a1, sp, a1
+; ZVBB-RV64-NEXT: addi a1, a1, 64
+; ZVBB-RV64-NEXT: csrr a2, vlenb
+; ZVBB-RV64-NEXT: vmv2r.v v16, v8
+; ZVBB-RV64-NEXT: vmv2r.v v22, v16
+; ZVBB-RV64-NEXT: vmv2r.v v24, v18
+; ZVBB-RV64-NEXT: vmv1r.v v26, v20
+; ZVBB-RV64-NEXT: add a3, a0, a2
+; ZVBB-RV64-NEXT: vmv1r.v v23, v10
+; ZVBB-RV64-NEXT: add a4, a1, a2
+; ZVBB-RV64-NEXT: add a5, a4, a2
+; ZVBB-RV64-NEXT: vmv1r.v v25, v14
+; ZVBB-RV64-NEXT: add a6, a5, a2
+; ZVBB-RV64-NEXT: vmv1r.v v18, v11
+; ZVBB-RV64-NEXT: vsseg5e16.v v22, (a0)
+; ZVBB-RV64-NEXT: vmv1r.v v20, v15
+; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v16, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v17, (a6)
+; ZVBB-RV64-NEXT: add a6, a3, a2
+; ZVBB-RV64-NEXT: vl1re16.v v10, (a6)
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re16.v v14, (a4)
+; ZVBB-RV64-NEXT: csrr a0, vlenb
+; ZVBB-RV64-NEXT: li a3, 10
+; ZVBB-RV64-NEXT: mul a0, a0, a3
+; ZVBB-RV64-NEXT: add a0, sp, a0
+; ZVBB-RV64-NEXT: addi a0, a0, 64
+; ZVBB-RV64-NEXT: add a6, a6, a2
+; ZVBB-RV64-NEXT: vl1re16.v v15, (a5)
+; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v13, (a1)
+; ZVBB-RV64-NEXT: slli a2, a2, 3
+; ZVBB-RV64-NEXT: add a2, a0, a2
+; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
+; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
+; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
+; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
+; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv40f16_nxv8f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e16.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e16.v v17, (a1)
+; ZIP-NEXT: vl1re16.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re16.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v11, (a6)
+; ZIP-NEXT: vl1re16.v v8, (a0)
+; ZIP-NEXT: vl1re16.v v9, (a3)
+; ZIP-NEXT: vl1re16.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v15, (a5)
+; ZIP-NEXT: vl1re16.v v12, (a6)
+; ZIP-NEXT: vl1re16.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re16.v v16, (a2)
+; ZIP-NEXT: vl8re16.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
+; ZIP-NEXT: ret
+ %res = call <vscale x 40 x half> @llvm.vector.interleave5.nxv40f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4)
+ ret <vscale x 40 x half> %res
}
-
-define <vscale x 6 x i64> @vector_interleave_nxv6i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) nounwind {
-; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64:
+define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) {
+; CHECK-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsseg3e64.v v8, (a0)
-; CHECK-NEXT: vl2re64.v v8, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re64.v v10, (a0)
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vl2re64.v v12, (a0)
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: add a5, a4, a2
+; CHECK-NEXT: vle16.v v8, (a5)
+; CHECK-NEXT: vle16.v v9, (a4)
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a4, a1, a1
+; CHECK-NEXT: vle16.v v10, (a3)
+; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a1
+; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v10, a1
+; CHECK-NEXT: add a2, a5, a2
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v10, (a2)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
-; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 1
-; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma
-; ZVBB-NEXT: vsseg3e64.v v8, (a0)
-; ZVBB-NEXT: vl2re64.v v8, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re64.v v10, (a0)
-; ZVBB-NEXT: add a0, a0, a1
-; ZVBB-NEXT: vl2re64.v v12, (a0)
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg5e16.v v8, (a0)
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: vle16.v v8, (a5)
+; ZVBB-NEXT: vle16.v v9, (a4)
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a4, a1, a1
+; ZVBB-NEXT: vle16.v v10, (a3)
+; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v8, a1
+; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a4, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v10, a1
+; ZVBB-NEXT: add a2, a5, a2
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v10, (a2)
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 6
-; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
- %res = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
- ret <vscale x 6 x i64> %res
+ %res = call <vscale x 10 x bfloat> @llvm.vector.interleave5.nxv10bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4)
+ ret <vscale x 10 x bfloat> %res
}
-define <vscale x 80 x i1> @vector_interleave_nxv80i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e) nounwind {
-; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1:
+define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) {
+; CHECK-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 10
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v12, 0
-; CHECK-NEXT: addi a4, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vmerge.vim v14, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v18, v12, 1, v0
-; CHECK-NEXT: add a2, a4, a1
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: vmv2r.v v20, v14
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v21, v18
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v16, v19
-; CHECK-NEXT: add a5, a2, a1
-; CHECK-NEXT: vmv1r.v v23, v8
-; CHECK-NEXT: vmv1r.v v18, v9
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: vmerge.vim v24, v12, 1, v0
-; CHECK-NEXT: vsetvli a6, zero, e8, m1, ta, ma
-; CHECK-NEXT: vsseg5e8.v v20, (a4)
-; CHECK-NEXT: vmv1r.v v19, v25
-; CHECK-NEXT: vsseg5e8.v v15, (a0)
-; CHECK-NEXT: vl1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
-; CHECK-NEXT: vl1r.v v10, (a4)
-; CHECK-NEXT: add a4, a5, a1
-; CHECK-NEXT: vl1r.v v12, (a4)
-; CHECK-NEXT: add a4, a0, a1
-; CHECK-NEXT: vl1r.v v14, (a4)
-; CHECK-NEXT: add a4, a4, a1
-; CHECK-NEXT: vl1r.v v9, (a5)
-; CHECK-NEXT: add a5, a4, a1
-; CHECK-NEXT: vl1r.v v16, (a5)
-; CHECK-NEXT: add a5, a5, a1
-; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: vl1r.v v11, (a2)
-; CHECK-NEXT: add a2, a3, a3
-; CHECK-NEXT: vl1r.v v15, (a4)
-; CHECK-NEXT: add a4, a1, a1
-; CHECK-NEXT: vl1r.v v13, (a0)
-; CHECK-NEXT: vl1r.v v17, (a5)
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v18, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vmsne.vi v8, v14, 0
-; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v18, a3
-; CHECK-NEXT: vslideup.vx v9, v8, a3
-; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v9, a1
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v16, 0
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg5e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v11, (a3)
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v9, (a2)
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: vl1re16.v v12, (a1)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 10
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 10
-; ZVBB-NEXT: mul a0, a0, a1
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmv.v.i v12, 0
-; ZVBB-NEXT: addi a4, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: vmerge.vim v14, v12, 1, v0
-; ZVBB-NEXT: vmv1r.v v0, v8
-; ZVBB-NEXT: vmerge.vim v18, v12, 1, v0
-; ZVBB-NEXT: add a2, a4, a1
-; ZVBB-NEXT: srli a3, a1, 2
-; ZVBB-NEXT: vmv2r.v v20, v14
-; ZVBB-NEXT: vmv1r.v v0, v9
-; ZVBB-NEXT: vmerge.vim v16, v12, 1, v0
-; ZVBB-NEXT: vmv1r.v v21, v18
-; ZVBB-NEXT: vmv1r.v v0, v10
-; ZVBB-NEXT: vmerge.vim v8, v12, 1, v0
-; ZVBB-NEXT: vmv1r.v v22, v16
-; ZVBB-NEXT: vmv1r.v v16, v19
-; ZVBB-NEXT: add a5, a2, a1
-; ZVBB-NEXT: vmv1r.v v23, v8
-; ZVBB-NEXT: vmv1r.v v18, v9
-; ZVBB-NEXT: vmv1r.v v0, v11
-; ZVBB-NEXT: vmerge.vim v24, v12, 1, v0
-; ZVBB-NEXT: vsetvli a6, zero, e8, m1, ta, ma
-; ZVBB-NEXT: vsseg5e8.v v20, (a4)
-; ZVBB-NEXT: vmv1r.v v19, v25
-; ZVBB-NEXT: vsseg5e8.v v15, (a0)
-; ZVBB-NEXT: vl1r.v v8, (a5)
-; ZVBB-NEXT: add a5, a5, a1
-; ZVBB-NEXT: vl1r.v v10, (a4)
-; ZVBB-NEXT: add a4, a5, a1
-; ZVBB-NEXT: vl1r.v v12, (a4)
-; ZVBB-NEXT: add a4, a0, a1
-; ZVBB-NEXT: vl1r.v v14, (a4)
-; ZVBB-NEXT: add a4, a4, a1
-; ZVBB-NEXT: vl1r.v v9, (a5)
-; ZVBB-NEXT: add a5, a4, a1
-; ZVBB-NEXT: vl1r.v v16, (a5)
-; ZVBB-NEXT: add a5, a5, a1
-; ZVBB-NEXT: srli a1, a1, 1
-; ZVBB-NEXT: vl1r.v v11, (a2)
-; ZVBB-NEXT: add a2, a3, a3
-; ZVBB-NEXT: vl1r.v v15, (a4)
-; ZVBB-NEXT: add a4, a1, a1
-; ZVBB-NEXT: vl1r.v v13, (a0)
-; ZVBB-NEXT: vl1r.v v17, (a5)
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmsne.vi v18, v8, 0
-; ZVBB-NEXT: vmsne.vi v0, v10, 0
-; ZVBB-NEXT: vmsne.vi v8, v14, 0
-; ZVBB-NEXT: vmsne.vi v9, v12, 0
-; ZVBB-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v18, a3
-; ZVBB-NEXT: vslideup.vx v9, v8, a3
-; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v9, a1
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmsne.vi v8, v16, 0
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg5e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v11, (a3)
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v9, (a2)
+; ZVBB-NEXT: add a1, a3, a1
+; ZVBB-NEXT: vl1re16.v v12, (a1)
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 10
-; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
- %res = call <vscale x 80 x i1> @llvm.vector.interleave5.nxv80i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e)
- ret <vscale x 80 x i1> %res
+ %res = call <vscale x 20 x bfloat> @llvm.vector.interleave5.nxv20bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4)
+ ret <vscale x 20 x bfloat> %res
}
-
-define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) {
+; RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vmv2r.v v20, v16
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv2r.v v18, v12
@@ -1483,52 +5750,60 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; RV32-NEXT: vmv1r.v v25, v14
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vmv1r.v v18, v11
-; RV32-NEXT: vsseg5e8.v v22, (a0)
+; RV32-NEXT: vsseg5e16.v v22, (a0)
; RV32-NEXT: vmv1r.v v20, v15
-; RV32-NEXT: vsseg5e8.v v17, (a1)
-; RV32-NEXT: vl1r.v v16, (a6)
+; RV32-NEXT: vsseg5e16.v v17, (a1)
+; RV32-NEXT: vl1re16.v v16, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v17, (a6)
+; RV32-NEXT: vl1re16.v v17, (a6)
; RV32-NEXT: add a6, a3, a2
-; RV32-NEXT: vl1r.v v10, (a6)
+; RV32-NEXT: vl1re16.v v10, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v11, (a6)
-; RV32-NEXT: vl1r.v v8, (a0)
-; RV32-NEXT: vl1r.v v9, (a3)
-; RV32-NEXT: vl1r.v v14, (a4)
+; RV32-NEXT: vl1re16.v v11, (a6)
+; RV32-NEXT: vl1re16.v v8, (a0)
+; RV32-NEXT: vl1re16.v v9, (a3)
+; RV32-NEXT: vl1re16.v v14, (a4)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a3, 10
; RV32-NEXT: mul a0, a0, a3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 64
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v15, (a5)
-; RV32-NEXT: vl1r.v v12, (a6)
-; RV32-NEXT: vl1r.v v13, (a1)
+; RV32-NEXT: vl1re16.v v15, (a5)
+; RV32-NEXT: vl1re16.v v12, (a6)
+; RV32-NEXT: vl1re16.v v13, (a1)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a0, a2
; RV32-NEXT: vs2r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a0)
-; RV32-NEXT: vl8r.v v16, (a2)
-; RV32-NEXT: vl8r.v v8, (a0)
+; RV32-NEXT: vl8re16.v v16, (a2)
+; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vmv2r.v v20, v16
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv2r.v v18, v12
@@ -1549,52 +5824,60 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; RV64-NEXT: vmv1r.v v25, v14
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vmv1r.v v18, v11
-; RV64-NEXT: vsseg5e8.v v22, (a0)
+; RV64-NEXT: vsseg5e16.v v22, (a0)
; RV64-NEXT: vmv1r.v v20, v15
-; RV64-NEXT: vsseg5e8.v v17, (a1)
-; RV64-NEXT: vl1r.v v16, (a6)
+; RV64-NEXT: vsseg5e16.v v17, (a1)
+; RV64-NEXT: vl1re16.v v16, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v17, (a6)
+; RV64-NEXT: vl1re16.v v17, (a6)
; RV64-NEXT: add a6, a3, a2
-; RV64-NEXT: vl1r.v v10, (a6)
+; RV64-NEXT: vl1re16.v v10, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v11, (a6)
-; RV64-NEXT: vl1r.v v8, (a0)
-; RV64-NEXT: vl1r.v v9, (a3)
-; RV64-NEXT: vl1r.v v14, (a4)
+; RV64-NEXT: vl1re16.v v11, (a6)
+; RV64-NEXT: vl1re16.v v8, (a0)
+; RV64-NEXT: vl1re16.v v9, (a3)
+; RV64-NEXT: vl1re16.v v14, (a4)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a3, 10
; RV64-NEXT: mul a0, a0, a3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 64
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v15, (a5)
-; RV64-NEXT: vl1r.v v12, (a6)
-; RV64-NEXT: vl1r.v v13, (a1)
+; RV64-NEXT: vl1re16.v v15, (a5)
+; RV64-NEXT: vl1re16.v v12, (a6)
+; RV64-NEXT: vl1re16.v v13, (a1)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a0, a2
; RV64-NEXT: vs2r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a0)
-; RV64-NEXT: vl8r.v v16, (a2)
-; RV64-NEXT: vl8r.v v8, (a0)
+; RV64-NEXT: vl8re16.v v16, (a2)
+; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
-; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v20, v16
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv2r.v v18, v12
@@ -1615,52 +5898,60 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; ZVBB-RV32-NEXT: vmv1r.v v25, v14
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vmv1r.v v18, v11
-; ZVBB-RV32-NEXT: vsseg5e8.v v22, (a0)
+; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0)
; ZVBB-RV32-NEXT: vmv1r.v v20, v15
-; ZVBB-RV32-NEXT: vsseg5e8.v v17, (a1)
-; ZVBB-RV32-NEXT: vl1r.v v16, (a6)
+; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v17, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v17, (a6)
; ZVBB-RV32-NEXT: add a6, a3, a2
-; ZVBB-RV32-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v10, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v11, (a6)
-; ZVBB-RV32-NEXT: vl1r.v v8, (a0)
-; ZVBB-RV32-NEXT: vl1r.v v9, (a3)
-; ZVBB-RV32-NEXT: vl1r.v v14, (a4)
+; ZVBB-RV32-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re16.v v14, (a4)
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a3, 10
; ZVBB-RV32-NEXT: mul a0, a0, a3
; ZVBB-RV32-NEXT: add a0, sp, a0
; ZVBB-RV32-NEXT: addi a0, a0, 64
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v15, (a5)
-; ZVBB-RV32-NEXT: vl1r.v v12, (a6)
-; ZVBB-RV32-NEXT: vl1r.v v13, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v15, (a5)
+; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v13, (a1)
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a0, a2
; ZVBB-RV32-NEXT: vs2r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
-; ZVBB-RV32-NEXT: vl8r.v v16, (a2)
-; ZVBB-RV32-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZVBB-RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
-; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v20, v16
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv2r.v v18, v12
@@ -1681,52 +5972,60 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; ZVBB-RV64-NEXT: vmv1r.v v25, v14
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vmv1r.v v18, v11
-; ZVBB-RV64-NEXT: vsseg5e8.v v22, (a0)
+; ZVBB-RV64-NEXT: vsseg5e16.v v22, (a0)
; ZVBB-RV64-NEXT: vmv1r.v v20, v15
-; ZVBB-RV64-NEXT: vsseg5e8.v v17, (a1)
-; ZVBB-RV64-NEXT: vl1r.v v16, (a6)
+; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v17, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v17, (a6)
; ZVBB-RV64-NEXT: add a6, a3, a2
-; ZVBB-RV64-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v10, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v11, (a6)
-; ZVBB-RV64-NEXT: vl1r.v v8, (a0)
-; ZVBB-RV64-NEXT: vl1r.v v9, (a3)
-; ZVBB-RV64-NEXT: vl1r.v v14, (a4)
+; ZVBB-RV64-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re16.v v14, (a4)
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a3, 10
; ZVBB-RV64-NEXT: mul a0, a0, a3
; ZVBB-RV64-NEXT: add a0, sp, a0
; ZVBB-RV64-NEXT: addi a0, a0, 64
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v15, (a5)
-; ZVBB-RV64-NEXT: vl1r.v v12, (a6)
-; ZVBB-RV64-NEXT: vl1r.v v13, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v15, (a5)
+; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v13, (a1)
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a0, a2
; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
-; ZVBB-RV64-NEXT: vl8r.v v16, (a2)
-; ZVBB-RV64-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZIP-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
-; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v16
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv2r.v v18, v12
@@ -1747,111 +6046,208 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; ZIP-NEXT: vmv1r.v v25, v14
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vmv1r.v v18, v11
-; ZIP-NEXT: vsseg5e8.v v22, (a0)
+; ZIP-NEXT: vsseg5e16.v v22, (a0)
; ZIP-NEXT: vmv1r.v v20, v15
-; ZIP-NEXT: vsseg5e8.v v17, (a1)
-; ZIP-NEXT: vl1r.v v16, (a6)
+; ZIP-NEXT: vsseg5e16.v v17, (a1)
+; ZIP-NEXT: vl1re16.v v16, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v17, (a6)
+; ZIP-NEXT: vl1re16.v v17, (a6)
; ZIP-NEXT: add a6, a3, a2
-; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: vl1re16.v v10, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v11, (a6)
-; ZIP-NEXT: vl1r.v v8, (a0)
-; ZIP-NEXT: vl1r.v v9, (a3)
-; ZIP-NEXT: vl1r.v v14, (a4)
+; ZIP-NEXT: vl1re16.v v11, (a6)
+; ZIP-NEXT: vl1re16.v v8, (a0)
+; ZIP-NEXT: vl1re16.v v9, (a3)
+; ZIP-NEXT: vl1re16.v v14, (a4)
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a3, 10
; ZIP-NEXT: mul a0, a0, a3
; ZIP-NEXT: add a0, sp, a0
; ZIP-NEXT: addi a0, a0, 64
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v15, (a5)
-; ZIP-NEXT: vl1r.v v12, (a6)
-; ZIP-NEXT: vl1r.v v13, (a1)
+; ZIP-NEXT: vl1re16.v v15, (a5)
+; ZIP-NEXT: vl1re16.v v12, (a6)
+; ZIP-NEXT: vl1re16.v v13, (a1)
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a0, a2
; ZIP-NEXT: vs2r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a0)
-; ZIP-NEXT: vl8r.v v16, (a2)
-; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: vl8re16.v v16, (a2)
+; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
- ret <vscale x 80 x i8> %res
+ %res = call <vscale x 40 x bfloat> @llvm.vector.interleave5.nxv40bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4)
+ ret <vscale x 40 x bfloat> %res
}
+define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) {
+; CHECK-LABEL: vector_interleave_nxv5f32_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsseg5e32.v v8, (a0)
+; CHECK-NEXT: add a5, a4, a2
+; CHECK-NEXT: vle32.v v8, (a5)
+; CHECK-NEXT: vle32.v v9, (a4)
+; CHECK-NEXT: srli a1, a1, 3
+; CHECK-NEXT: add a4, a1, a1
+; CHECK-NEXT: vle32.v v10, (a3)
+; CHECK-NEXT: vsetvli zero, a4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a1
+; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v10, a1
+; CHECK-NEXT: add a2, a5, a2
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v10, (a2)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 1
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv5f32_nxv1f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vsseg5e32.v v8, (a0)
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: vle32.v v8, (a5)
+; ZVBB-NEXT: vle32.v v9, (a4)
+; ZVBB-NEXT: srli a1, a1, 3
+; ZVBB-NEXT: add a4, a1, a1
+; ZVBB-NEXT: vle32.v v10, (a3)
+; ZVBB-NEXT: vsetvli zero, a4, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v8, a1
+; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vle32.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a4, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v10, a1
+; ZVBB-NEXT: add a2, a5, a2
+; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vle32.v v10, (a2)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 1
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 5 x float> @llvm.vector.interleave5.nxv5f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4)
+ ret <vscale x 5 x float> %res
+}
-define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) nounwind {
-; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8:
+define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) {
+; CHECK-LABEL: vector_interleave_nxv10f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: add a3, a2, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; CHECK-NEXT: vsseg5e8.v v8, (a0)
-; CHECK-NEXT: vl1r.v v10, (a3)
+; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsseg5e32.v v8, (a0)
+; CHECK-NEXT: vl1re32.v v10, (a3)
; CHECK-NEXT: add a3, a3, a1
-; CHECK-NEXT: vl1r.v v11, (a3)
-; CHECK-NEXT: vl1r.v v8, (a0)
-; CHECK-NEXT: vl1r.v v9, (a2)
+; CHECK-NEXT: vl1re32.v v11, (a3)
+; CHECK-NEXT: vl1re32.v v8, (a0)
+; CHECK-NEXT: vl1re32.v v9, (a2)
; CHECK-NEXT: add a1, a3, a1
-; CHECK-NEXT: vl1r.v v12, (a1)
+; CHECK-NEXT: vl1re32.v v12, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8:
+; ZVBB-LABEL: vector_interleave_nxv10f32_nxv2f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
; ZVBB-NEXT: add a3, a2, a1
-; ZVBB-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; ZVBB-NEXT: vsseg5e8.v v8, (a0)
-; ZVBB-NEXT: vl1r.v v10, (a3)
+; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma
+; ZVBB-NEXT: vsseg5e32.v v8, (a0)
+; ZVBB-NEXT: vl1re32.v v10, (a3)
; ZVBB-NEXT: add a3, a3, a1
-; ZVBB-NEXT: vl1r.v v11, (a3)
-; ZVBB-NEXT: vl1r.v v8, (a0)
-; ZVBB-NEXT: vl1r.v v9, (a2)
+; ZVBB-NEXT: vl1re32.v v11, (a3)
+; ZVBB-NEXT: vl1re32.v v8, (a0)
+; ZVBB-NEXT: vl1re32.v v9, (a2)
; ZVBB-NEXT: add a1, a3, a1
-; ZVBB-NEXT: vl1r.v v12, (a1)
+; ZVBB-NEXT: vl1re32.v v12, (a1)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
- %res = call <vscale x 40 x i8> @llvm.vector.interleave5.nxv40i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e)
- ret <vscale x 40 x i8> %res
+ %res = call <vscale x 10 x float> @llvm.vector.interleave5.nxv10f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4)
+ ret <vscale x 10 x float> %res
}
-
-define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) {
+; RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -1907,17 +6303,25 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; RV32-NEXT: vl8re32.v v16, (a2)
; RV32-NEXT: vl8re32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -1973,17 +6377,25 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; RV64-NEXT: vl8re32.v v16, (a2)
; RV64-NEXT: vl8re32.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -2039,17 +6451,25 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZVBB-RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -2105,17 +6525,25 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZIP-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -2171,23 +6599,95 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; ZIP-NEXT: vl8re32.v v16, (a2)
; ZIP-NEXT: vl8re32.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e)
- ret <vscale x 20 x i32> %res
+ %res = call <vscale x 20 x float> @llvm.vector.interleave5.nxv20f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4)
+ ret <vscale x 20 x float> %res
}
-
-define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e) nounwind {
+define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) {
+; CHECK-LABEL: vector_interleave_nxv5f64_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma
+; CHECK-NEXT: vsseg5e64.v v8, (a0)
+; CHECK-NEXT: vl1re64.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re64.v v11, (a3)
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: vl1re64.v v9, (a2)
+; CHECK-NEXT: add a1, a3, a1
+; CHECK-NEXT: vl1re64.v v12, (a1)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 2
+; CHECK-NEXT: add a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
;
-; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv5f64_nxv1f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma
+; ZVBB-NEXT: vsseg5e64.v v8, (a0)
+; ZVBB-NEXT: vl1re64.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re64.v v11, (a3)
+; ZVBB-NEXT: vl1re64.v v8, (a0)
+; ZVBB-NEXT: vl1re64.v v9, (a2)
+; ZVBB-NEXT: add a1, a3, a1
+; ZVBB-NEXT: vl1re64.v v12, (a1)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 2
+; ZVBB-NEXT: add a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 5 x double> @llvm.vector.interleave5.nxv5f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4)
+ ret <vscale x 5 x double> %res
+}
+
+define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4) {
+; RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -2243,17 +6743,25 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; RV32-NEXT: vl8re64.v v16, (a2)
; RV32-NEXT: vl8re64.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -2309,17 +6817,25 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; RV64-NEXT: vl8re64.v v16, (a2)
; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -2375,17 +6891,25 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZVBB-RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -2441,17 +6965,25 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZIP-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -2507,220 +7039,200 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; ZIP-NEXT: vl8re64.v v16, (a2)
; ZIP-NEXT: vl8re64.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e)
- ret <vscale x 10 x i64> %res
+ %res = call <vscale x 10 x double> @llvm.vector.interleave5.nxv10f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4)
+ ret <vscale x 10 x double> %res
}
-define <vscale x 112 x i1> @vector_interleave_nxv112i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g) nounwind {
-; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1:
+define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) {
+; CHECK-LABEL: vector_interleave_nxv14f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 14
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v14, 0
-; CHECK-NEXT: addi a4, sp, 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a1, a0, 3
-; CHECK-NEXT: sub a0, a1, a0
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vmerge.vim v16, v14, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v22, v14, 1, v0
-; CHECK-NEXT: add a3, a4, a2
-; CHECK-NEXT: srli a1, a2, 2
-; CHECK-NEXT: add a5, a0, a2
-; CHECK-NEXT: vmv4r.v v24, v16
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v18, v14, 1, v0
-; CHECK-NEXT: add a6, a3, a2
-; CHECK-NEXT: vmv1r.v v25, v22
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vim v8, v14, 1, v0
-; CHECK-NEXT: vmv1r.v v26, v18
-; CHECK-NEXT: vmv1r.v v0, v11
-; CHECK-NEXT: vmerge.vim v20, v14, 1, v0
-; CHECK-NEXT: vmv1r.v v27, v8
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vmerge.vim v10, v14, 1, v0
-; CHECK-NEXT: vmv1r.v v28, v20
-; CHECK-NEXT: vmv1r.v v18, v23
-; CHECK-NEXT: add a7, a6, a2
-; CHECK-NEXT: vmv1r.v v29, v10
-; CHECK-NEXT: vmv1r.v v20, v9
-; CHECK-NEXT: vmv1r.v v0, v13
-; CHECK-NEXT: vmerge.vim v30, v14, 1, v0
-; CHECK-NEXT: vmv1r.v v22, v11
-; CHECK-NEXT: vsetvli t0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vsseg7e8.v v24, (a4)
-; CHECK-NEXT: vmv1r.v v23, v31
-; CHECK-NEXT: vsseg7e8.v v17, (a0)
-; CHECK-NEXT: vl1r.v v8, (a6)
-; CHECK-NEXT: add a6, a7, a2
-; CHECK-NEXT: vl1r.v v10, (a4)
-; CHECK-NEXT: add a4, a6, a2
-; CHECK-NEXT: vl1r.v v12, (a6)
-; CHECK-NEXT: add a6, a4, a2
-; CHECK-NEXT: vl1r.v v14, (a6)
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: add a5, a4, a2
; CHECK-NEXT: add a6, a5, a2
-; CHECK-NEXT: vl1r.v v16, (a5)
-; CHECK-NEXT: add a5, a6, a2
-; CHECK-NEXT: vl1r.v v18, (a5)
-; CHECK-NEXT: add a5, a5, a2
-; CHECK-NEXT: vl1r.v v9, (a7)
-; CHECK-NEXT: add a7, a5, a2
-; CHECK-NEXT: vl1r.v v20, (a7)
-; CHECK-NEXT: add a7, a7, a2
-; CHECK-NEXT: srli a2, a2, 1
-; CHECK-NEXT: vl1r.v v11, (a3)
-; CHECK-NEXT: add a3, a1, a1
-; CHECK-NEXT: vl1r.v v13, (a4)
-; CHECK-NEXT: add a4, a2, a2
-; CHECK-NEXT: vl1r.v v15, (a0)
-; CHECK-NEXT: vl1r.v v19, (a5)
-; CHECK-NEXT: vl1r.v v17, (a6)
-; CHECK-NEXT: vl1r.v v21, (a7)
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmsne.vi v22, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vmsne.vi v10, v14, 0
-; CHECK-NEXT: vmsne.vi v11, v18, 0
-; CHECK-NEXT: vmsne.vi v8, v16, 0
-; CHECK-NEXT: vmsne.vi v12, v20, 0
-; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v22, a1
-; CHECK-NEXT: vslideup.vx v9, v10, a1
-; CHECK-NEXT: vslideup.vx v8, v11, a1
-; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v9, a2
-; CHECK-NEXT: vslideup.vx v8, v12, a2
+; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg7e16.v v8, (a0)
+; CHECK-NEXT: add a7, a6, a2
+; CHECK-NEXT: vle16.v v8, (a7)
+; CHECK-NEXT: vle16.v v10, (a6)
+; CHECK-NEXT: add a6, a1, a1
+; CHECK-NEXT: add a2, a7, a2
+; CHECK-NEXT: vle16.v v12, (a5)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v10, v8, a1
+; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v11, (a2)
+; CHECK-NEXT: vle16.v v9, (a4)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a3)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v12, a1
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv14f16_nxv2f16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: add a6, a5, a2
+; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg7e16.v v8, (a0)
+; ZVBB-NEXT: add a7, a6, a2
+; ZVBB-NEXT: vle16.v v8, (a7)
+; ZVBB-NEXT: vle16.v v10, (a6)
+; ZVBB-NEXT: add a6, a1, a1
+; ZVBB-NEXT: add a2, a7, a2
+; ZVBB-NEXT: vle16.v v12, (a5)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v10, v8, a1
+; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v11, (a2)
+; ZVBB-NEXT: vle16.v v9, (a4)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v12, a1
+; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v12, (a3)
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v12, a1
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 14 x half> @llvm.vector.interleave7.nxv14f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6)
+ ret <vscale x 14 x half> %res
+}
+
+define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) {
+; CHECK-LABEL: vector_interleave_nxv28f16_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 14
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg7e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v11, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: add a0, a3, a1
+; CHECK-NEXT: vl1re16.v v9, (a2)
+; CHECK-NEXT: vl1re16.v v12, (a3)
+; CHECK-NEXT: vl1re16.v v13, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v14, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
-; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1:
+; ZVBB-LABEL: vector_interleave_nxv28f16_nxv4f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 14
-; ZVBB-NEXT: mul a0, a0, a1
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmv.v.i v14, 0
-; ZVBB-NEXT: addi a4, sp, 16
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg7e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v11, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a3, a1
+; ZVBB-NEXT: vl1re16.v v9, (a2)
+; ZVBB-NEXT: vl1re16.v v12, (a3)
+; ZVBB-NEXT: vl1re16.v v13, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v14, (a0)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: csrr a2, vlenb
-; ZVBB-NEXT: vmerge.vim v16, v14, 1, v0
-; ZVBB-NEXT: vmv1r.v v0, v8
-; ZVBB-NEXT: vmerge.vim v22, v14, 1, v0
-; ZVBB-NEXT: add a3, a4, a2
-; ZVBB-NEXT: srli a1, a2, 2
-; ZVBB-NEXT: add a5, a0, a2
-; ZVBB-NEXT: vmv4r.v v24, v16
-; ZVBB-NEXT: vmv1r.v v0, v9
-; ZVBB-NEXT: vmerge.vim v18, v14, 1, v0
-; ZVBB-NEXT: add a6, a3, a2
-; ZVBB-NEXT: vmv1r.v v25, v22
-; ZVBB-NEXT: vmv1r.v v0, v10
-; ZVBB-NEXT: vmerge.vim v8, v14, 1, v0
-; ZVBB-NEXT: vmv1r.v v26, v18
-; ZVBB-NEXT: vmv1r.v v0, v11
-; ZVBB-NEXT: vmerge.vim v20, v14, 1, v0
-; ZVBB-NEXT: vmv1r.v v27, v8
-; ZVBB-NEXT: vmv1r.v v0, v12
-; ZVBB-NEXT: vmerge.vim v10, v14, 1, v0
-; ZVBB-NEXT: vmv1r.v v28, v20
-; ZVBB-NEXT: vmv1r.v v18, v23
-; ZVBB-NEXT: add a7, a6, a2
-; ZVBB-NEXT: vmv1r.v v29, v10
-; ZVBB-NEXT: vmv1r.v v20, v9
-; ZVBB-NEXT: vmv1r.v v0, v13
-; ZVBB-NEXT: vmerge.vim v30, v14, 1, v0
-; ZVBB-NEXT: vmv1r.v v22, v11
-; ZVBB-NEXT: vsetvli t0, zero, e8, m1, ta, ma
-; ZVBB-NEXT: vsseg7e8.v v24, (a4)
-; ZVBB-NEXT: vmv1r.v v23, v31
-; ZVBB-NEXT: vsseg7e8.v v17, (a0)
-; ZVBB-NEXT: vl1r.v v8, (a6)
-; ZVBB-NEXT: add a6, a7, a2
-; ZVBB-NEXT: vl1r.v v10, (a4)
-; ZVBB-NEXT: add a4, a6, a2
-; ZVBB-NEXT: vl1r.v v12, (a6)
-; ZVBB-NEXT: add a6, a4, a2
-; ZVBB-NEXT: vl1r.v v14, (a6)
-; ZVBB-NEXT: add a6, a5, a2
-; ZVBB-NEXT: vl1r.v v16, (a5)
-; ZVBB-NEXT: add a5, a6, a2
-; ZVBB-NEXT: vl1r.v v18, (a5)
-; ZVBB-NEXT: add a5, a5, a2
-; ZVBB-NEXT: vl1r.v v9, (a7)
-; ZVBB-NEXT: add a7, a5, a2
-; ZVBB-NEXT: vl1r.v v20, (a7)
-; ZVBB-NEXT: add a7, a7, a2
-; ZVBB-NEXT: srli a2, a2, 1
-; ZVBB-NEXT: vl1r.v v11, (a3)
-; ZVBB-NEXT: add a3, a1, a1
-; ZVBB-NEXT: vl1r.v v13, (a4)
-; ZVBB-NEXT: add a4, a2, a2
-; ZVBB-NEXT: vl1r.v v15, (a0)
-; ZVBB-NEXT: vl1r.v v19, (a5)
-; ZVBB-NEXT: vl1r.v v17, (a6)
-; ZVBB-NEXT: vl1r.v v21, (a7)
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; ZVBB-NEXT: vmsne.vi v22, v8, 0
-; ZVBB-NEXT: vmsne.vi v0, v10, 0
-; ZVBB-NEXT: vmsne.vi v9, v12, 0
-; ZVBB-NEXT: vmsne.vi v10, v14, 0
-; ZVBB-NEXT: vmsne.vi v11, v18, 0
-; ZVBB-NEXT: vmsne.vi v8, v16, 0
-; ZVBB-NEXT: vmsne.vi v12, v20, 0
-; ZVBB-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v22, a1
-; ZVBB-NEXT: vslideup.vx v9, v10, a1
-; ZVBB-NEXT: vslideup.vx v8, v11, a1
-; ZVBB-NEXT: vsetvli zero, a4, e8, m1, ta, ma
-; ZVBB-NEXT: vslideup.vx v0, v9, a2
-; ZVBB-NEXT: vslideup.vx v8, v12, a2
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: li a1, 14
-; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
- %res = call <vscale x 112 x i1> @llvm.vector.interleave7.nxv112i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g)
- ret <vscale x 112 x i1> %res
+ %res = call <vscale x 28 x half> @llvm.vector.interleave7.nxv28f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6)
+ ret <vscale x 28 x half> %res
}
-
-define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) nounwind {
-;
-; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) {
+; RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vmv2r.v v26, v20
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv2r.v v24, v16
@@ -2748,61 +7260,69 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; RV32-NEXT: vmv1r.v v22, v11
; RV32-NEXT: add a6, a7, a2
; RV32-NEXT: vmv1r.v v24, v15
-; RV32-NEXT: vsseg7e8.v v1, (a0)
+; RV32-NEXT: vsseg7e16.v v1, (a0)
; RV32-NEXT: vmv1r.v v26, v19
-; RV32-NEXT: vsseg7e8.v v21, (a1)
-; RV32-NEXT: vl1r.v v18, (a6)
+; RV32-NEXT: vsseg7e16.v v21, (a1)
+; RV32-NEXT: vl1re16.v v18, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v19, (a6)
+; RV32-NEXT: vl1re16.v v19, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v20, (a6)
+; RV32-NEXT: vl1re16.v v20, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v21, (a6)
+; RV32-NEXT: vl1re16.v v21, (a6)
; RV32-NEXT: add a6, a3, a2
-; RV32-NEXT: vl1r.v v10, (a6)
+; RV32-NEXT: vl1re16.v v10, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v11, (a6)
-; RV32-NEXT: vl1r.v v8, (a0)
-; RV32-NEXT: vl1r.v v16, (a4)
-; RV32-NEXT: vl1r.v v9, (a3)
-; RV32-NEXT: vl1r.v v17, (a7)
+; RV32-NEXT: vl1re16.v v11, (a6)
+; RV32-NEXT: vl1re16.v v8, (a0)
+; RV32-NEXT: vl1re16.v v16, (a4)
+; RV32-NEXT: vl1re16.v v9, (a3)
+; RV32-NEXT: vl1re16.v v17, (a7)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a3, 14
; RV32-NEXT: mul a0, a0, a3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 64
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v12, (a6)
+; RV32-NEXT: vl1re16.v v12, (a6)
; RV32-NEXT: add a6, a6, a2
-; RV32-NEXT: vl1r.v v13, (a6)
+; RV32-NEXT: vl1re16.v v13, (a6)
; RV32-NEXT: add a6, a6, a2
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a0, a2
-; RV32-NEXT: vl1r.v v14, (a6)
-; RV32-NEXT: vl1r.v v15, (a1)
+; RV32-NEXT: vl1re16.v v14, (a6)
+; RV32-NEXT: vl1re16.v v15, (a1)
; RV32-NEXT: add a5, a0, a5
; RV32-NEXT: vs2r.v v20, (a5)
; RV32-NEXT: vs4r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a0)
-; RV32-NEXT: vl8r.v v16, (a2)
-; RV32-NEXT: vl8r.v v8, (a0)
+; RV32-NEXT: vl8re16.v v16, (a2)
+; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vmv2r.v v26, v20
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv2r.v v24, v16
@@ -2830,61 +7350,69 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; RV64-NEXT: vmv1r.v v22, v11
; RV64-NEXT: add a6, a7, a2
; RV64-NEXT: vmv1r.v v24, v15
-; RV64-NEXT: vsseg7e8.v v1, (a0)
+; RV64-NEXT: vsseg7e16.v v1, (a0)
; RV64-NEXT: vmv1r.v v26, v19
-; RV64-NEXT: vsseg7e8.v v21, (a1)
-; RV64-NEXT: vl1r.v v18, (a6)
+; RV64-NEXT: vsseg7e16.v v21, (a1)
+; RV64-NEXT: vl1re16.v v18, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v19, (a6)
+; RV64-NEXT: vl1re16.v v19, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v20, (a6)
+; RV64-NEXT: vl1re16.v v20, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v21, (a6)
+; RV64-NEXT: vl1re16.v v21, (a6)
; RV64-NEXT: add a6, a3, a2
-; RV64-NEXT: vl1r.v v10, (a6)
+; RV64-NEXT: vl1re16.v v10, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v11, (a6)
-; RV64-NEXT: vl1r.v v8, (a0)
-; RV64-NEXT: vl1r.v v16, (a4)
-; RV64-NEXT: vl1r.v v9, (a3)
-; RV64-NEXT: vl1r.v v17, (a7)
+; RV64-NEXT: vl1re16.v v11, (a6)
+; RV64-NEXT: vl1re16.v v8, (a0)
+; RV64-NEXT: vl1re16.v v16, (a4)
+; RV64-NEXT: vl1re16.v v9, (a3)
+; RV64-NEXT: vl1re16.v v17, (a7)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a3, 14
; RV64-NEXT: mul a0, a0, a3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 64
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v12, (a6)
+; RV64-NEXT: vl1re16.v v12, (a6)
; RV64-NEXT: add a6, a6, a2
-; RV64-NEXT: vl1r.v v13, (a6)
+; RV64-NEXT: vl1re16.v v13, (a6)
; RV64-NEXT: add a6, a6, a2
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a0, a2
-; RV64-NEXT: vl1r.v v14, (a6)
-; RV64-NEXT: vl1r.v v15, (a1)
+; RV64-NEXT: vl1re16.v v14, (a6)
+; RV64-NEXT: vl1re16.v v15, (a1)
; RV64-NEXT: add a5, a0, a5
; RV64-NEXT: vs2r.v v20, (a5)
; RV64-NEXT: vs4r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a0)
-; RV64-NEXT: vl8r.v v16, (a2)
-; RV64-NEXT: vl8r.v v8, (a0)
+; RV64-NEXT: vl8re16.v v16, (a2)
+; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
-; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v26, v20
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv2r.v v24, v16
@@ -2912,61 +7440,69 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; ZVBB-RV32-NEXT: vmv1r.v v22, v11
; ZVBB-RV32-NEXT: add a6, a7, a2
; ZVBB-RV32-NEXT: vmv1r.v v24, v15
-; ZVBB-RV32-NEXT: vsseg7e8.v v1, (a0)
+; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0)
; ZVBB-RV32-NEXT: vmv1r.v v26, v19
-; ZVBB-RV32-NEXT: vsseg7e8.v v21, (a1)
-; ZVBB-RV32-NEXT: vl1r.v v18, (a6)
+; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v18, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v19, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v19, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v20, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v20, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v21, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v21, (a6)
; ZVBB-RV32-NEXT: add a6, a3, a2
-; ZVBB-RV32-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v10, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v11, (a6)
-; ZVBB-RV32-NEXT: vl1r.v v8, (a0)
-; ZVBB-RV32-NEXT: vl1r.v v16, (a4)
-; ZVBB-RV32-NEXT: vl1r.v v9, (a3)
-; ZVBB-RV32-NEXT: vl1r.v v17, (a7)
+; ZVBB-RV32-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV32-NEXT: vl1re16.v v16, (a4)
+; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV32-NEXT: vl1re16.v v17, (a7)
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a3, 14
; ZVBB-RV32-NEXT: mul a0, a0, a3
; ZVBB-RV32-NEXT: add a0, sp, a0
; ZVBB-RV32-NEXT: addi a0, a0, 64
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
-; ZVBB-RV32-NEXT: vl1r.v v13, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a0, a2
-; ZVBB-RV32-NEXT: vl1r.v v14, (a6)
-; ZVBB-RV32-NEXT: vl1r.v v15, (a1)
+; ZVBB-RV32-NEXT: vl1re16.v v14, (a6)
+; ZVBB-RV32-NEXT: vl1re16.v v15, (a1)
; ZVBB-RV32-NEXT: add a5, a0, a5
; ZVBB-RV32-NEXT: vs2r.v v20, (a5)
; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a0)
-; ZVBB-RV32-NEXT: vl8r.v v16, (a2)
-; ZVBB-RV32-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZVBB-RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
-; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v26, v20
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv2r.v v24, v16
@@ -2994,61 +7530,69 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; ZVBB-RV64-NEXT: vmv1r.v v22, v11
; ZVBB-RV64-NEXT: add a6, a7, a2
; ZVBB-RV64-NEXT: vmv1r.v v24, v15
-; ZVBB-RV64-NEXT: vsseg7e8.v v1, (a0)
+; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0)
; ZVBB-RV64-NEXT: vmv1r.v v26, v19
-; ZVBB-RV64-NEXT: vsseg7e8.v v21, (a1)
-; ZVBB-RV64-NEXT: vl1r.v v18, (a6)
+; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v18, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v19, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v19, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v20, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v20, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v21, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v21, (a6)
; ZVBB-RV64-NEXT: add a6, a3, a2
-; ZVBB-RV64-NEXT: vl1r.v v10, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v10, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v11, (a6)
-; ZVBB-RV64-NEXT: vl1r.v v8, (a0)
-; ZVBB-RV64-NEXT: vl1r.v v16, (a4)
-; ZVBB-RV64-NEXT: vl1r.v v9, (a3)
-; ZVBB-RV64-NEXT: vl1r.v v17, (a7)
+; ZVBB-RV64-NEXT: vl1re16.v v11, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
+; ZVBB-RV64-NEXT: vl1re16.v v16, (a4)
+; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
+; ZVBB-RV64-NEXT: vl1re16.v v17, (a7)
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a3, 14
; ZVBB-RV64-NEXT: mul a0, a0, a3
; ZVBB-RV64-NEXT: add a0, sp, a0
; ZVBB-RV64-NEXT: addi a0, a0, 64
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v12, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
-; ZVBB-RV64-NEXT: vl1r.v v13, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a0, a2
-; ZVBB-RV64-NEXT: vl1r.v v14, (a6)
-; ZVBB-RV64-NEXT: vl1r.v v15, (a1)
+; ZVBB-RV64-NEXT: vl1re16.v v14, (a6)
+; ZVBB-RV64-NEXT: vl1re16.v v15, (a1)
; ZVBB-RV64-NEXT: add a5, a0, a5
; ZVBB-RV64-NEXT: vs2r.v v20, (a5)
; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
-; ZVBB-RV64-NEXT: vl8r.v v16, (a2)
-; ZVBB-RV64-NEXT: vl8r.v v8, (a0)
+; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
+; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZIP-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
-; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZIP-NEXT: vmv2r.v v26, v20
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv2r.v v24, v16
@@ -3076,62 +7620,234 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; ZIP-NEXT: vmv1r.v v22, v11
; ZIP-NEXT: add a6, a7, a2
; ZIP-NEXT: vmv1r.v v24, v15
-; ZIP-NEXT: vsseg7e8.v v1, (a0)
+; ZIP-NEXT: vsseg7e16.v v1, (a0)
; ZIP-NEXT: vmv1r.v v26, v19
-; ZIP-NEXT: vsseg7e8.v v21, (a1)
-; ZIP-NEXT: vl1r.v v18, (a6)
+; ZIP-NEXT: vsseg7e16.v v21, (a1)
+; ZIP-NEXT: vl1re16.v v18, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v19, (a6)
+; ZIP-NEXT: vl1re16.v v19, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v20, (a6)
+; ZIP-NEXT: vl1re16.v v20, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v21, (a6)
+; ZIP-NEXT: vl1re16.v v21, (a6)
; ZIP-NEXT: add a6, a3, a2
-; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: vl1re16.v v10, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v11, (a6)
-; ZIP-NEXT: vl1r.v v8, (a0)
-; ZIP-NEXT: vl1r.v v16, (a4)
-; ZIP-NEXT: vl1r.v v9, (a3)
-; ZIP-NEXT: vl1r.v v17, (a7)
+; ZIP-NEXT: vl1re16.v v11, (a6)
+; ZIP-NEXT: vl1re16.v v8, (a0)
+; ZIP-NEXT: vl1re16.v v16, (a4)
+; ZIP-NEXT: vl1re16.v v9, (a3)
+; ZIP-NEXT: vl1re16.v v17, (a7)
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a3, 14
; ZIP-NEXT: mul a0, a0, a3
; ZIP-NEXT: add a0, sp, a0
; ZIP-NEXT: addi a0, a0, 64
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v12, (a6)
+; ZIP-NEXT: vl1re16.v v12, (a6)
; ZIP-NEXT: add a6, a6, a2
-; ZIP-NEXT: vl1r.v v13, (a6)
+; ZIP-NEXT: vl1re16.v v13, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a0, a2
-; ZIP-NEXT: vl1r.v v14, (a6)
-; ZIP-NEXT: vl1r.v v15, (a1)
+; ZIP-NEXT: vl1re16.v v14, (a6)
+; ZIP-NEXT: vl1re16.v v15, (a1)
; ZIP-NEXT: add a5, a0, a5
; ZIP-NEXT: vs2r.v v20, (a5)
; ZIP-NEXT: vs4r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a0)
-; ZIP-NEXT: vl8r.v v16, (a2)
-; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: vl8re16.v v16, (a2)
+; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g)
- ret <vscale x 112 x i8> %res
+ %res = call <vscale x 56 x half> @llvm.vector.interleave7.nxv56f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6)
+ ret <vscale x 56 x half> %res
}
+define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) {
+; CHECK-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: srli a1, a1, 2
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: add a5, a4, a2
+; CHECK-NEXT: add a6, a5, a2
+; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsseg7e16.v v8, (a0)
+; CHECK-NEXT: add a7, a6, a2
+; CHECK-NEXT: vle16.v v8, (a7)
+; CHECK-NEXT: vle16.v v10, (a6)
+; CHECK-NEXT: add a6, a1, a1
+; CHECK-NEXT: add a2, a7, a2
+; CHECK-NEXT: vle16.v v12, (a5)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v10, v8, a1
+; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v11, (a2)
+; CHECK-NEXT: vle16.v v9, (a4)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a3)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v12, a1
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: add a6, a5, a2
+; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vsseg7e16.v v8, (a0)
+; ZVBB-NEXT: add a7, a6, a2
+; ZVBB-NEXT: vle16.v v8, (a7)
+; ZVBB-NEXT: vle16.v v10, (a6)
+; ZVBB-NEXT: add a6, a1, a1
+; ZVBB-NEXT: add a2, a7, a2
+; ZVBB-NEXT: vle16.v v12, (a5)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v10, v8, a1
+; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v11, (a2)
+; ZVBB-NEXT: vle16.v v9, (a4)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v12, a1
+; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vle16.v v12, (a3)
+; ZVBB-NEXT: vle16.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a6, e16, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v12, a1
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 14 x bfloat> @llvm.vector.interleave7.nxv14bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6)
+ ret <vscale x 14 x bfloat> %res
+}
-define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind {
+define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) {
+; CHECK-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsseg7e16.v v8, (a0)
+; CHECK-NEXT: vl1re16.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v11, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re16.v v8, (a0)
+; CHECK-NEXT: add a0, a3, a1
+; CHECK-NEXT: vl1re16.v v9, (a2)
+; CHECK-NEXT: vl1re16.v v12, (a3)
+; CHECK-NEXT: vl1re16.v v13, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re16.v v14, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
;
-; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vsseg7e16.v v8, (a0)
+; ZVBB-NEXT: vl1re16.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v11, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re16.v v8, (a0)
+; ZVBB-NEXT: add a0, a3, a1
+; ZVBB-NEXT: vl1re16.v v9, (a2)
+; ZVBB-NEXT: vl1re16.v v12, (a3)
+; ZVBB-NEXT: vl1re16.v v13, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re16.v v14, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 28 x bfloat> @llvm.vector.interleave7.nxv28bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6)
+ ret <vscale x 28 x bfloat> %res
+}
+
+define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) {
+; RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -3203,17 +7919,25 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -3285,17 +8009,25 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -3367,17 +8099,25 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZVBB-RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -3449,17 +8189,25 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZIP-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -3531,23 +8279,195 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g)
- ret <vscale x 56 x i16> %res
+ %res = call <vscale x 56 x bfloat> @llvm.vector.interleave7.nxv56bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6)
+ ret <vscale x 56 x bfloat> %res
}
+define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) {
+; CHECK-LABEL: vector_interleave_nxv7f32_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: srli a1, a1, 3
+; CHECK-NEXT: add a3, a0, a2
+; CHECK-NEXT: add a4, a3, a2
+; CHECK-NEXT: add a5, a4, a2
+; CHECK-NEXT: add a6, a5, a2
+; CHECK-NEXT: vsetvli a7, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsseg7e32.v v8, (a0)
+; CHECK-NEXT: add a7, a6, a2
+; CHECK-NEXT: vle32.v v8, (a7)
+; CHECK-NEXT: vle32.v v10, (a6)
+; CHECK-NEXT: add a6, a1, a1
+; CHECK-NEXT: add a2, a7, a2
+; CHECK-NEXT: vle32.v v12, (a5)
+; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v10, v8, a1
+; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v11, (a2)
+; CHECK-NEXT: vle32.v v9, (a4)
+; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v12, a1
+; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v12, (a3)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v12, a1
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: vector_interleave_nxv7f32_nxv1f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: srli a2, a1, 1
+; ZVBB-NEXT: srli a1, a1, 3
+; ZVBB-NEXT: add a3, a0, a2
+; ZVBB-NEXT: add a4, a3, a2
+; ZVBB-NEXT: add a5, a4, a2
+; ZVBB-NEXT: add a6, a5, a2
+; ZVBB-NEXT: vsetvli a7, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vsseg7e32.v v8, (a0)
+; ZVBB-NEXT: add a7, a6, a2
+; ZVBB-NEXT: vle32.v v8, (a7)
+; ZVBB-NEXT: vle32.v v10, (a6)
+; ZVBB-NEXT: add a6, a1, a1
+; ZVBB-NEXT: add a2, a7, a2
+; ZVBB-NEXT: vle32.v v12, (a5)
+; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v10, v8, a1
+; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vle32.v v11, (a2)
+; ZVBB-NEXT: vle32.v v9, (a4)
+; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v9, v12, a1
+; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
+; ZVBB-NEXT: vle32.v v12, (a3)
+; ZVBB-NEXT: vle32.v v8, (a0)
+; ZVBB-NEXT: vsetvli zero, a6, e32, m1, ta, ma
+; ZVBB-NEXT: vslideup.vx v8, v12, a1
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a0, a0, 2
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 7 x float> @llvm.vector.interleave7.nxv7f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6)
+ ret <vscale x 7 x float> %res
+}
-define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind {
+define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) {
+; CHECK-LABEL: vector_interleave_nxv14f32_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsseg7e32.v v8, (a0)
+; CHECK-NEXT: vl1re32.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re32.v v11, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re32.v v8, (a0)
+; CHECK-NEXT: add a0, a3, a1
+; CHECK-NEXT: vl1re32.v v9, (a2)
+; CHECK-NEXT: vl1re32.v v12, (a3)
+; CHECK-NEXT: vl1re32.v v13, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re32.v v14, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
;
-; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-LABEL: vector_interleave_nxv14f32_nxv2f32:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma
+; ZVBB-NEXT: vsseg7e32.v v8, (a0)
+; ZVBB-NEXT: vl1re32.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re32.v v11, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re32.v v8, (a0)
+; ZVBB-NEXT: add a0, a3, a1
+; ZVBB-NEXT: vl1re32.v v9, (a2)
+; ZVBB-NEXT: vl1re32.v v12, (a3)
+; ZVBB-NEXT: vl1re32.v v13, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re32.v v14, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 14 x float> @llvm.vector.interleave7.nxv14f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6)
+ ret <vscale x 14 x float> %res
+}
+
+define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) {
+; RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -3619,17 +8539,25 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; RV32-NEXT: vl8re32.v v16, (a2)
; RV32-NEXT: vl8re32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -3701,17 +8629,25 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; RV64-NEXT: vl8re32.v v16, (a2)
; RV64-NEXT: vl8re32.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -3783,17 +8719,25 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZVBB-RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -3865,17 +8809,25 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZIP-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -3947,22 +8899,103 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; ZIP-NEXT: vl8re32.v v16, (a2)
; ZIP-NEXT: vl8re32.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g)
- ret <vscale x 28 x i32> %res
+ %res = call <vscale x 28 x float> @llvm.vector.interleave7.nxv28f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6)
+ ret <vscale x 28 x float> %res
}
-define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind {
+define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6) {
+; CHECK-LABEL: vector_interleave_nxv7f64_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: add a2, a0, a1
+; CHECK-NEXT: add a3, a2, a1
+; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma
+; CHECK-NEXT: vsseg7e64.v v8, (a0)
+; CHECK-NEXT: vl1re64.v v10, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re64.v v11, (a3)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: add a0, a3, a1
+; CHECK-NEXT: vl1re64.v v9, (a2)
+; CHECK-NEXT: vl1re64.v v12, (a3)
+; CHECK-NEXT: vl1re64.v v13, (a0)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vl1re64.v v14, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a1, a0, 3
+; CHECK-NEXT: sub a0, a1, a0
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
;
-; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-LABEL: vector_interleave_nxv7f64_nxv1f64:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: addi sp, sp, -16
+; ZVBB-NEXT: .cfi_def_cfa_offset 16
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
+; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: add a2, a0, a1
+; ZVBB-NEXT: add a3, a2, a1
+; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma
+; ZVBB-NEXT: vsseg7e64.v v8, (a0)
+; ZVBB-NEXT: vl1re64.v v10, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re64.v v11, (a3)
+; ZVBB-NEXT: add a3, a3, a1
+; ZVBB-NEXT: vl1re64.v v8, (a0)
+; ZVBB-NEXT: add a0, a3, a1
+; ZVBB-NEXT: vl1re64.v v9, (a2)
+; ZVBB-NEXT: vl1re64.v v12, (a3)
+; ZVBB-NEXT: vl1re64.v v13, (a0)
+; ZVBB-NEXT: add a0, a0, a1
+; ZVBB-NEXT: vl1re64.v v14, (a0)
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: slli a1, a0, 3
+; ZVBB-NEXT: sub a0, a1, a0
+; ZVBB-NEXT: add sp, sp, a0
+; ZVBB-NEXT: .cfi_def_cfa sp, 16
+; ZVBB-NEXT: addi sp, sp, 16
+; ZVBB-NEXT: .cfi_def_cfa_offset 0
+; ZVBB-NEXT: ret
+ %res = call <vscale x 7 x double> @llvm.vector.interleave7.nxv7f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6)
+ ret <vscale x 7 x double> %res
+}
+
+define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6) {
+; RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -4034,17 +9067,25 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; RV32-NEXT: vl8re64.v v16, (a2)
; RV32-NEXT: vl8re64.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
+; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
-; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -4116,17 +9157,25 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; RV64-NEXT: vl8re64.v v16, (a2)
; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
+; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
-; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; ZVBB-RV32-NEXT: .cfi_offset ra, -4
+; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -4198,17 +9247,25 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
+; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; ZVBB-RV32-NEXT: .cfi_restore ra
+; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
+; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
-; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZVBB-RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZVBB-RV64-NEXT: .cfi_offset ra, -8
+; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -4280,17 +9337,25 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
+; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZVBB-RV64-NEXT: .cfi_restore ra
+; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
+; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
-; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZIP-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: .cfi_offset ra, -8
+; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -4362,10 +9427,14 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; ZIP-NEXT: vl8re64.v v16, (a2)
; ZIP-NEXT: vl8re64.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: .cfi_restore ra
+; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
- %res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g)
- ret <vscale x 14 x i64> %res
+ %res = call <vscale x 14 x double> @llvm.vector.interleave7.nxv14f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6)
+ ret <vscale x 14 x double> %res
}
From e22335285bce7e6cbb959425b9d625118af1b440 Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu <min.hsu at sifive.com>
Date: Thu, 24 Apr 2025 15:41:37 -0700
Subject: [PATCH 2/2] fixup! Add nounwind
---
.../CodeGen/RISCV/rvv/vector-deinterleave.ll | 198 +-----
.../CodeGen/RISCV/rvv/vector-interleave.ll | 586 +-----------------
2 files changed, 66 insertions(+), 718 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index bb4e1f58588f8..e316c022727ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -1315,15 +1315,13 @@ define {<vscale x 8 x double>, <vscale x 8 x double>} @vector_deinterleave_nxv8f
ret {<vscale x 8 x double>, <vscale x 8 x double>} %retval
}
-define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv6f16(<vscale x 6 x half> %arg) {
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv6f16(<vscale x 6 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv6f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1336,23 +1334,19 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_d
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave3.nxv6f16(<vscale x 6 x half> %arg)
ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
}
-define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv12f16(<vscale x 12 x half> %arg) {
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv12f16(<vscale x 12 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv12f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1360,23 +1354,19 @@ define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_d
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave3.nxv12f16(<vscale x 12 x half> %arg)
ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
}
-define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv24f16(<vscale x 24 x half> %arg) {
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv24f16(<vscale x 24 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv24f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -1384,23 +1374,19 @@ define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_d
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave3.nxv24f16(<vscale x 24 x half> %arg)
ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
}
-define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv6bf16(<vscale x 6 x bfloat> %arg) {
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv6bf16(<vscale x 6 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv6bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1413,23 +1399,19 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave3.nxv6bf16(<vscale x 6 x bfloat> %arg)
ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
}
-define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv12bf16(<vscale x 12 x bfloat> %arg) {
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv12bf16(<vscale x 12 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv12bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1437,23 +1419,19 @@ define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave3.nxv12bf16(<vscale x 12 x bfloat> %arg)
ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
}
-define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv24bf16(<vscale x 24 x bfloat> %arg) {
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv24bf16(<vscale x 24 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv24bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
@@ -1461,23 +1439,19 @@ define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave3.nxv24bf16(<vscale x 24 x bfloat> %arg)
ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
-define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv3f32(<vscale x 3 x float> %arg) {
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv3f32(<vscale x 3 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv3f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1490,23 +1464,19 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vecto
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave3.nxv3f32(<vscale x 3 x float> %arg)
ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
}
-define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv6f32(<vscale x 6 x float> %arg) {
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv6f32(<vscale x 6 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv6f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1514,23 +1484,19 @@ define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vecto
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave3.nxv6f32(<vscale x 6 x float> %arg)
ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
}
-define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv12f32(<vscale x 12 x float> %arg) {
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv12f32(<vscale x 12 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv12f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
@@ -1538,23 +1504,19 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vecto
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave3.nxv12f32(<vscale x 12 x float> %arg)
ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
-define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv3f64(<vscale x 3 x double> %arg) {
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv3f64(<vscale x 3 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv3f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs4r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -1562,23 +1524,19 @@ define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave3.nxv3f64(<vscale x 3 x double> %arg)
ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
}
-define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv6f64(<vscale x 6 x double> %arg) {
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv6f64(<vscale x 6 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv6f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
@@ -1586,23 +1544,19 @@ define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave3.nxv6f64(<vscale x 6 x double> %arg)
ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
}
-define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv10f16(<vscale x 10 x half> %arg) {
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv10f16(<vscale x 10 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv10f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1617,23 +1571,19 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave5.nxv10f16(<vscale x 10 x half> %arg)
ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
}
-define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv20f16(<vscale x 20 x half> %arg) {
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv20f16(<vscale x 20 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv20f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1641,23 +1591,19 @@ define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave5.nxv20f16(<vscale x 20 x half> %arg)
ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
}
-define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv40f16(<vscale x 40 x half> %arg) {
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv40f16(<vscale x 40 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv40f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -1685,23 +1631,19 @@ define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave5.nxv40f16(<vscale x 40 x half> %arg)
ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
}
-define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv10bf16(<vscale x 10 x bfloat> %arg) {
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv10bf16(<vscale x 10 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv10bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1716,23 +1658,19 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave5.nxv10bf16(<vscale x 10 x bfloat> %arg)
ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
}
-define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv20bf16(<vscale x 20 x bfloat> %arg) {
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv20bf16(<vscale x 20 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv20bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1740,23 +1678,19 @@ define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave5.nxv20bf16(<vscale x 20 x bfloat> %arg)
ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
}
-define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv40bf16(<vscale x 40 x bfloat> %arg) {
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv40bf16(<vscale x 40 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv40bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -1784,23 +1718,19 @@ define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave5.nxv40bf16(<vscale x 40 x bfloat> %arg)
ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
-define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv5f32(<vscale x 5 x float> %arg) {
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv5f32(<vscale x 5 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv5f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1815,23 +1745,19 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave5.nxv5f32(<vscale x 5 x float> %arg)
ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
}
-define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv10f32(<vscale x 10 x float> %arg) {
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv10f32(<vscale x 10 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv10f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -1839,23 +1765,19 @@ define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave5.nxv10f32(<vscale x 10 x float> %arg)
ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
}
-define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv20f32(<vscale x 20 x float> %arg) {
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv20f32(<vscale x 20 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv20f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -1883,23 +1805,19 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave5.nxv20f32(<vscale x 20 x float> %arg)
ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
-define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv5f64(<vscale x 5 x double> %arg) {
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv5f64(<vscale x 5 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv5f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -1907,23 +1825,19 @@ define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave5.nxv5f64(<vscale x 5 x double> %arg)
ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
}
-define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv10f64(<vscale x 10 x double> %arg) {
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv10f64(<vscale x 10 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv10f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vmv1r.v v26, v15
; CHECK-NEXT: vmv1r.v v27, v16
@@ -1951,23 +1865,19 @@ define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave5.nxv10f64(<vscale x 10 x double> %arg)
ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
}
-define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv14f16(<vscale x 14 x half> %arg) {
+define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv14f16(<vscale x 14 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv14f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -1984,23 +1894,19 @@ define {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave7.nxv14f16(<vscale x 14 x half> %arg)
ret {<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>} %res
}
-define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv28f16(<vscale x 28 x half> %arg) {
+define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv28f16(<vscale x 28 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv28f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -2008,23 +1914,19 @@ define {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave7.nxv28f16(<vscale x 28 x half> %arg)
ret {<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>} %res
}
-define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv56f16(<vscale x 56 x half> %arg) {
+define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv56f16(<vscale x 56 x half> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv56f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -2058,23 +1960,19 @@ define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave7.nxv56f16(<vscale x 56 x half> %arg)
ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
}
-define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv14bf16(<vscale x 14 x bfloat> %arg) {
+define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv14bf16(<vscale x 14 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv14bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -2091,23 +1989,19 @@ define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave7.nxv14bf16(<vscale x 14 x bfloat> %arg)
ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %res
}
-define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv28bf16(<vscale x 28 x bfloat> %arg) {
+define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv28bf16(<vscale x 28 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv28bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
@@ -2115,23 +2009,19 @@ define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave7.nxv28bf16(<vscale x 28 x bfloat> %arg)
ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %res
}
-define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv56bf16(<vscale x 56 x bfloat> %arg) {
+define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv56bf16(<vscale x 56 x bfloat> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv56bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -2165,23 +2055,19 @@ define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave7.nxv56bf16(<vscale x 56 x bfloat> %arg)
ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
-define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv7f32(<vscale x 7 x float> %arg) {
+define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @vector_deinterleave_nxv1f32_nxv7f32(<vscale x 7 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f32_nxv7f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -2198,23 +2084,19 @@ define {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} @llvm.vector.deinterleave7.nxv7f32(<vscale x 7 x float> %arg)
ret {<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>} %res
}
-define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv14f32(<vscale x 14 x float> %arg) {
+define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv14f32(<vscale x 14 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv14f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
@@ -2222,23 +2104,19 @@ define {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave7.nxv14f32(<vscale x 14 x float> %arg)
ret {<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>} %res
}
-define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv28f32(<vscale x 28 x float> %arg) {
+define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv28f32(<vscale x 28 x float> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv28f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -2272,23 +2150,19 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave7.nxv28f32(<vscale x 28 x float> %arg)
ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
-define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv7f64(<vscale x 7 x double> %arg) {
+define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @vector_deinterleave_nxv1f64_nxv7f64(<vscale x 7 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv1f64_nxv7f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
@@ -2296,23 +2170,19 @@ define {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} @llvm.vector.deinterleave7.nxv7f64(<vscale x 7 x double> %arg)
ret {<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>} %res
}
-define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv14f64(<vscale x 14 x double> %arg) {
+define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv14f64(<vscale x 14 x double> %arg) nounwind {
; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv14f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vmv1r.v v30, v21
; CHECK-NEXT: vmv1r.v v28, v19
@@ -2346,9 +2216,7 @@ define {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vs
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%res = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave7.nxv14f64(<vscale x 14 x double> %arg)
ret {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 5f73855c71453..7347000bf5c71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -4369,15 +4369,13 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
ret <vscale x 16 x double> %res
}
-define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2) {
+define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv6f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -4396,19 +4394,15 @@ define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6f16_nxv2f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -4427,24 +4421,20 @@ define <vscale x 6 x half> @vector_interleave_nxv6f16_nxv2f16(<vscale x 2 x half
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 6 x half> @llvm.vector.interleave3.nxv6f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2)
ret <vscale x 6 x half> %res
}
-define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2) {
+define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv12f16_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
@@ -4458,20 +4448,16 @@ define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x ha
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12f16_nxv4f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
@@ -4485,24 +4471,20 @@ define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv4f16(<vscale x 4 x ha
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 12 x half> @llvm.vector.interleave3.nxv12f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2)
ret <vscale x 12 x half> %res
}
-define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2) {
+define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv24f16_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
@@ -4517,20 +4499,16 @@ define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x ha
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv24f16_nxv8f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: slli a1, a1, 1
@@ -4545,23 +4523,19 @@ define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv8f16(<vscale x 8 x ha
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 24 x half> @llvm.vector.interleave3.nxv24f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2)
ret <vscale x 24 x half> %res
}
-define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2) {
+define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -4580,19 +4554,15 @@ define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6bf16_nxv2bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -4611,24 +4581,20 @@ define <vscale x 6 x bfloat> @vector_interleave_nxv6bf16_nxv2bf16(<vscale x 2 x
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 6 x bfloat> @llvm.vector.interleave3.nxv6bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2)
ret <vscale x 6 x bfloat> %res
}
-define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2) {
+define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
@@ -4642,20 +4608,16 @@ define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv4bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
@@ -4669,24 +4631,20 @@ define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv4bf16(<vscale x 4
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 12 x bfloat> @llvm.vector.interleave3.nxv12bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2)
ret <vscale x 12 x bfloat> %res
}
-define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2) {
+define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
@@ -4701,20 +4659,16 @@ define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv8bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: slli a1, a1, 1
@@ -4729,23 +4683,19 @@ define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv8bf16(<vscale x 8
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 24 x bfloat> @llvm.vector.interleave3.nxv24bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2)
ret <vscale x 24 x bfloat> %res
}
-define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2) {
+define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv3f32_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -4764,19 +4714,15 @@ define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x flo
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv3f32_nxv1f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -4795,24 +4741,20 @@ define <vscale x 3 x float> @vector_interleave_nxv3f32_nxv1f32(<vscale x 1 x flo
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 3 x float> @llvm.vector.interleave3.nxv3f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2)
ret <vscale x 3 x float> %res
}
-define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2) {
+define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv6f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
@@ -4826,20 +4768,16 @@ define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x flo
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6f32_nxv2f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma
@@ -4853,24 +4791,20 @@ define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv2f32(<vscale x 2 x flo
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 6 x float> @llvm.vector.interleave3.nxv6f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2)
ret <vscale x 6 x float> %res
}
-define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2) {
+define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv12f32_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
@@ -4885,20 +4819,16 @@ define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x f
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12f32_nxv4f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: slli a1, a1, 1
@@ -4913,24 +4843,20 @@ define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv4f32(<vscale x 4 x f
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 12 x float> @llvm.vector.interleave3.nxv12f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2)
ret <vscale x 12 x float> %res
}
-define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2) {
+define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv3f64_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
@@ -4944,20 +4870,16 @@ define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x do
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv3f64_nxv1f64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: vsetvli a2, zero, e64, m1, ta, ma
@@ -4971,24 +4893,20 @@ define <vscale x 3 x double> @vector_interleave_nxv3f64_nxv1f64(<vscale x 1 x do
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 3 x double> @llvm.vector.interleave3.nxv3f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2)
ret <vscale x 3 x double> %res
}
-define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2) {
+define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2) nounwind {
; CHECK-LABEL: vector_interleave_nxv6f64_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
@@ -5003,20 +4921,16 @@ define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x do
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6f64_nxv2f64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 6 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: slli a1, a1, 1
@@ -5031,24 +4945,20 @@ define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv2f64(<vscale x 2 x do
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 6 x double> @llvm.vector.interleave3.nxv6f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2)
ret <vscale x 6 x double> %res
}
-define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) {
+define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv10f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -5075,20 +4985,16 @@ define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x ha
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv10f16_nxv2f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -5115,24 +5021,20 @@ define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x ha
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 10 x half> @llvm.vector.interleave5.nxv10f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4)
ret <vscale x 10 x half> %res
}
-define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) {
+define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv20f16_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -5150,20 +5052,16 @@ define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x ha
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv20f16_nxv4f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -5181,25 +5079,19 @@ define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x ha
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 20 x half> @llvm.vector.interleave5.nxv20f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4)
ret <vscale x 20 x half> %res
}
-define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) {
+define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) nounwind {
; RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -5255,25 +5147,17 @@ define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x ha
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -5329,25 +5213,17 @@ define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x ha
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv40f16_nxv8f16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -5403,25 +5279,17 @@ define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x ha
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv40f16_nxv8f16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -5477,25 +5345,17 @@ define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x ha
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv40f16_nxv8f16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -5551,28 +5411,22 @@ define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x ha
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 40 x half> @llvm.vector.interleave5.nxv40f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4)
ret <vscale x 40 x half> %res
}
-define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) {
+define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -5599,20 +5453,16 @@ define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv10bf16_nxv2bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -5639,24 +5489,20 @@ define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 10 x bfloat> @llvm.vector.interleave5.nxv10bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4)
ret <vscale x 10 x bfloat> %res
}
-define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) {
+define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -5674,20 +5520,16 @@ define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv20bf16_nxv4bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -5705,25 +5547,19 @@ define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 20 x bfloat> @llvm.vector.interleave5.nxv20bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4)
ret <vscale x 20 x bfloat> %res
}
-define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) {
+define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) nounwind {
; RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -5779,25 +5615,17 @@ define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -5853,25 +5681,17 @@ define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -5927,25 +5747,17 @@ define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -6001,25 +5813,17 @@ define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv40bf16_nxv8bf16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -6075,28 +5879,22 @@ define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 40 x bfloat> @llvm.vector.interleave5.nxv40bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4)
ret <vscale x 40 x bfloat> %res
}
-define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) {
+define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv5f32_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -6123,20 +5921,16 @@ define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x flo
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv5f32_nxv1f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 3 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -6163,24 +5957,20 @@ define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x flo
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 5 x float> @llvm.vector.interleave5.nxv5f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4)
ret <vscale x 5 x float> %res
}
-define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) {
+define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv10f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -6198,20 +5988,16 @@ define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x f
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv10f32_nxv2f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -6229,25 +6015,19 @@ define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x f
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 10 x float> @llvm.vector.interleave5.nxv10f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4)
ret <vscale x 10 x float> %res
}
-define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) {
+define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) nounwind {
; RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -6303,25 +6083,17 @@ define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x f
; RV32-NEXT: vl8re32.v v16, (a2)
; RV32-NEXT: vl8re32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -6377,25 +6149,17 @@ define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x f
; RV64-NEXT: vl8re32.v v16, (a2)
; RV64-NEXT: vl8re32.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -6451,25 +6215,17 @@ define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x f
; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -6525,25 +6281,17 @@ define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x f
; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv20f32_nxv4f32:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -6599,28 +6347,22 @@ define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x f
; ZIP-NEXT: vl8re32.v v16, (a2)
; ZIP-NEXT: vl8re32.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 20 x float> @llvm.vector.interleave5.nxv20f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4)
ret <vscale x 20 x float> %res
}
-define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) {
+define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) nounwind {
; CHECK-LABEL: vector_interleave_nxv5f64_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -6638,20 +6380,16 @@ define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x do
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv5f64_nxv1f64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 5 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -6669,25 +6407,19 @@ define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x do
; ZVBB-NEXT: slli a1, a0, 2
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 5 x double> @llvm.vector.interleave5.nxv5f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4)
ret <vscale x 5 x double> %res
}
-define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4) {
+define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4) nounwind {
; RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
@@ -6743,25 +6475,17 @@ define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x
; RV32-NEXT: vl8re64.v v16, (a2)
; RV32-NEXT: vl8re64.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
@@ -6817,25 +6541,17 @@ define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x
; RV64-NEXT: vl8re64.v v16, (a2)
; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
@@ -6891,25 +6607,17 @@ define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x
; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
@@ -6965,25 +6673,17 @@ define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
@@ -7039,27 +6739,21 @@ define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x
; ZIP-NEXT: vl8re64.v v16, (a2)
; ZIP-NEXT: vl8re64.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 10 x double> @llvm.vector.interleave5.nxv10f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4)
ret <vscale x 10 x double> %res
}
-define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) {
+define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv14f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -7091,19 +6785,15 @@ define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x ha
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv14f16_nxv2f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -7135,24 +6825,20 @@ define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x ha
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 14 x half> @llvm.vector.interleave7.nxv14f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6)
ret <vscale x 14 x half> %res
}
-define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) {
+define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv28f16_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -7174,20 +6860,16 @@ define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x ha
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv28f16_nxv4f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -7209,25 +6891,19 @@ define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x ha
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 28 x half> @llvm.vector.interleave7.nxv28f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6)
ret <vscale x 28 x half> %res
}
-define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) {
+define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) nounwind {
; RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -7299,25 +6975,17 @@ define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x ha
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -7389,25 +7057,17 @@ define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x ha
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -7479,25 +7139,17 @@ define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x ha
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -7569,25 +7221,17 @@ define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x ha
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv56f16_nxv8f16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -7659,27 +7303,21 @@ define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x ha
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 56 x half> @llvm.vector.interleave7.nxv56f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6)
ret <vscale x 56 x half> %res
}
-define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) {
+define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -7711,19 +7349,15 @@ define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv14bf16_nxv2bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -7755,24 +7389,20 @@ define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 14 x bfloat> @llvm.vector.interleave7.nxv14bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6)
ret <vscale x 14 x bfloat> %res
}
-define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) {
+define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -7794,20 +7424,16 @@ define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv28bf16_nxv4bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -7829,25 +7455,19 @@ define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 28 x bfloat> @llvm.vector.interleave7.nxv28bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6)
ret <vscale x 28 x bfloat> %res
}
-define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) {
+define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) nounwind {
; RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -7919,25 +7539,17 @@ define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -8009,25 +7621,17 @@ define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -8099,25 +7703,17 @@ define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -8189,25 +7785,17 @@ define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv56bf16_nxv8bf16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -8279,27 +7867,21 @@ define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 56 x bfloat> @llvm.vector.interleave7.nxv56bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6)
ret <vscale x 56 x bfloat> %res
}
-define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) {
+define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv7f32_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
@@ -8331,19 +7913,15 @@ define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x flo
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv7f32_nxv1f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
@@ -8375,24 +7953,20 @@ define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x flo
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 2
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 7 x float> @llvm.vector.interleave7.nxv7f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6)
ret <vscale x 7 x float> %res
}
-define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) {
+define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv14f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -8414,20 +7988,16 @@ define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x f
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv14f32_nxv2f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -8449,25 +8019,19 @@ define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x f
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 14 x float> @llvm.vector.interleave7.nxv14f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6)
ret <vscale x 14 x float> %res
}
-define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) {
+define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) nounwind {
; RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -8539,25 +8103,17 @@ define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x f
; RV32-NEXT: vl8re32.v v16, (a2)
; RV32-NEXT: vl8re32.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -8629,25 +8185,17 @@ define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x f
; RV64-NEXT: vl8re32.v v16, (a2)
; RV64-NEXT: vl8re32.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -8719,25 +8267,17 @@ define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x f
; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -8809,25 +8349,17 @@ define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x f
; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re32.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv28f32_nxv4f32:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -8899,28 +8431,22 @@ define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x f
; ZIP-NEXT: vl8re32.v v16, (a2)
; ZIP-NEXT: vl8re32.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 28 x float> @llvm.vector.interleave7.nxv28f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6)
ret <vscale x 28 x float> %res
}
-define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6) {
+define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv7f64_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
@@ -8942,20 +8468,16 @@ define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x do
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv7f64_nxv1f64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
-; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x07, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 7 * vlenb
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
@@ -8977,25 +8499,19 @@ define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x do
; ZVBB-NEXT: slli a1, a0, 3
; ZVBB-NEXT: sub a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
-; ZVBB-NEXT: .cfi_def_cfa sp, 16
; ZVBB-NEXT: addi sp, sp, 16
-; ZVBB-NEXT: .cfi_def_cfa_offset 0
; ZVBB-NEXT: ret
%res = call <vscale x 7 x double> @llvm.vector.interleave7.nxv7f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6)
ret <vscale x 7 x double> %res
}
-define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6) {
+define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6) nounwind {
; RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 80
-; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
@@ -9067,25 +8583,17 @@ define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x
; RV32-NEXT: vl8re64.v v16, (a2)
; RV32-NEXT: vl8re64.v v8, (a0)
; RV32-NEXT: addi sp, s0, -80
-; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: .cfi_restore ra
-; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
-; RV64-NEXT: .cfi_def_cfa_offset 80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 80
-; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
@@ -9157,25 +8665,17 @@ define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x
; RV64-NEXT: vl8re64.v v16, (a2)
; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: addi sp, s0, -80
-; RV64-NEXT: .cfi_def_cfa sp, 80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 80
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; ZVBB-RV32-NEXT: .cfi_offset ra, -4
-; ZVBB-RV32-NEXT: .cfi_offset s0, -8
; ZVBB-RV32-NEXT: addi s0, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: slli a0, a0, 5
; ZVBB-RV32-NEXT: sub sp, sp, a0
@@ -9247,25 +8747,17 @@ define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x
; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV32-NEXT: addi sp, s0, -80
-; ZVBB-RV32-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; ZVBB-RV32-NEXT: .cfi_restore ra
-; ZVBB-RV32-NEXT: .cfi_restore s0
; ZVBB-RV32-NEXT: addi sp, sp, 80
-; ZVBB-RV32-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZVBB-RV64-NEXT: .cfi_offset ra, -8
-; ZVBB-RV64-NEXT: .cfi_offset s0, -16
; ZVBB-RV64-NEXT: addi s0, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa s0, 0
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: slli a0, a0, 5
; ZVBB-RV64-NEXT: sub sp, sp, a0
@@ -9337,25 +8829,17 @@ define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
-; ZVBB-RV64-NEXT: .cfi_def_cfa sp, 80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZVBB-RV64-NEXT: .cfi_restore ra
-; ZVBB-RV64-NEXT: .cfi_restore s0
; ZVBB-RV64-NEXT: addi sp, sp, 80
-; ZVBB-RV64-NEXT: .cfi_def_cfa_offset 0
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv14f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
-; ZIP-NEXT: .cfi_def_cfa_offset 80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
-; ZIP-NEXT: .cfi_offset ra, -8
-; ZIP-NEXT: .cfi_offset s0, -16
; ZIP-NEXT: addi s0, sp, 80
-; ZIP-NEXT: .cfi_def_cfa s0, 0
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: slli a0, a0, 5
; ZIP-NEXT: sub sp, sp, a0
@@ -9427,13 +8911,9 @@ define <vscale x 14 x double> @vector_interleave_nxv14f64_nxv2f64(<vscale x 2 x
; ZIP-NEXT: vl8re64.v v16, (a2)
; ZIP-NEXT: vl8re64.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
-; ZIP-NEXT: .cfi_def_cfa sp, 80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
-; ZIP-NEXT: .cfi_restore ra
-; ZIP-NEXT: .cfi_restore s0
; ZIP-NEXT: addi sp, sp, 80
-; ZIP-NEXT: .cfi_def_cfa_offset 0
; ZIP-NEXT: ret
%res = call <vscale x 14 x double> @llvm.vector.interleave7.nxv14f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6)
ret <vscale x 14 x double> %res