[llvm] [RISCV] Use ri.vzip2{a, b} for interleave2 if available (PR #136364)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 18 13:28:56 PDT 2025
https://github.com/preames updated https://github.com/llvm/llvm-project/pull/136364
>From 5fde9252cb274a9b372f85a3c78631a6453fed68 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 18 Apr 2025 11:02:32 -0700
Subject: [PATCH 1/2] [RISCV] Use ri.vzip2{a,b} for interleave2 if available
If XRivosVizip is available, the ri.vzip2a and ri.vzip2b instructions can
be used to perform an interleave shuffle. This patch only affects the
intrinsic lowering (and thus scalable vectors). Fixed vectors go through
shuffle lowering, where the zip2a (but not zip2b) case is already handled.
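
For illustration, one of the updated cases from vector-interleave.ll shows the
new lowering when the extension is enabled: ri.vzip2a produces the first (low)
half of the interleaved result and ri.vzip2b the second (high) half. The
interleave2 intrinsic and the resulting assembly (from the ZIP check lines in
the test diff below) look like this:

  define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
    %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
    ret <vscale x 8 x i32> %res
  }

  ; With -mattr=+m,+v,+experimental-xrivosvizip this now compiles to:
  ;   vsetvli a0, zero, e32, m2, ta, ma
  ;   vmv2r.v v12, v10
  ;   vmv2r.v v14, v8
  ;   ri.vzip2b.vv v10, v8, v12   # high half of the interleaved result
  ;   ri.vzip2a.vv v8, v14, v12   # low half of the interleaved result
  ;   ret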
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 20 +-
llvm/lib/Target/RISCV/RISCVISelLowering.h | 1 +
llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td | 3 +
.../CodeGen/RISCV/rvv/vector-interleave.ll | 1350 +++++++++++++----
4 files changed, 1085 insertions(+), 289 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 98fba9e86e88a..6abcba4fa4935 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5018,8 +5018,8 @@ static SDValue lowerVZIP(unsigned Opc, SDValue Op0, SDValue Op1,
const SDLoc &DL, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
assert(RISCVISD::RI_VZIPEVEN_VL == Opc || RISCVISD::RI_VZIPODD_VL == Opc ||
- RISCVISD::RI_VZIP2A_VL == Opc || RISCVISD::RI_VUNZIP2A_VL == Opc ||
- RISCVISD::RI_VUNZIP2B_VL == Opc);
+ RISCVISD::RI_VZIP2A_VL == Opc || RISCVISD::RI_VZIP2B_VL == Opc ||
+ RISCVISD::RI_VUNZIP2A_VL == Opc || RISCVISD::RI_VUNZIP2B_VL == Opc);
assert(Op0.getSimpleValueType() == Op1.getSimpleValueType());
MVT VT = Op0.getSimpleValueType();
@@ -6935,7 +6935,7 @@ static bool hasPassthruOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 132 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 133 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::ADD_VL && Opcode <= RISCVISD::VFMAX_VL)
@@ -6959,7 +6959,7 @@ static bool hasMaskOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(
- RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 132 &&
+ RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP == 133 &&
RISCVISD::LAST_STRICTFP_OPCODE - RISCVISD::FIRST_STRICTFP_OPCODE == 21 &&
"adding target specific op should update this function");
if (Opcode >= RISCVISD::TRUNCATE_VECTOR_VL && Opcode <= RISCVISD::SETCC_VL)
@@ -11753,6 +11753,17 @@ SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
return DAG.getMergeValues(Loads, DL);
}
+ // Use ri.vzip2{a,b} if available
+ // TODO: Figure out the best lowering for the spread variants
+ if (Subtarget.hasVendorXRivosVizip() &&
+ !Op.getOperand(0).isUndef() && !Op.getOperand(1).isUndef()) {
+ SDValue V1 = Op->getOperand(0);
+ SDValue V2 = Op->getOperand(1);
+ SDValue Lo = lowerVZIP(RISCVISD::RI_VZIP2A_VL, V1, V2, DL, DAG, Subtarget);
+ SDValue Hi = lowerVZIP(RISCVISD::RI_VZIP2B_VL, V1, V2, DL, DAG, Subtarget);
+ return DAG.getMergeValues({Lo, Hi}, DL);
+ }
+
// If the element type is smaller than ELEN, then we can interleave with
// vwaddu.vv and vwmaccu.vx
if (VecVT.getScalarSizeInBits() < Subtarget.getELen()) {
@@ -22256,6 +22267,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(RI_VZIPEVEN_VL)
NODE_NAME_CASE(RI_VZIPODD_VL)
NODE_NAME_CASE(RI_VZIP2A_VL)
+ NODE_NAME_CASE(RI_VZIP2B_VL)
NODE_NAME_CASE(RI_VUNZIP2A_VL)
NODE_NAME_CASE(RI_VUNZIP2B_VL)
NODE_NAME_CASE(READ_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index baf1b2e4d8e6e..6e50ab8e1f296 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -408,6 +408,7 @@ enum NodeType : unsigned {
RI_VZIPEVEN_VL,
RI_VZIPODD_VL,
RI_VZIP2A_VL,
+ RI_VZIP2B_VL,
RI_VUNZIP2A_VL,
RI_VUNZIP2B_VL,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td
index 147f89850765a..110dfdff7f29a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXRivos.td
@@ -71,6 +71,7 @@ defm RI_VUNZIP2B_V : VALU_IV_V<"ri.vunzip2b", 0b011000>;
def ri_vzipeven_vl : SDNode<"RISCVISD::RI_VZIPEVEN_VL", SDT_RISCVIntBinOp_VL>;
def ri_vzipodd_vl : SDNode<"RISCVISD::RI_VZIPODD_VL", SDT_RISCVIntBinOp_VL>;
def ri_vzip2a_vl : SDNode<"RISCVISD::RI_VZIP2A_VL", SDT_RISCVIntBinOp_VL>;
+def ri_vzip2b_vl : SDNode<"RISCVISD::RI_VZIP2B_VL", SDT_RISCVIntBinOp_VL>;
def ri_vunzip2a_vl : SDNode<"RISCVISD::RI_VUNZIP2A_VL", SDT_RISCVIntBinOp_VL>;
def ri_vunzip2b_vl : SDNode<"RISCVISD::RI_VUNZIP2B_VL", SDT_RISCVIntBinOp_VL>;
@@ -84,6 +85,7 @@ let Predicates = [HasVendorXRivosVizip],
defm PseudoRI_VZIPEVEN : RIVPseudoVALU_VV;
defm PseudoRI_VZIPODD : RIVPseudoVALU_VV;
defm PseudoRI_VZIP2A : RIVPseudoVALU_VV;
+defm PseudoRI_VZIP2B : RIVPseudoVALU_VV;
defm PseudoRI_VUNZIP2A : RIVPseudoVALU_VV;
defm PseudoRI_VUNZIP2B : RIVPseudoVALU_VV;
}
@@ -102,6 +104,7 @@ multiclass RIVPatBinaryVL_VV<SDPatternOperator vop, string instruction_name,
defm : RIVPatBinaryVL_VV<ri_vzipeven_vl, "PseudoRI_VZIPEVEN">;
defm : RIVPatBinaryVL_VV<ri_vzipodd_vl, "PseudoRI_VZIPODD">;
defm : RIVPatBinaryVL_VV<ri_vzip2a_vl, "PseudoRI_VZIP2A">;
+defm : RIVPatBinaryVL_VV<ri_vzip2b_vl, "PseudoRI_VZIP2B">;
defm : RIVPatBinaryVL_VV<ri_vunzip2a_vl, "PseudoRI_VUNZIP2A">;
defm : RIVPatBinaryVL_VV<ri_vunzip2b_vl, "PseudoRI_VUNZIP2B">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index b0eac8bdf48dd..a6322c50ff233 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -1,34 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,RV64
-; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvbb,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=ZVBB,ZVBB-RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvbb,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=ZVBB,ZVBB-RV64
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin,+experimental-xrivosvizip | FileCheck %s --check-prefixes=CHECK,ZIP
; Integers
define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
-; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: vmsne.vi v12, v10, 0
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: add a0, a1, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v0, v12, a1
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv32i1_nxv16i1:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; V-NEXT: vmv1r.v v9, v0
+; V-NEXT: vmv1r.v v0, v8
+; V-NEXT: vmv.v.i v10, 0
+; V-NEXT: li a0, -1
+; V-NEXT: csrr a1, vlenb
+; V-NEXT: vmerge.vim v12, v10, 1, v0
+; V-NEXT: vmv1r.v v0, v9
+; V-NEXT: vmerge.vim v14, v10, 1, v0
+; V-NEXT: srli a1, a1, 2
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: vmsne.vi v12, v10, 0
+; V-NEXT: vmsne.vi v0, v8, 0
+; V-NEXT: add a0, a1, a1
+; V-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; V-NEXT: vslideup.vx v0, v12, a1
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB: # %bb.0:
@@ -49,20 +50,40 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
; ZVBB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; ZVBB-NEXT: vslideup.vx v0, v8, a1
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv32i1_nxv16i1:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZIP-NEXT: vmv1r.v v9, v0
+; ZIP-NEXT: vmv1r.v v0, v8
+; ZIP-NEXT: vmv.v.i v10, 0
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: vmerge.vim v12, v10, 1, v0
+; ZIP-NEXT: vmv1r.v v0, v9
+; ZIP-NEXT: vmerge.vim v8, v10, 1, v0
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v14, v8, v12
+; ZIP-NEXT: vmsne.vi v8, v10, 0
+; ZIP-NEXT: vmsne.vi v0, v14, 0
+; ZIP-NEXT: add a1, a0, a0
+; ZIP-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; ZIP-NEXT: vslideup.vx v0, v8, a0
+; ZIP-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
ret <vscale x 32 x i1> %res
}
define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; CHECK-LABEL: vector_interleave_nxv32i8_nxv16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv32i8_nxv16i8:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZVBB: # %bb.0:
@@ -72,20 +93,29 @@ define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8>
; ZVBB-NEXT: vwsll.vi v8, v12, 8
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv32i8_nxv16i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
ret <vscale x 32 x i8> %res
}
define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; CHECK-LABEL: vector_interleave_nxv16i16_nxv8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv16i16_nxv8i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZVBB: # %bb.0:
@@ -95,20 +125,29 @@ define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16
; ZVBB-NEXT: vwsll.vi v8, v12, 16
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16i16_nxv8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
ret <vscale x 16 x i16> %res
}
define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
-; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv8i32_nxv4i32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZVBB: # %bb.0:
@@ -119,25 +158,34 @@ define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32>
; ZVBB-NEXT: vwsll.vx v8, v12, a0
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8i32_nxv4i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
ret <vscale x 8 x i32> %res
}
define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
-; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vid.v v12
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vand.vi v13, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v13, 0
-; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
-; CHECK-NEXT: vmv.v.v v8, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv4i64_nxv2i64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; V-NEXT: vid.v v12
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vand.vi v13, v12, 1
+; V-NEXT: vmsne.vi v0, v13, 0
+; V-NEXT: vsrl.vi v16, v12, 1
+; V-NEXT: vadd.vx v16, v16, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; V-NEXT: vrgatherei16.vv v12, v8, v16
+; V-NEXT: vmv.v.v v8, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZVBB: # %bb.0:
@@ -153,32 +201,41 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT: vmv.v.v v8, v12
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4i64_nxv2i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
ret <vscale x 4 x i64> %res
}
define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
-; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmv.v.i v24, 0
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v16, v8, 0
-; CHECK-NEXT: vmsne.vi v8, v0, 0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv128i1_nxv64i1:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; V-NEXT: vmv1r.v v9, v0
+; V-NEXT: vmv1r.v v0, v8
+; V-NEXT: vmv.v.i v24, 0
+; V-NEXT: li a0, -1
+; V-NEXT: vmerge.vim v16, v24, 1, v0
+; V-NEXT: vmv1r.v v0, v9
+; V-NEXT: vmerge.vim v24, v24, 1, v0
+; V-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; V-NEXT: vmsne.vi v16, v8, 0
+; V-NEXT: vmsne.vi v8, v0, 0
+; V-NEXT: vmv1r.v v0, v16
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZVBB: # %bb.0:
@@ -197,22 +254,42 @@ define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1
; ZVBB-NEXT: vmsne.vi v8, v0, 0
; ZVBB-NEXT: vmv1r.v v0, v16
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv128i1_nxv64i1:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; ZIP-NEXT: vmv1r.v v9, v0
+; ZIP-NEXT: vmv1r.v v0, v8
+; ZIP-NEXT: vmv.v.i v24, 0
+; ZIP-NEXT: vmerge.vim v16, v24, 1, v0
+; ZIP-NEXT: vmv1r.v v0, v9
+; ZIP-NEXT: vmerge.vim v8, v24, 1, v0
+; ZIP-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v4, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v28, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v0, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v24, v12, v20
+; ZIP-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; ZIP-NEXT: vmsne.vi v9, v0, 0
+; ZIP-NEXT: vmsne.vi v8, v24, 0
+; ZIP-NEXT: vmv1r.v v0, v9
+; ZIP-NEXT: ret
%res = call <vscale x 128 x i1> @llvm.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
ret <vscale x 128 x i1> %res
}
define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
-; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv128i8_nxv64i8:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB: # %bb.0:
@@ -224,22 +301,33 @@ define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv128i8_nxv64i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 128 x i8> @llvm.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
ret <vscale x 128 x i8> %res
}
define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
-; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv64i16_nxv32i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB: # %bb.0:
@@ -251,22 +339,33 @@ define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv64i16_nxv32i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 64 x i16> @llvm.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
ret <vscale x 64 x i16> %res
}
define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
-; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv32i32_nxv16i32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
@@ -279,29 +378,40 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv32i32_nxv16i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
ret <vscale x 32 x i32> %res
}
define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
-; CHECK-LABEL: vector_interleave_nxv16i64_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vid.v v6
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: vmv4r.v v28, v16
-; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vand.vi v8, v6, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vsrl.vi v6, v6, 1
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: vmv.v.v v16, v24
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv16i64_nxv8i64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; V-NEXT: vid.v v6
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: srli a0, a0, 1
+; V-NEXT: vmv4r.v v28, v16
+; V-NEXT: vmv4r.v v16, v12
+; V-NEXT: vand.vi v8, v6, 1
+; V-NEXT: vmsne.vi v0, v8, 0
+; V-NEXT: vsrl.vi v6, v6, 1
+; V-NEXT: vadd.vx v6, v6, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; V-NEXT: vrgatherei16.vv v8, v24, v6
+; V-NEXT: vrgatherei16.vv v24, v16, v6
+; V-NEXT: vmv.v.v v16, v24
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZVBB: # %bb.0:
@@ -321,6 +431,17 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16i64_nxv8i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
ret <vscale x 16 x i64> %res
}
@@ -329,21 +450,21 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; Floats
define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
-; CHECK-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vwaddu.vv v10, v8, v9
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vwmaccu.vx v10, a0, v9
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v10, a1
-; CHECK-NEXT: add a0, a1, a1
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v10, v8, a1
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vwaddu.vv v10, v8, v9
+; V-NEXT: li a0, -1
+; V-NEXT: csrr a1, vlenb
+; V-NEXT: vwmaccu.vx v10, a0, v9
+; V-NEXT: srli a1, a1, 2
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v8, v10, a1
+; V-NEXT: add a0, a1, a1
+; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT: vslideup.vx v10, v8, a1
+; V-NEXT: vmv.v.v v8, v10
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
; ZVBB: # %bb.0:
@@ -359,20 +480,33 @@ define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x
; ZVBB-NEXT: vslideup.vx v10, v8, a0
; ZVBB-NEXT: vmv.v.v v8, v10
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: add a1, a0, a0
+; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v10, v11, a0
+; ZIP-NEXT: vmv.v.v v8, v10
+; ZIP-NEXT: ret
%res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
ret <vscale x 4 x bfloat> %res
}
define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
-; CHECK-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vwaddu.vv v8, v11, v10
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v10
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
; ZVBB: # %bb.0:
@@ -382,26 +516,35 @@ define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x
; ZVBB-NEXT: vwsll.vi v8, v10, 16
; ZVBB-NEXT: vwaddu.wv v8, v8, v11
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
%res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
ret <vscale x 8 x bfloat> %res
}
define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
-; CHECK-LABEL: vector_interleave_nxv4f16_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vwaddu.vv v10, v8, v9
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: vwmaccu.vx v10, a0, v9
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v10, a1
-; CHECK-NEXT: add a0, a1, a1
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vslideup.vx v10, v8, a1
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vwaddu.vv v10, v8, v9
+; V-NEXT: li a0, -1
+; V-NEXT: csrr a1, vlenb
+; V-NEXT: vwmaccu.vx v10, a0, v9
+; V-NEXT: srli a1, a1, 2
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v8, v10, a1
+; V-NEXT: add a0, a1, a1
+; V-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; V-NEXT: vslideup.vx v10, v8, a1
+; V-NEXT: vmv.v.v v8, v10
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
; ZVBB: # %bb.0:
@@ -417,20 +560,33 @@ define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half
; ZVBB-NEXT: vslideup.vx v10, v8, a0
; ZVBB-NEXT: vmv.v.v v8, v10
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9
+; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: add a1, a0, a0
+; ZIP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v10, v11, a0
+; ZIP-NEXT: vmv.v.v v8, v10
+; ZIP-NEXT: ret
%res = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
ret <vscale x 4 x half> %res
}
define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
-; CHECK-LABEL: vector_interleave_nxv8f16_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vwaddu.vv v8, v11, v10
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v10
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
; ZVBB: # %bb.0:
@@ -440,20 +596,29 @@ define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half
; ZVBB-NEXT: vwsll.vi v8, v10, 16
; ZVBB-NEXT: vwaddu.wv v8, v8, v11
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
%res = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
ret <vscale x 8 x half> %res
}
define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
-; CHECK-LABEL: vector_interleave_nxv4f32_nxv2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v8
-; CHECK-NEXT: vwaddu.vv v8, v11, v10
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v10
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; V-NEXT: vmv1r.v v10, v9
+; V-NEXT: vmv1r.v v11, v8
+; V-NEXT: vwaddu.vv v8, v11, v10
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
; ZVBB: # %bb.0:
@@ -464,20 +629,29 @@ define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x flo
; ZVBB-NEXT: vwsll.vx v8, v10, a0
; ZVBB-NEXT: vwaddu.wv v8, v8, v11
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv1r.v v10, v9
+; ZIP-NEXT: vmv1r.v v11, v8
+; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10
+; ZIP-NEXT: ret
%res = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
ret <vscale x 4 x float> %res
}
define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
-; CHECK-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
; ZVBB: # %bb.0:
@@ -487,20 +661,29 @@ define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8
; ZVBB-NEXT: vwsll.vi v8, v12, 16
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
ret <vscale x 16 x bfloat> %res
}
define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
-; CHECK-LABEL: vector_interleave_nxv16f16_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
; ZVBB: # %bb.0:
@@ -510,20 +693,29 @@ define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x ha
; ZVBB-NEXT: vwsll.vi v8, v12, 16
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
ret <vscale x 16 x half> %res
}
define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
-; CHECK-LABEL: vector_interleave_nxv8f32_nxv4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v8
-; CHECK-NEXT: vwaddu.vv v8, v14, v12
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; V-NEXT: vmv2r.v v12, v10
+; V-NEXT: vmv2r.v v14, v8
+; V-NEXT: vwaddu.vv v8, v14, v12
+; V-NEXT: li a0, -1
+; V-NEXT: vwmaccu.vx v8, a0, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
; ZVBB: # %bb.0:
@@ -534,25 +726,34 @@ define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x flo
; ZVBB-NEXT: vwsll.vx v8, v12, a0
; ZVBB-NEXT: vwaddu.wv v8, v8, v14
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
ret <vscale x 8 x float> %res
}
define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
-; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vid.v v12
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vand.vi v13, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v13, 0
-; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
-; CHECK-NEXT: vmv.v.v v8, v12
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; V-NEXT: vid.v v12
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vand.vi v13, v12, 1
+; V-NEXT: vmsne.vi v0, v13, 0
+; V-NEXT: vsrl.vi v16, v12, 1
+; V-NEXT: vadd.vx v16, v16, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; V-NEXT: vrgatherei16.vv v12, v8, v16
+; V-NEXT: vmv.v.v v8, v12
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
; ZVBB: # %bb.0:
@@ -568,6 +769,15 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT: vmv.v.v v8, v12
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; ZIP-NEXT: vmv2r.v v12, v10
+; ZIP-NEXT: vmv2r.v v14, v8
+; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12
+; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12
+; ZIP-NEXT: ret
%res = call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
ret <vscale x 4 x double> %res
}
@@ -575,17 +785,17 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
-; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
; ZVBB: # %bb.0:
@@ -597,22 +807,33 @@ define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 3
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
ret <vscale x 64 x bfloat> %res
}
define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
-; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB: # %bb.0:
@@ -624,22 +845,33 @@ define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 64 x half> @llvm.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
ret <vscale x 64 x half> %res
}
define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
-; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v24, v16
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwaddu.vv v0, v28, v20
-; CHECK-NEXT: vwmaccu.vx v8, a0, v16
-; CHECK-NEXT: vwmaccu.vx v0, a0, v20
-; CHECK-NEXT: vmv8r.v v16, v0
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: vwaddu.vv v8, v24, v16
+; V-NEXT: li a0, -1
+; V-NEXT: vwaddu.vv v0, v28, v20
+; V-NEXT: vwmaccu.vx v8, a0, v16
+; V-NEXT: vwmaccu.vx v0, a0, v20
+; V-NEXT: vmv8r.v v16, v0
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
@@ -652,29 +884,40 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
; ZVBB-NEXT: vmv8r.v v8, v24
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
ret <vscale x 32 x float> %res
}
define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
-; CHECK-LABEL: vector_interleave_nxv16f64_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vid.v v6
-; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: srli a0, a0, 1
-; CHECK-NEXT: vmv4r.v v28, v16
-; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vand.vi v8, v6, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vsrl.vi v6, v6, 1
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: vmv.v.v v16, v24
-; CHECK-NEXT: ret
+; V-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; V: # %bb.0:
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; V-NEXT: vid.v v6
+; V-NEXT: vmv8r.v v24, v8
+; V-NEXT: srli a0, a0, 1
+; V-NEXT: vmv4r.v v28, v16
+; V-NEXT: vmv4r.v v16, v12
+; V-NEXT: vand.vi v8, v6, 1
+; V-NEXT: vmsne.vi v0, v8, 0
+; V-NEXT: vsrl.vi v6, v6, 1
+; V-NEXT: vadd.vx v6, v6, a0, v0.t
+; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; V-NEXT: vrgatherei16.vv v8, v24, v6
+; V-NEXT: vrgatherei16.vv v24, v16, v6
+; V-NEXT: vmv.v.v v16, v24
+; V-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
; ZVBB: # %bb.0:
@@ -694,6 +937,17 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16
+; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20
+; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16
+; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20
+; ZIP-NEXT: vmv8r.v v8, v24
+; ZIP-NEXT: vmv8r.v v16, v0
+; ZIP-NEXT: ret
%res = call <vscale x 16 x double> @llvm.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
ret <vscale x 16 x double> %res
}
@@ -1460,6 +1714,72 @@ define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8>
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e8.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e8.v v17, (a1)
+; ZIP-NEXT: vl1r.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v11, (a6)
+; ZIP-NEXT: vl1r.v v8, (a0)
+; ZIP-NEXT: vl1r.v v9, (a3)
+; ZIP-NEXT: vl1r.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v15, (a5)
+; ZIP-NEXT: vl1r.v v12, (a6)
+; ZIP-NEXT: vl1r.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8r.v v16, (a2)
+; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
ret <vscale x 80 x i8> %res
}
@@ -1789,6 +2109,72 @@ define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e32.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e32.v v17, (a1)
+; ZIP-NEXT: vl1re32.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re32.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v11, (a6)
+; ZIP-NEXT: vl1re32.v v8, (a0)
+; ZIP-NEXT: vl1re32.v v9, (a3)
+; ZIP-NEXT: vl1re32.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v15, (a5)
+; ZIP-NEXT: vl1re32.v v12, (a6)
+; ZIP-NEXT: vl1re32.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re32.v v16, (a2)
+; ZIP-NEXT: vl8re32.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e)
ret <vscale x 20 x i32> %res
}
@@ -2059,6 +2445,72 @@ define <vscale x 10 x i64> @vector_interleave_nxv10i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a1, 28
+; ZIP-NEXT: mul a0, a0, a1
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v20, v16
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v18, v12
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 2
+; ZIP-NEXT: add a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v16, v8
+; ZIP-NEXT: vmv2r.v v22, v16
+; ZIP-NEXT: vmv2r.v v24, v18
+; ZIP-NEXT: vmv1r.v v26, v20
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v23, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: add a5, a4, a2
+; ZIP-NEXT: vmv1r.v v25, v14
+; ZIP-NEXT: add a6, a5, a2
+; ZIP-NEXT: vmv1r.v v18, v11
+; ZIP-NEXT: vsseg5e64.v v22, (a0)
+; ZIP-NEXT: vmv1r.v v20, v15
+; ZIP-NEXT: vsseg5e64.v v17, (a1)
+; ZIP-NEXT: vl1re64.v v16, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v17, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re64.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v11, (a6)
+; ZIP-NEXT: vl1re64.v v8, (a0)
+; ZIP-NEXT: vl1re64.v v9, (a3)
+; ZIP-NEXT: vl1re64.v v14, (a4)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 10
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v15, (a5)
+; ZIP-NEXT: vl1re64.v v12, (a6)
+; ZIP-NEXT: vl1re64.v v13, (a1)
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vs2r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re64.v v16, (a2)
+; ZIP-NEXT: vl8re64.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 10 x i64> @llvm.vector.interleave5.nxv10i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e)
ret <vscale x 10 x i64> %res
}
@@ -2585,6 +3037,88 @@ define <vscale x 112 x i8> @vector_interleave_nxv112i8_nxv16i8(<vscale x 16 x i8
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e8.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e8.v v21, (a1)
+; ZIP-NEXT: vl1r.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1r.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v11, (a6)
+; ZIP-NEXT: vl1r.v v8, (a0)
+; ZIP-NEXT: vl1r.v v16, (a4)
+; ZIP-NEXT: vl1r.v v9, (a3)
+; ZIP-NEXT: vl1r.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1r.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1r.v v14, (a6)
+; ZIP-NEXT: vl1r.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8r.v v16, (a2)
+; ZIP-NEXT: vl8r.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g)
ret <vscale x 112 x i8> %res
}
@@ -2919,6 +3453,88 @@ define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e16.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e16.v v21, (a1)
+; ZIP-NEXT: vl1re16.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re16.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v11, (a6)
+; ZIP-NEXT: vl1re16.v v8, (a0)
+; ZIP-NEXT: vl1re16.v v16, (a4)
+; ZIP-NEXT: vl1re16.v v9, (a3)
+; ZIP-NEXT: vl1re16.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re16.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re16.v v14, (a6)
+; ZIP-NEXT: vl1re16.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re16.v v16, (a2)
+; ZIP-NEXT: vl8re16.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g)
ret <vscale x 56 x i16> %res
}
@@ -3253,6 +3869,88 @@ define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e32.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e32.v v21, (a1)
+; ZIP-NEXT: vl1re32.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re32.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v11, (a6)
+; ZIP-NEXT: vl1re32.v v8, (a0)
+; ZIP-NEXT: vl1re32.v v16, (a4)
+; ZIP-NEXT: vl1re32.v v9, (a3)
+; ZIP-NEXT: vl1re32.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re32.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re32.v v14, (a6)
+; ZIP-NEXT: vl1re32.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re32.v v16, (a2)
+; ZIP-NEXT: vl8re32.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g)
ret <vscale x 28 x i32> %res
}
@@ -3586,6 +4284,88 @@ define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
+;
+; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64:
+; ZIP: # %bb.0:
+; ZIP-NEXT: addi sp, sp, -80
+; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
+; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
+; ZIP-NEXT: addi s0, sp, 80
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: slli a0, a0, 5
+; ZIP-NEXT: sub sp, sp, a0
+; ZIP-NEXT: andi sp, sp, -64
+; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; ZIP-NEXT: vmv2r.v v26, v20
+; ZIP-NEXT: addi a0, sp, 64
+; ZIP-NEXT: vmv2r.v v24, v16
+; ZIP-NEXT: csrr a1, vlenb
+; ZIP-NEXT: slli a2, a1, 3
+; ZIP-NEXT: sub a1, a2, a1
+; ZIP-NEXT: add a1, sp, a1
+; ZIP-NEXT: addi a1, a1, 64
+; ZIP-NEXT: vmv2r.v v22, v12
+; ZIP-NEXT: csrr a2, vlenb
+; ZIP-NEXT: vmv2r.v v20, v8
+; ZIP-NEXT: vmv1r.v v1, v20
+; ZIP-NEXT: vmv1r.v v3, v22
+; ZIP-NEXT: vmv1r.v v5, v24
+; ZIP-NEXT: vmv1r.v v7, v26
+; ZIP-NEXT: add a3, a0, a2
+; ZIP-NEXT: vmv1r.v v2, v10
+; ZIP-NEXT: add a4, a1, a2
+; ZIP-NEXT: slli a5, a2, 2
+; ZIP-NEXT: vmv1r.v v4, v14
+; ZIP-NEXT: slli a6, a2, 4
+; ZIP-NEXT: add a7, a4, a2
+; ZIP-NEXT: vmv1r.v v6, v18
+; ZIP-NEXT: sub a5, a6, a5
+; ZIP-NEXT: vmv1r.v v22, v11
+; ZIP-NEXT: add a6, a7, a2
+; ZIP-NEXT: vmv1r.v v24, v15
+; ZIP-NEXT: vsseg7e64.v v1, (a0)
+; ZIP-NEXT: vmv1r.v v26, v19
+; ZIP-NEXT: vsseg7e64.v v21, (a1)
+; ZIP-NEXT: vl1re64.v v18, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v19, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v20, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v21, (a6)
+; ZIP-NEXT: add a6, a3, a2
+; ZIP-NEXT: vl1re64.v v10, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v11, (a6)
+; ZIP-NEXT: vl1re64.v v8, (a0)
+; ZIP-NEXT: vl1re64.v v16, (a4)
+; ZIP-NEXT: vl1re64.v v9, (a3)
+; ZIP-NEXT: vl1re64.v v17, (a7)
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: li a3, 14
+; ZIP-NEXT: mul a0, a0, a3
+; ZIP-NEXT: add a0, sp, a0
+; ZIP-NEXT: addi a0, a0, 64
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v12, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: vl1re64.v v13, (a6)
+; ZIP-NEXT: add a6, a6, a2
+; ZIP-NEXT: slli a2, a2, 3
+; ZIP-NEXT: add a2, a0, a2
+; ZIP-NEXT: vl1re64.v v14, (a6)
+; ZIP-NEXT: vl1re64.v v15, (a1)
+; ZIP-NEXT: add a5, a0, a5
+; ZIP-NEXT: vs2r.v v20, (a5)
+; ZIP-NEXT: vs4r.v v16, (a2)
+; ZIP-NEXT: vs8r.v v8, (a0)
+; ZIP-NEXT: vl8re64.v v16, (a2)
+; ZIP-NEXT: vl8re64.v v8, (a0)
+; ZIP-NEXT: addi sp, s0, -80
+; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
+; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
+; ZIP-NEXT: addi sp, sp, 80
+; ZIP-NEXT: ret
%res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g)
ret <vscale x 14 x i64> %res
}
>From ae537363774c8a93557633a32a270251c04b642e Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 18 Apr 2025 13:28:45 -0700
Subject: [PATCH 2/2] Clang-format
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6abcba4fa4935..8f96a8e379425 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11755,8 +11755,8 @@ SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
// Use ri.vzip2{a,b} if available
// TODO: Figure out the best lowering for the spread variants
- if (Subtarget.hasVendorXRivosVizip() &&
- !Op.getOperand(0).isUndef() && !Op.getOperand(1).isUndef()) {
+ if (Subtarget.hasVendorXRivosVizip() && !Op.getOperand(0).isUndef() &&
+ !Op.getOperand(1).isUndef()) {
SDValue V1 = Op->getOperand(0);
SDValue V2 = Op->getOperand(1);
SDValue Lo = lowerVZIP(RISCVISD::RI_VZIP2A_VL, V1, V2, DL, DAG, Subtarget);
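For readers following along, here is a minimal sketch of the intrinsic-lowering path this series targets. Both the IR and the instruction shape below are illustrative only -- the function name and register choices are not taken from the generated checks above, and the exact schedule depends on register allocation:

define <vscale x 4 x i32> @interleave2_sketch(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
  %res = call <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
  ret <vscale x 4 x i32> %res
}

; With XRivosVizip enabled, the new lowering should produce roughly:
;   vsetvli a0, zero, e32, m1, ta, ma
;   ri.vzip2a.vv v10, v8, v9    ; low half of the interleaved result (RI_VZIP2A_VL -> Lo)
;   ri.vzip2b.vv v11, v8, v9    ; high half of the interleaved result (RI_VZIP2B_VL -> Hi)
;   vmv2r.v v8, v10             ; return the concatenated {lo, hi} register pair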