[llvm] [LLVM][CodeGen][AArch64] Add NEON lowering for vector.(de)interleave intrinsics. (PR #169700)

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 26 09:51:43 PST 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-aarch64

Author: Paul Walker (paulwalker-arm)

<details>
<summary>Changes</summary>

While vector.(de)interleave2 lowers today, this is because it's expanded during initial selection.  Any other source of `ISD::VECTOR_(DE)INTERLEAVE` (e.g. vector.(de)interleave4) will result in failure.  I doubt I've covered all the cases, but this patch puts NEON mostly at parity with SVE, with the exception of vector.(de)interleave3.

---

Patch is 28.77 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169700.diff


3 Files Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+12-8) 
- (modified) llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll (+257-13) 
- (modified) llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll (+242-1) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 83ce39fa314d1..b292db83ea9da 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2092,6 +2092,8 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT) {
   setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
   setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
+  setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
+  setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
   setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
   setOperationAction(ISD::SRA, VT, Custom);
@@ -30990,10 +30992,8 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
   EVT OpVT = Op.getValueType();
-  assert(OpVT.isScalableVector() &&
-         "Expected scalable vector in LowerVECTOR_DEINTERLEAVE.");
 
-  if (Op->getNumOperands() == 3) {
+  if (OpVT.isScalableVector() && Op->getNumOperands() == 3) {
     // aarch64_sve_ld3 only supports packed datatypes.
     EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount());
     Align Alignment = DAG.getReducedAlign(PackedVT, /*UseABI=*/false);
@@ -31032,7 +31032,7 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
 
   // Are multi-register uzp instructions available?
   if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
-      OpVT.getVectorElementType() != MVT::i1) {
+      OpVT.isScalableVector() && OpVT.getVectorElementType() != MVT::i1) {
     Intrinsic::ID IntID;
     switch (Op->getNumOperands()) {
     default:
@@ -31057,6 +31057,9 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
   if (Op->getNumOperands() != 2)
     return SDValue();
 
+  if (OpVT == MVT::v1i64 || OpVT == MVT::v1f64)
+    return DAG.getMergeValues({Op.getOperand(0), Op.getOperand(1)}, DL);
+
   SDValue Even = DAG.getNode(AArch64ISD::UZP1, DL, OpVT, Op.getOperand(0),
                              Op.getOperand(1));
   SDValue Odd = DAG.getNode(AArch64ISD::UZP2, DL, OpVT, Op.getOperand(0),
@@ -31068,10 +31071,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
                                                       SelectionDAG &DAG) const {
   SDLoc DL(Op);
   EVT OpVT = Op.getValueType();
-  assert(OpVT.isScalableVector() &&
-         "Expected scalable vector in LowerVECTOR_INTERLEAVE.");
 
-  if (Op->getNumOperands() == 3) {
+  if (OpVT.isScalableVector() && Op->getNumOperands() == 3) {
     // aarch64_sve_st3 only supports packed datatypes.
     EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount());
     SmallVector<SDValue, 3> InVecs;
@@ -31109,7 +31110,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
 
   // Are multi-register zip instructions available?
   if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
-      OpVT.getVectorElementType() != MVT::i1) {
+      OpVT.isScalableVector() && OpVT.getVectorElementType() != MVT::i1) {
     Intrinsic::ID IntID;
     switch (Op->getNumOperands()) {
     default:
@@ -31134,6 +31135,9 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
   if (Op->getNumOperands() != 2)
     return SDValue();
 
+  if (OpVT == MVT::v1i64 || OpVT == MVT::v1f64)
+    return DAG.getMergeValues({Op.getOperand(0), Op.getOperand(1)}, DL);
+
   SDValue Lo = DAG.getNode(AArch64ISD::ZIP1, DL, OpVT, Op.getOperand(0),
                            Op.getOperand(1));
   SDValue Hi = DAG.getNode(AArch64ISD::ZIP2, DL, OpVT, Op.getOperand(0),
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
index 4ab5db450a7f3..12c521092ced4 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-deinterleave.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=0 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define {<2 x half>, <2 x half>} @vector_deinterleave_v2f16_v4f16(<4 x half> %vec) {
 ; CHECK-SD-LABEL: vector_deinterleave_v2f16_v4f16:
@@ -135,18 +135,262 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) {
   ret {<2 x i64>, <2 x i64>}   %retval
 }
 
+define {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @vector_deinterleave4_v8i8_v32i8(<32 x i8> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v8i8_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp2 v2.16b, v1.16b, v0.16b
+; CHECK-NEXT:    uzp1 v3.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp2 v4.16b, v0.16b, v0.16b
+; CHECK-NEXT:    xtn v5.8b, v1.8h
+; CHECK-NEXT:    xtn v6.8b, v0.8h
+; CHECK-NEXT:    xtn v0.8b, v3.8h
+; CHECK-NEXT:    uzp1 v1.8b, v4.8b, v2.8b
+; CHECK-NEXT:    uzp2 v3.8b, v4.8b, v2.8b
+; CHECK-NEXT:    uzp2 v2.8b, v6.8b, v5.8b
+; CHECK-NEXT:    ret
+  %retval = call {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} @llvm.vector.deinterleave4.v32i8(<32 x i8> %vec)
+  ret {<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>} %retval
+}
+
+define {<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>} @vector_deinterleave4_v16i8_v64i8(<64 x i8> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v16i8_v64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.16b, v2.16b, v3.16b
+; CHECK-NEXT:    uzp1 v5.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp2 v3.16b, v2.16b, v3.16b
+; CHECK-NEXT:    uzp2 v6.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uzp1 v0.16b, v5.16b, v4.16b
+; CHECK-NEXT:    uzp2 v2.16b, v5.16b, v4.16b
+; CHECK-NEXT:    uzp1 v1.16b, v6.16b, v3.16b
+; CHECK-NEXT:    uzp2 v3.16b, v6.16b, v3.16b
+; CHECK-NEXT:    ret
+  %retval = call {<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>} @llvm.vector.deinterleave4.v64i8(<64 x i8> %vec)
+  ret {<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>} %retval
+}
+
+define {<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>} @vector_deinterleave4_v4i16_v16i16(<16 x i16> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v4i16_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp2 v2.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp1 v3.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp2 v4.8h, v0.8h, v0.8h
+; CHECK-NEXT:    xtn v5.4h, v1.4s
+; CHECK-NEXT:    xtn v6.4h, v0.4s
+; CHECK-NEXT:    xtn v0.4h, v3.4s
+; CHECK-NEXT:    uzp1 v1.4h, v4.4h, v2.4h
+; CHECK-NEXT:    uzp2 v3.4h, v4.4h, v2.4h
+; CHECK-NEXT:    uzp2 v2.4h, v6.4h, v5.4h
+; CHECK-NEXT:    ret
+  %retval = call {<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>} @llvm.vector.deinterleave4.v16i16(<16 x i16> %vec)
+  ret {<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>} %retval
+}
+
+define {<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>} @vector_deinterleave4_v8i16_v32i16(<32 x i16> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v8i16_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp1 v5.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp2 v3.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp2 v6.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp2 v2.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp1 v1.8h, v6.8h, v3.8h
+; CHECK-NEXT:    uzp2 v3.8h, v6.8h, v3.8h
+; CHECK-NEXT:    ret
+  %retval = call {<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>} @llvm.vector.deinterleave4.v32i16(<32 x i16> %vec)
+  ret {<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>} %retval
+}
+
+define {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @vector_deinterleave4_v2i32_v8i32(<8 x i32> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v2i32_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp2 v2.4s, v1.4s, v0.4s
+; CHECK-NEXT:    uzp1 v3.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp2 v4.4s, v0.4s, v0.4s
+; CHECK-NEXT:    xtn v5.2s, v1.2d
+; CHECK-NEXT:    xtn v6.2s, v0.2d
+; CHECK-NEXT:    xtn v0.2s, v3.2d
+; CHECK-NEXT:    uzp1 v1.2s, v4.2s, v2.2s
+; CHECK-NEXT:    uzp2 v3.2s, v4.2s, v2.2s
+; CHECK-NEXT:    uzp2 v2.2s, v6.2s, v5.2s
+; CHECK-NEXT:    ret
+  %retval = call {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} @llvm.vector.deinterleave4.v8i32(<8 x i32> %vec)
+  ret {<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>} %retval
+}
+
+define {<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>} @vector_deinterleave4_v4i32_v16i32(<16 x i32> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v4i32_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.4s, v2.4s, v3.4s
+; CHECK-NEXT:    uzp1 v5.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp2 v3.4s, v2.4s, v3.4s
+; CHECK-NEXT:    uzp2 v6.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp1 v0.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp2 v2.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp1 v1.4s, v6.4s, v3.4s
+; CHECK-NEXT:    uzp2 v3.4s, v6.4s, v3.4s
+; CHECK-NEXT:    ret
+  %retval = call {<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>} @llvm.vector.deinterleave4.v16i32(<16 x i32> %vec)
+  ret {<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>} %retval
+}
+
+define {<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>} @vector_deinterleave4_v1i64_v4i64(<4 x i64> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v1i64_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v2.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT:    ext v3.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    // kill: def $d2 killed $d2 killed $q2
+; CHECK-NEXT:    // kill: def $d3 killed $d3 killed $q3
+; CHECK-NEXT:    ret
+  %retval = call {<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>} @llvm.vector.deinterleave4.v4i64(<4 x i64> %vec)
+  ret {<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>} %retval
+}
 
-; Floating declarations
-declare {<2 x half>,<2 x half>} @llvm.vector.deinterleave2.v4f16(<4 x half>)
-declare {<4 x half>, <4 x half>} @llvm.vector.deinterleave2.v8f16(<8 x half>)
-declare {<2 x float>, <2 x float>} @llvm.vector.deinterleave2.v4f32(<4 x float>)
-declare {<8 x half>, <8 x half>} @llvm.vector.deinterleave2.v16f16(<16 x half>)
-declare {<4 x float>, <4 x float>} @llvm.vector.deinterleave2.v8f32(<8 x float>)
-declare {<2 x double>, <2 x double>} @llvm.vector.deinterleave2.v4f64(<4 x double>)
+define {<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>} @vector_deinterleave4_v2i64_v8i64(<8 x i64> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v2i64_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.2d, v2.2d, v3.2d
+; CHECK-NEXT:    uzp1 v5.2d, v0.2d, v1.2d
+; CHECK-NEXT:    uzp2 v3.2d, v2.2d, v3.2d
+; CHECK-NEXT:    uzp2 v6.2d, v0.2d, v1.2d
+; CHECK-NEXT:    uzp1 v0.2d, v5.2d, v4.2d
+; CHECK-NEXT:    uzp2 v2.2d, v5.2d, v4.2d
+; CHECK-NEXT:    uzp1 v1.2d, v6.2d, v3.2d
+; CHECK-NEXT:    uzp2 v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    ret
+  %retval = call {<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>} @llvm.vector.deinterleave4.v8i64(<8 x i64> %vec)
+  ret {<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>} %retval
+}
 
-; Integer declarations
-declare {<16 x i8>, <16 x i8>} @llvm.vector.deinterleave2.v32i8(<32 x i8>)
-declare {<8 x i16>, <8 x i16>} @llvm.vector.deinterleave2.v16i16(<16 x i16>)
-declare {<4 x i32>, <4 x i32>} @llvm.vector.deinterleave2.v8i32(<8 x i32>)
-declare {<2 x i64>, <2 x i64>} @llvm.vector.deinterleave2.v4i64(<4 x i64>)
+define {<4 x half>, <4 x half>, <4 x half>, <4 x half>} @vector_deinterleave4_v4f16_v16f16(<16 x half> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v4f16_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v2.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp1 v3.8h, v0.8h, v0.8h
+; CHECK-NEXT:    uzp2 v4.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp2 v5.8h, v0.8h, v0.8h
+; CHECK-NEXT:    uzp1 v0.4h, v3.4h, v2.4h
+; CHECK-NEXT:    uzp2 v2.4h, v3.4h, v2.4h
+; CHECK-NEXT:    uzp1 v1.4h, v5.4h, v4.4h
+; CHECK-NEXT:    uzp2 v3.4h, v5.4h, v4.4h
+; CHECK-NEXT:    ret
+  %retval = call {<4 x half>, <4 x half>, <4 x half>, <4 x half>} @llvm.vector.deinterleave4.v16f16(<16 x half> %vec)
+  ret {<4 x half>, <4 x half>, <4 x half>, <4 x half>} %retval
+}
+
+define {<8 x half>, <8 x half>, <8 x half>, <8 x half>} @vector_deinterleave4_v8f16_v32f16(<32 x half> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v8f16_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp1 v5.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp2 v3.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp2 v6.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp2 v2.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp1 v1.8h, v6.8h, v3.8h
+; CHECK-NEXT:    uzp2 v3.8h, v6.8h, v3.8h
+; CHECK-NEXT:    ret
+  %retval = call {<8 x half>, <8 x half>, <8 x half>, <8 x half>} @llvm.vector.deinterleave4.v32f16(<32 x half> %vec)
+  ret {<8 x half>, <8 x half>, <8 x half>, <8 x half>} %retval
+}
+
+define {<2 x float>, <2 x float>, <2 x float>, <2 x float>} @vector_deinterleave4_v2f32_v8f32(<8 x float> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v2f32_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v2.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    uzp1 v4.2s, v1.2s, v2.2s
+; CHECK-NEXT:    uzp1 v5.2s, v0.2s, v3.2s
+; CHECK-NEXT:    uzp2 v6.2s, v1.2s, v2.2s
+; CHECK-NEXT:    uzp2 v3.2s, v0.2s, v3.2s
+; CHECK-NEXT:    uzp1 v0.2s, v5.2s, v4.2s
+; CHECK-NEXT:    uzp2 v2.2s, v5.2s, v4.2s
+; CHECK-NEXT:    uzp1 v1.2s, v3.2s, v6.2s
+; CHECK-NEXT:    uzp2 v3.2s, v3.2s, v6.2s
+; CHECK-NEXT:    ret
+  %retval = call {<2 x float>, <2 x float>, <2 x float>, <2 x float>} @llvm.vector.deinterleave4.v8f32(<8 x float> %vec)
+  ret {<2 x float>, <2 x float>, <2 x float>, <2 x float>} %retval
+}
+
+define {<4 x float>, <4 x float>, <4 x float>, <4 x float>} @vector_deinterleave4_v4f32_v16f32(<16 x float> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v4f32_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.4s, v2.4s, v3.4s
+; CHECK-NEXT:    uzp1 v5.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp2 v3.4s, v2.4s, v3.4s
+; CHECK-NEXT:    uzp2 v6.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uzp1 v0.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp2 v2.4s, v5.4s, v4.4s
+; CHECK-NEXT:    uzp1 v1.4s, v6.4s, v3.4s
+; CHECK-NEXT:    uzp2 v3.4s, v6.4s, v3.4s
+; CHECK-NEXT:    ret
+  %retval = call {<4 x float>, <4 x float>, <4 x float>, <4 x float>} @llvm.vector.deinterleave4.v16f32(<16 x float> %vec)
+  ret {<4 x float>, <4 x float>, <4 x float>, <4 x float>} %retval
+}
+
+define {<1 x double>, <1 x double>, <1 x double>, <1 x double>} @vector_deinterleave4_v1f64_v4f64(<4 x double> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v1f64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v2.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT:    ext v3.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    // kill: def $d2 killed $d2 killed $q2
+; CHECK-NEXT:    // kill: def $d3 killed $d3 killed $q3
+; CHECK-NEXT:    ret
+  %retval = call {<1 x double>, <1 x double>, <1 x double>, <1 x double>} @llvm.vector.deinterleave4.v4f64(<4 x double> %vec)
+  ret {<1 x double>, <1 x double>, <1 x double>, <1 x double>} %retval
+}
+
+define {<2 x double>, <2 x double>, <2 x double>, <2 x double>} @vector_deinterleave4_v2f64_v8f64(<8 x double> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v2f64_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.2d, v2.2d, v3.2d
+; CHECK-NEXT:    uzp1 v5.2d, v0.2d, v1.2d
+; CHECK-NEXT:    uzp2 v3.2d, v2.2d, v3.2d
+; CHECK-NEXT:    uzp2 v6.2d, v0.2d, v1.2d
+; CHECK-NEXT:    uzp1 v0.2d, v5.2d, v4.2d
+; CHECK-NEXT:    uzp2 v2.2d, v5.2d, v4.2d
+; CHECK-NEXT:    uzp1 v1.2d, v6.2d, v3.2d
+; CHECK-NEXT:    uzp2 v3.2d, v6.2d, v3.2d
+; CHECK-NEXT:    ret
+  %retval = call {<2 x double>, <2 x double>, <2 x double>, <2 x double>} @llvm.vector.deinterleave4.v8f64(<8 x double> %vec)
+  ret {<2 x double>, <2 x double>, <2 x double>, <2 x double>} %retval
+}
+
+define {<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>} @vector_deinterleave4_v4bf16_v16bf16(<16 x bfloat> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v4bf16_v16bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v2.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp1 v3.8h, v0.8h, v0.8h
+; CHECK-NEXT:    uzp2 v4.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp2 v5.8h, v0.8h, v0.8h
+; CHECK-NEXT:    uzp1 v0.4h, v3.4h, v2.4h
+; CHECK-NEXT:    uzp2 v2.4h, v3.4h, v2.4h
+; CHECK-NEXT:    uzp1 v1.4h, v5.4h, v4.4h
+; CHECK-NEXT:    uzp2 v3.4h, v5.4h, v4.4h
+; CHECK-NEXT:    ret
+  %retval = call {<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>} @llvm.vector.deinterleave4.v16bf16(<16 x bfloat> %vec)
+  ret {<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>} %retval
+}
+
+define {<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>} @vector_deinterleave4_v8bf16_v32bf16(<32 x bfloat> %vec) {
+; CHECK-LABEL: vector_deinterleave4_v8bf16_v32bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uzp1 v4.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp1 v5.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp2 v3.8h, v2.8h, v3.8h
+; CHECK-NEXT:    uzp2 v6.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uzp1 v0.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp2 v2.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp1 v1.8h, v6.8h, v3.8h
+; CHECK-NEXT:    uzp2 v3.8h, v6.8h, v3.8h
+; CHECK-NEXT:    ret
+  %retval = call {<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>} @llvm.vector.deinterleave4.v32bf16(<32 x bfloat> %vec)
+  ret {<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>} %retval
+}
 
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index 05ecc9e7b49d4..6e2410579deda 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-linux-gnu %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel -global-isel-abort=0 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
 ; CHECK-LABEL: interleave2_v4f16:
@@ -211,3 +211,244 @@ define <4 x i16> @interleave2_diff_nonconst_splat_v4i16(i16 %a, i16 %b) {
   ret <4 x i16> %retval
 }
 
+define <32 x i8> @interleave4_v32i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, <8 x i8> %vec3) {
+; CHECK-LABEL: interleave4_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    zip1 v4.8b, v1.8b, v3.8b
+; CHECK-NEXT:    zip1 v5.8b, v0.8b, v2.8b
+; CHECK-NEXT:    zip2 v1.8b, v1.8b, v3.8b
+; CHECK-NEXT:    zip2 v2.8b, v0.8b, v2.8b
+; CHECK-NEXT:    zip1 v0.16b, v5.16b, v4.16b
+; CHECK-NEXT:    zip1 v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    ret
+  %retval = call <32 x i8> @llvm.vector.interleave4.v32i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, <8 x i8> %vec3)
+  ret <32 x i8> %retval
+}
+
+define <64 x i8> @interleave4_v64i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3) {
+; CHECK-LABEL: interleave4_v64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    zip1 v4.16b, v1.16b, v3.16b
+; CHECK-NEXT:    zip1 v5.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip2 v3.16b, v1.16b, v3.16b
+; CHECK-NEXT:    zip2 v6.16b, v0.16b, v2.16b
+; CHECK-NEXT:    zip1 v0.16b, v5.16b, v4.16b
+; CHECK-NEXT:    zip2 v1.16b, v5.16b, v4.16b
+; CHECK-NEXT:    zip1 v2.16b, v6.16b, v3.16b
+; CHECK-NEXT:    zip2 v3.16b, v6.16b, v3.16b
+; CHECK-NEXT:    ret
+  %retval = call <64 x i8> @llvm.vector.interleave4.v64i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3)
+  ret <64 x i8> %retval
+}
+
+define <16 x i16> @interleave4_v16i16(<4 x i16> %vec0, <4 x i16> %vec1, <4 x i16> %vec2, <4 x i16> %vec3) {
+; CHECK-LABEL: interleave4_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    zip1 v4.4h, v1.4h, v3.4h
+; CHECK-NEXT:    zip1 v5.4h, v0.4h, v2.4h
+; CHECK-NEXT:    zip2 v1.4h, v1.4h, v3.4h
+; CHECK-NEXT:    zip2 v2.4h, v0.4h, v2.4h
+; CHECK-NEXT:    zip1 v0.8h, v5.8h, v4.8h
+; CHECK-NEXT:    zip1 v1.8h, v2.8h, v1.8h
+; CHECK-NEXT:    ret
+  %retval = call <16 x i16> @llvm.vect...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/169700


More information about the llvm-commits mailing list