[llvm] [LLVM][CodeGen][AArch64] Lower vector-(de)interleave to multi-register uzp/zip instructions. (PR #143128)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 10 05:47:49 PDT 2025


https://github.com/paulwalker-arm updated https://github.com/llvm/llvm-project/pull/143128

From f31e6869324d1c6623f7d7519ab50d5cc92723e0 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 6 Jun 2025 12:36:18 +0100
Subject: [PATCH 1/4] Update sve-vector-(de)interleave to include streaming-sve
 output.

---
 llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll | 7 ++++++-
 llvm/test/CodeGen/AArch64/sve-vector-interleave.ll   | 7 ++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
index 89fc10b47bb35..be49043e3cd6e 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve2 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
 
 define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv4f16(<vscale x 4 x half> %vec) {
 ; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
@@ -402,3 +404,6 @@ declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv1
 declare {<vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8>)
 declare {<vscale x 4 x i16>, <vscale x 4 x i16>} @llvm.vector.deinterleave2.nxv8i16(<vscale x 8 x i16>)
 declare {<vscale x 2 x i32>, <vscale x 2 x i32>} @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SME2: {{.*}}
+; SVE: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 34d026f43708c..3ec1b7a4b5fe2 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
 
 define <vscale x 4 x half> @interleave2_nxv4f16(<vscale x 2 x half> %vec0, <vscale x 2 x half> %vec1) {
 ; CHECK-LABEL: interleave2_nxv4f16:
@@ -382,3 +384,6 @@ declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>,
 declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SME2: {{.*}}
+; SVE: {{.*}}

From 1e1251cbb5ee5d4ea628685ecd381dec5369a7ac Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 6 Jun 2025 12:39:00 +0100
Subject: [PATCH 2/4] [LLVM][CodeGen] Lower vector-(de)interleave to
 multi-register uzp/zip instructions.

---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  44 ++
 .../AArch64/sve-vector-deinterleave.ll        | 593 ++++++++++++------
 .../CodeGen/AArch64/sve-vector-interleave.ll  | 533 ++++++++++------
 3 files changed, 774 insertions(+), 396 deletions(-)
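
To make the intent of the ISel change easier to review: in streaming mode with SME2 available, a two-way (de)interleave of a legal scalable type is now lowered through the aarch64_sve_uzp_x2/zip_x2 intrinsics instead of a uzp1/uzp2 (zip1/zip2) pair. A minimal IR sketch, illustrative only and not part of the patch (the function name is invented here); the expected selection is taken from the updated tests below:

define {<vscale x 4 x i32>, <vscale x 4 x i32>} @example_deinterleave2(<vscale x 8 x i32> %vec) {
  ; With -mattr=+sme2 -force-streaming this now selects: uzp { z0.s, z1.s }, z0.s, z1.s
  %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %vec)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %retval
}
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32>)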

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9f51caef6d228..882264b859410 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -29210,6 +29210,28 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
   assert(OpVT.isScalableVector() &&
          "Expected scalable vector in LowerVECTOR_DEINTERLEAVE.");
 
+  // Are multi-register uzp instructions available?
+  if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
+      OpVT.getVectorElementType() != MVT::i1) {
+    Intrinsic::ID IntID;
+    switch (Op->getNumOperands()) {
+    default:
+      return SDValue();
+    case 2:
+      IntID = Intrinsic::aarch64_sve_uzp_x2;
+      break;
+    case 4:
+      IntID = Intrinsic::aarch64_sve_uzp_x4;
+      break;
+    }
+
+    SmallVector<SDValue, 5> Ops;
+    Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
+    for (unsigned I = 0; I < Op.getNumOperands(); ++I)
+      Ops.push_back(Op.getOperand(I));
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op->getVTList(), Ops);
+  }
+
   if (Op->getNumOperands() != 2)
     return SDValue();
 
@@ -29227,6 +29249,28 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
   assert(OpVT.isScalableVector() &&
          "Expected scalable vector in LowerVECTOR_INTERLEAVE.");
 
+  // Are multi-register zip instructions available?
+  if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
+      OpVT.getVectorElementType() != MVT::i1) {
+    Intrinsic::ID IntID;
+    switch (Op->getNumOperands()) {
+    default:
+      return SDValue();
+    case 2:
+      IntID = Intrinsic::aarch64_sve_zip_x2;
+      break;
+    case 4:
+      IntID = Intrinsic::aarch64_sve_zip_x4;
+      break;
+    }
+
+    SmallVector<SDValue, 5> Ops;
+    Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
+    for (unsigned I = 0; I < Op.getNumOperands(); ++I)
+      Ops.push_back(Op.getOperand(I));
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op->getVTList(), Ops);
+  }
+
   if (Op->getNumOperands() != 2)
     return SDValue();
 
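The four-operand paths above (aarch64_sve_uzp_x4 and aarch64_sve_zip_x4) handle the deinterleave4/interleave4 intrinsics the same way. For reference, a sketch of the interleave direction, again illustrative only and not part of the patch (invented function name), with the expected selection taken from the tests below:

define <vscale x 8 x i64> @example_interleave4(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3) {
  ; In streaming mode with +sme2 this now selects: zip { z0.d - z3.d }, { z0.d - z3.d }
  %retval = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3)
  ret <vscale x 8 x i64> %retval
}
declare <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
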
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
index be49043e3cd6e..4889861444bbe 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
@@ -4,105 +4,160 @@
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
 
 define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv4f16(<vscale x 4 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z1.s, z0.s, z0.s
-; CHECK-NEXT:    uzp2 z2.s, z0.s, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z1.s, z0.s, z0.s
+; SVE-NEXT:    uzp2 z2.s, z0.s, z0.s
+; SVE-NEXT:    uunpklo z0.d, z1.s
+; SVE-NEXT:    uunpklo z1.d, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.d, z0.s
+; SME2-NEXT:    uunpklo z0.d, z0.s
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x half>, <vscale x 2 x half>} @llvm.vector.deinterleave2.nxv4f16(<vscale x 4 x half> %vec)
   ret {<vscale x 2 x half>, <vscale x 2 x half>} %retval
 }
 
 define {<vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv8f16(<vscale x 8 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv8f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z1.h, z0.h, z0.h
-; CHECK-NEXT:    uzp2 z2.h, z0.h, z0.h
-; CHECK-NEXT:    uunpklo z0.s, z1.h
-; CHECK-NEXT:    uunpklo z1.s, z2.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4f16_nxv8f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z1.h, z0.h, z0.h
+; SVE-NEXT:    uzp2 z2.h, z0.h, z0.h
+; SVE-NEXT:    uunpklo z0.s, z1.h
+; SVE-NEXT:    uunpklo z1.s, z2.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4f16_nxv8f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.s, z0.h
+; SME2-NEXT:    uunpklo z0.s, z0.h
+; SME2-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x half>, <vscale x 4 x half>} @llvm.vector.deinterleave2.nxv8f16(<vscale x 8 x half> %vec)
   ret {<vscale x 4 x half>, <vscale x 4 x half>} %retval
 }
 
 define {<vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv16f16(<vscale x 16 x half> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv16f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    uzp2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8f16_nxv16f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.h, z0.h, z1.h
+; SVE-NEXT:    uzp2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8f16_nxv16f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave2.nxv16f16(<vscale x 16 x half> %vec)
   ret {<vscale x 8 x half>, <vscale x 8 x half>} %retval
 }
 
 define {<vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32_nxv4f32(<vscale x 4 x float> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2f32_nxv4f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z1.s, z0.s, z0.s
-; CHECK-NEXT:    uzp2 z2.s, z0.s, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2f32_nxv4f32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z1.s, z0.s, z0.s
+; SVE-NEXT:    uzp2 z2.s, z0.s, z0.s
+; SVE-NEXT:    uunpklo z0.d, z1.s
+; SVE-NEXT:    uunpklo z1.d, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2f32_nxv4f32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.d, z0.s
+; SME2-NEXT:    uunpklo z0.d, z0.s
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x float>, <vscale x 2 x float>} @llvm.vector.deinterleave2.nxv4f32(<vscale x 4 x float> %vec)
   ret {<vscale x 2 x float>, <vscale x 2 x float>} %retval
 }
 
 define {<vscale x 4 x float>, <vscale x 4 x float>} @vector_deinterleave_nxv4f32_nxv8f32(<vscale x 8 x float> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4f32_nxv8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.s, z0.s, z1.s
-; CHECK-NEXT:    uzp2 z1.s, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4f32_nxv8f32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.s, z0.s, z1.s
+; SVE-NEXT:    uzp2 z1.s, z0.s, z1.s
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4f32_nxv8f32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> %vec)
   ret {<vscale x 4 x float>, <vscale x 4 x float>} %retval
 }
 
 define {<vscale x 2 x double>, <vscale x 2 x double>} @vector_deinterleave_nxv2f64_nxv4f64(<vscale x 4 x double> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2f64_nxv4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z1.d, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2f64_nxv4f64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z1.d, z0.d, z1.d
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2f64_nxv4f64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %vec)
   ret {<vscale x 2 x double>, <vscale x 2 x double>} %retval
 }
 
 define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv4bf16(<vscale x 4 x bfloat> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv4bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z1.s, z0.s, z0.s
-; CHECK-NEXT:    uzp2 z2.s, z0.s, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2bf16_nxv4bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z1.s, z0.s, z0.s
+; SVE-NEXT:    uzp2 z2.s, z0.s, z0.s
+; SVE-NEXT:    uunpklo z0.d, z1.s
+; SVE-NEXT:    uunpklo z1.d, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2bf16_nxv4bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.d, z0.s
+; SME2-NEXT:    uunpklo z0.d, z0.s
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave2.nxv4bf16(<vscale x 4 x bfloat> %vec)
   ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %retval
 }
 
 define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv8bf16(<vscale x 8 x bfloat> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv8bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z1.h, z0.h, z0.h
-; CHECK-NEXT:    uzp2 z2.h, z0.h, z0.h
-; CHECK-NEXT:    uunpklo z0.s, z1.h
-; CHECK-NEXT:    uunpklo z1.s, z2.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4bf16_nxv8bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z1.h, z0.h, z0.h
+; SVE-NEXT:    uzp2 z2.h, z0.h, z0.h
+; SVE-NEXT:    uunpklo z0.s, z1.h
+; SVE-NEXT:    uunpklo z1.s, z2.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4bf16_nxv8bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.s, z0.h
+; SME2-NEXT:    uunpklo z0.s, z0.h
+; SME2-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave2.nxv8bf16(<vscale x 8 x bfloat> %vec)
   ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %retval
 }
 
 define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv16bf16(<vscale x 16 x bfloat> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv16bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    uzp2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8bf16_nxv16bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.h, z0.h, z1.h
+; SVE-NEXT:    uzp2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8bf16_nxv16bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave2.nxv16bf16(<vscale x 16 x bfloat> %vec)
   ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %retval
 }
@@ -110,141 +165,223 @@ define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8b
 ; Integers
 
 define {<vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv32i8(<vscale x 32 x i8> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv32i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.b, z0.b, z1.b
-; CHECK-NEXT:    uzp2 z1.b, z0.b, z1.b
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv16i8_nxv32i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.b, z0.b, z1.b
+; SVE-NEXT:    uzp2 z1.b, z0.b, z1.b
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv16i8_nxv32i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.b, z1.b }, z0.b, z1.b
+; SME2-NEXT:    ret
   %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %vec)
   ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
 }
 
 define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv16i16(<vscale x 16 x i16> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv16i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    uzp2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8i16_nxv16i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.h, z0.h, z1.h
+; SVE-NEXT:    uzp2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8i16_nxv16i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
   ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
 }
 
 define {<vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxvv8i32(<vscale x 8 x i32> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxvv8i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.s, z0.s, z1.s
-; CHECK-NEXT:    uzp2 z1.s, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4i32_nxvv8i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.s, z0.s, z1.s
+; SVE-NEXT:    uzp2 z1.s, z0.s, z1.s
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4i32_nxvv8i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %vec)
   ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %retval
 }
 
 define {<vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv4i64(<vscale x 4 x i64> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv4i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z2.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z1.d, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2i64_nxv4i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z2.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z1.d, z0.d, z1.d
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2i64_nxv4i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> %vec)
   ret {<vscale x 2 x i64>, <vscale x 2 x i64>} %retval
 }
 
 define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_nxv16i8_nxv64i8(<vscale x 64 x i8> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv16i8_nxv64i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z4.b, z2.b, z3.b
-; CHECK-NEXT:    uzp1 z5.b, z0.b, z1.b
-; CHECK-NEXT:    uzp2 z3.b, z2.b, z3.b
-; CHECK-NEXT:    uzp2 z6.b, z0.b, z1.b
-; CHECK-NEXT:    uzp1 z0.b, z5.b, z4.b
-; CHECK-NEXT:    uzp2 z2.b, z5.b, z4.b
-; CHECK-NEXT:    uzp1 z1.b, z6.b, z3.b
-; CHECK-NEXT:    uzp2 z3.b, z6.b, z3.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv16i8_nxv64i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z4.b, z2.b, z3.b
+; SVE-NEXT:    uzp1 z5.b, z0.b, z1.b
+; SVE-NEXT:    uzp2 z3.b, z2.b, z3.b
+; SVE-NEXT:    uzp2 z6.b, z0.b, z1.b
+; SVE-NEXT:    uzp1 z0.b, z5.b, z4.b
+; SVE-NEXT:    uzp2 z2.b, z5.b, z4.b
+; SVE-NEXT:    uzp1 z1.b, z6.b, z3.b
+; SVE-NEXT:    uzp2 z3.b, z6.b, z3.b
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv16i8_nxv64i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    uzp { z0.b - z3.b }, { z0.b - z3.b }
+; SME2-NEXT:    ret
   %retval = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %vec)
   ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %retval
 }
 
 define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_nxv8i16_nxv32i16(<vscale x 32 x i16> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8i16_nxv32i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z4.h, z2.h, z3.h
-; CHECK-NEXT:    uzp1 z5.h, z0.h, z1.h
-; CHECK-NEXT:    uzp2 z3.h, z2.h, z3.h
-; CHECK-NEXT:    uzp2 z6.h, z0.h, z1.h
-; CHECK-NEXT:    uzp1 z0.h, z5.h, z4.h
-; CHECK-NEXT:    uzp2 z2.h, z5.h, z4.h
-; CHECK-NEXT:    uzp1 z1.h, z6.h, z3.h
-; CHECK-NEXT:    uzp2 z3.h, z6.h, z3.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8i16_nxv32i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z4.h, z2.h, z3.h
+; SVE-NEXT:    uzp1 z5.h, z0.h, z1.h
+; SVE-NEXT:    uzp2 z3.h, z2.h, z3.h
+; SVE-NEXT:    uzp2 z6.h, z0.h, z1.h
+; SVE-NEXT:    uzp1 z0.h, z5.h, z4.h
+; SVE-NEXT:    uzp2 z2.h, z5.h, z4.h
+; SVE-NEXT:    uzp1 z1.h, z6.h, z3.h
+; SVE-NEXT:    uzp2 z3.h, z6.h, z3.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8i16_nxv32i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    uzp { z0.h - z3.h }, { z0.h - z3.h }
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.vector.deinterleave4.nxv32i16(<vscale x 32 x i16> %vec)
   ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %retval
 }
 
 define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @vector_deinterleave_nxv4i32_nxv16i32(<vscale x 16 x i32> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4i32_nxv16i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z4.s, z2.s, z3.s
-; CHECK-NEXT:    uzp1 z5.s, z0.s, z1.s
-; CHECK-NEXT:    uzp2 z3.s, z2.s, z3.s
-; CHECK-NEXT:    uzp2 z6.s, z0.s, z1.s
-; CHECK-NEXT:    uzp1 z0.s, z5.s, z4.s
-; CHECK-NEXT:    uzp2 z2.s, z5.s, z4.s
-; CHECK-NEXT:    uzp1 z1.s, z6.s, z3.s
-; CHECK-NEXT:    uzp2 z3.s, z6.s, z3.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4i32_nxv16i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z4.s, z2.s, z3.s
+; SVE-NEXT:    uzp1 z5.s, z0.s, z1.s
+; SVE-NEXT:    uzp2 z3.s, z2.s, z3.s
+; SVE-NEXT:    uzp2 z6.s, z0.s, z1.s
+; SVE-NEXT:    uzp1 z0.s, z5.s, z4.s
+; SVE-NEXT:    uzp2 z2.s, z5.s, z4.s
+; SVE-NEXT:    uzp1 z1.s, z6.s, z3.s
+; SVE-NEXT:    uzp2 z3.s, z6.s, z3.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4i32_nxv16i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    uzp { z0.s - z3.s }, { z0.s - z3.s }
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave4.nxv16i32(<vscale x 16 x i32> %vec)
   ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %retval
 }
 
 define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv8i64(<vscale x 8 x i64> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z4.d, z2.d, z3.d
-; CHECK-NEXT:    uzp1 z5.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z3.d, z2.d, z3.d
-; CHECK-NEXT:    uzp2 z6.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z0.d, z5.d, z4.d
-; CHECK-NEXT:    uzp2 z2.d, z5.d, z4.d
-; CHECK-NEXT:    uzp1 z1.d, z6.d, z3.d
-; CHECK-NEXT:    uzp2 z3.d, z6.d, z3.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z4.d, z2.d, z3.d
+; SVE-NEXT:    uzp1 z5.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z3.d, z2.d, z3.d
+; SVE-NEXT:    uzp2 z6.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z0.d, z5.d, z4.d
+; SVE-NEXT:    uzp2 z2.d, z5.d, z4.d
+; SVE-NEXT:    uzp1 z1.d, z6.d, z3.d
+; SVE-NEXT:    uzp2 z3.d, z6.d, z3.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> %vec)
   ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
 }
 
 define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @vector_deinterleave_nxv2i64_nxv16i64(<vscale x 16 x i64> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z24.d, z6.d, z7.d
-; CHECK-NEXT:    uzp1 z25.d, z4.d, z5.d
-; CHECK-NEXT:    uzp1 z26.d, z2.d, z3.d
-; CHECK-NEXT:    uzp1 z27.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z6.d, z6.d, z7.d
-; CHECK-NEXT:    uzp2 z4.d, z4.d, z5.d
-; CHECK-NEXT:    uzp2 z2.d, z2.d, z3.d
-; CHECK-NEXT:    uzp2 z0.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z5.d, z25.d, z24.d
-; CHECK-NEXT:    uzp2 z24.d, z25.d, z24.d
-; CHECK-NEXT:    uzp1 z7.d, z27.d, z26.d
-; CHECK-NEXT:    uzp1 z28.d, z4.d, z6.d
-; CHECK-NEXT:    uzp2 z25.d, z27.d, z26.d
-; CHECK-NEXT:    uzp1 z29.d, z0.d, z2.d
-; CHECK-NEXT:    uzp2 z26.d, z4.d, z6.d
-; CHECK-NEXT:    uzp2 z27.d, z0.d, z2.d
-; CHECK-NEXT:    uzp1 z0.d, z7.d, z5.d
-; CHECK-NEXT:    uzp1 z2.d, z25.d, z24.d
-; CHECK-NEXT:    uzp2 z4.d, z7.d, z5.d
-; CHECK-NEXT:    uzp1 z1.d, z29.d, z28.d
-; CHECK-NEXT:    uzp1 z3.d, z27.d, z26.d
-; CHECK-NEXT:    uzp2 z5.d, z29.d, z28.d
-; CHECK-NEXT:    uzp2 z6.d, z25.d, z24.d
-; CHECK-NEXT:    uzp2 z7.d, z27.d, z26.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z24.d, z6.d, z7.d
+; SVE-NEXT:    uzp1 z25.d, z4.d, z5.d
+; SVE-NEXT:    uzp1 z26.d, z2.d, z3.d
+; SVE-NEXT:    uzp1 z27.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z6.d, z6.d, z7.d
+; SVE-NEXT:    uzp2 z4.d, z4.d, z5.d
+; SVE-NEXT:    uzp2 z2.d, z2.d, z3.d
+; SVE-NEXT:    uzp2 z0.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z5.d, z25.d, z24.d
+; SVE-NEXT:    uzp2 z24.d, z25.d, z24.d
+; SVE-NEXT:    uzp1 z7.d, z27.d, z26.d
+; SVE-NEXT:    uzp1 z28.d, z4.d, z6.d
+; SVE-NEXT:    uzp2 z25.d, z27.d, z26.d
+; SVE-NEXT:    uzp1 z29.d, z0.d, z2.d
+; SVE-NEXT:    uzp2 z26.d, z4.d, z6.d
+; SVE-NEXT:    uzp2 z27.d, z0.d, z2.d
+; SVE-NEXT:    uzp1 z0.d, z7.d, z5.d
+; SVE-NEXT:    uzp1 z2.d, z25.d, z24.d
+; SVE-NEXT:    uzp2 z4.d, z7.d, z5.d
+; SVE-NEXT:    uzp1 z1.d, z29.d, z28.d
+; SVE-NEXT:    uzp1 z3.d, z27.d, z26.d
+; SVE-NEXT:    uzp2 z5.d, z29.d, z28.d
+; SVE-NEXT:    uzp2 z6.d, z25.d, z24.d
+; SVE-NEXT:    uzp2 z7.d, z27.d, z26.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-NEXT:    uzp { z28.d - z31.d }, { z4.d - z7.d }
+; SME2-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-NEXT:    uzp { z4.d, z5.d }, z0.d, z28.d
+; SME2-NEXT:    uzp { z6.d, z7.d }, z1.d, z29.d
+; SME2-NEXT:    uzp { z24.d, z25.d }, z2.d, z30.d
+; SME2-NEXT:    uzp { z26.d, z27.d }, z3.d, z31.d
+; SME2-NEXT:    mov z0.d, z4.d
+; SME2-NEXT:    mov z1.d, z6.d
+; SME2-NEXT:    mov z2.d, z24.d
+; SME2-NEXT:    mov z3.d, z26.d
+; SME2-NEXT:    mov z4.d, z5.d
+; SME2-NEXT:    mov z5.d, z7.d
+; SME2-NEXT:    mov z6.d, z25.d
+; SME2-NEXT:    mov z7.d, z27.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave8.nxv16i64(<vscale x 16 x i64> %vec)
   ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
 }
@@ -301,39 +438,65 @@ define {<vscale x 2 x i1>, <vscale x 2 x i1>} @vector_deinterleave_nxv2i1_nxv4i1
 ; Split illegal types
 
 define {<vscale x 4 x i64>, <vscale x 4 x i64>} @vector_deinterleave_nxv4i64_nxv8i64(<vscale x 8 x i64> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4i64_nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z4.d, z2.d, z3.d
-; CHECK-NEXT:    uzp1 z5.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z6.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z3.d, z2.d, z3.d
-; CHECK-NEXT:    mov z0.d, z5.d
-; CHECK-NEXT:    mov z1.d, z4.d
-; CHECK-NEXT:    mov z2.d, z6.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4i64_nxv8i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z4.d, z2.d, z3.d
+; SVE-NEXT:    uzp1 z5.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z6.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z3.d, z2.d, z3.d
+; SVE-NEXT:    mov z0.d, z5.d
+; SVE-NEXT:    mov z1.d, z4.d
+; SVE-NEXT:    mov z2.d, z6.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4i64_nxv8i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z4.d, z5.d }, z0.d, z1.d
+; SME2-NEXT:    uzp { z6.d, z7.d }, z2.d, z3.d
+; SME2-NEXT:    mov z0.d, z4.d
+; SME2-NEXT:    mov z1.d, z6.d
+; SME2-NEXT:    mov z2.d, z5.d
+; SME2-NEXT:    mov z3.d, z7.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x i64>, <vscale x 4 x i64>} @llvm.vector.deinterleave2.nxv8i64(<vscale x 8 x i64> %vec)
   ret {<vscale x 4 x i64>, <vscale x 4 x i64>} %retval
 }
 
 define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv16i64(<vscale x 16 x i64> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8i64_nxv16i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uzp1 z24.d, z2.d, z3.d
-; CHECK-NEXT:    uzp1 z25.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z26.d, z4.d, z5.d
-; CHECK-NEXT:    uzp1 z27.d, z6.d, z7.d
-; CHECK-NEXT:    uzp2 z28.d, z0.d, z1.d
-; CHECK-NEXT:    uzp2 z29.d, z2.d, z3.d
-; CHECK-NEXT:    uzp2 z30.d, z4.d, z5.d
-; CHECK-NEXT:    uzp2 z7.d, z6.d, z7.d
-; CHECK-NEXT:    mov z0.d, z25.d
-; CHECK-NEXT:    mov z1.d, z24.d
-; CHECK-NEXT:    mov z2.d, z26.d
-; CHECK-NEXT:    mov z3.d, z27.d
-; CHECK-NEXT:    mov z4.d, z28.d
-; CHECK-NEXT:    mov z5.d, z29.d
-; CHECK-NEXT:    mov z6.d, z30.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8i64_nxv16i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uzp1 z24.d, z2.d, z3.d
+; SVE-NEXT:    uzp1 z25.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z26.d, z4.d, z5.d
+; SVE-NEXT:    uzp1 z27.d, z6.d, z7.d
+; SVE-NEXT:    uzp2 z28.d, z0.d, z1.d
+; SVE-NEXT:    uzp2 z29.d, z2.d, z3.d
+; SVE-NEXT:    uzp2 z30.d, z4.d, z5.d
+; SVE-NEXT:    uzp2 z7.d, z6.d, z7.d
+; SVE-NEXT:    mov z0.d, z25.d
+; SVE-NEXT:    mov z1.d, z24.d
+; SVE-NEXT:    mov z2.d, z26.d
+; SVE-NEXT:    mov z3.d, z27.d
+; SVE-NEXT:    mov z4.d, z28.d
+; SVE-NEXT:    mov z5.d, z29.d
+; SVE-NEXT:    mov z6.d, z30.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8i64_nxv16i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uzp { z24.d, z25.d }, z0.d, z1.d
+; SME2-NEXT:    uzp { z26.d, z27.d }, z2.d, z3.d
+; SME2-NEXT:    uzp { z28.d, z29.d }, z4.d, z5.d
+; SME2-NEXT:    uzp { z30.d, z31.d }, z6.d, z7.d
+; SME2-NEXT:    mov z0.d, z24.d
+; SME2-NEXT:    mov z1.d, z26.d
+; SME2-NEXT:    mov z2.d, z28.d
+; SME2-NEXT:    mov z3.d, z30.d
+; SME2-NEXT:    mov z4.d, z25.d
+; SME2-NEXT:    mov z5.d, z27.d
+; SME2-NEXT:    mov z6.d, z29.d
+; SME2-NEXT:    mov z7.d, z31.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv16i64(<vscale x 16 x i64> %vec)
   ret {<vscale x 8 x i64>, <vscale x 8 x i64>} %retval
 }
@@ -342,37 +505,58 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_nxv8i64_nxv
 ; Promote illegal type size
 
 define {<vscale x 8 x i8>, <vscale x 8 x i8>} @vector_deinterleave_nxv8i8_nxv16i8(<vscale x 16 x i8> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv8i8_nxv16i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z1.h, z0.b
-; CHECK-NEXT:    uunpklo z2.h, z0.b
-; CHECK-NEXT:    uzp1 z0.h, z2.h, z1.h
-; CHECK-NEXT:    uzp2 z1.h, z2.h, z1.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv8i8_nxv16i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uunpkhi z1.h, z0.b
+; SVE-NEXT:    uunpklo z2.h, z0.b
+; SVE-NEXT:    uzp1 z0.h, z2.h, z1.h
+; SVE-NEXT:    uzp2 z1.h, z2.h, z1.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv8i8_nxv16i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.h, z0.b
+; SME2-NEXT:    uunpklo z0.h, z0.b
+; SME2-NEXT:    uzp { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call {<vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8> %vec)
   ret {<vscale x 8 x i8>, <vscale x 8 x i8>} %retval
 }
 
 define {<vscale x 4 x i16>, <vscale x 4 x i16>} @vector_deinterleave_nxv4i16_nxv8i16(<vscale x 8 x i16> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv4i16_nxv8i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z1.s, z0.h
-; CHECK-NEXT:    uunpklo z2.s, z0.h
-; CHECK-NEXT:    uzp1 z0.s, z2.s, z1.s
-; CHECK-NEXT:    uzp2 z1.s, z2.s, z1.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv4i16_nxv8i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uunpkhi z1.s, z0.h
+; SVE-NEXT:    uunpklo z2.s, z0.h
+; SVE-NEXT:    uzp1 z0.s, z2.s, z1.s
+; SVE-NEXT:    uzp2 z1.s, z2.s, z1.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv4i16_nxv8i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.s, z0.h
+; SME2-NEXT:    uunpklo z0.s, z0.h
+; SME2-NEXT:    uzp { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call {<vscale x 4 x i16>, <vscale x 4 x i16>} @llvm.vector.deinterleave2.nxv8i16(<vscale x 8 x i16> %vec)
   ret {<vscale x 4 x i16>, <vscale x 4 x i16>} %retval
 }
 
 define {<vscale x 2 x i32>, <vscale x 2 x i32>} @vector_deinterleave_nxv2i32_nxv4i32(<vscale x 4 x i32> %vec) {
-; CHECK-LABEL: vector_deinterleave_nxv2i32_nxv4i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uunpkhi z1.d, z0.s
-; CHECK-NEXT:    uunpklo z2.d, z0.s
-; CHECK-NEXT:    uzp1 z0.d, z2.d, z1.d
-; CHECK-NEXT:    uzp2 z1.d, z2.d, z1.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: vector_deinterleave_nxv2i32_nxv4i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    uunpkhi z1.d, z0.s
+; SVE-NEXT:    uunpklo z2.d, z0.s
+; SVE-NEXT:    uzp1 z0.d, z2.d, z1.d
+; SVE-NEXT:    uzp2 z1.d, z2.d, z1.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: vector_deinterleave_nxv2i32_nxv4i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    uunpkhi z1.d, z0.s
+; SME2-NEXT:    uunpklo z0.d, z0.s
+; SME2-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32> %vec)
   ret {<vscale x 2 x i32>, <vscale x 2 x i32>} %retval
 }
@@ -404,6 +588,3 @@ declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv1
 declare {<vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8>)
 declare {<vscale x 4 x i16>, <vscale x 4 x i16>} @llvm.vector.deinterleave2.nxv8i16(<vscale x 8 x i16>)
 declare {<vscale x 2 x i32>, <vscale x 2 x i32>} @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; SME2: {{.*}}
-; SVE: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 3ec1b7a4b5fe2..cc6385c09f6c6 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -4,100 +4,150 @@
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
 
 define <vscale x 4 x half> @interleave2_nxv4f16(<vscale x 2 x half> %vec0, <vscale x 2 x half> %vec1) {
-; CHECK-LABEL: interleave2_nxv4f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip1 z0.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half> %vec0, <vscale x 2 x half> %vec1)
   ret <vscale x 4 x half> %retval
 }
 
 define <vscale x 8 x half> @interleave2_nxv8f16(<vscale x 4 x half> %vec0, <vscale x 4 x half> %vec1) {
-; CHECK-LABEL: interleave2_nxv8f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.s, z0.s, z1.s
-; CHECK-NEXT:    zip1 z0.s, z0.s, z1.s
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.s, z0.s, z1.s
+; SVE-NEXT:    zip1 z0.s, z0.s, z1.s
+; SVE-NEXT:    uzp1 z0.h, z0.h, z2.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    uzp1 z0.h, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half> %vec0, <vscale x 4 x half> %vec1)
   ret <vscale x 8 x half> %retval
 }
 
 define <vscale x 16 x half> @interleave2_nxv16f16(<vscale x 8 x half> %vec0, <vscale x 8 x half> %vec1) {
-; CHECK-LABEL: interleave2_nxv16f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    zip2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv16f16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.h, z0.h, z1.h
+; SVE-NEXT:    zip2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv16f16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half> %vec0, <vscale x 8 x half> %vec1)
   ret <vscale x 16 x half> %retval
 }
 
 define <vscale x 4 x float> @interleave2_nxv4f32(<vscale x 2 x float> %vec0, <vscale x 2 x float> %vec1) {
-; CHECK-LABEL: interleave2_nxv4f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4f32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip1 z0.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4f32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float> %vec0, <vscale x 2 x float> %vec1)
   ret <vscale x 4 x float> %retval
 }
 
 define <vscale x 8 x float> @interleave2_nxv8f32(<vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1) {
-; CHECK-LABEL: interleave2_nxv8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.s, z0.s, z1.s
-; CHECK-NEXT:    zip2 z1.s, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8f32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.s, z0.s, z1.s
+; SVE-NEXT:    zip2 z1.s, z0.s, z1.s
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8f32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float> %vec0, <vscale x 4 x float> %vec1)
   ret <vscale x 8 x float> %retval
 }
 
 define <vscale x 4 x double> @interleave2_nxv4f64(<vscale x 2 x double> %vec0, <vscale x 2 x double> %vec1) {
-; CHECK-LABEL: interleave2_nxv4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip2 z1.d, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4f64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip2 z1.d, z0.d, z1.d
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4f64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x double>@llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %vec0, <vscale x 2 x double> %vec1)
   ret <vscale x 4 x double> %retval
 }
 
 define <vscale x 4 x bfloat> @interleave2_nxv4bf16(<vscale x 2 x bfloat> %vec0, <vscale x 2 x bfloat> %vec1) {
-; CHECK-LABEL: interleave2_nxv4bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip1 z0.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %vec0, <vscale x 2 x bfloat> %vec1)
   ret <vscale x 4 x bfloat> %retval
 }
 
 define <vscale x 8 x bfloat> @interleave2_nxv8bf16(<vscale x 4 x bfloat> %vec0, <vscale x 4 x bfloat> %vec1) {
-; CHECK-LABEL: interleave2_nxv8bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.s, z0.s, z1.s
-; CHECK-NEXT:    zip1 z0.s, z0.s, z1.s
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.s, z0.s, z1.s
+; SVE-NEXT:    zip1 z0.s, z0.s, z1.s
+; SVE-NEXT:    uzp1 z0.h, z0.h, z2.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    uzp1 z0.h, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %vec0, <vscale x 4 x bfloat> %vec1)
   ret <vscale x 8 x bfloat> %retval
 }
 
 define <vscale x 16 x bfloat> @interleave2_nxv16bf16(<vscale x 8 x bfloat> %vec0, <vscale x 8 x bfloat> %vec1) {
-; CHECK-LABEL: interleave2_nxv16bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    zip2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv16bf16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.h, z0.h, z1.h
+; SVE-NEXT:    zip2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv16bf16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %vec0, <vscale x 8 x bfloat> %vec1)
   ret <vscale x 16 x bfloat> %retval
 }
@@ -105,141 +155,213 @@ define <vscale x 16 x bfloat> @interleave2_nxv16bf16(<vscale x 8 x bfloat> %vec0
 ; Integers
 
 define <vscale x 32 x i8> @interleave2_nxv32i8(<vscale x 16 x i8> %vec0, <vscale x 16 x i8> %vec1) {
-; CHECK-LABEL: interleave2_nxv32i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.b, z0.b, z1.b
-; CHECK-NEXT:    zip2 z1.b, z0.b, z1.b
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv32i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.b, z0.b, z1.b
+; SVE-NEXT:    zip2 z1.b, z0.b, z1.b
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv32i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.b, z1.b }, z0.b, z1.b
+; SME2-NEXT:    ret
   %retval = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> %vec0, <vscale x 16 x i8> %vec1)
   ret <vscale x 32 x i8> %retval
 }
 
 define <vscale x 16 x i16> @interleave2_nxv16i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1) {
-; CHECK-LABEL: interleave2_nxv16i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.h, z0.h, z1.h
-; CHECK-NEXT:    zip2 z1.h, z0.h, z1.h
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv16i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.h, z0.h, z1.h
+; SVE-NEXT:    zip2 z1.h, z0.h, z1.h
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv16i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1)
   ret <vscale x 16 x i16> %retval
 }
 
 define <vscale x 8 x i32> @interleave2_nxv8i32(<vscale x 4 x i32> %vec0, <vscale x 4 x i32> %vec1) {
-; CHECK-LABEL: interleave2_nxv8i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.s, z0.s, z1.s
-; CHECK-NEXT:    zip2 z1.s, z0.s, z1.s
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.s, z0.s, z1.s
+; SVE-NEXT:    zip2 z1.s, z0.s, z1.s
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %vec0, <vscale x 4 x i32> %vec1)
   ret <vscale x 8 x i32> %retval
 }
 
 define <vscale x 4 x i64> @interleave2_nxv4i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1) {
-; CHECK-LABEL: interleave2_nxv4i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip2 z1.d, z0.d, z1.d
-; CHECK-NEXT:    mov z0.d, z2.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip2 z1.d, z0.d, z1.d
+; SVE-NEXT:    mov z0.d, z2.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1)
   ret <vscale x 4 x i64> %retval
 }
 
 define <vscale x 64 x i8> @interleave4_nxv16i8(<vscale x 16 x i8> %vec0, <vscale x 16 x i8> %vec1, <vscale x 16 x i8> %vec2, <vscale x 16 x i8> %vec3) {
-; CHECK-LABEL: interleave4_nxv16i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.b, z1.b, z3.b
-; CHECK-NEXT:    zip1 z5.b, z0.b, z2.b
-; CHECK-NEXT:    zip2 z3.b, z1.b, z3.b
-; CHECK-NEXT:    zip2 z6.b, z0.b, z2.b
-; CHECK-NEXT:    zip1 z0.b, z5.b, z4.b
-; CHECK-NEXT:    zip2 z1.b, z5.b, z4.b
-; CHECK-NEXT:    zip1 z2.b, z6.b, z3.b
-; CHECK-NEXT:    zip2 z3.b, z6.b, z3.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave4_nxv16i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.b, z1.b, z3.b
+; SVE-NEXT:    zip1 z5.b, z0.b, z2.b
+; SVE-NEXT:    zip2 z3.b, z1.b, z3.b
+; SVE-NEXT:    zip2 z6.b, z0.b, z2.b
+; SVE-NEXT:    zip1 z0.b, z5.b, z4.b
+; SVE-NEXT:    zip2 z1.b, z5.b, z4.b
+; SVE-NEXT:    zip1 z2.b, z6.b, z3.b
+; SVE-NEXT:    zip2 z3.b, z6.b, z3.b
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave4_nxv16i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    zip { z0.b - z3.b }, { z0.b - z3.b }
+; SME2-NEXT:    ret
   %retval = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv16i8(<vscale x 16 x i8> %vec0, <vscale x 16 x i8> %vec1, <vscale x 16 x i8> %vec2, <vscale x 16 x i8> %vec3)
   ret <vscale x 64 x i8> %retval
 }
 
 define <vscale x 32 x i16> @interleave4_nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3) {
-; CHECK-LABEL: interleave4_nxv8i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.h, z1.h, z3.h
-; CHECK-NEXT:    zip1 z5.h, z0.h, z2.h
-; CHECK-NEXT:    zip2 z3.h, z1.h, z3.h
-; CHECK-NEXT:    zip2 z6.h, z0.h, z2.h
-; CHECK-NEXT:    zip1 z0.h, z5.h, z4.h
-; CHECK-NEXT:    zip2 z1.h, z5.h, z4.h
-; CHECK-NEXT:    zip1 z2.h, z6.h, z3.h
-; CHECK-NEXT:    zip2 z3.h, z6.h, z3.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave4_nxv8i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.h, z1.h, z3.h
+; SVE-NEXT:    zip1 z5.h, z0.h, z2.h
+; SVE-NEXT:    zip2 z3.h, z1.h, z3.h
+; SVE-NEXT:    zip2 z6.h, z0.h, z2.h
+; SVE-NEXT:    zip1 z0.h, z5.h, z4.h
+; SVE-NEXT:    zip2 z1.h, z5.h, z4.h
+; SVE-NEXT:    zip1 z2.h, z6.h, z3.h
+; SVE-NEXT:    zip2 z3.h, z6.h, z3.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave4_nxv8i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    zip { z0.h - z3.h }, { z0.h - z3.h }
+; SME2-NEXT:    ret
   %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
   ret <vscale x 32 x i16> %retval
 }
 
 define <vscale x 16 x i32> @interleave4_nxv4i32(<vscale x 4 x i32> %vec0, <vscale x 4 x i32> %vec1, <vscale x 4 x i32> %vec2, <vscale x 4 x i32> %vec3) {
-; CHECK-LABEL: interleave4_nxv4i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.s, z1.s, z3.s
-; CHECK-NEXT:    zip1 z5.s, z0.s, z2.s
-; CHECK-NEXT:    zip2 z3.s, z1.s, z3.s
-; CHECK-NEXT:    zip2 z6.s, z0.s, z2.s
-; CHECK-NEXT:    zip1 z0.s, z5.s, z4.s
-; CHECK-NEXT:    zip2 z1.s, z5.s, z4.s
-; CHECK-NEXT:    zip1 z2.s, z6.s, z3.s
-; CHECK-NEXT:    zip2 z3.s, z6.s, z3.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave4_nxv4i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.s, z1.s, z3.s
+; SVE-NEXT:    zip1 z5.s, z0.s, z2.s
+; SVE-NEXT:    zip2 z3.s, z1.s, z3.s
+; SVE-NEXT:    zip2 z6.s, z0.s, z2.s
+; SVE-NEXT:    zip1 z0.s, z5.s, z4.s
+; SVE-NEXT:    zip2 z1.s, z5.s, z4.s
+; SVE-NEXT:    zip1 z2.s, z6.s, z3.s
+; SVE-NEXT:    zip2 z3.s, z6.s, z3.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave4_nxv4i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    zip { z0.s - z3.s }, { z0.s - z3.s }
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv4i32(<vscale x 4 x i32> %vec0, <vscale x 4 x i32> %vec1, <vscale x 4 x i32> %vec2, <vscale x 4 x i32> %vec3)
   ret <vscale x 16 x i32> %retval
 }
 
 define <vscale x 8 x i64> @interleave4_nxv8i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3) {
-; CHECK-LABEL: interleave4_nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.d, z1.d, z3.d
-; CHECK-NEXT:    zip1 z5.d, z0.d, z2.d
-; CHECK-NEXT:    zip2 z3.d, z1.d, z3.d
-; CHECK-NEXT:    zip2 z6.d, z0.d, z2.d
-; CHECK-NEXT:    zip1 z0.d, z5.d, z4.d
-; CHECK-NEXT:    zip2 z1.d, z5.d, z4.d
-; CHECK-NEXT:    zip1 z2.d, z6.d, z3.d
-; CHECK-NEXT:    zip2 z3.d, z6.d, z3.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave4_nxv8i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.d, z1.d, z3.d
+; SVE-NEXT:    zip1 z5.d, z0.d, z2.d
+; SVE-NEXT:    zip2 z3.d, z1.d, z3.d
+; SVE-NEXT:    zip2 z6.d, z0.d, z2.d
+; SVE-NEXT:    zip1 z0.d, z5.d, z4.d
+; SVE-NEXT:    zip2 z1.d, z5.d, z4.d
+; SVE-NEXT:    zip1 z2.d, z6.d, z3.d
+; SVE-NEXT:    zip2 z3.d, z6.d, z3.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave4_nxv8i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-NEXT:    zip { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3)
   ret <vscale x 8 x i64> %retval
 }
 
 define <vscale x 16 x i64> @interleave8_nxv16i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3, <vscale x 2 x i64> %vec4, <vscale x 2 x i64> %vec5, <vscale x 2 x i64> %vec6, <vscale x 2 x i64> %vec7) {
-; CHECK-LABEL: interleave8_nxv16i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z24.d, z3.d, z7.d
-; CHECK-NEXT:    zip1 z25.d, z1.d, z5.d
-; CHECK-NEXT:    zip1 z26.d, z2.d, z6.d
-; CHECK-NEXT:    zip1 z27.d, z0.d, z4.d
-; CHECK-NEXT:    zip2 z3.d, z3.d, z7.d
-; CHECK-NEXT:    zip2 z1.d, z1.d, z5.d
-; CHECK-NEXT:    zip2 z2.d, z2.d, z6.d
-; CHECK-NEXT:    zip2 z0.d, z0.d, z4.d
-; CHECK-NEXT:    zip1 z4.d, z25.d, z24.d
-; CHECK-NEXT:    zip2 z6.d, z25.d, z24.d
-; CHECK-NEXT:    zip1 z5.d, z27.d, z26.d
-; CHECK-NEXT:    zip2 z7.d, z27.d, z26.d
-; CHECK-NEXT:    zip1 z24.d, z1.d, z3.d
-; CHECK-NEXT:    zip1 z25.d, z0.d, z2.d
-; CHECK-NEXT:    zip2 z26.d, z1.d, z3.d
-; CHECK-NEXT:    zip2 z27.d, z0.d, z2.d
-; CHECK-NEXT:    zip1 z0.d, z5.d, z4.d
-; CHECK-NEXT:    zip2 z1.d, z5.d, z4.d
-; CHECK-NEXT:    zip1 z2.d, z7.d, z6.d
-; CHECK-NEXT:    zip2 z3.d, z7.d, z6.d
-; CHECK-NEXT:    zip1 z4.d, z25.d, z24.d
-; CHECK-NEXT:    zip2 z5.d, z25.d, z24.d
-; CHECK-NEXT:    zip1 z6.d, z27.d, z26.d
-; CHECK-NEXT:    zip2 z7.d, z27.d, z26.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave8_nxv16i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z24.d, z3.d, z7.d
+; SVE-NEXT:    zip1 z25.d, z1.d, z5.d
+; SVE-NEXT:    zip1 z26.d, z2.d, z6.d
+; SVE-NEXT:    zip1 z27.d, z0.d, z4.d
+; SVE-NEXT:    zip2 z3.d, z3.d, z7.d
+; SVE-NEXT:    zip2 z1.d, z1.d, z5.d
+; SVE-NEXT:    zip2 z2.d, z2.d, z6.d
+; SVE-NEXT:    zip2 z0.d, z0.d, z4.d
+; SVE-NEXT:    zip1 z4.d, z25.d, z24.d
+; SVE-NEXT:    zip2 z6.d, z25.d, z24.d
+; SVE-NEXT:    zip1 z5.d, z27.d, z26.d
+; SVE-NEXT:    zip2 z7.d, z27.d, z26.d
+; SVE-NEXT:    zip1 z24.d, z1.d, z3.d
+; SVE-NEXT:    zip1 z25.d, z0.d, z2.d
+; SVE-NEXT:    zip2 z26.d, z1.d, z3.d
+; SVE-NEXT:    zip2 z27.d, z0.d, z2.d
+; SVE-NEXT:    zip1 z0.d, z5.d, z4.d
+; SVE-NEXT:    zip2 z1.d, z5.d, z4.d
+; SVE-NEXT:    zip1 z2.d, z7.d, z6.d
+; SVE-NEXT:    zip2 z3.d, z7.d, z6.d
+; SVE-NEXT:    zip1 z4.d, z25.d, z24.d
+; SVE-NEXT:    zip2 z5.d, z25.d, z24.d
+; SVE-NEXT:    zip1 z6.d, z27.d, z26.d
+; SVE-NEXT:    zip2 z7.d, z27.d, z26.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave8_nxv16i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z26.d, z27.d }, z3.d, z7.d
+; SME2-NEXT:    zip { z6.d, z7.d }, z2.d, z6.d
+; SME2-NEXT:    zip { z24.d, z25.d }, z1.d, z5.d
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z4.d
+; SME2-NEXT:    mov z28.d, z0.d
+; SME2-NEXT:    mov z29.d, z24.d
+; SME2-NEXT:    mov z30.d, z6.d
+; SME2-NEXT:    mov z31.d, z26.d
+; SME2-NEXT:    mov z24.d, z1.d
+; SME2-NEXT:    mov z26.d, z7.d
+; SME2-NEXT:    zip { z0.d - z3.d }, { z28.d - z31.d }
+; SME2-NEXT:    zip { z4.d - z7.d }, { z24.d - z27.d }
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x i64> @llvm.vector.interleave8.nxv16i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3, <vscale x 2 x i64> %vec4, <vscale x 2 x i64> %vec5, <vscale x 2 x i64> %vec6, <vscale x 2 x i64> %vec7)
   ret <vscale x 16 x i64> %retval
 }
@@ -293,31 +415,47 @@ define <vscale x 4 x i1> @interleave2_nxv4i1(<vscale x 2 x i1> %vec0, <vscale x
 ; Split illegal type size
 
 define <vscale x 16 x i32> @interleave2_nxv16i32(<vscale x 8 x i32> %vec0, <vscale x 8 x i32> %vec1) {
-; CHECK-LABEL: interleave2_nxv16i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.s, z1.s, z3.s
-; CHECK-NEXT:    zip1 z5.s, z0.s, z2.s
-; CHECK-NEXT:    zip2 z2.s, z0.s, z2.s
-; CHECK-NEXT:    zip2 z3.s, z1.s, z3.s
-; CHECK-NEXT:    mov z0.d, z5.d
-; CHECK-NEXT:    mov z1.d, z2.d
-; CHECK-NEXT:    mov z2.d, z4.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv16i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.s, z1.s, z3.s
+; SVE-NEXT:    zip1 z5.s, z0.s, z2.s
+; SVE-NEXT:    zip2 z2.s, z0.s, z2.s
+; SVE-NEXT:    zip2 z3.s, z1.s, z3.s
+; SVE-NEXT:    mov z0.d, z5.d
+; SVE-NEXT:    mov z1.d, z2.d
+; SVE-NEXT:    mov z2.d, z4.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv16i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z4.s, z5.s }, z0.s, z2.s
+; SME2-NEXT:    zip { z2.s, z3.s }, z1.s, z3.s
+; SME2-NEXT:    mov z0.d, z4.d
+; SME2-NEXT:    mov z1.d, z5.d
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x i32>@llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %vec0, <vscale x 8 x i32> %vec1)
   ret <vscale x 16 x i32> %retval
 }
 
 define <vscale x 8 x i64> @interleave2_nxv8i64(<vscale x 4 x i64> %vec0, <vscale x 4 x i64> %vec1) {
-; CHECK-LABEL: interleave2_nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip1 z4.d, z1.d, z3.d
-; CHECK-NEXT:    zip1 z5.d, z0.d, z2.d
-; CHECK-NEXT:    zip2 z2.d, z0.d, z2.d
-; CHECK-NEXT:    zip2 z3.d, z1.d, z3.d
-; CHECK-NEXT:    mov z0.d, z5.d
-; CHECK-NEXT:    mov z1.d, z2.d
-; CHECK-NEXT:    mov z2.d, z4.d
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip1 z4.d, z1.d, z3.d
+; SVE-NEXT:    zip1 z5.d, z0.d, z2.d
+; SVE-NEXT:    zip2 z2.d, z0.d, z2.d
+; SVE-NEXT:    zip2 z3.d, z1.d, z3.d
+; SVE-NEXT:    mov z0.d, z5.d
+; SVE-NEXT:    mov z1.d, z2.d
+; SVE-NEXT:    mov z2.d, z4.d
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8i64:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z4.d, z5.d }, z0.d, z2.d
+; SME2-NEXT:    zip { z2.d, z3.d }, z1.d, z3.d
+; SME2-NEXT:    mov z0.d, z4.d
+; SME2-NEXT:    mov z1.d, z5.d
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64> %vec0, <vscale x 4 x i64> %vec1)
   ret <vscale x 8 x i64> %retval
 }
@@ -325,34 +463,52 @@ define <vscale x 8 x i64> @interleave2_nxv8i64(<vscale x 4 x i64> %vec0, <vscale
 ; Promote illegal type size
 
 define <vscale x 16 x i8> @interleave2_nxv8i8(<vscale x 8 x i8> %vec0, <vscale x 8 x i8> %vec1) {
-; CHECK-LABEL: interleave2_nxv8i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.h, z0.h, z1.h
-; CHECK-NEXT:    zip1 z0.h, z0.h, z1.h
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z2.b
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv8i8:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.h, z0.h, z1.h
+; SVE-NEXT:    zip1 z0.h, z0.h, z1.h
+; SVE-NEXT:    uzp1 z0.b, z0.b, z2.b
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv8i8:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.h, z1.h }, z0.h, z1.h
+; SME2-NEXT:    uzp1 z0.b, z0.b, z1.b
+; SME2-NEXT:    ret
   %retval = call <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8> %vec0, <vscale x 8 x i8> %vec1)
   ret <vscale x 16 x i8> %retval
 }
 
 define <vscale x 8 x i16> @interleave2_nxv4i16(<vscale x 4 x i16> %vec0, <vscale x 4 x i16> %vec1) {
-; CHECK-LABEL: interleave2_nxv4i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.s, z0.s, z1.s
-; CHECK-NEXT:    zip1 z0.s, z0.s, z1.s
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv4i16:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.s, z0.s, z1.s
+; SVE-NEXT:    zip1 z0.s, z0.s, z1.s
+; SVE-NEXT:    uzp1 z0.h, z0.h, z2.h
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv4i16:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.s, z1.s }, z0.s, z1.s
+; SME2-NEXT:    uzp1 z0.h, z0.h, z1.h
+; SME2-NEXT:    ret
   %retval = call <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16> %vec0, <vscale x 4 x i16> %vec1)
   ret <vscale x 8 x i16> %retval
 }
 
 define <vscale x 4 x i32> @interleave2_nxv2i32(<vscale x 2 x i32> %vec0, <vscale x 2 x i32> %vec1) {
-; CHECK-LABEL: interleave2_nxv2i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT:    zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z2.s
-; CHECK-NEXT:    ret
+; SVE-LABEL: interleave2_nxv2i32:
+; SVE:       // %bb.0:
+; SVE-NEXT:    zip2 z2.d, z0.d, z1.d
+; SVE-NEXT:    zip1 z0.d, z0.d, z1.d
+; SVE-NEXT:    uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT:    ret
+;
+; SME2-LABEL: interleave2_nxv2i32:
+; SME2:       // %bb.0:
+; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT:    uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT:    ret
   %retval = call <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32> %vec0, <vscale x 2 x i32> %vec1)
   ret <vscale x 4 x i32> %retval
 }
@@ -384,6 +540,3 @@ declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>,
 declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; SME2: {{.*}}
-; SVE: {{.*}}

>From 01170749b6cb09c23bb87ba33f1aaecd947e4b73 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Tue, 10 Jun 2025 12:25:30 +0000
Subject: [PATCH 3/4] Replace push_back loop with append. Add RUN lines for
 256-bit and wider SME2.

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp      | 6 ++----
 llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll | 6 +++++-
 llvm/test/CodeGen/AArch64/sve-vector-interleave.ll   | 6 +++++-
 3 files changed, 12 insertions(+), 6 deletions(-)
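
(Editorial note, not part of the patch: the loop-to-append change below is the
standard SmallVector bulk-insert idiom. A minimal standalone sketch, with
purely illustrative names and values rather than anything from the patch:

  #include "llvm/ADT/SmallVector.h"

  // Build an operand-style list: one leading element followed by a copy of Src.
  llvm::SmallVector<int, 5> buildOps(const llvm::SmallVector<int, 4> &Src) {
    llvm::SmallVector<int, 5> Ops;
    Ops.push_back(42);                  // stands in for the leading intrinsic ID constant
    Ops.append(Src.begin(), Src.end()); // one bulk copy instead of a push_back loop
    return Ops;
  }

The appended range replaces the element-by-element loop without changing the
resulting operand order.)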

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 882264b859410..1a24ae8c04748 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -29227,8 +29227,7 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
 
     SmallVector<SDValue, 5> Ops;
     Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
-    for (unsigned I = 0; I < Op.getNumOperands(); ++I)
-      Ops.push_back(Op.getOperand(I));
+    Ops.append(Op->op_values().begin(), Op->op_values().end());
     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op->getVTList(), Ops);
   }
 
@@ -29266,8 +29265,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
 
     SmallVector<SDValue, 5> Ops;
     Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
-    for (unsigned I = 0; I < Op.getNumOperands(); ++I)
-      Ops.push_back(Op.getOperand(I));
+    Ops.append(Op->op_values().begin(), Op->op_values().end());
     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op->getVTList(), Ops);
   }
 
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
index 4889861444bbe..597a792a3945e 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME2-ALL
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
 
 define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv4f16(<vscale x 4 x half> %vec) {
 ; SVE-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
@@ -588,3 +589,6 @@ declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv1
 declare {<vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8>)
 declare {<vscale x 4 x i16>, <vscale x 4 x i16>} @llvm.vector.deinterleave2.nxv8i16(<vscale x 8 x i16>)
 declare {<vscale x 2 x i32>, <vscale x 2 x i32>} @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SME2-256: {{.*}}
+; SME2-ALL: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index cc6385c09f6c6..3e140bde7be80 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME-ALL
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
 
 define <vscale x 4 x half> @interleave2_nxv4f16(<vscale x 2 x half> %vec0, <vscale x 2 x half> %vec1) {
 ; SVE-LABEL: interleave2_nxv4f16:
@@ -540,3 +541,6 @@ declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>,
 declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SME-ALL: {{.*}}
+; SME2-256: {{.*}}

>From 2270e0f79c8f8e492ef35bae2b499c2c741dc9c7 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Tue, 10 Jun 2025 12:35:50 +0000
Subject: [PATCH 4/4] 4-register variants of UZP.D and ZIP.D are only available
 for SVE implementations with 256-bit or wider vectors.

---
 .../Target/AArch64/AArch64ISelLowering.cpp    |   6 +
 .../AArch64/sve-vector-deinterleave.ll        | 115 ++++++++++++------
 .../CodeGen/AArch64/sve-vector-interleave.ll  |  83 ++++++++-----
 3 files changed, 134 insertions(+), 70 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1a24ae8c04748..a1e587b32efc3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -29221,6 +29221,9 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
       IntID = Intrinsic::aarch64_sve_uzp_x2;
       break;
     case 4:
+      if (Subtarget->getMinSVEVectorSizeInBits() < 256 &&
+          OpVT.getScalarSizeInBits() == 64)
+        return SDValue();
       IntID = Intrinsic::aarch64_sve_uzp_x4;
       break;
     }
@@ -29259,6 +29262,9 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
       IntID = Intrinsic::aarch64_sve_zip_x2;
       break;
     case 4:
+      if (Subtarget->getMinSVEVectorSizeInBits() < 256 &&
+          OpVT.getScalarSizeInBits() == 64)
+        return SDValue();
       IntID = Intrinsic::aarch64_sve_zip_x4;
       break;
     }
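
(Editorial note, not part of the patch: the two early-outs above encode the
constraint from the commit message. The 4-register UZP.D/ZIP.D forms are only
selected when the minimum SVE vector length is known to be at least 256 bits;
otherwise the hook declines by returning SDValue() and, as the new SME-ALL
check lines below show, codegen falls back to pairs of 2-register zips/uzps.
A hypothetical standalone restatement of the guard, where the function name is
illustrative and not LLVM API:

  // True when the 4-register ZIP/UZP lowering may be used for the given
  // element width, mirroring the checks added to both lowerings above.
  static bool mayUseFourRegZipUzp(unsigned MinSVEVectorSizeInBits,
                                  unsigned ScalarSizeInBits) {
    if (ScalarSizeInBits == 64 && MinSVEVectorSizeInBits < 256)
      return false; // .d forms need a 256-bit or wider implementation
    return true;
  }

Element widths other than 64 bits are unaffected by the new condition.)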
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
index 597a792a3945e..139ecafaff0eb 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME2-ALL
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
+; RUN: llc < %s -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME2-ALL
+; RUN: llc < %s -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
+
+target triple = "aarch64-unknown-linux-gnu"
 
 define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv4f16(<vscale x 4 x half> %vec) {
 ; SVE-LABEL: vector_deinterleave_nxv2f16_nxv4f16:
@@ -317,14 +319,26 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
 ; SVE-NEXT:    uzp2 z3.d, z6.d, z3.d
 ; SVE-NEXT:    ret
 ;
-; SME2-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
-; SME2:       // %bb.0:
-; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
-; SME2-NEXT:    ret
+; SME2-ALL-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
+; SME2-ALL:       // %bb.0:
+; SME2-ALL-NEXT:    uzp { z4.d, z5.d }, z2.d, z3.d
+; SME2-ALL-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-ALL-NEXT:    uzp { z2.d, z3.d }, z0.d, z4.d
+; SME2-ALL-NEXT:    uzp { z4.d, z5.d }, z1.d, z5.d
+; SME2-ALL-NEXT:    mov z0.d, z2.d
+; SME2-ALL-NEXT:    mov z1.d, z4.d
+; SME2-ALL-NEXT:    mov z2.d, z3.d
+; SME2-ALL-NEXT:    mov z3.d, z5.d
+; SME2-ALL-NEXT:    ret
+;
+; SME2-256-LABEL: vector_deinterleave_nxv2i64_nxv8i64:
+; SME2-256:       // %bb.0:
+; SME2-256-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-256-NEXT:    ret
   %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> %vec)
   ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
 }
@@ -358,31 +372,55 @@ define {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
 ; SVE-NEXT:    uzp2 z7.d, z27.d, z26.d
 ; SVE-NEXT:    ret
 ;
-; SME2-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
-; SME2:       // %bb.0:
-; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
-; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
-; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
-; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
-; SME2-NEXT:    uzp { z28.d - z31.d }, { z4.d - z7.d }
-; SME2-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
-; SME2-NEXT:    uzp { z4.d, z5.d }, z0.d, z28.d
-; SME2-NEXT:    uzp { z6.d, z7.d }, z1.d, z29.d
-; SME2-NEXT:    uzp { z24.d, z25.d }, z2.d, z30.d
-; SME2-NEXT:    uzp { z26.d, z27.d }, z3.d, z31.d
-; SME2-NEXT:    mov z0.d, z4.d
-; SME2-NEXT:    mov z1.d, z6.d
-; SME2-NEXT:    mov z2.d, z24.d
-; SME2-NEXT:    mov z3.d, z26.d
-; SME2-NEXT:    mov z4.d, z5.d
-; SME2-NEXT:    mov z5.d, z7.d
-; SME2-NEXT:    mov z6.d, z25.d
-; SME2-NEXT:    mov z7.d, z27.d
-; SME2-NEXT:    ret
+; SME2-ALL-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
+; SME2-ALL:       // %bb.0:
+; SME2-ALL-NEXT:    uzp { z6.d, z7.d }, z6.d, z7.d
+; SME2-ALL-NEXT:    uzp { z24.d, z25.d }, z4.d, z5.d
+; SME2-ALL-NEXT:    uzp { z26.d, z27.d }, z24.d, z6.d
+; SME2-ALL-NEXT:    uzp { z2.d, z3.d }, z2.d, z3.d
+; SME2-ALL-NEXT:    uzp { z0.d, z1.d }, z0.d, z1.d
+; SME2-ALL-NEXT:    uzp { z28.d, z29.d }, z0.d, z2.d
+; SME2-ALL-NEXT:    uzp { z4.d, z5.d }, z28.d, z26.d
+; SME2-ALL-NEXT:    uzp { z30.d, z31.d }, z25.d, z7.d
+; SME2-ALL-NEXT:    uzp { z0.d, z1.d }, z1.d, z3.d
+; SME2-ALL-NEXT:    uzp { z6.d, z7.d }, z0.d, z30.d
+; SME2-ALL-NEXT:    uzp { z24.d, z25.d }, z29.d, z27.d
+; SME2-ALL-NEXT:    uzp { z26.d, z27.d }, z1.d, z31.d
+; SME2-ALL-NEXT:    mov z0.d, z4.d
+; SME2-ALL-NEXT:    mov z1.d, z6.d
+; SME2-ALL-NEXT:    mov z2.d, z24.d
+; SME2-ALL-NEXT:    mov z3.d, z26.d
+; SME2-ALL-NEXT:    mov z4.d, z5.d
+; SME2-ALL-NEXT:    mov z5.d, z7.d
+; SME2-ALL-NEXT:    mov z6.d, z25.d
+; SME2-ALL-NEXT:    mov z7.d, z27.d
+; SME2-ALL-NEXT:    ret
+;
+; SME2-256-LABEL: vector_deinterleave_nxv2i64_nxv16i64:
+; SME2-256:       // %bb.0:
+; SME2-256-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-256-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-256-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-256-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; SME2-256-NEXT:    uzp { z28.d - z31.d }, { z4.d - z7.d }
+; SME2-256-NEXT:    uzp { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-256-NEXT:    uzp { z4.d, z5.d }, z0.d, z28.d
+; SME2-256-NEXT:    uzp { z6.d, z7.d }, z1.d, z29.d
+; SME2-256-NEXT:    uzp { z24.d, z25.d }, z2.d, z30.d
+; SME2-256-NEXT:    uzp { z26.d, z27.d }, z3.d, z31.d
+; SME2-256-NEXT:    mov z0.d, z4.d
+; SME2-256-NEXT:    mov z1.d, z6.d
+; SME2-256-NEXT:    mov z2.d, z24.d
+; SME2-256-NEXT:    mov z3.d, z26.d
+; SME2-256-NEXT:    mov z4.d, z5.d
+; SME2-256-NEXT:    mov z5.d, z7.d
+; SME2-256-NEXT:    mov z6.d, z25.d
+; SME2-256-NEXT:    mov z7.d, z27.d
+; SME2-256-NEXT:    ret
   %retval = call {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave8.nxv16i64(<vscale x 16 x i64> %vec)
   ret {<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>} %retval
 }
@@ -589,6 +627,3 @@ declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv1
 declare {<vscale x 8 x i8>, <vscale x 8 x i8>} @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8>)
 declare {<vscale x 4 x i16>, <vscale x 4 x i16>} @llvm.vector.deinterleave2.nxv8i16(<vscale x 8 x i16>)
 declare {<vscale x 2 x i32>, <vscale x 2 x i32>} @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; SME2-256: {{.*}}
-; SME2-ALL: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 3e140bde7be80..52cb2d9ebe343 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME-ALL
-; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
+; RUN: llc < %s -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mattr=+sve,+sme2 | FileCheck %s -check-prefixes=CHECK,SVE
+; RUN: llc < %s -mattr=+sme2 -force-streaming | FileCheck %s -check-prefixes=CHECK,SME2,SME-ALL
+; RUN: llc < %s -mattr=+sme2 -force-streaming -aarch64-sve-vector-bits-min=256 | FileCheck %s -check-prefixes=CHECK,SME2,SME2-256
+
+target triple = "aarch64-unknown-linux-gnu"
 
 define <vscale x 4 x half> @interleave2_nxv4f16(<vscale x 2 x half> %vec0, <vscale x 2 x half> %vec1) {
 ; SVE-LABEL: interleave2_nxv4f16:
@@ -307,14 +309,22 @@ define <vscale x 8 x i64> @interleave4_nxv8i64(<vscale x 2 x i64> %vec0, <vscale
 ; SVE-NEXT:    zip2 z3.d, z6.d, z3.d
 ; SVE-NEXT:    ret
 ;
-; SME2-LABEL: interleave4_nxv8i64:
-; SME2:       // %bb.0:
-; SME2-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
-; SME2-NEXT:    zip { z0.d - z3.d }, { z0.d - z3.d }
-; SME2-NEXT:    ret
+; SME-ALL-LABEL: interleave4_nxv8i64:
+; SME-ALL:       // %bb.0:
+; SME-ALL-NEXT:    zip { z4.d, z5.d }, z1.d, z3.d
+; SME-ALL-NEXT:    zip { z2.d, z3.d }, z0.d, z2.d
+; SME-ALL-NEXT:    zip { z0.d, z1.d }, z2.d, z4.d
+; SME-ALL-NEXT:    zip { z2.d, z3.d }, z3.d, z5.d
+; SME-ALL-NEXT:    ret
+;
+; SME2-256-LABEL: interleave4_nxv8i64:
+; SME2-256:       // %bb.0:
+; SME2-256-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; SME2-256-NEXT:    zip { z0.d - z3.d }, { z0.d - z3.d }
+; SME2-256-NEXT:    ret
   %retval = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3)
   ret <vscale x 8 x i64> %retval
 }
@@ -348,21 +358,37 @@ define <vscale x 16 x i64> @interleave8_nxv16i64(<vscale x 2 x i64> %vec0, <vsca
 ; SVE-NEXT:    zip2 z7.d, z27.d, z26.d
 ; SVE-NEXT:    ret
 ;
-; SME2-LABEL: interleave8_nxv16i64:
-; SME2:       // %bb.0:
-; SME2-NEXT:    zip { z26.d, z27.d }, z3.d, z7.d
-; SME2-NEXT:    zip { z6.d, z7.d }, z2.d, z6.d
-; SME2-NEXT:    zip { z24.d, z25.d }, z1.d, z5.d
-; SME2-NEXT:    zip { z0.d, z1.d }, z0.d, z4.d
-; SME2-NEXT:    mov z28.d, z0.d
-; SME2-NEXT:    mov z29.d, z24.d
-; SME2-NEXT:    mov z30.d, z6.d
-; SME2-NEXT:    mov z31.d, z26.d
-; SME2-NEXT:    mov z24.d, z1.d
-; SME2-NEXT:    mov z26.d, z7.d
-; SME2-NEXT:    zip { z0.d - z3.d }, { z28.d - z31.d }
-; SME2-NEXT:    zip { z4.d - z7.d }, { z24.d - z27.d }
-; SME2-NEXT:    ret
+; SME-ALL-LABEL: interleave8_nxv16i64:
+; SME-ALL:       // %bb.0:
+; SME-ALL-NEXT:    zip { z24.d, z25.d }, z3.d, z7.d
+; SME-ALL-NEXT:    zip { z26.d, z27.d }, z1.d, z5.d
+; SME-ALL-NEXT:    zip { z28.d, z29.d }, z26.d, z24.d
+; SME-ALL-NEXT:    zip { z6.d, z7.d }, z2.d, z6.d
+; SME-ALL-NEXT:    zip { z4.d, z5.d }, z0.d, z4.d
+; SME-ALL-NEXT:    zip { z2.d, z3.d }, z4.d, z6.d
+; SME-ALL-NEXT:    zip { z0.d, z1.d }, z2.d, z28.d
+; SME-ALL-NEXT:    zip { z2.d, z3.d }, z3.d, z29.d
+; SME-ALL-NEXT:    zip { z24.d, z25.d }, z27.d, z25.d
+; SME-ALL-NEXT:    zip { z6.d, z7.d }, z5.d, z7.d
+; SME-ALL-NEXT:    zip { z4.d, z5.d }, z6.d, z24.d
+; SME-ALL-NEXT:    zip { z6.d, z7.d }, z7.d, z25.d
+; SME-ALL-NEXT:    ret
+;
+; SME2-256-LABEL: interleave8_nxv16i64:
+; SME2-256:       // %bb.0:
+; SME2-256-NEXT:    zip { z26.d, z27.d }, z3.d, z7.d
+; SME2-256-NEXT:    zip { z6.d, z7.d }, z2.d, z6.d
+; SME2-256-NEXT:    zip { z24.d, z25.d }, z1.d, z5.d
+; SME2-256-NEXT:    zip { z0.d, z1.d }, z0.d, z4.d
+; SME2-256-NEXT:    mov z28.d, z0.d
+; SME2-256-NEXT:    mov z29.d, z24.d
+; SME2-256-NEXT:    mov z30.d, z6.d
+; SME2-256-NEXT:    mov z31.d, z26.d
+; SME2-256-NEXT:    mov z24.d, z1.d
+; SME2-256-NEXT:    mov z26.d, z7.d
+; SME2-256-NEXT:    zip { z0.d - z3.d }, { z28.d - z31.d }
+; SME2-256-NEXT:    zip { z4.d - z7.d }, { z24.d - z27.d }
+; SME2-256-NEXT:    ret
   %retval = call <vscale x 16 x i64> @llvm.vector.interleave8.nxv16i64(<vscale x 2 x i64> %vec0, <vscale x 2 x i64> %vec1, <vscale x 2 x i64> %vec2, <vscale x 2 x i64> %vec3, <vscale x 2 x i64> %vec4, <vscale x 2 x i64> %vec5, <vscale x 2 x i64> %vec6, <vscale x 2 x i64> %vec7)
   ret <vscale x 16 x i64> %retval
 }
@@ -541,6 +567,3 @@ declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>,
 declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
 declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
 declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; SME-ALL: {{.*}}
-; SME2-256: {{.*}}


