[llvm] [IA]: Construct (de)interleave4 out of (de)interleave2 (PR #89276)

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Wed May 15 19:19:40 PDT 2024


https://github.com/hassnaaHamdi updated https://github.com/llvm/llvm-project/pull/89276

From da7bc8599ddae11df4725e65adcede91811cb618 Mon Sep 17 00:00:00 2001
From: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
Date: Wed, 15 May 2024 23:44:44 +0000
Subject: [PATCH 1/4] [AArch64][Interleave]: Add precommit test

Change-Id: I5e2613156a482dcadae3e4cfa1bacdf7f3293fe2
---
 .../AArch64/sve-interleave_accesses4-load.ll  | 106 ++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll

diff --git a/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll b/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll
new file mode 100644
index 0000000000000..dcade71ccb684
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+%struct.xyzt = type { i32, i32, i32, i32 }
+
+define void @interleave(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, <vscale x 4 x i32> %x) {
+; CHECK-LABEL: interleave:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld2w { z1.s, z2.s }, p0/z, [x1]
+; CHECK-NEXT:    ld2w { z3.s, z4.s }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT:    uzp2 z5.s, z1.s, z3.s
+; CHECK-NEXT:    uzp1 z6.s, z1.s, z3.s
+; CHECK-NEXT:    uzp2 z7.s, z2.s, z4.s
+; CHECK-NEXT:    uzp1 z1.s, z2.s, z4.s
+; CHECK-NEXT:    add z2.s, z0.s, z6.s
+; CHECK-NEXT:    movprfx z3, z5
+; CHECK-NEXT:    lsl z3.s, p0/m, z3.s, z0.s
+; CHECK-NEXT:    sub z1.s, z1.s, z0.s
+; CHECK-NEXT:    asrr z0.s, p0/m, z0.s, z7.s
+; CHECK-NEXT:    zip1 z4.s, z2.s, z3.s
+; CHECK-NEXT:    zip2 z2.s, z2.s, z3.s
+; CHECK-NEXT:    zip1 z5.s, z1.s, z0.s
+; CHECK-NEXT:    zip2 z3.s, z1.s, z0.s
+; CHECK-NEXT:    st2w { z4.s, z5.s }, p0, [x0]
+; CHECK-NEXT:    st2w { z2.s, z3.s }, p0, [x0, #2, mul vl]
+; CHECK-NEXT:    ret
+  %wide.vec = load <vscale x 16 x i32>, ptr %a, align 4
+  %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
+  %3 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 0
+  %4 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 1
+  %root.strided.vec55 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %3)
+  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 0
+  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 1
+  %root.strided.vec56 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %4)
+  %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 0
+  %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 1
+  %9 = add nsw <vscale x 4 x i32> %x, %5
+  %10 = sub nsw <vscale x 4 x i32> %7, %x
+  %11 = shl <vscale x 4 x i32> %6, %x
+  %12 = ashr <vscale x 4 x i32> %8, %x
+  %interleaved.vec = tail call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %9, <vscale x 4 x i32> %11)
+  %interleaved.vec61 = tail call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %10, <vscale x 4 x i32> %12)
+  %interleaved.vec62 = tail call <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleaved.vec, <vscale x 8 x i32> %interleaved.vec61)
+  store <vscale x 16 x i32> %interleaved.vec62, ptr %dst, align 4
+  ret void
+}
+
+define void @wide_interleave(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, <vscale x 8 x i32> %x) {
+; CHECK-LABEL: wide_interleave:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld2w { z2.s, z3.s }, p0/z, [x1]
+; CHECK-NEXT:    ld2w { z4.s, z5.s }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT:    ld2w { z6.s, z7.s }, p0/z, [x1, #4, mul vl]
+; CHECK-NEXT:    ld2w { z24.s, z25.s }, p0/z, [x1, #6, mul vl]
+; CHECK-NEXT:    uzp2 z26.s, z2.s, z4.s
+; CHECK-NEXT:    uzp1 z27.s, z2.s, z4.s
+; CHECK-NEXT:    uzp2 z28.s, z3.s, z5.s
+; CHECK-NEXT:    uzp1 z2.s, z3.s, z5.s
+; CHECK-NEXT:    add z3.s, z0.s, z27.s
+; CHECK-NEXT:    movprfx z4, z26
+; CHECK-NEXT:    lsl z4.s, p0/m, z4.s, z0.s
+; CHECK-NEXT:    sub z2.s, z2.s, z0.s
+; CHECK-NEXT:    asrr z0.s, p0/m, z0.s, z28.s
+; CHECK-NEXT:    zip1 z26.s, z3.s, z4.s
+; CHECK-NEXT:    zip2 z3.s, z3.s, z4.s
+; CHECK-NEXT:    zip1 z27.s, z2.s, z0.s
+; CHECK-NEXT:    zip2 z4.s, z2.s, z0.s
+; CHECK-NEXT:    uzp2 z0.s, z6.s, z24.s
+; CHECK-NEXT:    uzp1 z2.s, z6.s, z24.s
+; CHECK-NEXT:    st2w { z26.s, z27.s }, p0, [x0]
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    add z2.s, z1.s, z2.s
+; CHECK-NEXT:    st2w { z3.s, z4.s }, p0, [x0, #2, mul vl]
+; CHECK-NEXT:    uzp2 z3.s, z7.s, z25.s
+; CHECK-NEXT:    uzp1 z4.s, z7.s, z25.s
+; CHECK-NEXT:    zip1 z5.s, z2.s, z0.s
+; CHECK-NEXT:    sub z4.s, z4.s, z1.s
+; CHECK-NEXT:    asrr z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    zip2 z2.s, z2.s, z0.s
+; CHECK-NEXT:    zip1 z6.s, z4.s, z1.s
+; CHECK-NEXT:    zip2 z3.s, z4.s, z1.s
+; CHECK-NEXT:    st2w { z5.s, z6.s }, p0, [x0, #4, mul vl]
+; CHECK-NEXT:    st2w { z2.s, z3.s }, p0, [x0, #6, mul vl]
+; CHECK-NEXT:    ret
+  %wide.vec = load <vscale x 32 x i32>, ptr %a, align 4
+  %root.strided.vec = tail call { <vscale x 16 x i32>, <vscale x 16 x i32> } @llvm.vector.deinterleave2.nxv32i32(<vscale x 32 x i32> %wide.vec)
+  %3 = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i32> } %root.strided.vec, 0
+  %4 = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i32> } %root.strided.vec, 1
+  %root.strided.vec55 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %3)
+  %5 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec55, 0
+  %6 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec55, 1
+  %root.strided.vec56 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %4)
+  %7 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec56, 0
+  %8 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec56, 1
+  %9 = add nsw <vscale x 8 x i32> %x, %5
+  %10 = sub nsw <vscale x 8 x i32> %7, %x
+  %11 = shl <vscale x 8 x i32> %6, %x
+  %12 = ashr <vscale x 8 x i32> %8, %x
+  %interleaved.vec = tail call <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %9, <vscale x 8 x i32> %11)
+  %interleaved.vec61 = tail call <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %10, <vscale x 8 x i32> %12)
+  %interleaved.vec62 = tail call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %interleaved.vec, <vscale x 16 x i32> %interleaved.vec61)
+  store <vscale x 32 x i32> %interleaved.vec62, ptr %dst, align 4
+  ret void
+}

From 196e018259599ff272d6f593ace0f430dadc3a99 Mon Sep 17 00:00:00 2001
From: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
Date: Thu, 18 Apr 2024 17:30:51 +0000
Subject: [PATCH 2/4] [IA]: Construct (de)interleave4 out of (de)interleave2

- The InterleavedAccess pass is updated to spot load/store (de)interleave4-like sequences
  built from (de)interleave2 nodes, and to emit the equivalent sve.ld4/sve.st4 (AArch64)
  or vlseg4/vsseg4 (RISC-V) intrinsics on targets that support scalable vectors
  (see the IR sketch below).
- Tests are added for targets that support scalable vectors.
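
For reference, the load-side shape the pass now recognizes looks roughly like the
IR below (condensed from the tests added in this series; the element type and the
value names are only illustrative):

  %wide.vec = load <vscale x 16 x i32>, ptr %a, align 4
  %dvec = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
  %ev = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %dvec, 0
  %od = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %dvec, 1
  %dvec.lo = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %ev)
  %dvec.hi = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %od)
  ; the four extractvalue leaves of %dvec.lo/%dvec.hi are the four de-interleaved
  ; lanes; the load plus the whole deinterleave2 tree is rewritten into a single
  ; ldN call (sve.ld4 on AArch64, vlseg4 on RISC-V) that feeds those leaves.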

Change-Id: I76ef31080ddd72b182c1a3b1752a6178dc78ea84
---
 llvm/include/llvm/CodeGen/TargetLowering.h    |  4 +
 llvm/lib/CodeGen/InterleavedAccessPass.cpp    | 83 +++++++++++++++++--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 40 ++++++---
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  2 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 37 +++++++--
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  4 +-
 .../CodeGen/AArch64/sve-deinterleave-load.ll  | 78 +++++++++++++++++
 .../RISCV/rvv/sve-deinterleave-load.ll        | 74 +++++++++++++++++
 8 files changed, 294 insertions(+), 28 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 50a8c7eb75af5..818d3d9241806 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -56,6 +56,8 @@
 #include <cstdint>
 #include <iterator>
 #include <map>
+#include <queue>
+#include <stack>
 #include <string>
 #include <utility>
 #include <vector>
@@ -3157,6 +3159,7 @@ class TargetLoweringBase {
   /// \p DI is the deinterleave intrinsic.
   /// \p LI is the accompanying load instruction
   virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
+                                                SmallVector<Value *> &LeafNodes,
                                                 LoadInst *LI) const {
     return false;
   }
@@ -3168,6 +3171,7 @@ class TargetLoweringBase {
   /// \p II is the interleave intrinsic.
   /// \p SI is the accompanying store instruction
   virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                               SmallVector<Value *> &LeafNodes,
                                                StoreInst *SI) const {
     return false;
   }
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 8c9065aec7faa..21e6ba79e365a 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -70,6 +70,7 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <cassert>
+#include <queue>
 #include <utility>
 
 using namespace llvm;
@@ -488,12 +489,57 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
 
   LLVM_DEBUG(dbgs() << "IA: Found a deinterleave intrinsic: " << *DI << "\n");
 
+  std::stack<IntrinsicInst *> DeinterleaveTreeQueue;
+  SmallVector<Value *> TempLeafNodes, LeafNodes;
+  std::map<IntrinsicInst *, bool> mp;
+  SmallVector<Instruction *> TempDeadInsts;
+
+  DeinterleaveTreeQueue.push(DI);
+  while (!DeinterleaveTreeQueue.empty()) {
+    auto CurrentDI = DeinterleaveTreeQueue.top();
+    DeinterleaveTreeQueue.pop();
+    TempDeadInsts.push_back(CurrentDI);
+    // iterate over extract users of deinterleave
+    for (auto UserExtract : CurrentDI->users()) {
+      Instruction *Extract = dyn_cast<Instruction>(UserExtract);
+      if (!Extract || Extract->getOpcode() != Instruction::ExtractValue)
+        continue;
+      bool IsLeaf = true;
+      // iterate over deinterleave users of extract
+      for (auto UserDI : UserExtract->users()) {
+        IntrinsicInst *Child_DI = dyn_cast<IntrinsicInst>(UserDI);
+        if (!Child_DI || Child_DI->getIntrinsicID() !=
+                             Intrinsic::experimental_vector_deinterleave2)
+          continue;
+        IsLeaf = false;
+        if (mp.count(Child_DI) == 0) {
+          DeinterleaveTreeQueue.push(Child_DI);
+        }
+        continue;
+      }
+      if (IsLeaf) {
+        TempLeafNodes.push_back(UserExtract);
+        TempDeadInsts.push_back(Extract);
+      } else {
+        TempDeadInsts.push_back(Extract);
+      }
+    }
+  }
+  // sort the deinterleaved nodes in the order that
+  // they will be extracted from the target-specific intrinsic.
+  for (unsigned I = 1; I < TempLeafNodes.size(); I += 2)
+    LeafNodes.push_back(TempLeafNodes[I]);
+
+  for (unsigned I = 0; I < TempLeafNodes.size(); I += 2)
+    LeafNodes.push_back(TempLeafNodes[I]);
+
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LI))
+  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LeafNodes, LI))
     return false;
 
   // We now have a target-specific load, so delete the old one.
-  DeadInsts.push_back(DI);
+  DeadInsts.insert(DeadInsts.end(), TempDeadInsts.rbegin(),
+                   TempDeadInsts.rend());
   DeadInsts.push_back(LI);
   return true;
 }
@@ -509,14 +555,38 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
     return false;
 
   LLVM_DEBUG(dbgs() << "IA: Found an interleave intrinsic: " << *II << "\n");
-
+  std::queue<IntrinsicInst *> IeinterleaveTreeQueue;
+  SmallVector<Value *> TempLeafNodes, LeafNodes;
+  SmallVector<Instruction *> TempDeadInsts;
+
+  IeinterleaveTreeQueue.push(II);
+  while (!IeinterleaveTreeQueue.empty()) {
+    auto node = IeinterleaveTreeQueue.front();
+    TempDeadInsts.push_back(node);
+    IeinterleaveTreeQueue.pop();
+    for (unsigned i = 0; i < 2; i++) {
+      auto op = node->getOperand(i);
+      if (auto CurrentII = dyn_cast<IntrinsicInst>(op)) {
+        if (CurrentII->getIntrinsicID() !=
+            Intrinsic::experimental_vector_interleave2)
+          continue;
+        IeinterleaveTreeQueue.push(CurrentII);
+        continue;
+      }
+      TempLeafNodes.push_back(op);
+    }
+  }
+  for (unsigned I = 0; I < TempLeafNodes.size(); I += 2)
+    LeafNodes.push_back(TempLeafNodes[I]);
+  for (unsigned I = 1; I < TempLeafNodes.size(); I += 2)
+    LeafNodes.push_back(TempLeafNodes[I]);
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerInterleaveIntrinsicToStore(II, SI))
+  if (!TLI->lowerInterleaveIntrinsicToStore(II, LeafNodes, SI))
     return false;
 
   // We now have a target-specific store, so delete the old one.
   DeadInsts.push_back(SI);
-  DeadInsts.push_back(II);
+  DeadInsts.insert(DeadInsts.end(), TempDeadInsts.begin(), TempDeadInsts.end());
   return true;
 }
 
@@ -537,7 +607,8 @@ bool InterleavedAccessImpl::runOnFunction(Function &F) {
       // with a factor of 2.
       if (II->getIntrinsicID() == Intrinsic::vector_deinterleave2)
         Changed |= lowerDeinterleaveIntrinsic(II, DeadInsts);
-      if (II->getIntrinsicID() == Intrinsic::vector_interleave2)
+
+      else if (II->getIntrinsicID() == Intrinsic::vector_interleave2)
         Changed |= lowerInterleaveIntrinsic(II, DeadInsts);
     }
   }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6223c211b33b6..5de075970457a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16442,15 +16442,16 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
 }
 
 bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
-    IntrinsicInst *DI, LoadInst *LI) const {
+    IntrinsicInst *DI, SmallVector<Value *> &LeafNodes, LoadInst *LI) const {
   // Only deinterleave2 supported at present.
   if (DI->getIntrinsicID() != Intrinsic::vector_deinterleave2)
     return false;
 
-  // Only a factor of 2 supported at present.
-  const unsigned Factor = 2;
+  const unsigned Factor = std::max(2, (int)LeafNodes.size());
 
-  VectorType *VTy = cast<VectorType>(DI->getType()->getContainedType(0));
+  VectorType *VTy = (LeafNodes.size() > 0)
+                        ? cast<VectorType>(LeafNodes.front()->getType())
+                        : cast<VectorType>(DI->getType()->getContainedType(0));
   const DataLayout &DL = DI->getModule()->getDataLayout();
   bool UseScalable;
   if (!isLegalInterleavedAccessType(VTy, DL, UseScalable))
@@ -16506,9 +16507,19 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
     Result = Builder.CreateInsertValue(Result, Left, 0);
     Result = Builder.CreateInsertValue(Result, Right, 1);
   } else {
-    if (UseScalable)
+    if (UseScalable) {
       Result = Builder.CreateCall(LdNFunc, {Pred, BaseAddr}, "ldN");
-    else
+      if (Factor == 2) {
+        DI->replaceAllUsesWith(Result);
+        return true;
+      }
+      for (unsigned I = 0; I < LeafNodes.size(); I++) {
+        llvm::Value *CurrentExtract = LeafNodes[I];
+        Value *Newextrct = Builder.CreateExtractValue(Result, I);
+        CurrentExtract->replaceAllUsesWith(Newextrct);
+      }
+      return true;
+    } else
       Result = Builder.CreateCall(LdNFunc, BaseAddr, "ldN");
   }
 
@@ -16517,15 +16528,15 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
 }
 
 bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
-    IntrinsicInst *II, StoreInst *SI) const {
+    IntrinsicInst *II, SmallVector<Value *> &LeafNodes, StoreInst *SI) const {
   // Only interleave2 supported at present.
   if (II->getIntrinsicID() != Intrinsic::vector_interleave2)
     return false;
 
-  // Only a factor of 2 supported at present.
-  const unsigned Factor = 2;
+  // leaf nodes are the nodes that will be interleaved
+  const unsigned Factor = LeafNodes.size();
 
-  VectorType *VTy = cast<VectorType>(II->getOperand(0)->getType());
+  VectorType *VTy = cast<VectorType>(LeafNodes.front()->getType());
   const DataLayout &DL = II->getModule()->getDataLayout();
   bool UseScalable;
   if (!isLegalInterleavedAccessType(VTy, DL, UseScalable))
@@ -16570,9 +16581,12 @@ bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
       R = Builder.CreateExtractVector(StTy, II->getOperand(1), Idx);
     }
 
-    if (UseScalable)
-      Builder.CreateCall(StNFunc, {L, R, Pred, Address});
-    else
+    if (UseScalable) {
+      SmallVector<Value *> Args(LeafNodes);
+      Args.push_back(Pred);
+      Args.push_back(Address);
+      Builder.CreateCall(StNFunc, Args);
+    } else
       Builder.CreateCall(StNFunc, {L, R, Address});
   }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a44a3d35d2f9c..40567c7946269 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -686,9 +686,11 @@ class AArch64TargetLowering : public TargetLowering {
                              unsigned Factor) const override;
 
   bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
+                                        SmallVector<Value *> &LeafNodes,
                                         LoadInst *LI) const override;
 
   bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                       SmallVector<Value *> &LeafNodes,
                                        StoreInst *SI) const override;
 
   bool isLegalAddImmediate(int64_t) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8d9b0f2acc5f3..8d779ddfd54c1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21231,8 +21231,8 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   return true;
 }
 
-bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
-                                                           LoadInst *LI) const {
+bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
+    IntrinsicInst *DI, SmallVector<Value *> &LeafNodes, LoadInst *LI) const {
   assert(LI->isSimple());
   IRBuilder<> Builder(LI);
 
@@ -21240,10 +21240,13 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
   if (DI->getIntrinsicID() != Intrinsic::vector_deinterleave2)
     return false;
 
-  unsigned Factor = 2;
+  unsigned Factor = std::max(2, (int)LeafNodes.size());
 
   VectorType *VTy = cast<VectorType>(DI->getOperand(0)->getType());
-  VectorType *ResVTy = cast<VectorType>(DI->getType()->getContainedType(0));
+  VectorType *ResVTy =
+      (LeafNodes.size() > 0)
+          ? cast<VectorType>(LeafNodes.front()->getType())
+          : cast<VectorType>(DI->getType()->getContainedType(0));
 
   if (!isLegalInterleavedAccessType(ResVTy, Factor, LI->getAlign(),
                                     LI->getPointerAddressSpace(),
@@ -21271,6 +21274,19 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
                                            {ResVTy, XLenTy});
     VL = Constant::getAllOnesValue(XLenTy);
     Ops.append(Factor, PoisonValue::get(ResVTy));
+    Ops.append({LI->getPointerOperand(), VL});
+    Value *Vlseg = Builder.CreateCall(VlsegNFunc, Ops);
+    //-----------
+    if (Factor == 2) {
+      DI->replaceAllUsesWith(Vlseg);
+      return true;
+    }
+    for (unsigned I = 0; I < LeafNodes.size(); I++) {
+      auto CurrentExtract = LeafNodes[I];
+      Value *NewExtract = Builder.CreateExtractValue(Vlseg, I);
+      CurrentExtract->replaceAllUsesWith(NewExtract);
+    }
+    return true;
   }
 
   Ops.append({LI->getPointerOperand(), VL});
@@ -21281,8 +21297,8 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
   return true;
 }
 
-bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
-                                                          StoreInst *SI) const {
+bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
+    IntrinsicInst *II, SmallVector<Value *> &LeafNodes, StoreInst *SI) const {
   assert(SI->isSimple());
   IRBuilder<> Builder(SI);
 
@@ -21290,10 +21306,10 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
   if (II->getIntrinsicID() != Intrinsic::vector_interleave2)
     return false;
 
-  unsigned Factor = 2;
+  unsigned Factor = LeafNodes.size();
 
   VectorType *VTy = cast<VectorType>(II->getType());
-  VectorType *InVTy = cast<VectorType>(II->getOperand(0)->getType());
+  VectorType *InVTy = cast<VectorType>(LeafNodes.front()->getType());
 
   if (!isLegalInterleavedAccessType(InVTy, Factor, SI->getAlign(),
                                     SI->getPointerAddressSpace(),
@@ -21319,6 +21335,11 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
     VssegNFunc = Intrinsic::getDeclaration(SI->getModule(), IntrIds[Factor - 2],
                                            {InVTy, XLenTy});
     VL = Constant::getAllOnesValue(XLenTy);
+    SmallVector<Value *> Args(LeafNodes);
+    Args.push_back(SI->getPointerOperand());
+    Args.push_back(VL);
+    Builder.CreateCall(VssegNFunc, Args);
+    return true;
   }
 
   Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 1efc54566b4b1..1288573106fb3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -867,10 +867,12 @@ class RISCVTargetLowering : public TargetLowering {
   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                              unsigned Factor) const override;
 
-  bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *II,
+  bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
+                                        SmallVector<Value *> &LeafNodes,
                                         LoadInst *LI) const override;
 
   bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                       SmallVector<Value *> &LeafNodes,
                                        StoreInst *SI) const override;
 
   bool supportKCFIBundles() const override { return true; }
diff --git a/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll b/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
new file mode 100644
index 0000000000000..606bb93e309e1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+%struct.xyzt = type { i32, i32, i32, i32 }
+
+define dso_local void @loop_xyzt(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
+; CHECK-LABEL: loop_xyzt:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    cntw x10
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:    mov w9, #1024 // =0x400
+; CHECK-NEXT:    neg x10, x10
+; CHECK-NEXT:    rdvl x11, #4
+; CHECK-NEXT:  .LBB0_1: // %vector.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    add x12, x1, x8
+; CHECK-NEXT:    adds x9, x9, x10
+; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x12]
+; CHECK-NEXT:    add x12, x2, x8
+; CHECK-NEXT:    ld4w { z4.s - z7.s }, p0/z, [x12]
+; CHECK-NEXT:    add x12, x0, x8
+; CHECK-NEXT:    add x8, x8, x11
+; CHECK-NEXT:    add z16.s, z4.s, z0.s
+; CHECK-NEXT:    sub z17.s, z1.s, z5.s
+; CHECK-NEXT:    movprfx z18, z2
+; CHECK-NEXT:    lsl z18.s, p0/m, z18.s, z6.s
+; CHECK-NEXT:    movprfx z19, z3
+; CHECK-NEXT:    asr z19.s, p0/m, z19.s, z7.s
+; CHECK-NEXT:    st4w { z16.s - z19.s }, p0, [x12]
+; CHECK-NEXT:    b.ne .LBB0_1
+; CHECK-NEXT:  // %bb.2: // %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %2 = getelementptr inbounds %struct.xyzt, ptr %a, i64 %index
+  %wide.vec = load <vscale x 16 x i32>, ptr %2, align 4
+  %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
+  %3 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 0
+  %4 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 1
+  %root.strided.vec55 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %3)
+  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 0
+  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 1
+  %root.strided.vec56 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %4)
+  %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 0
+  %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 1
+  %9 = getelementptr inbounds %struct.xyzt, ptr %b, i64 %index
+  %wide.vec57 = load <vscale x 16 x i32>, ptr %9, align 4
+  %root.strided.vec58 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec57)
+  %10 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 0
+  %11 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 1
+  %root.strided.vec59 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %10)
+  %12 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 0
+  %13 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 1
+  %root.strided.vec60 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %11)
+  %14 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 0
+  %15 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 1
+  %16 = add nsw <vscale x 4 x i32> %12, %5
+  %17 = sub nsw <vscale x 4 x i32> %7, %14
+  %18 = shl <vscale x 4 x i32> %6, %13
+  %19 = ashr <vscale x 4 x i32> %8, %15
+  %20 = getelementptr inbounds %struct.xyzt, ptr %dst, i64 %index
+  %interleaved.vec = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %16, <vscale x 4 x i32> %18)
+  %interleaved.vec61 = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %17, <vscale x 4 x i32> %19)
+  %interleaved.vec62 = tail call <vscale x 16 x i32> @llvm.experimental.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleaved.vec, <vscale x 8 x i32> %interleaved.vec61)
+  store <vscale x 16 x i32> %interleaved.vec62, ptr %20, align 4
+  %index.next = add nuw i64 %index, %1
+  %21 = icmp eq i64 %index.next, 1024
+  br i1 %21, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll
new file mode 100644
index 0000000000000..2ea14b13265c6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
+
+%struct.xyzt = type { i32, i32, i32, i32 }
+
+define dso_local void @loop_xyzt(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
+; CHECK-LABEL: loop_xyzt:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a4, vlenb
+; CHECK-NEXT:    srli a3, a4, 1
+; CHECK-NEXT:    slli a4, a4, 3
+; CHECK-NEXT:    li a5, 1024
+; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
+; CHECK-NEXT:  .LBB0_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vlseg4e32.v v8, (a1)
+; CHECK-NEXT:    vlseg4e32.v v16, (a2)
+; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsub.vv v10, v10, v18
+; CHECK-NEXT:    vsll.vv v12, v12, v20
+; CHECK-NEXT:    vsra.vv v14, v14, v22
+; CHECK-NEXT:    vsseg4e32.v v8, (a0)
+; CHECK-NEXT:    sub a5, a5, a3
+; CHECK-NEXT:    add a0, a0, a4
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    add a1, a1, a4
+; CHECK-NEXT:    bnez a5, .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %2 = getelementptr inbounds %struct.xyzt, ptr %a, i64 %index
+  %wide.vec = load <vscale x 16 x i32>, ptr %2, align 4
+  %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
+  %3 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 0
+  %4 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 1
+  %root.strided.vec55 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %3)
+  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 0
+  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 1
+  %root.strided.vec56 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %4)
+  %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 0
+  %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 1
+  %9 = getelementptr inbounds %struct.xyzt, ptr %b, i64 %index
+  %wide.vec57 = load <vscale x 16 x i32>, ptr %9, align 4
+  %root.strided.vec58 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec57)
+  %10 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 0
+  %11 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 1
+  %root.strided.vec59 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %10)
+  %12 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 0
+  %13 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 1
+  %root.strided.vec60 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %11)
+  %14 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 0
+  %15 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 1
+  %16 = add nsw <vscale x 4 x i32> %12, %5
+  %17 = sub nsw <vscale x 4 x i32> %7, %14
+  %18 = shl <vscale x 4 x i32> %6, %13
+  %19 = ashr <vscale x 4 x i32> %8, %15
+  %20 = getelementptr inbounds %struct.xyzt, ptr %dst, i64 %index
+  %interleaved.vec = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %16, <vscale x 4 x i32> %18)
+  %interleaved.vec61 = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %17, <vscale x 4 x i32> %19)
+  %interleaved.vec62 = tail call <vscale x 16 x i32> @llvm.experimental.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleaved.vec, <vscale x 8 x i32> %interleaved.vec61)
+  store <vscale x 16 x i32> %interleaved.vec62, ptr %20, align 4
+  %index.next = add nuw i64 %index, %1
+  %21 = icmp eq i64 %index.next, 1024
+  br i1 %21, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}

From 71120eca0055c9279cf94d3b5c43e4cba2d9fe4a Mon Sep 17 00:00:00 2001
From: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
Date: Mon, 29 Apr 2024 05:03:36 +0000
Subject: [PATCH 3/4] [PatternMatch]: Add m_Interleave2 and m_Deinterleave2
 matchers.

Change-Id: Id94189e601ed70c5ea922f9adbee63cf8b80829a
---
 llvm/include/llvm/IR/PatternMatch.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 0d6d86cb47e67..28280720fb2c0 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -2856,6 +2856,17 @@ inline VScaleVal_match m_VScale() {
   return VScaleVal_match();
 }
 
+template <typename Opnd0, typename Opnd1>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty
+m_Interleave2(const Opnd0 &Op0, const Opnd1 &Op1) {
+  return m_Intrinsic<Intrinsic::vector_interleave2>(Op0, Op1);
+}
+
+template <typename Opnd>
+inline typename m_Intrinsic_Ty<Opnd>::Ty m_Deinterleave2(const Opnd &Op) {
+  return m_Intrinsic<Intrinsic::vector_deinterleave2>(Op);
+}
+
 template <typename LHS, typename RHS, unsigned Opcode, bool Commutable = false>
 struct LogicalOp_match {
   LHS L;

From bc0344efa36570cb15b695bd9ba54f7eea4525f4 Mon Sep 17 00:00:00 2001
From: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
Date: Wed, 15 May 2024 17:25:10 +0000
Subject: [PATCH 4/4] [AArch64]: Use PatternMatch to spot (de)interleave
 accesses
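
The store-side shape being matched is the mirror image: a small tree of
interleave2 calls feeding one wide store (again condensed from the tests in this
series; names and types are only illustrative):

  %lo = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c)
  %hi = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %d)
  %vec = call <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %lo, <vscale x 8 x i32> %hi)
  store <vscale x 16 x i32> %vec, ptr %dst, align 4
  ; %lo interleaves lanes 0 and 2 and %hi interleaves lanes 1 and 3, so the outer
  ; interleave2 restores the 0,1,2,3 element order; the m_Interleave2 /
  ; m_Deinterleave2 matchers walk such trees and the AArch64 lowering then emits a
  ; single sve.st4 (or sve.ld4 for the load-side pattern) with factor 4.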

Change-Id: Id7639dcb125a2f642b2fea78ea884b74be1c6b74
---
 llvm/include/llvm/CodeGen/TargetLowering.h    |   2 -
 llvm/lib/CodeGen/InterleavedAccessPass.cpp    |  80 +--------
 .../Target/AArch64/AArch64ISelLowering.cpp    | 153 ++++++++++++------
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   2 -
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  37 +----
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |   2 -
 .../CodeGen/AArch64/sve-deinterleave-load.ll  |  78 ---------
 .../AArch64/sve-interleave_accesses4-load.ll  |  74 +++------
 .../RISCV/rvv/sve-deinterleave-load.ll        |  74 ---------
 9 files changed, 141 insertions(+), 361 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 818d3d9241806..4927cb5926743 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3159,7 +3159,6 @@ class TargetLoweringBase {
   /// \p DI is the deinterleave intrinsic.
   /// \p LI is the accompanying load instruction
   virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
-                                                SmallVector<Value *> &LeafNodes,
                                                 LoadInst *LI) const {
     return false;
   }
@@ -3171,7 +3170,6 @@ class TargetLoweringBase {
   /// \p II is the interleave intrinsic.
   /// \p SI is the accompanying store instruction
   virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
-                                               SmallVector<Value *> &LeafNodes,
                                                StoreInst *SI) const {
     return false;
   }
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 21e6ba79e365a..4b7f97433e0c4 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -70,7 +70,6 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <cassert>
-#include <queue>
 #include <utility>
 
 using namespace llvm;
@@ -489,57 +488,12 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
 
   LLVM_DEBUG(dbgs() << "IA: Found a deinterleave intrinsic: " << *DI << "\n");
 
-  std::stack<IntrinsicInst *> DeinterleaveTreeQueue;
-  SmallVector<Value *> TempLeafNodes, LeafNodes;
-  std::map<IntrinsicInst *, bool> mp;
-  SmallVector<Instruction *> TempDeadInsts;
-
-  DeinterleaveTreeQueue.push(DI);
-  while (!DeinterleaveTreeQueue.empty()) {
-    auto CurrentDI = DeinterleaveTreeQueue.top();
-    DeinterleaveTreeQueue.pop();
-    TempDeadInsts.push_back(CurrentDI);
-    // iterate over extract users of deinterleave
-    for (auto UserExtract : CurrentDI->users()) {
-      Instruction *Extract = dyn_cast<Instruction>(UserExtract);
-      if (!Extract || Extract->getOpcode() != Instruction::ExtractValue)
-        continue;
-      bool IsLeaf = true;
-      // iterate over deinterleave users of extract
-      for (auto UserDI : UserExtract->users()) {
-        IntrinsicInst *Child_DI = dyn_cast<IntrinsicInst>(UserDI);
-        if (!Child_DI || Child_DI->getIntrinsicID() !=
-                             Intrinsic::experimental_vector_deinterleave2)
-          continue;
-        IsLeaf = false;
-        if (mp.count(Child_DI) == 0) {
-          DeinterleaveTreeQueue.push(Child_DI);
-        }
-        continue;
-      }
-      if (IsLeaf) {
-        TempLeafNodes.push_back(UserExtract);
-        TempDeadInsts.push_back(Extract);
-      } else {
-        TempDeadInsts.push_back(Extract);
-      }
-    }
-  }
-  // sort the deinterleaved nodes in the order that
-  // they will be extracted from the target-specific intrinsic.
-  for (unsigned I = 1; I < TempLeafNodes.size(); I += 2)
-    LeafNodes.push_back(TempLeafNodes[I]);
-
-  for (unsigned I = 0; I < TempLeafNodes.size(); I += 2)
-    LeafNodes.push_back(TempLeafNodes[I]);
-
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LeafNodes, LI))
+  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LI))
     return false;
 
   // We now have a target-specific load, so delete the old one.
-  DeadInsts.insert(DeadInsts.end(), TempDeadInsts.rbegin(),
-                   TempDeadInsts.rend());
+  DeadInsts.push_back(DI);
   DeadInsts.push_back(LI);
   return true;
 }
@@ -555,38 +509,14 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
     return false;
 
   LLVM_DEBUG(dbgs() << "IA: Found an interleave intrinsic: " << *II << "\n");
-  std::queue<IntrinsicInst *> IeinterleaveTreeQueue;
-  SmallVector<Value *> TempLeafNodes, LeafNodes;
-  SmallVector<Instruction *> TempDeadInsts;
-
-  IeinterleaveTreeQueue.push(II);
-  while (!IeinterleaveTreeQueue.empty()) {
-    auto node = IeinterleaveTreeQueue.front();
-    TempDeadInsts.push_back(node);
-    IeinterleaveTreeQueue.pop();
-    for (unsigned i = 0; i < 2; i++) {
-      auto op = node->getOperand(i);
-      if (auto CurrentII = dyn_cast<IntrinsicInst>(op)) {
-        if (CurrentII->getIntrinsicID() !=
-            Intrinsic::experimental_vector_interleave2)
-          continue;
-        IeinterleaveTreeQueue.push(CurrentII);
-        continue;
-      }
-      TempLeafNodes.push_back(op);
-    }
-  }
-  for (unsigned I = 0; I < TempLeafNodes.size(); I += 2)
-    LeafNodes.push_back(TempLeafNodes[I]);
-  for (unsigned I = 1; I < TempLeafNodes.size(); I += 2)
-    LeafNodes.push_back(TempLeafNodes[I]);
+
   // Try and match this with target specific intrinsics.
-  if (!TLI->lowerInterleaveIntrinsicToStore(II, LeafNodes, SI))
+  if (!TLI->lowerInterleaveIntrinsicToStore(II, SI))
     return false;
 
   // We now have a target-specific store, so delete the old one.
   DeadInsts.push_back(SI);
-  DeadInsts.insert(DeadInsts.end(), TempDeadInsts.begin(), TempDeadInsts.end());
+  DeadInsts.push_back(II);
   return true;
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5de075970457a..773377e901163 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16441,18 +16441,56 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
   return true;
 }
 
+bool GetDeinterleaveLeaves(Value *DI,
+                           SmallVectorImpl<Value *> &DeinterleaveUsers,
+                           SmallVectorImpl<Instruction *> &DeadInsts) {
+  if (!DI->hasNUses(2))
+    return false;
+
+  auto *Extr0 = *(++DI->user_begin());
+  auto *Extr1 = *(DI->user_begin());
+  if (!match(Extr0, m_ExtractValue<0>(m_Deinterleave2(m_Value()))))
+    return false;
+
+  auto De1 = *(Extr0->user_begin());
+  if (!GetDeinterleaveLeaves(De1, DeinterleaveUsers, DeadInsts))
+    // leaf extract
+    DeinterleaveUsers.push_back(Extr0);
+  else {
+    // parent extract that will not be used anymore
+    DeadInsts.push_back(dyn_cast<Instruction>(De1));
+    DeadInsts.push_back(dyn_cast<Instruction>(Extr0));
+  }
+  auto De2 = *(Extr1->user_begin());
+  if (!GetDeinterleaveLeaves(De2, DeinterleaveUsers, DeadInsts))
+    // leaf extract
+    DeinterleaveUsers.push_back(Extr1);
+  else {
+    // parent extract that will not be used anymore
+    DeadInsts.push_back(dyn_cast<Instruction>(De2));
+    DeadInsts.push_back(dyn_cast<Instruction>(Extr1));
+  }
+  return true;
+}
+
 bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
-    IntrinsicInst *DI, SmallVector<Value *> &LeafNodes, LoadInst *LI) const {
+    IntrinsicInst *DI, LoadInst *LI) const {
   // Only deinterleave2 supported at present.
   if (DI->getIntrinsicID() != Intrinsic::vector_deinterleave2)
     return false;
 
-  const unsigned Factor = std::max(2, (int)LeafNodes.size());
-
-  VectorType *VTy = (LeafNodes.size() > 0)
-                        ? cast<VectorType>(LeafNodes.front()->getType())
-                        : cast<VectorType>(DI->getType()->getContainedType(0));
+  SmallVector<Value *, 4> ValuesToDeinterleave;
+  SmallVector<Instruction *, 10> DeadInsts;
   const DataLayout &DL = DI->getModule()->getDataLayout();
+  unsigned Factor = 2;
+  VectorType *VTy = cast<VectorType>(DI->getType()->getContainedType(0));
+  if (GetDeinterleaveLeaves(DI, ValuesToDeinterleave, DeadInsts)) {
+    Factor = ValuesToDeinterleave.size();
+    VTy = cast<VectorType>(ValuesToDeinterleave[0]->getType());
+  }
+
+  assert(Factor && "Expected Interleave Factor >= 2");
+
   bool UseScalable;
   if (!isLegalInterleavedAccessType(VTy, DL, UseScalable))
     return false;
@@ -16463,7 +16501,6 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
     return false;
 
   unsigned NumLoads = getNumInterleavedAccesses(VTy, DL, UseScalable);
-
   VectorType *LdTy =
       VectorType::get(VTy->getElementType(),
                       VTy->getElementCount().divideCoefficientBy(NumLoads));
@@ -16473,7 +16510,6 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
                                                 UseScalable, LdTy, PtrTy);
 
   IRBuilder<> Builder(LI);
-
   Value *Pred = nullptr;
   if (UseScalable)
     Pred =
@@ -16482,9 +16518,8 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
   Value *BaseAddr = LI->getPointerOperand();
   Value *Result;
   if (NumLoads > 1) {
-    Value *Left = PoisonValue::get(VTy);
-    Value *Right = PoisonValue::get(VTy);
-
+    // Create multiple legal small ldN instead of a wide one.
+    SmallVector<Value *, 4> WideValues(Factor, (PoisonValue::get(VTy)));
     for (unsigned I = 0; I < NumLoads; ++I) {
       Value *Offset = Builder.getInt64(I * Factor);
 
@@ -16494,49 +16529,71 @@ bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
         LdN = Builder.CreateCall(LdNFunc, {Pred, Address}, "ldN");
       else
         LdN = Builder.CreateCall(LdNFunc, Address, "ldN");
-
       Value *Idx =
           Builder.getInt64(I * LdTy->getElementCount().getKnownMinValue());
-      Left = Builder.CreateInsertVector(
-          VTy, Left, Builder.CreateExtractValue(LdN, 0), Idx);
-      Right = Builder.CreateInsertVector(
-          VTy, Right, Builder.CreateExtractValue(LdN, 1), Idx);
+      for (int J = 0; J < Factor; ++J) {
+        WideValues[J] = Builder.CreateInsertVector(
+            VTy, WideValues[J], Builder.CreateExtractValue(LdN, J), Idx);
+      }
+    }
+    // FIXME: the types should NOT be added manually.
+    if (2 == Factor)
+      Result = PoisonValue::get(StructType::get(VTy, VTy));
+    else
+      Result = PoisonValue::get(StructType::get(VTy, VTy, VTy, VTy));
+    // Construct the wide result out of the small results.
+    for (int J = 0; J < Factor; ++J) {
+      Result = Builder.CreateInsertValue(Result, WideValues[J], J);
     }
-
-    Result = PoisonValue::get(DI->getType());
-    Result = Builder.CreateInsertValue(Result, Left, 0);
-    Result = Builder.CreateInsertValue(Result, Right, 1);
   } else {
-    if (UseScalable) {
+    if (UseScalable)
       Result = Builder.CreateCall(LdNFunc, {Pred, BaseAddr}, "ldN");
-      if (Factor == 2) {
-        DI->replaceAllUsesWith(Result);
-        return true;
-      }
-      for (unsigned I = 0; I < LeafNodes.size(); I++) {
-        llvm::Value *CurrentExtract = LeafNodes[I];
-        Value *Newextrct = Builder.CreateExtractValue(Result, I);
-        CurrentExtract->replaceAllUsesWith(Newextrct);
-      }
-      return true;
-    } else
+    else
       Result = Builder.CreateCall(LdNFunc, BaseAddr, "ldN");
   }
+  if (Factor > 2) {
+    for (unsigned I = 0; I < ValuesToDeinterleave.size(); I++) {
+      llvm::Value *CurrentExtract = ValuesToDeinterleave[I];
+      Value *NewExtract = Builder.CreateExtractValue(Result, I);
+      CurrentExtract->replaceAllUsesWith(NewExtract);
+      dyn_cast<Instruction>(CurrentExtract)->eraseFromParent();
+    }
 
+    for (auto &dead : DeadInsts)
+      dead->eraseFromParent();
+    return true;
+  }
   DI->replaceAllUsesWith(Result);
   return true;
 }
 
+bool GetInterleaveLeaves(Value *II, SmallVectorImpl<Value *> &InterleaveOps) {
+  Value *Op0, *Op1;
+  if (!match(II, m_Interleave2(m_Value(Op0), m_Value(Op1))))
+    return false;
+
+  if (!GetInterleaveLeaves(Op0, InterleaveOps)) {
+    InterleaveOps.push_back(Op0);
+  }
+
+  if (!GetInterleaveLeaves(Op1, InterleaveOps)) {
+    InterleaveOps.push_back(Op1);
+  }
+  return true;
+}
+
 bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
-    IntrinsicInst *II, SmallVector<Value *> &LeafNodes, StoreInst *SI) const {
+    IntrinsicInst *II, StoreInst *SI) const {
   // Only interleave2 supported at present.
   if (II->getIntrinsicID() != Intrinsic::vector_interleave2)
     return false;
 
-  // leaf nodes are the nodes that will be interleaved
-  const unsigned Factor = LeafNodes.size();
+  SmallVector<Value *, 4> ValuesToInterleave;
+  GetInterleaveLeaves(II, ValuesToInterleave);
+  unsigned Factor = ValuesToInterleave.size();
+  assert(Factor >= 2 && "Expected Interleave Factor >= 2");
+  VectorType *VTy = cast<VectorType>(ValuesToInterleave[0]->getType());
 
-  VectorType *VTy = cast<VectorType>(LeafNodes.front()->getType());
   const DataLayout &DL = II->getModule()->getDataLayout();
   bool UseScalable;
   if (!isLegalInterleavedAccessType(VTy, DL, UseScalable))
@@ -16566,28 +16623,26 @@ bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
     Pred =
         Builder.CreateVectorSplat(StTy->getElementCount(), Builder.getTrue());
 
-  Value *L = II->getOperand(0);
-  Value *R = II->getOperand(1);
-
+  auto InterleaveOps = ValuesToInterleave;
+  if (UseScalable)
+    ValuesToInterleave.push_back(Pred);
+  ValuesToInterleave.push_back(BaseAddr);
   for (unsigned I = 0; I < NumStores; ++I) {
     Value *Address = BaseAddr;
     if (NumStores > 1) {
       Value *Offset = Builder.getInt64(I * Factor);
       Address = Builder.CreateGEP(StTy, BaseAddr, {Offset});
-
       Value *Idx =
           Builder.getInt64(I * StTy->getElementCount().getKnownMinValue());
-      L = Builder.CreateExtractVector(StTy, II->getOperand(0), Idx);
-      R = Builder.CreateExtractVector(StTy, II->getOperand(1), Idx);
+      for (int J = 0; J < Factor; J++) {
+        ValuesToInterleave[J] =
+            Builder.CreateExtractVector(StTy, InterleaveOps[J], Idx);
+      }
+      // update the address
+      ValuesToInterleave[ValuesToInterleave.size() - 1] = Address;
     }
 
-    if (UseScalable) {
-      SmallVector<Value *> Args(LeafNodes);
-      Args.push_back(Pred);
-      Args.push_back(Address);
-      Builder.CreateCall(StNFunc, Args);
-    } else
-      Builder.CreateCall(StNFunc, {L, R, Address});
+    Builder.CreateCall(StNFunc, ValuesToInterleave);
   }
 
   return true;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 40567c7946269..a44a3d35d2f9c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -686,11 +686,9 @@ class AArch64TargetLowering : public TargetLowering {
                              unsigned Factor) const override;
 
   bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
-                                        SmallVector<Value *> &LeafNodes,
                                         LoadInst *LI) const override;
 
   bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
-                                       SmallVector<Value *> &LeafNodes,
                                        StoreInst *SI) const override;
 
   bool isLegalAddImmediate(int64_t) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8d779ddfd54c1..8d9b0f2acc5f3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21231,8 +21231,8 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   return true;
 }
 
-bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
-    IntrinsicInst *DI, SmallVector<Value *> &LeafNodes, LoadInst *LI) const {
+bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
+                                                           LoadInst *LI) const {
   assert(LI->isSimple());
   IRBuilder<> Builder(LI);
 
@@ -21240,13 +21240,10 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
   if (DI->getIntrinsicID() != Intrinsic::vector_deinterleave2)
     return false;
 
-  unsigned Factor = std::max(2, (int)LeafNodes.size());
+  unsigned Factor = 2;
 
   VectorType *VTy = cast<VectorType>(DI->getOperand(0)->getType());
-  VectorType *ResVTy =
-      (LeafNodes.size() > 0)
-          ? cast<VectorType>(LeafNodes.front()->getType())
-          : cast<VectorType>(DI->getType()->getContainedType(0));
+  VectorType *ResVTy = cast<VectorType>(DI->getType()->getContainedType(0));
 
   if (!isLegalInterleavedAccessType(ResVTy, Factor, LI->getAlign(),
                                     LI->getPointerAddressSpace(),
@@ -21274,19 +21271,6 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
                                            {ResVTy, XLenTy});
     VL = Constant::getAllOnesValue(XLenTy);
     Ops.append(Factor, PoisonValue::get(ResVTy));
-    Ops.append({LI->getPointerOperand(), VL});
-    Value *Vlseg = Builder.CreateCall(VlsegNFunc, Ops);
-    //-----------
-    if (Factor == 2) {
-      DI->replaceAllUsesWith(Vlseg);
-      return true;
-    }
-    for (unsigned I = 0; I < LeafNodes.size(); I++) {
-      auto CurrentExtract = LeafNodes[I];
-      Value *NewExtract = Builder.CreateExtractValue(Vlseg, I);
-      CurrentExtract->replaceAllUsesWith(NewExtract);
-    }
-    return true;
   }
 
   Ops.append({LI->getPointerOperand(), VL});
@@ -21297,8 +21281,8 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
   return true;
 }
 
-bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
-    IntrinsicInst *II, SmallVector<Value *> &LeafNodes, StoreInst *SI) const {
+bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
+                                                          StoreInst *SI) const {
   assert(SI->isSimple());
   IRBuilder<> Builder(SI);
 
@@ -21306,10 +21290,10 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
   if (II->getIntrinsicID() != Intrinsic::vector_interleave2)
     return false;
 
-  unsigned Factor = LeafNodes.size();
+  unsigned Factor = 2;
 
   VectorType *VTy = cast<VectorType>(II->getType());
-  VectorType *InVTy = cast<VectorType>(LeafNodes.front()->getType());
+  VectorType *InVTy = cast<VectorType>(II->getOperand(0)->getType());
 
   if (!isLegalInterleavedAccessType(InVTy, Factor, SI->getAlign(),
                                     SI->getPointerAddressSpace(),
@@ -21335,11 +21319,6 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
     VssegNFunc = Intrinsic::getDeclaration(SI->getModule(), IntrIds[Factor - 2],
                                            {InVTy, XLenTy});
     VL = Constant::getAllOnesValue(XLenTy);
-    SmallVector<Value *> Args(LeafNodes);
-    Args.push_back(SI->getPointerOperand());
-    Args.push_back(VL);
-    Builder.CreateCall(VssegNFunc, Args);
-    return true;
   }
 
   Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 1288573106fb3..713acc37c3f43 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -868,11 +868,9 @@ class RISCVTargetLowering : public TargetLowering {
                              unsigned Factor) const override;
 
   bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
-                                        SmallVector<Value *> &LeafNodes,
                                         LoadInst *LI) const override;
 
   bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
-                                       SmallVector<Value *> &LeafNodes,
                                        StoreInst *SI) const override;
 
   bool supportKCFIBundles() const override { return true; }
diff --git a/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll b/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
deleted file mode 100644
index 606bb93e309e1..0000000000000
--- a/llvm/test/CodeGen/AArch64/sve-deinterleave-load.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
-
-%struct.xyzt = type { i32, i32, i32, i32 }
-
-define dso_local void @loop_xyzt(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
-; CHECK-LABEL: loop_xyzt:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    cntw x10
-; CHECK-NEXT:    mov x8, xzr
-; CHECK-NEXT:    mov w9, #1024 // =0x400
-; CHECK-NEXT:    neg x10, x10
-; CHECK-NEXT:    rdvl x11, #4
-; CHECK-NEXT:  .LBB0_1: // %vector.body
-; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    add x12, x1, x8
-; CHECK-NEXT:    adds x9, x9, x10
-; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x12]
-; CHECK-NEXT:    add x12, x2, x8
-; CHECK-NEXT:    ld4w { z4.s - z7.s }, p0/z, [x12]
-; CHECK-NEXT:    add x12, x0, x8
-; CHECK-NEXT:    add x8, x8, x11
-; CHECK-NEXT:    add z16.s, z4.s, z0.s
-; CHECK-NEXT:    sub z17.s, z1.s, z5.s
-; CHECK-NEXT:    movprfx z18, z2
-; CHECK-NEXT:    lsl z18.s, p0/m, z18.s, z6.s
-; CHECK-NEXT:    movprfx z19, z3
-; CHECK-NEXT:    asr z19.s, p0/m, z19.s, z7.s
-; CHECK-NEXT:    st4w { z16.s - z19.s }, p0, [x12]
-; CHECK-NEXT:    b.ne .LBB0_1
-; CHECK-NEXT:  // %bb.2: // %for.cond.cleanup
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call i64 @llvm.vscale.i64()
-  %1 = shl nuw nsw i64 %0, 2
-  br label %vector.body
-
-vector.body:                                      ; preds = %vector.body, %entry
-  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds %struct.xyzt, ptr %a, i64 %index
-  %wide.vec = load <vscale x 16 x i32>, ptr %2, align 4
-  %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
-  %3 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 0
-  %4 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 1
-  %root.strided.vec55 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %3)
-  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 0
-  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 1
-  %root.strided.vec56 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %4)
-  %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 0
-  %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 1
-  %9 = getelementptr inbounds %struct.xyzt, ptr %b, i64 %index
-  %wide.vec57 = load <vscale x 16 x i32>, ptr %9, align 4
-  %root.strided.vec58 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec57)
-  %10 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 0
-  %11 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 1
-  %root.strided.vec59 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %10)
-  %12 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 0
-  %13 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 1
-  %root.strided.vec60 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %11)
-  %14 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 0
-  %15 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 1
-  %16 = add nsw <vscale x 4 x i32> %12, %5
-  %17 = sub nsw <vscale x 4 x i32> %7, %14
-  %18 = shl <vscale x 4 x i32> %6, %13
-  %19 = ashr <vscale x 4 x i32> %8, %15
-  %20 = getelementptr inbounds %struct.xyzt, ptr %dst, i64 %index
-  %interleaved.vec = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %16, <vscale x 4 x i32> %18)
-  %interleaved.vec61 = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %17, <vscale x 4 x i32> %19)
-  %interleaved.vec62 = tail call <vscale x 16 x i32> @llvm.experimental.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleaved.vec, <vscale x 8 x i32> %interleaved.vec61)
-  store <vscale x 16 x i32> %interleaved.vec62, ptr %20, align 4
-  %index.next = add nuw i64 %index, %1
-  %21 = icmp eq i64 %index.next, 1024
-  br i1 %21, label %for.cond.cleanup, label %vector.body
-
-for.cond.cleanup:                                 ; preds = %vector.body
-  ret void
-}
diff --git a/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll b/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll
index dcade71ccb684..8453d811cca4c 100644
--- a/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-interleave_accesses4-load.ll
@@ -7,23 +7,14 @@ define void @interleave(ptr noalias nocapture noundef writeonly %dst, ptr nocapt
 ; CHECK-LABEL: interleave:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld2w { z1.s, z2.s }, p0/z, [x1]
-; CHECK-NEXT:    ld2w { z3.s, z4.s }, p0/z, [x1, #2, mul vl]
-; CHECK-NEXT:    uzp2 z5.s, z1.s, z3.s
-; CHECK-NEXT:    uzp1 z6.s, z1.s, z3.s
-; CHECK-NEXT:    uzp2 z7.s, z2.s, z4.s
-; CHECK-NEXT:    uzp1 z1.s, z2.s, z4.s
-; CHECK-NEXT:    add z2.s, z0.s, z6.s
-; CHECK-NEXT:    movprfx z3, z5
-; CHECK-NEXT:    lsl z3.s, p0/m, z3.s, z0.s
-; CHECK-NEXT:    sub z1.s, z1.s, z0.s
-; CHECK-NEXT:    asrr z0.s, p0/m, z0.s, z7.s
-; CHECK-NEXT:    zip1 z4.s, z2.s, z3.s
-; CHECK-NEXT:    zip2 z2.s, z2.s, z3.s
-; CHECK-NEXT:    zip1 z5.s, z1.s, z0.s
-; CHECK-NEXT:    zip2 z3.s, z1.s, z0.s
-; CHECK-NEXT:    st2w { z4.s, z5.s }, p0, [x0]
-; CHECK-NEXT:    st2w { z2.s, z3.s }, p0, [x0, #2, mul vl]
+; CHECK-NEXT:    ld4w { z1.s - z4.s }, p0/z, [x1]
+; CHECK-NEXT:    add z24.s, z0.s, z1.s
+; CHECK-NEXT:    sub z26.s, z3.s, z0.s
+; CHECK-NEXT:    movprfx z25, z2
+; CHECK-NEXT:    lsl z25.s, p0/m, z25.s, z0.s
+; CHECK-NEXT:    movprfx z27, z4
+; CHECK-NEXT:    asr z27.s, p0/m, z27.s, z0.s
+; CHECK-NEXT:    st4w { z24.s - z27.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %wide.vec = load <vscale x 16 x i32>, ptr %a, align 4
   %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
@@ -50,39 +41,22 @@ define void @wide_interleave(ptr noalias nocapture noundef writeonly %dst, ptr n
 ; CHECK-LABEL: wide_interleave:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    ld2w { z2.s, z3.s }, p0/z, [x1]
-; CHECK-NEXT:    ld2w { z4.s, z5.s }, p0/z, [x1, #2, mul vl]
-; CHECK-NEXT:    ld2w { z6.s, z7.s }, p0/z, [x1, #4, mul vl]
-; CHECK-NEXT:    ld2w { z24.s, z25.s }, p0/z, [x1, #6, mul vl]
-; CHECK-NEXT:    uzp2 z26.s, z2.s, z4.s
-; CHECK-NEXT:    uzp1 z27.s, z2.s, z4.s
-; CHECK-NEXT:    uzp2 z28.s, z3.s, z5.s
-; CHECK-NEXT:    uzp1 z2.s, z3.s, z5.s
-; CHECK-NEXT:    add z3.s, z0.s, z27.s
-; CHECK-NEXT:    movprfx z4, z26
-; CHECK-NEXT:    lsl z4.s, p0/m, z4.s, z0.s
-; CHECK-NEXT:    sub z2.s, z2.s, z0.s
-; CHECK-NEXT:    asrr z0.s, p0/m, z0.s, z28.s
-; CHECK-NEXT:    zip1 z26.s, z3.s, z4.s
-; CHECK-NEXT:    zip2 z3.s, z3.s, z4.s
-; CHECK-NEXT:    zip1 z27.s, z2.s, z0.s
-; CHECK-NEXT:    zip2 z4.s, z2.s, z0.s
-; CHECK-NEXT:    uzp2 z0.s, z6.s, z24.s
-; CHECK-NEXT:    uzp1 z2.s, z6.s, z24.s
-; CHECK-NEXT:    st2w { z26.s, z27.s }, p0, [x0]
-; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    add z2.s, z1.s, z2.s
-; CHECK-NEXT:    st2w { z3.s, z4.s }, p0, [x0, #2, mul vl]
-; CHECK-NEXT:    uzp2 z3.s, z7.s, z25.s
-; CHECK-NEXT:    uzp1 z4.s, z7.s, z25.s
-; CHECK-NEXT:    zip1 z5.s, z2.s, z0.s
-; CHECK-NEXT:    sub z4.s, z4.s, z1.s
-; CHECK-NEXT:    asrr z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT:    zip2 z2.s, z2.s, z0.s
-; CHECK-NEXT:    zip1 z6.s, z4.s, z1.s
-; CHECK-NEXT:    zip2 z3.s, z4.s, z1.s
-; CHECK-NEXT:    st2w { z5.s, z6.s }, p0, [x0, #4, mul vl]
-; CHECK-NEXT:    st2w { z2.s, z3.s }, p0, [x0, #6, mul vl]
+; CHECK-NEXT:    ld4w { z2.s - z5.s }, p0/z, [x1]
+; CHECK-NEXT:    ld4w { z24.s - z27.s }, p0/z, [x1, #4, mul vl]
+; CHECK-NEXT:    add z28.s, z0.s, z2.s
+; CHECK-NEXT:    sub z30.s, z4.s, z0.s
+; CHECK-NEXT:    movprfx z29, z3
+; CHECK-NEXT:    lsl z29.s, p0/m, z29.s, z0.s
+; CHECK-NEXT:    movprfx z31, z5
+; CHECK-NEXT:    asr z31.s, p0/m, z31.s, z0.s
+; CHECK-NEXT:    add z2.s, z1.s, z24.s
+; CHECK-NEXT:    sub z4.s, z26.s, z1.s
+; CHECK-NEXT:    movprfx z3, z25
+; CHECK-NEXT:    lsl z3.s, p0/m, z3.s, z1.s
+; CHECK-NEXT:    movprfx z5, z27
+; CHECK-NEXT:    asr z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT:    st4w { z28.s - z31.s }, p0, [x0]
+; CHECK-NEXT:    st4w { z2.s - z5.s }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
   %wide.vec = load <vscale x 32 x i32>, ptr %a, align 4
   %root.strided.vec = tail call { <vscale x 16 x i32>, <vscale x 16 x i32> } @llvm.vector.deinterleave2.nxv32i32(<vscale x 32 x i32> %wide.vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll
deleted file mode 100644
index 2ea14b13265c6..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/sve-deinterleave-load.ll
+++ /dev/null
@@ -1,74 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
-
-%struct.xyzt = type { i32, i32, i32, i32 }
-
-define dso_local void @loop_xyzt(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
-; CHECK-LABEL: loop_xyzt:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    csrr a4, vlenb
-; CHECK-NEXT:    srli a3, a4, 1
-; CHECK-NEXT:    slli a4, a4, 3
-; CHECK-NEXT:    li a5, 1024
-; CHECK-NEXT:    vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT:  .LBB0_1: # %vector.body
-; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vlseg4e32.v v8, (a1)
-; CHECK-NEXT:    vlseg4e32.v v16, (a2)
-; CHECK-NEXT:    vadd.vv v8, v16, v8
-; CHECK-NEXT:    vsub.vv v10, v10, v18
-; CHECK-NEXT:    vsll.vv v12, v12, v20
-; CHECK-NEXT:    vsra.vv v14, v14, v22
-; CHECK-NEXT:    vsseg4e32.v v8, (a0)
-; CHECK-NEXT:    sub a5, a5, a3
-; CHECK-NEXT:    add a0, a0, a4
-; CHECK-NEXT:    add a2, a2, a4
-; CHECK-NEXT:    add a1, a1, a4
-; CHECK-NEXT:    bnez a5, .LBB0_1
-; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call i64 @llvm.vscale.i64()
-  %1 = shl nuw nsw i64 %0, 2
-  br label %vector.body
-
-vector.body:                                      ; preds = %vector.body, %entry
-  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds %struct.xyzt, ptr %a, i64 %index
-  %wide.vec = load <vscale x 16 x i32>, ptr %2, align 4
-  %root.strided.vec = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec)
-  %3 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 0
-  %4 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec, 1
-  %root.strided.vec55 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %3)
-  %5 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 0
-  %6 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec55, 1
-  %root.strided.vec56 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %4)
-  %7 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 0
-  %8 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec56, 1
-  %9 = getelementptr inbounds %struct.xyzt, ptr %b, i64 %index
-  %wide.vec57 = load <vscale x 16 x i32>, ptr %9, align 4
-  %root.strided.vec58 = tail call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.experimental.vector.deinterleave2.nxv16i32(<vscale x 16 x i32> %wide.vec57)
-  %10 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 0
-  %11 = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } %root.strided.vec58, 1
-  %root.strided.vec59 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %10)
-  %12 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 0
-  %13 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec59, 1
-  %root.strided.vec60 = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %11)
-  %14 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 0
-  %15 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %root.strided.vec60, 1
-  %16 = add nsw <vscale x 4 x i32> %12, %5
-  %17 = sub nsw <vscale x 4 x i32> %7, %14
-  %18 = shl <vscale x 4 x i32> %6, %13
-  %19 = ashr <vscale x 4 x i32> %8, %15
-  %20 = getelementptr inbounds %struct.xyzt, ptr %dst, i64 %index
-  %interleaved.vec = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %16, <vscale x 4 x i32> %18)
-  %interleaved.vec61 = tail call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %17, <vscale x 4 x i32> %19)
-  %interleaved.vec62 = tail call <vscale x 16 x i32> @llvm.experimental.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleaved.vec, <vscale x 8 x i32> %interleaved.vec61)
-  store <vscale x 16 x i32> %interleaved.vec62, ptr %20, align 4
-  %index.next = add nuw i64 %index, %1
-  %21 = icmp eq i64 %index.next, 1024
-  br i1 %21, label %for.cond.cleanup, label %vector.body
-
-for.cond.cleanup:                                 ; preds = %vector.body
-  ret void
-}
