[llvm] [Machine-Combiner] Add a pass to reassociate chains of accumulation instructions into a tree (PR #132728)

Jonathan Cohen via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 24 08:45:50 PDT 2025


https://github.com/jcohen-apple updated https://github.com/llvm/llvm-project/pull/132728

From 133205fc5682305f996db7a607850edb90dd4291 Mon Sep 17 00:00:00 2001
From: Jonathan Cohen <joncoh at apple.com>
Date: Wed, 19 Mar 2025 16:27:00 +0200
Subject: [PATCH] [Machine-Combiner] Add a pass to reassociate chains of
 accumulation instructions into a tree

This pass is designed to increase ILP by splitting long chains of accumulation instructions so that they accumulate into multiple registers in parallel. It currently supports only the S/UABA and S/UABAL families of accumulation instructions, but can be extended to support additional instructions.
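
The effect can be sketched in scalar C++ (an illustration only; the combiner
operates on the vector ABD/ABA machine instructions of an unrolled loop):

  #include <cstdint>

  // Before: one serial accumulator; every step depends on the previous one.
  uint64_t abd_serial(const uint32_t *a, const uint32_t *b, unsigned n) {
    uint64_t acc = 0;
    for (unsigned i = 0; i < n; ++i)
      acc += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i]; // absolute difference
    return acc;
  }

  // After: three partial accumulators (the default maximum width) run
  // independently and are reduced pairwise at the end, shortening the
  // critical path from n to roughly n/3 + 2.
  uint64_t abd_reassociated(const uint32_t *a, const uint32_t *b, unsigned n) {
    uint64_t acc[3] = {0, 0, 0};
    for (unsigned i = 0; i < n; ++i)
      acc[i % 3] += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
    return (acc[0] + acc[1]) + acc[2]; // reduction tree
  }

The transformation is controlled by three new flags: -acc-reassoc (on by
default), -acc-min-depth (chains shorter than 8 are left alone) and
-acc-max-width (at most 3 parallel accumulators by default).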
---
 .../llvm/CodeGen/MachineCombinerPattern.h     |   1 +
 llvm/include/llvm/CodeGen/TargetInstrInfo.h   |  35 ++
 llvm/lib/CodeGen/TargetInstrInfo.cpp          | 270 ++++++++-
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  | 186 +++++-
 llvm/lib/Target/AArch64/AArch64InstrInfo.h    |  16 +-
 .../aarch64-reassociate-accumulators-sve.ll   | 426 ++++++++++++++
 .../aarch64-reassociate-accumulators.ll       | 530 ++++++++++++++++++
 ...hine-combiner-reassociate-accumulators.mir | 181 ++++++
 8 files changed, 1631 insertions(+), 14 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
 create mode 100644 llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
 create mode 100644 llvm/test/CodeGen/AArch64/machine-combiner-reassociate-accumulators.mir

diff --git a/llvm/include/llvm/CodeGen/MachineCombinerPattern.h b/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
index 3428c4dde5c7f..25fce679323ee 100644
--- a/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
+++ b/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -32,6 +32,7 @@ enum MachineCombinerPattern : unsigned {
   REASSOC_AX_YB,
   REASSOC_XA_BY,
   REASSOC_XA_YB,
+  ACC_CHAIN,
 
   TARGET_PATTERN_START
 };
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 8f2792c1cb7d5..1a8be37f321ae 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1276,6 +1276,41 @@ class TargetInstrInfo : public MCInstrInfo {
     return false;
   }
 
+  /// Find chains of accumulations that can be rewritten as a tree for increased
+  /// ILP.
+  bool getAccumulatorReassociationPatterns(
+      MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const;
+
+  /// Find the chain of accumulator instructions that ends at \P CurrentInstr
+  /// and return the accumulated registers in \P Chain.
+  void getAccumulatorChain(MachineInstr *CurrentInstr,
+                           SmallVectorImpl<Register> &Chain) const;
+
+  /// Return true when \P Opcode is an instruction which performs
+  /// accumulation into one of its operand registers.
+  virtual bool isAccumulationOpcode(unsigned Opcode) const { return false; }
+
+  /// Returns an opcode which defines the accumulator used by \P Opcode.
+  virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const {
+    llvm_unreachable("Function not implemented for target!");
+    return 0;
+  }
+
+  /// Returns the opcode that should be used to reduce accumulation registers.
+  virtual unsigned
+  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const {
+    llvm_unreachable("Function not implemented for target!");
+    return 0;
+  }
+
+  /// Reduces branches of the accumulator tree into a single register.
+  void reduceAccumulatorTree(SmallVectorImpl<Register> &RegistersToReduce,
+                             SmallVectorImpl<MachineInstr *> &InsInstrs,
+                             MachineFunction &MF, MachineInstr &Root,
+                             MachineRegisterInfo &MRI,
+                             DenseMap<Register, unsigned> &InstrIdxForVirtReg,
+                             Register ResultReg) const;
+
   /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
   /// for sub and vice versa).
   virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index e517ae1a7c44c..d077dc189f5ac 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/BinaryFormat/Dwarf.h"
 #include "llvm/CodeGen/MachineCombinerPattern.h"
@@ -42,6 +43,19 @@ static cl::opt<bool> DisableHazardRecognizer(
   "disable-sched-hazard", cl::Hidden, cl::init(false),
   cl::desc("Disable hazard detection during preRA scheduling"));
 
+static cl::opt<bool> EnableAccReassociation(
+    "acc-reassoc", cl::Hidden, cl::init(true),
+    cl::desc("Enable reassociation of accumulation chains"));
+
+static cl::opt<unsigned int>
+    MinAccumulatorDepth("acc-min-depth", cl::Hidden, cl::init(8),
+                        cl::desc("Minimum length of accumulator chains "
+                                 "required for the optimization to kick in"));
+
+static cl::opt<unsigned int> MaxAccumulatorWidth(
+    "acc-max-width", cl::Hidden, cl::init(3),
+    cl::desc("Maximum number of branches in the accumulator tree"));
+
 TargetInstrInfo::~TargetInstrInfo() = default;
 
 const TargetRegisterClass*
@@ -899,6 +913,154 @@ bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
          hasReassociableSibling(Inst, Commuted);
 }
 
+// Utility routine that checks if \param MO is defined by an
+// \param CombineOpc instruction in the basic block \param MBB.
+// If \param CombineOpc is not provided, the OpCode check will
+// be skipped.
+static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
+                       unsigned CombineOpc = 0) {
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  MachineInstr *MI = nullptr;
+
+  if (MO.isReg() && MO.getReg().isVirtual())
+    MI = MRI.getUniqueVRegDef(MO.getReg());
+  // And it needs to be in the trace (otherwise, it won't have a depth).
+  if (!MI || MI->getParent() != &MBB ||
+      ((unsigned)MI->getOpcode() != CombineOpc && CombineOpc != 0))
+    return false;
+  // Must only be used by the instruction we combine with.
+  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
+    return false;
+
+  return true;
+}
+
+// A chain of accumulation instructions will be selected IFF:
+//    1. All the accumulation instructions in the chain have the same opcode,
+//       besides the first, which has a slightly different opcode because it
+//       does not accumulate into a register.
+//    2. All the instructions in the chain are combinable (have a single use
+//       which itself is part of the chain).
+//    3. The chain meets the required minimum length.
+void TargetInstrInfo::getAccumulatorChain(
+    MachineInstr *CurrentInstr, SmallVectorImpl<Register> &Chain) const {
+  // Walk up the chain of accumulation instructions and collect them in the
+  // vector.
+  MachineBasicBlock &MBB = *CurrentInstr->getParent();
+  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  unsigned AccumulatorOpcode = CurrentInstr->getOpcode();
+  std::optional<unsigned> ChainStartOpCode =
+      getAccumulationStartOpcode(AccumulatorOpcode);
+
+  if (!ChainStartOpCode.has_value())
+    return;
+
+  // Push the result of the root (final) accumulation to the start of the
+  // chain.
+  Chain.push_back(CurrentInstr->getOperand(0).getReg());
+
+  // Collect the accumulator input register from all instructions in the chain.
+  while (CurrentInstr &&
+         canCombine(MBB, CurrentInstr->getOperand(1), AccumulatorOpcode)) {
+    Chain.push_back(CurrentInstr->getOperand(1).getReg());
+    CurrentInstr = MRI.getUniqueVRegDef(CurrentInstr->getOperand(1).getReg());
+  }
+
+  // Add the instruction at the top of the chain.
+  if (CurrentInstr->getOpcode() == AccumulatorOpcode &&
+      canCombine(MBB, CurrentInstr->getOperand(1)))
+    Chain.push_back(CurrentInstr->getOperand(1).getReg());
+}
+
+/// Find chains of accumulations that can be rewritten as a tree for increased
+/// ILP.
+bool TargetInstrInfo::getAccumulatorReassociationPatterns(
+    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const {
+  if (!EnableAccReassociation)
+    return false;
+
+  unsigned Opc = Root.getOpcode();
+  if (!isAccumulationOpcode(Opc))
+    return false;
+
+  // Verify that this is the end of the chain.
+  MachineBasicBlock &MBB = *Root.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  if (!MRI.hasOneNonDBGUser(Root.getOperand(0).getReg()))
+    return false;
+
+  auto User = MRI.use_instr_begin(Root.getOperand(0).getReg());
+  if (User->getOpcode() == Opc)
+    return false;
+
+  // Walk up the use chain and collect the reduction chain.
+  SmallVector<Register, 32> Chain;
+  getAccumulatorChain(&Root, Chain);
+
+  // Reject chains which are too short to be worth modifying.
+  if (Chain.size() < MinAccumulatorDepth)
+    return false;
+
+  // If the basic block contains any other accumulation instructions of this
+  // opcode that are not part of the chain, don't apply the pattern.
+  SmallSet<Register, 32> ReductionChain(Chain.begin(), Chain.end());
+  for (const auto &I : MBB) {
+    if (I.getOpcode() == Opc &&
+        !ReductionChain.contains(I.getOperand(0).getReg()))
+      return false;
+  }
+
+  Patterns.push_back(MachineCombinerPattern::ACC_CHAIN);
+  return true;
+}
+
+// Reduce branches of the accumulator tree by adding them together.
+void TargetInstrInfo::reduceAccumulatorTree(
+    SmallVectorImpl<Register> &RegistersToReduce,
+    SmallVectorImpl<MachineInstr *> &InsInstrs, MachineFunction &MF,
+    MachineInstr &Root, MachineRegisterInfo &MRI,
+    DenseMap<Register, unsigned> &InstrIdxForVirtReg,
+    Register ResultReg) const {
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  SmallVector<Register, 8> NewRegs;
+
+  // Get the opcode for the reduction instructions we will need to build.
+  // The target must define a reduction opcode for every accumulator it
+  // reports via isAccumulationOpcode.
+  unsigned ReduceOpCode = getReduceOpcodeForAccumulator(Root.getOpcode());
+
+  for (unsigned int i = 1; i < RegistersToReduce.size(); i += 2) {
+    auto RHS = RegistersToReduce[i - 1];
+    auto LHS = RegistersToReduce[i];
+    Register Dest;
+    // If we are reducing 2 registers, reuse the original result register.
+    if (RegistersToReduce.size() == 2)
+      Dest = ResultReg;
+    // Otherwise, create a new virtual register to hold the partial sum.
+    else {
+      auto NewVR = MRI.createVirtualRegister(
+          MRI.getRegClass(Root.getOperand(0).getReg()));
+      Dest = NewVR;
+      NewRegs.push_back(Dest);
+      InstrIdxForVirtReg.insert(std::make_pair(Dest, InsInstrs.size()));
+    }
+
+    // Create the new reduction instruction.
+    MachineInstrBuilder MIB =
+        BuildMI(MF, MIMetadata(Root), TII->get(ReduceOpCode), Dest)
+            .addReg(RHS, getKillRegState(true))
+            .addReg(LHS, getKillRegState(true));
+    // Copy any flags needed from the original instruction.
+    MIB->setFlags(Root.getFlags());
+    InsInstrs.push_back(MIB);
+  }
+
+  // If the number of registers to reduce is odd, add the remaining register to
+  // the vector of registers to reduce.
+  if (RegistersToReduce.size() % 2 != 0)
+    NewRegs.push_back(RegistersToReduce[RegistersToReduce.size() - 1]);
+
+  RegistersToReduce = NewRegs;
+}
+
 // The concept of the reassociation pass is that these operations can benefit
 // from this kind of transformation:
 //
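
In isolation the reduction strategy behaves like the following self-contained
sketch, with plain integer adds standing in for the target's vector ADD
instructions; the while-loop driver in genAlternativeCodeSequence below calls
reduceAccumulatorTree repeatedly until a single register remains:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // One add per pair per round; an odd leftover carries into the next round.
  uint64_t reducePairwise(std::vector<uint64_t> Regs) {
    while (Regs.size() > 1) {
      std::vector<uint64_t> Next;
      for (std::size_t I = 1; I < Regs.size(); I += 2)
        Next.push_back(Regs[I - 1] + Regs[I]);
      if (Regs.size() % 2 != 0)
        Next.push_back(Regs.back());
      Regs = std::move(Next);
    }
    return Regs.front();
  }

With the default width of 3 this costs two adds:
{r0, r1, r2} -> {r0 + r1, r2} -> {result}.
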
@@ -938,6 +1100,8 @@ bool TargetInstrInfo::getMachineCombinerPatterns(
     }
     return true;
   }
+  if (getAccumulatorReassociationPatterns(Root, Patterns))
+    return true;
 
   return false;
 }
@@ -949,7 +1113,12 @@ bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
 
 CombinerObjective
 TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
-  return CombinerObjective::Default;
+  switch (Pattern) {
+  case MachineCombinerPattern::ACC_CHAIN:
+    return CombinerObjective::MustReduceDepth;
+  default:
+    return CombinerObjective::Default;
+  }
 }
 
 std::pair<unsigned, unsigned>
@@ -1252,19 +1421,98 @@ void TargetInstrInfo::genAlternativeCodeSequence(
     SmallVectorImpl<MachineInstr *> &DelInstrs,
     DenseMap<Register, unsigned> &InstIdxForVirtReg) const {
   MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
+  MachineBasicBlock &MBB = *Root.getParent();
+  MachineFunction &MF = *MBB.getParent();
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
 
-  // Select the previous instruction in the sequence based on the input pattern.
-  std::array<unsigned, 5> OperandIndices;
-  getReassociateOperandIndices(Root, Pattern, OperandIndices);
-  MachineInstr *Prev =
-      MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
+  switch (Pattern) {
+  case MachineCombinerPattern::REASSOC_AX_BY:
+  case MachineCombinerPattern::REASSOC_AX_YB:
+  case MachineCombinerPattern::REASSOC_XA_BY:
+  case MachineCombinerPattern::REASSOC_XA_YB: {
+    // Select the previous instruction in the sequence based on the input
+    // pattern.
+    std::array<unsigned, 5> OperandIndices;
+    getReassociateOperandIndices(Root, Pattern, OperandIndices);
+    MachineInstr *Prev =
+        MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());
+
+    // Don't reassociate if Prev and Root are in different blocks.
+    if (Prev->getParent() != Root.getParent())
+      return;
 
-  // Don't reassociate if Prev and Root are in different blocks.
-  if (Prev->getParent() != Root.getParent())
-    return;
+    reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
+                   InstIdxForVirtReg);
+    break;
+  }
+  case MachineCombinerPattern::ACC_CHAIN: {
+    SmallVector<Register, 32> ChainRegs;
+    getAccumulatorChain(&Root, ChainRegs);
+    unsigned int Depth = ChainRegs.size();
+    assert(MaxAccumulatorWidth > 1 &&
+           "Max accumulator width set to illegal value");
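+    // Cap the width by the chain depth: a chain of depth 8 uses at most
+    // Log2_32(8) = 3 parallel accumulators, even if acc-max-width is larger.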
+    unsigned int MaxWidth = Log2_32(Depth) < MaxAccumulatorWidth
+                                ? Log2_32(Depth)
+                                : MaxAccumulatorWidth;
+
+    // Walk down the chain and rewrite it as a tree.
+    for (auto IndexedReg : llvm::enumerate(llvm::reverse(ChainRegs))) {
+      // No need to rewrite the first node; it is already the start of the
+      // chain.
+      if (IndexedReg.index() == 0)
+        continue;
+
+      MachineInstr *Instr = MRI.getUniqueVRegDef(IndexedReg.value());
+      MachineInstrBuilder MIB;
+      Register AccReg;
+      if (IndexedReg.index() < MaxWidth) {
+        // The first MaxWidth nodes form the first row of the tree; rewrite
+        // them as chain-start instructions, which take no accumulator input.
+        AccReg = Instr->getOperand(0).getReg();
+        unsigned OpCode = getAccumulationStartOpcode(Root.getOpcode());
+
+        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(OpCode), AccReg)
+                  .addReg(Instr->getOperand(2).getReg(),
+                          getKillRegState(Instr->getOperand(2).isKill()))
+                  .addReg(Instr->getOperand(3).getReg(),
+                          getKillRegState(Instr->getOperand(3).isKill()));
+      } else {
+        // For the remaining nodes, we need to use an output register of one
+        // of the newly inserted instructions as operand 1.
+        AccReg = Instr->getOperand(0).getReg() == Root.getOperand(0).getReg()
+                     ? MRI.createVirtualRegister(
+                           MRI.getRegClass(Root.getOperand(0).getReg()))
+                     : Instr->getOperand(0).getReg();
+        assert(IndexedReg.index() >= MaxWidth);
+        auto AccumulatorInput =
+            ChainRegs[Depth - (IndexedReg.index() - MaxWidth) - 1];
+        MIB = BuildMI(MF, MIMetadata(*Instr), TII->get(Instr->getOpcode()),
+                      AccReg)
+                  .addReg(AccumulatorInput, getKillRegState(true))
+                  .addReg(Instr->getOperand(2).getReg(),
+                          getKillRegState(Instr->getOperand(2).isKill()))
+                  .addReg(Instr->getOperand(3).getReg(),
+                          getKillRegState(Instr->getOperand(3).isKill()));
+      }
 
-  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
-                 InstIdxForVirtReg);
+      MIB->setFlags(Instr->getFlags());
+      InstIdxForVirtReg.insert(std::make_pair(AccReg, InsInstrs.size()));
+      InsInstrs.push_back(MIB);
+      DelInstrs.push_back(Instr);
+    }
+
+    SmallVector<Register, 8> RegistersToReduce;
+    for (unsigned i = (InsInstrs.size() - MaxWidth); i < InsInstrs.size();
+         ++i) {
+      auto Reg = InsInstrs[i]->getOperand(0).getReg();
+      RegistersToReduce.push_back(Reg);
+    }
+
+    while (RegistersToReduce.size() > 1)
+      reduceAccumulatorTree(RegistersToReduce, InsInstrs, MF, Root, MRI,
+                            InstIdxForVirtReg, Root.getOperand(0).getReg());
+
+    break;
+  }
+  }
 }
 
 MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
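
For example, for the fully unrolled chain in the uaba_accumulation test added
below, the combiner now emits three independent accumulators followed by a
pairwise reduction, per the test's CHECK lines (schematically):

  uabd.4s v0, ...        // three independent chain starts
  uabd.4s v1, ...
  uabd.4s v2, ...
  uaba.4s v0, ...        // remaining accumulations spread across them
  ...
  add.4s  v0, v2, v0     // pairwise reduction of the partial sums
  add.4s  v0, v0, v1
  addv.4s ...            // final scalar reduction from the original code
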
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9f8082b64ab18..d370f8c7ff6ea 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -53,6 +53,7 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include <cassert>
+#include <cstddef>
 #include <cstdint>
 #include <iterator>
 #include <utility>
@@ -6770,6 +6771,133 @@ static bool getMaddPatterns(MachineInstr &Root,
   }
   return Found;
 }
+
+bool AArch64InstrInfo::isAccumulationOpcode(unsigned Opcode) const {
+  switch (Opcode) {
+  default:
+    break;
+  case AArch64::UABALB_ZZZ_D:
+  case AArch64::UABALB_ZZZ_H:
+  case AArch64::UABALB_ZZZ_S:
+  case AArch64::UABALT_ZZZ_D:
+  case AArch64::UABALT_ZZZ_H:
+  case AArch64::UABALT_ZZZ_S:
+  case AArch64::SABALB_ZZZ_D:
+  case AArch64::SABALB_ZZZ_S:
+  case AArch64::SABALB_ZZZ_H:
+  case AArch64::SABALT_ZZZ_D:
+  case AArch64::SABALT_ZZZ_S:
+  case AArch64::SABALT_ZZZ_H:
+  case AArch64::UABALv16i8_v8i16:
+  case AArch64::UABALv2i32_v2i64:
+  case AArch64::UABALv4i16_v4i32:
+  case AArch64::UABALv4i32_v2i64:
+  case AArch64::UABALv8i16_v4i32:
+  case AArch64::UABALv8i8_v8i16:
+  case AArch64::UABAv16i8:
+  case AArch64::UABAv2i32:
+  case AArch64::UABAv4i16:
+  case AArch64::UABAv4i32:
+  case AArch64::UABAv8i16:
+  case AArch64::UABAv8i8:
+  case AArch64::SABALv16i8_v8i16:
+  case AArch64::SABALv2i32_v2i64:
+  case AArch64::SABALv4i16_v4i32:
+  case AArch64::SABALv4i32_v2i64:
+  case AArch64::SABALv8i16_v4i32:
+  case AArch64::SABALv8i8_v8i16:
+  case AArch64::SABAv16i8:
+  case AArch64::SABAv2i32:
+  case AArch64::SABAv4i16:
+  case AArch64::SABAv4i32:
+  case AArch64::SABAv8i16:
+  case AArch64::SABAv8i8:
+    return true;
+  }
+
+  return false;
+}
+
+unsigned AArch64InstrInfo::getAccumulationStartOpcode(
+    unsigned AccumulationOpcode) const {
+  switch (AccumulationOpcode) {
+  default:
+    llvm_unreachable("Unsupported accumulation Opcode!");
+  case AArch64::UABALB_ZZZ_D:
+    return AArch64::UABDLB_ZZZ_D;
+  case AArch64::UABALB_ZZZ_H:
+    return AArch64::UABDLB_ZZZ_H;
+  case AArch64::UABALB_ZZZ_S:
+    return AArch64::UABDLB_ZZZ_S;
+  case AArch64::UABALT_ZZZ_D:
+    return AArch64::UABDLT_ZZZ_D;
+  case AArch64::UABALT_ZZZ_H:
+    return AArch64::UABDLT_ZZZ_H;
+  case AArch64::UABALT_ZZZ_S:
+    return AArch64::UABDLT_ZZZ_S;
+  case AArch64::UABALv16i8_v8i16:
+    return AArch64::UABDLv16i8_v8i16;
+  case AArch64::UABALv2i32_v2i64:
+    return AArch64::UABDLv2i32_v2i64;
+  case AArch64::UABALv4i16_v4i32:
+    return AArch64::UABDLv4i16_v4i32;
+  case AArch64::UABALv4i32_v2i64:
+    return AArch64::UABDLv4i32_v2i64;
+  case AArch64::UABALv8i16_v4i32:
+    return AArch64::UABDLv8i16_v4i32;
+  case AArch64::UABALv8i8_v8i16:
+    return AArch64::UABDLv8i8_v8i16;
+  case AArch64::UABAv16i8:
+    return AArch64::UABDv16i8;
+  case AArch64::UABAv2i32:
+    return AArch64::UABDv2i32;
+  case AArch64::UABAv4i16:
+    return AArch64::UABDv4i16;
+  case AArch64::UABAv4i32:
+    return AArch64::UABDv4i32;
+  case AArch64::UABAv8i16:
+    return AArch64::UABDv8i16;
+  case AArch64::UABAv8i8:
+    return AArch64::UABDv8i8;
+  case AArch64::SABALB_ZZZ_D:
+    return AArch64::SABDLB_ZZZ_D;
+  case AArch64::SABALB_ZZZ_S:
+    return AArch64::SABDLB_ZZZ_S;
+  case AArch64::SABALB_ZZZ_H:
+    return AArch64::SABDLB_ZZZ_H;
+  case AArch64::SABALT_ZZZ_D:
+    return AArch64::SABDLT_ZZZ_D;
+  case AArch64::SABALT_ZZZ_S:
+    return AArch64::SABDLT_ZZZ_S;
+  case AArch64::SABALT_ZZZ_H:
+    return AArch64::SABDLT_ZZZ_H;
+  case AArch64::SABALv16i8_v8i16:
+    return AArch64::SABDLv16i8_v8i16;
+  case AArch64::SABALv2i32_v2i64:
+    return AArch64::SABDLv2i32_v2i64;
+  case AArch64::SABALv4i16_v4i32:
+    return AArch64::SABDLv4i16_v4i32;
+  case AArch64::SABALv4i32_v2i64:
+    return AArch64::SABDLv4i32_v2i64;
+  case AArch64::SABALv8i16_v4i32:
+    return AArch64::SABDLv8i16_v4i32;
+  case AArch64::SABALv8i8_v8i16:
+    return AArch64::SABDLv8i8_v8i16;
+  case AArch64::SABAv16i8:
+    return AArch64::SABDv16i8;
+  case AArch64::SABAv2i32:
+    return AArch64::SABDv2i32;
+  case AArch64::SABAv4i16:
+    return AArch64::SABDv4i16;
+  case AArch64::SABAv4i32:
+    return AArch64::SABDv4i32;
+  case AArch64::SABAv8i16:
+    return AArch64::SABDv8i16;
+  case AArch64::SABAv8i8:
+    return AArch64::SABDv8i8;
+  }
+}
+
 /// Floating-Point Support
 
 /// Find instructions that can be turned into madd.
@@ -7531,6 +7659,63 @@ static void genSubAdd2SubSub(MachineFunction &MF, MachineRegisterInfo &MRI,
   DelInstrs.push_back(&Root);
 }
 
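+// The reduction step adds two partial accumulators together, so each
+// accumulation opcode maps to the plain vector ADD whose element type matches
+// the accumulator, e.g. UABALv8i8_v8i16 accumulates into 8 x i16 and is
+// reduced with ADDv8i16.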
+unsigned AArch64InstrInfo::getReduceOpcodeForAccumulator(
+    unsigned int AccumulatorOpCode) const {
+  switch (AccumulatorOpCode) {
+  case AArch64::UABALB_ZZZ_D:
+  case AArch64::SABALB_ZZZ_D:
+  case AArch64::UABALT_ZZZ_D:
+  case AArch64::SABALT_ZZZ_D:
+    return AArch64::ADD_ZZZ_D;
+  case AArch64::UABALB_ZZZ_H:
+  case AArch64::SABALB_ZZZ_H:
+  case AArch64::UABALT_ZZZ_H:
+  case AArch64::SABALT_ZZZ_H:
+    return AArch64::ADD_ZZZ_H;
+  case AArch64::UABALB_ZZZ_S:
+  case AArch64::SABALB_ZZZ_S:
+  case AArch64::UABALT_ZZZ_S:
+  case AArch64::SABALT_ZZZ_S:
+    return AArch64::ADD_ZZZ_S;
+  case AArch64::UABALv16i8_v8i16:
+  case AArch64::SABALv8i8_v8i16:
+  case AArch64::SABAv8i16:
+  case AArch64::UABAv8i16:
+    return AArch64::ADDv8i16;
+  case AArch64::SABALv2i32_v2i64:
+  case AArch64::UABALv2i32_v2i64:
+  case AArch64::SABALv4i32_v2i64:
+    return AArch64::ADDv2i64;
+  case AArch64::UABALv4i16_v4i32:
+  case AArch64::SABALv4i16_v4i32:
+  case AArch64::SABALv8i16_v4i32:
+  case AArch64::SABAv4i32:
+  case AArch64::UABAv4i32:
+    return AArch64::ADDv4i32;
+  case AArch64::UABALv4i32_v2i64:
+    return AArch64::ADDv2i64;
+  case AArch64::UABALv8i16_v4i32:
+    return AArch64::ADDv4i32;
+  case AArch64::UABALv8i8_v8i16:
+  case AArch64::SABALv16i8_v8i16:
+    return AArch64::ADDv8i16;
+  case AArch64::UABAv16i8:
+  case AArch64::SABAv16i8:
+    return AArch64::ADDv16i8;
+  case AArch64::UABAv4i16:
+  case AArch64::SABAv4i16:
+    return AArch64::ADDv4i16;
+  case AArch64::UABAv2i32:
+  case AArch64::SABAv2i32:
+    return AArch64::ADDv2i32;
+  case AArch64::UABAv8i8:
+  case AArch64::SABAv8i8:
+    return AArch64::ADDv8i8;
+  default:
+    llvm_unreachable("Unknown accumulator opcode");
+  }
+}
+
 /// When getMachineCombinerPatterns() finds potential patterns,
 /// this function generates the instructions that could replace the
 /// original code sequence
@@ -7766,7 +7951,6 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
     MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
     break;
   }
-
   case AArch64MachineCombinerPattern::MULADDv8i8_OP1:
     Opc = AArch64::MLAv8i8;
     RC = &AArch64::FPR64RegClass;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 6503d105c82cc..b3d3ec1455c8b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -448,8 +448,20 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {
   /// be checked.
   bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                    bool Invert) const override;
-  /// When getMachineCombinerPatterns() finds patterns, this function generates
-  /// the instructions that could replace the original code sequence
+
+  /// Returns true if \P Opcode is an instruction which performs accumulation
+  /// into a destination register.
+  bool isAccumulationOpcode(unsigned Opcode) const override;
+
+  /// Returns an opcode which defines the accumulator used by \P Opcode.
+  unsigned getAccumulationStartOpcode(unsigned Opcode) const override;
+
+  unsigned
+  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;
+
+  /// When getMachineCombinerPatterns() finds patterns, this function
+  /// generates the instructions that could replace the original code
+  /// sequence.
   void genAlternativeCodeSequence(
       MachineInstr &Root, unsigned Pattern,
       SmallVectorImpl<MachineInstr *> &InsInstrs,
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
new file mode 100644
index 0000000000000..0d4c053551011
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators-sve.ll
@@ -0,0 +1,426 @@
+; RUN: opt -passes=loop-unroll %s -o - | llc -O3 - -mtriple=aarch64-unknown-unknown -mcpu=neoverse-v2 -o - | FileCheck %s
+
+define i64 @sabalb_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalb_i32_to_i64_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlb
+; CHECK: sabalb z0.d
+; CHECK: sabalb z1.d
+; CHECK: sabalb z2.d
+; CHECK: add	z0.d, z2.d, z0.d
+; CHECK: add	z0.d, z0.d, z1.d
+; CHECK: uaddv	d0, p0, z0.d
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
+  %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %acc_phi,
+                                                                       <vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
+  ret i64 %reduce
+}
+
+declare <vscale x  2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
+
+define i32 @sabalb_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalb_i16_to_i32_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlb
+; CHECK: sabalb z0.s
+; CHECK: sabalb z1.s
+; CHECK: sabalb z2.s
+; CHECK: add	z0.s, z2.s, z0.s
+; CHECK: add	z0.s, z0.s, z1.s
+; CHECK: uaddv	d0, p0, z0.s
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
+  %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %acc_phi,
+                                                                       <vscale x 8 x i16> %a,
+                                                                       <vscale x 8 x i16> %b)
+  
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
+
+define i16 @sabalb_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalb_i8_to_i16_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlb
+; CHECK: sabalb z0.h
+; CHECK: sabalb z1.h
+; CHECK: sabalb z2.h
+; CHECK: add	z0.h, z2.h, z0.h
+; CHECK: add	z0.h, z0.h, z1.h
+; CHECK: uaddv	d0, p0, z0.h
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
+  %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %acc_phi,
+                                                                       <vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 256
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16>)
+
+define i64 @sabalt_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalt_i32_to_i64_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlt
+; CHECK: sabalt z0.d
+; CHECK: sabalt z1.d
+; CHECK: sabalt z2.d
+; CHECK: add	z0.d, z2.d, z0.d
+; CHECK: add	z0.d, z0.d, z1.d
+; CHECK: uaddv	d0, p0, z0.d
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
+  %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %acc_phi,
+                                                                       <vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
+  ret i64 %reduce
+}
+
+declare <vscale x  2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define i32 @sabalt_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalt_i16_to_i32_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlt
+; CHECK: sabalt z0.s
+; CHECK: sabalt z1.s
+; CHECK: sabalt z2.s
+; CHECK: add	z0.s, z2.s, z0.s
+; CHECK: add	z0.s, z0.s, z1.s
+; CHECK: uaddv	d0, p0, z0.s
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
+  %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %acc_phi,
+                                                                       <vscale x 8 x i16> %a,
+                                                                       <vscale x 8 x i16> %b)
+  
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define i16 @sabalt_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabalt_i8_to_i16_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabdlt
+; CHECK: sabalt z0.h
+; CHECK: sabalt z1.h
+; CHECK: sabalt z2.h
+; CHECK: add	z0.h, z2.h, z0.h
+; CHECK: add	z0.h, z0.h, z1.h
+; CHECK: uaddv	d0, p0, z0.h
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
+  %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %acc_phi,
+                                                                       <vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 256
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define i64 @uabalb_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalb_i32_to_i64_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlb
+; CHECK: uabalb z0.d
+; CHECK: uabalb z1.d
+; CHECK: uabalb z2.d
+; CHECK: add	z0.d, z2.d, z0.d
+; CHECK: add	z0.d, z0.d, z1.d
+; CHECK: uaddv	d0, p0, z0.d
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
+  %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %acc_phi,
+                                                                       <vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
+  ret i64 %reduce
+}
+
+declare <vscale x  2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define i32 @uabalb_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalb_i16_to_i32_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlb
+; CHECK: uabalb z0.s
+; CHECK: uabalb z1.s
+; CHECK: uabalb z2.s
+; CHECK: add	z0.s, z2.s, z0.s
+; CHECK: add	z0.s, z0.s, z1.s
+; CHECK: uaddv	d0, p0, z0.s
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
+  %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %acc_phi,
+                                                                       <vscale x 8 x i16> %a,
+                                                                       <vscale x 8 x i16> %b)
+  
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define i16 @uabalb_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalb_i8_to_i16_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlb
+; CHECK: uabalb z0.h
+; CHECK: uabalb z1.h
+; CHECK: uabalb z2.h
+; CHECK: add	z0.h, z2.h, z0.h
+; CHECK: add	z0.h, z0.h, z1.h
+; CHECK: uaddv	d0, p0, z0.h
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
+  %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %acc_phi,
+                                                                       <vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 256
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define i64 @uabalt_i32_to_i64_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalt_i32_to_i64_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlt
+; CHECK: uabalt z0.d
+; CHECK: uabalt z1.d
+; CHECK: uabalt z2.d
+; CHECK: add	z0.d, z2.d, z0.d
+; CHECK: add	z0.d, z0.d, z1.d
+; CHECK: uaddv	d0, p0, z0.d
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 2 x i64> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <vscale x 4 x i32>, ptr %ptr1_i, align 1
+  %b = load <vscale x 4 x i32>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64> %acc_phi,
+                                                                       <vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %acc_next)
+  ret i64 %reduce
+}
+
+declare <vscale x  2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+
+define i32 @uabalt_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalt_i16_to_i32_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlt
+; CHECK: uabalt z0.s
+; CHECK: uabalt z1.s
+; CHECK: uabalt z2.s
+; CHECK: add	z0.s, z2.s, z0.s
+; CHECK: add	z0.s, z0.s, z1.s
+; CHECK: uaddv	d0, p0, z0.s
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <vscale x 8 x i16>, ptr %ptr1_i, align 1
+  %b = load <vscale x 8 x i16>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32> %acc_phi,
+                                                                       <vscale x 8 x i16> %a,
+                                                                       <vscale x 8 x i16> %b)
+  
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+
+define i16 @uabalt_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalt_i8_to_i16_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlt
+; CHECK: uabalt z0.h
+; CHECK: uabalt z1.h
+; CHECK: uabalt z2.h
+; CHECK: add	z0.h, z2.h, z0.h
+; CHECK: add	z0.h, z0.h, z1.h
+; CHECK: uaddv	d0, p0, z0.h
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
+  %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
+  %acc_next = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %acc_phi,
+                                                                       <vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 256
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+define i16 @uabalt_and_uabalb_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: uabalt_and_uabalb_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: uabdlt
+; CHECK: uabdlb
+; CHECK: uabalt z0.h
+; CHECK: uabalt z2.h
+; CHECK: uabalt z4.h
+; CHECK: uabalb z1.h
+; CHECK: uabalb z6.h
+; CHECK: uabalb z5.h
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_hi_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next_hi, %loop ]
+  %acc_lo_phi = phi <vscale x 8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <vscale x 16 x i8>, ptr %ptr1_i, align 1
+  %b = load <vscale x 16 x i8>, ptr %ptr2_i, align 1
+  %acc_next_lo = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %acc_lo_phi,
+                                                                         <vscale x 16 x i8> %a,
+                                                                         <vscale x 16 x i8> %b)
+  %acc_next_hi = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %acc_hi_phi,
+                                                                         <vscale x 16 x i8> %a,
+                                                                         <vscale x 16 x i8> %b)
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 256
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %mask = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %acc_next = tail call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %acc_next_lo, <vscale x 8 x i16> %acc_next_hi)
+  %reduce = tail call i16 @llvm.vector.reduce.add.nxv8i16(<vscale x 8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
\ No newline at end of file
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
new file mode 100644
index 0000000000000..86150a8d3d3ce
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
@@ -0,0 +1,530 @@
+; RUN: opt -passes=loop-unroll %s -o - | llc -O3 - -mtriple=arm64e-apple-darwin -o - | FileCheck %s
+
+
+define i16 @sabal_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+  ; CHECK-LABEL: sabal_i8_to_i16_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: sabdl.8h v1
+; CHECK: sabdl.8h v0
+; CHECK: sabdl.8h v2
+; CHECK: sabal.8h v1
+; CHECK: sabal.8h v0
+; CHECK: sabal.8h v2
+; CHECK: sabal.8h v1
+; CHECK: sabal.8h v0
+; CHECK: add.8h v1, v2, v1
+; CHECK: add.8h v0, v1, v0
+; CHECK: addv.8h
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
+  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+  %vabd_ext = zext <8 x i8> %vabd to <8 x i16>
+  %acc_next = add <8 x i16> %vabd_ext, %acc_phi
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+; Declare the signed absolute difference intrinsic
+declare <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8>, <8 x i8>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+
+
+define i32 @sabal_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: sabal_i16_to_i32_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: sabdl.4s v1
+; CHECK: sabdl.4s v0
+; CHECK: sabdl.4s v2
+; CHECK: sabal.4s v1
+; CHECK: sabal.4s v0
+; CHECK: sabal.4s v2
+; CHECK: sabal.4s v1
+; CHECK: sabal.4s v0
+; CHECK: add.4s v1, v2, v1
+; CHECK: add.4s v0, v1, v0
+; CHECK: addv.4s
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
+  %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+  %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
+  %vmov = zext <4 x i16> %vabd to <4 x i32>
+  %acc_next = add <4 x i32> %vmov, %acc_phi
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 32
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16>, <4 x i16>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+
+define i16 @uabal2_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uabal2_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK:  uabdl2.8h       v4
+; CHECK:  uabdl.8h        v1 
+; CHECK:  uabdl2.8h       v24
+; CHECK:  uabdl2.8h       v25
+; CHECK:  uabal2.8h       v4 
+; CHECK:  uabal2.8h       v24
+; CHECK:  uabal2.8h       v25
+; CHECK:  uabal2.8h       v4
+; CHECK:  uabal2.8h       v24
+; CHECK:  add.8h          v4, v25, v4
+; CHECK:  add.8h          v4, v4, v24
+; CHECK:  uabdl.8h        v0
+; CHECK:  uabdl.8h        v2
+; CHECK:  uabal.8h        v1
+; CHECK:  uabal.8h        v0
+; CHECK:  uabal.8h        v2
+; CHECK:  uabal.8h        v1
+; CHECK:  uabal.8h        v0
+; CHECK:  add.8h          v1, v2, v1
+; CHECK:  add.8h          v0, v1, v0
+; CHECK:  add.8h          v0, v4, v0
+; CHECK:  addv.8h         h0, v0
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi_hi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_hi, %loop ]
+  %acc_phi_lo = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
+  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %a_hi = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %b_hi = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %a_lo = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %b_lo = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %vabd_hi = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a_hi, <8 x i8> %b_hi)
+  %vabd_lo = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a_lo, <8 x i8> %b_lo)
+  %vmov_hi = zext <8 x i8> %vabd_hi to <8 x i16>
+  %vmov_lo = zext <8 x i8> %vabd_lo to <8 x i16>
+  %acc_next_hi = add <8 x i16> %vmov_hi, %acc_phi_hi
+  %acc_next_lo = add <8 x i16> %vmov_lo, %acc_phi_lo
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %hi_plus_lo = add <8 x i16> %acc_next_hi, %acc_next_lo
+  %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %hi_plus_lo)
+  ret i16 %reduce
+}
+
+define i32 @uaba_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uaba_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabd.4s v0
+; CHECK: uabd.4s v1
+; CHECK: uabd.4s v2
+; CHECK: uaba.4s v0
+; CHECK: uaba.4s v1
+; CHECK: uaba.4s v2
+; CHECK: uaba.4s v0
+; CHECK: uaba.4s v1
+; CHECK: add.4s v0, v2, v0
+; CHECK: add.4s v0, v0, v1
+; CHECK: addv.4s 
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
+  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  %vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
+  %acc_next = add <4 x i32> %acc_phi, %vabd
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 32
+  br i1 %cmp, label %loop, label %exit
+exit:
+
+  %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define i32 @saba_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: saba_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: sabd.4s v0
+; CHECK: sabd.4s v1
+; CHECK: sabd.4s v2
+; CHECK: saba.4s v0
+; CHECK: saba.4s v1
+; CHECK: saba.4s v2
+; CHECK: saba.4s v0
+; CHECK: saba.4s v1
+; CHECK: add.4s v0, v2, v0
+; CHECK: add.4s v0, v0, v1
+; CHECK: addv.4s
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  ; Load values from ptr1 and ptr2
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
+  %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+  ; Perform the intrinsic operation
+  %vabd = tail call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a, <4 x i32> %b)
+  %acc_next = add <4 x i32> %acc_phi, %vabd
+  ; Increment loop counter and check the bound
+  %next_i = add i32 %i, 4 
+  %cmp = icmp slt i32 %next_i, 32  
+  br i1 %cmp, label %loop, label %exit
+
+exit:                            
+  %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+declare <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+define i32 @uaba_v2i32_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uaba_v2i32_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabd.2s v0
+; CHECK: uabd.2s v1
+; CHECK: uabd.2s v2
+; CHECK: uaba.2s v0
+; CHECK: uaba.2s v1
+; CHECK: uaba.2s v2
+; CHECK: uaba.2s v0
+; CHECK: uaba.2s v1
+; CHECK: add.2s v0, v2, v0
+; CHECK: add.2s v0, v0, v1
+; CHECK: addp.2s
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
+  %a = load <2 x i32>, <2 x i32>* %ptr1_i, align 1
+  %b = load <2 x i32>, <2 x i32>* %ptr2_i, align 1
+  %vabd = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
+  %acc_next = add <2 x i32> %acc_phi, %vabd
+  %next_i = add i32 %i, 2
+  %cmp = icmp slt i32 %next_i, 16
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %acc_next)
+  ret i32 %reduce
+}
+
+define i8 @uaba_v8i8_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uaba_v8i8_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabd.8b v0
+; CHECK: uabd.8b v1
+; CHECK: uabd.8b v2
+; CHECK: uaba.8b v0
+; CHECK: uaba.8b v1
+; CHECK: uaba.8b v2
+; CHECK: uaba.8b v0
+; CHECK: uaba.8b v1
+; CHECK: add.8b v0, v2, v0
+; CHECK: add.8b v0, v0, v1
+; CHECK: addv.8b
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
+  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+  %acc_next = add <8 x i8> %acc_phi, %vabd
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %acc_next)
+  ret i8 %reduce
+}
+
+define i8 @uaba_v16i8_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uaba_v16i8_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabd.16b v0
+; CHECK: uabd.16b v1
+; CHECK: uabd.16b v2
+; CHECK: uaba.16b v0
+; CHECK: uaba.16b v1
+; CHECK: uaba.16b v2
+; CHECK: uaba.16b v0
+; CHECK: uaba.16b v1
+; CHECK: add.16b v0, v2, v0
+; CHECK: add.16b v0, v0, v1
+; CHECK: addv.16b
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <16 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
+  %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+  %vabd = tail call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %acc_next = add <16 x i8> %acc_phi, %vabd
+  %next_i = add i32 %i, 16
+  %cmp = icmp slt i32 %next_i, 128
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %acc_next)
+  ret i8 %reduce
+}
+
+define i16 @uaba_v8i16_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uaba_v8i16_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabd.8h v0
+; CHECK: uabd.8h v1
+; CHECK: uabd.8h v2
+; CHECK: uaba.8h v0
+; CHECK: uaba.8h v1
+; CHECK: uaba.8h v2
+; CHECK: uaba.8h v0
+; CHECK: uaba.8h v1
+; CHECK: add.8h v0, v2, v0
+; CHECK: add.8h v0, v0, v1
+; CHECK: addv.8h
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
+  %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+  %vabd = tail call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %acc_next = add <8 x i16> %acc_phi, %vabd
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+define i8 @saba_v8i8_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: saba_v8i8_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: sabd.8b v0
+; CHECK: sabd.8b v1
+; CHECK: sabd.8b v2
+; CHECK: saba.8b v0
+; CHECK: saba.8b v1
+; CHECK: saba.8b v2
+; CHECK: saba.8b v0
+; CHECK: saba.8b v1
+; CHECK: add.8b v0, v2, v0
+; CHECK: add.8b v0, v0, v1
+; CHECK: addv.8b
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
+  %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+  %vabd = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+  %acc_next = add <8 x i8> %acc_phi, %vabd
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %acc_next)
+  ret i8 %reduce
+}
+
+define i16 @saba_v4i16_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: saba_v4i16_accumulation
+entry:
+  br label %loop
+loop:
+; CHECK: sabd.4h v0
+; CHECK: sabd.4h v1
+; CHECK: sabd.4h v2
+; CHECK: saba.4h v0
+; CHECK: saba.4h v1
+; CHECK: saba.4h v2
+; CHECK: saba.4h v0
+; CHECK: saba.4h v1
+; CHECK: add.4h v0, v2, v0
+; CHECK: add.4h v0, v0, v1
+; CHECK: addv.4h
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <4 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
+  %vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
+  %acc_next = add <4 x i16> %acc_phi, %vabd
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 32
+  br i1 %cmp, label %loop, label %exit
+exit:
+  %reduce = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+define i16 @saba_v8i16_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: saba_v8i16_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: sabd.8h v0
+; CHECK: sabd.8h v1
+; CHECK: sabd.8h v2
+; CHECK: saba.8h v0
+; CHECK: saba.8h v1
+; CHECK: saba.8h v2
+; CHECK: saba.8h v0
+; CHECK: saba.8h v1
+; CHECK: add.8h v0, v2, v0
+; CHECK: add.8h v0, v0, v1
+; CHECK: addv.8h
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <8 x i16>, ptr %ptr1_i, align 1
+  %b = load <8 x i16>, ptr %ptr2_i, align 1
+  %vabd = tail call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %acc_next = add <8 x i16> %acc_phi, %vabd
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+define i16 @uabal_i8_to_i16_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uabal_i8_to_i16_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabdl.8h v1
+; CHECK: uabdl.8h v0
+; CHECK: uabdl.8h v2
+; CHECK: uabal.8h v1
+; CHECK: uabal.8h v0
+; CHECK: uabal.8h v2
+; CHECK: uabal.8h v1
+; CHECK: uabal.8h v0
+; CHECK: add.8h v1, v2, v1
+; CHECK: add.8h v0, v1, v0
+; CHECK: addv.8h
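+; The absolute difference is zero-extended before the accumulation, which is
+; what selects to the widening UABDL/UABAL forms instead of UABA.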
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
+  %a = load <8 x i8>, ptr %ptr1_i, align 1
+  %b = load <8 x i8>, ptr %ptr2_i, align 1
+  %vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
+  %vmov = zext <8 x i8> %vabd to <8 x i16>
+  %acc_next = add <8 x i16> %vmov, %acc_phi
+  %next_i = add i32 %i, 8
+  %cmp = icmp slt i32 %next_i, 64
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc_next)
+  ret i16 %reduce
+}
+
+define i32 @uabal_i16_to_i32_accumulation(ptr %ptr1, ptr %ptr2) {
+; CHECK-LABEL: uabal_i16_to_i32_accumulation
+entry:
+  br label %loop
+
+loop:
+; CHECK: uabdl.4s v1
+; CHECK: uabdl.4s v0
+; CHECK: uabdl.4s v2
+; CHECK: uabal.4s v1
+; CHECK: uabal.4s v0
+; CHECK: uabal.4s v2
+; CHECK: uabal.4s v1
+; CHECK: uabal.4s v0
+; CHECK: add.4s v1, v2, v1
+; CHECK: add.4s v0, v1, v0
+; CHECK: addv.4s
+
+  %i = phi i32 [ 0, %entry ], [ %next_i, %loop ]
+  %acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
+  %ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
+  %ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
+  %a = load <4 x i16>, ptr %ptr1_i, align 1
+  %b = load <4 x i16>, ptr %ptr2_i, align 1
+  %vabd = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
+  %vmov = zext <4 x i16> %vabd to <4 x i32>
+  %acc_next = add <4 x i32> %vmov, %acc_phi
+  %next_i = add i32 %i, 4
+  %cmp = icmp slt i32 %next_i, 32
+  br i1 %cmp, label %loop, label %exit
+
+exit:
+  %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc_next)
+  ret i32 %reduce
+}
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-reassociate-accumulators.mir b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate-accumulators.mir
new file mode 100644
index 0000000000000..888c4027bc2bc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-reassociate-accumulators.mir
@@ -0,0 +1,181 @@
+# RUN: llc -run-pass=machine-combiner -mtriple=arm64-unknown-unknown %s -o - | FileCheck %s
+# RUN: llc -run-pass=machine-combiner -mtriple=arm64-unknown-unknown -acc-max-width=2 %s -o - | FileCheck %s --check-prefix=NARROW-TREE
+# RUN: llc -run-pass=machine-combiner -mtriple=arm64-unknown-unknown -acc-min-depth=100 %s -o - | FileCheck %s --check-prefix=NO-TREE
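+#
+# Judging by the checks below, -acc-max-width appears to cap the number of
+# parallel accumulator branches, and -acc-min-depth appears to set the
+# minimum chain length before the pass will split a chain into a tree.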
+
+# A chain of UABAL instructions that can be reassociated for better ILP.
+# Before the optimization, we accumulate in a single long chain.
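+# Conceptually (a sketch, not literal CHECK output), the serial chain
+#   acc = UABAL(...UABAL(UABAL(UABDL(a0, b0), a1, b1), a2, b2)..., a8, b8)
+# becomes three independent sub-chains that are reduced at the end:
+#   t0 = UABDL(...); t0 = UABAL(t0, ...); t0 = UABAL(t0, ...)
+#   t1 = UABDL(...); t1 = UABAL(t1, ...); t1 = UABAL(t1, ...)
+#   t2 = UABDL(...); t2 = UABAL(t2, ...); t2 = UABAL(t2, ...)
+#   sum = ADDV(ADD(ADD(t0, t1), t2))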
+# CHECK-LABEL: name:  uabal_accumulation
+# CHECK:          [[START1:%.*]]:fpr128 = UABDLv4i16_v4i32
+# CHECK:          [[START2:%.*]]:fpr128 = UABDLv4i16_v4i32
+# CHECK:          [[START3:%.*]]:fpr128 = UABDLv4i16_v4i32
+# CHECK:          [[A1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START1]] 
+# CHECK:          [[B1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START2]] 
+# CHECK:          [[C1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START3]] 
+# CHECK:          [[A2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A1]] 
+# CHECK:          [[B2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[B1]] 
+# CHECK:          [[C2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[C1]] 
+# CHECK:          [[PARTIAL_SUM:%.*]]:fpr128 = ADDv4i32 killed [[A2]], killed [[B2]]
+# CHECK:          [[TOTAL_SUM:%.*]]:fpr128 = ADDv4i32 killed [[PARTIAL_SUM]], killed [[C2]]
+# CHECK:          [[END:%.*]]:fpr32 = ADDVv4i32v killed [[TOTAL_SUM]]
+
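+# With -acc-max-width=2, the same chain is split into only two branches: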
+# NARROW-TREE:    [[START1:%.*]]:fpr128 = UABDLv4i16_v4i32
+# NARROW-TREE:    [[START2:%.*]]:fpr128 = UABDLv4i16_v4i32          
+# NARROW-TREE:    [[A1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START1]]
+# NARROW-TREE:    [[B1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START2]]
+# NARROW-TREE:    [[A2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A1]]              
+# NARROW-TREE:    [[B2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[B1]]
+# NARROW-TREE:    [[A3:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A2]]
+# NARROW-TREE:    [[B3:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[B2]]
+# NARROW-TREE:    [[A4:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A3]]
+# NARROW-TREE:    [[PARTIAL_SUM:%.*]]:fpr128 = ADDv4i32 killed [[B3]], killed [[A4]]
+# NARROW-TREE:    [[END:%.*]]:fpr32 = ADDVv4i32v killed [[PARTIAL_SUM]]  
+
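+# With -acc-min-depth=100, the chain is considered too short to be worth
+# splitting, so the original serial chain is kept: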
+# NO-TREE:        [[START1:%.*]]:fpr128 = UABDLv4i16_v4i32
+# NO-TREE:        [[A1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START1]]
+# NO-TREE:        [[A2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A1]]
+# NO-TREE:        [[A3:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A2]]
+# NO-TREE:        [[A4:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A3]]              
+# NO-TREE:        [[A5:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A4]]
+# NO-TREE:        [[A6:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A5]]
+# NO-TREE:        [[A7:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A6]]
+# NO-TREE:        [[A8:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A7]]
+# NO-TREE:        [[END:%.*]]:fpr32 = ADDVv4i32v killed [[A8]]
+
+---
+name:            uabal_accumulation
+body:             |
+  bb.0.entry:
+    liveins: $x0, $x1, $x2, $x3
+  
+    %3:gpr64 = COPY $x3
+    %2:gpr64common = COPY $x2
+    %1:gpr64 = COPY $x1
+    %0:gpr64common = COPY $x0
+    %4:fpr64 = LDRDui %0, 0 :: (load (s64))
+    %5:fpr64 = LDRDui %2, 0 :: (load (s64))
+    %6:gpr64common = ADDXrr %0, %1
+    %7:gpr64common = ADDXrr %2, %3
+    %8:fpr64 = LDRDui %6, 0 :: (load (s64))
+    %9:fpr64 = LDRDui %7, 0 :: (load (s64))
+    %10:fpr128 = UABDLv4i16_v4i32 killed %8, killed %9
+    %11:fpr128 = UABALv4i16_v4i32 killed %10, killed %4, killed %5
+    %12:gpr64common = ADDXrr %6, %1
+    %13:gpr64common = ADDXrr %7, %3
+    %14:fpr64 = LDRDui %12, 0 :: (load (s64))
+    %15:fpr64 = LDRDui %13, 0 :: (load (s64))
+    %16:fpr128 = UABALv4i16_v4i32 killed %11, killed %14, killed %15
+    %17:gpr64common = ADDXrr %12, %1
+    %18:gpr64common = ADDXrr %13, %3
+    %19:fpr64 = LDRDui %17, 0 :: (load (s64))
+    %20:fpr64 = LDRDui %18, 0 :: (load (s64))
+    %21:fpr128 = UABALv4i16_v4i32 killed %16, killed %19, killed %20
+    %22:gpr64common = ADDXrr %17, %1
+    %23:gpr64common = ADDXrr %18, %3
+    %24:fpr64 = LDRDui %22, 0 :: (load (s64))
+    %25:fpr64 = LDRDui %23, 0 :: (load (s64))
+    %26:fpr128 = UABALv4i16_v4i32 killed %21, killed %24, killed %25
+    %27:gpr64common = ADDXrr %22, %1
+    %28:gpr64common = ADDXrr %23, %3
+    %29:fpr64 = LDRDui %27, 0 :: (load (s64))
+    %30:fpr64 = LDRDui %28, 0 :: (load (s64))
+    %31:fpr128 = UABALv4i16_v4i32 killed %26, killed %29, killed %30
+    %32:gpr64common = ADDXrr %27, %1
+    %33:gpr64common = ADDXrr %28, %3
+    %34:fpr64 = LDRDui %32, 0 :: (load (s64))
+    %35:fpr64 = LDRDui %33, 0 :: (load (s64))
+    %36:fpr128 = UABALv4i16_v4i32 killed %31, killed %34, killed %35
+    %37:gpr64common = ADDXrr %32, %1
+    %38:gpr64common = ADDXrr %33, %3
+    %39:fpr64 = LDRDui %37, 0 :: (load (s64))
+    %40:fpr64 = LDRDui %38, 0 :: (load (s64))
+    %41:fpr128 = UABALv4i16_v4i32 killed %36, killed %39, killed %40
+    %42:gpr64common = ADDXrr %37, %1
+    %43:gpr64common = ADDXrr %38, %3
+    %44:fpr64 = LDRDui %42, 0 :: (load (s64))
+    %45:fpr64 = LDRDui %43, 0 :: (load (s64))
+    %46:fpr128 = UABALv4i16_v4i32 killed %41, killed %44, killed %45
+    %47:fpr32 = ADDVv4i32v killed %46
+    %48:fpr128 = IMPLICIT_DEF
+    %49:fpr128 = INSERT_SUBREG %48, killed %47, %subreg.ssub
+    %50:gpr32all = COPY %49.ssub
+    $w0 = COPY %50
+    RET_ReallyLR implicit $w0
+...
+
+# Check that the tree is still built when the instruction at the top of the
+# chain is not a recognized accumulation-start instruction: the ADDv4i32 that
+# begins the chain simply becomes the start of the first branch.
+# CHECK-LABEL: name:  uabal_accumulation_with_different_start
+# CHECK:          [[START1:%.*]]:fpr128 = ADDv4i32
+# CHECK:          [[START2:%.*]]:fpr128 = UABDLv4i16_v4i32
+# CHECK:          [[START3:%.*]]:fpr128 = UABDLv4i16_v4i32
+# CHECK:          [[A1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START1]]
+# CHECK:          [[B1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START2]]
+# CHECK:          [[C1:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[START3]]
+# CHECK:          [[A2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[A1]]
+# CHECK:          [[B2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[B1]]
+# CHECK:          [[C2:%.*]]:fpr128 = UABALv4i16_v4i32 killed [[C1]]
+# CHECK:          [[PARTIAL_SUM:%.*]]:fpr128 = ADDv4i32 killed [[A2]], killed [[B2]]
+# CHECK:          [[TOTAL_SUM:%.*]]:fpr128 = ADDv4i32 killed [[PARTIAL_SUM]], killed [[C2]]
+# CHECK:          [[END:%.*]]:fpr32 = ADDVv4i32v killed [[TOTAL_SUM]]
+
+---
+name:            uabal_accumulation_with_different_start
+body:             |
+  bb.0.entry:
+    liveins: $x0, $x1, $x2, $x3
+  
+    %3:gpr64 = COPY $x3
+    %2:gpr64common = COPY $x2
+    %1:gpr64 = COPY $x1
+    %0:gpr64common = COPY $x0
+    %4:fpr64 = LDRDui %0, 0 :: (load (s64))
+    %5:fpr64 = LDRDui %2, 0 :: (load (s64))
+    %6:gpr64common = ADDXrr %0, %1
+    %7:gpr64common = ADDXrr %2, %3
+    %8:fpr128 = LDRQui %6, 0 :: (load (s128))
+    %9:fpr128 = LDRQui %7, 0 :: (load (s128))
+    %10:fpr128 = ADDv4i32 killed %8, killed %9
+    %11:fpr128 = UABALv4i16_v4i32 killed %10, killed %4, killed %5
+    %12:gpr64common = ADDXrr %6, %1
+    %13:gpr64common = ADDXrr %7, %3
+    %14:fpr64 = LDRDui %12, 0 :: (load (s64))
+    %15:fpr64 = LDRDui %13, 0 :: (load (s64))
+    %16:fpr128 = UABALv4i16_v4i32 killed %11, killed %14, killed %15
+    %17:gpr64common = ADDXrr %12, %1
+    %18:gpr64common = ADDXrr %13, %3
+    %19:fpr64 = LDRDui %17, 0 :: (load (s64))
+    %20:fpr64 = LDRDui %18, 0 :: (load (s64))
+    %21:fpr128 = UABALv4i16_v4i32 killed %16, killed %19, killed %20
+    %22:gpr64common = ADDXrr %17, %1
+    %23:gpr64common = ADDXrr %18, %3
+    %24:fpr64 = LDRDui %22, 0 :: (load (s64))
+    %25:fpr64 = LDRDui %23, 0 :: (load (s64))
+    %26:fpr128 = UABALv4i16_v4i32 killed %21, killed %24, killed %25
+    %27:gpr64common = ADDXrr %22, %1
+    %28:gpr64common = ADDXrr %23, %3
+    %29:fpr64 = LDRDui %27, 0 :: (load (s64))
+    %30:fpr64 = LDRDui %28, 0 :: (load (s64))
+    %31:fpr128 = UABALv4i16_v4i32 killed %26, killed %29, killed %30
+    %32:gpr64common = ADDXrr %27, %1
+    %33:gpr64common = ADDXrr %28, %3
+    %34:fpr64 = LDRDui %32, 0 :: (load (s64))
+    %35:fpr64 = LDRDui %33, 0 :: (load (s64))
+    %36:fpr128 = UABALv4i16_v4i32 killed %31, killed %34, killed %35
+    %37:gpr64common = ADDXrr %32, %1
+    %38:gpr64common = ADDXrr %33, %3
+    %39:fpr64 = LDRDui %37, 0 :: (load (s64))
+    %40:fpr64 = LDRDui %38, 0 :: (load (s64))
+    %41:fpr128 = UABALv4i16_v4i32 killed %36, killed %39, killed %40
+    %42:gpr64common = ADDXrr %37, %1
+    %43:gpr64common = ADDXrr %38, %3
+    %44:fpr64 = LDRDui %42, 0 :: (load (s64))
+    %45:fpr64 = LDRDui %43, 0 :: (load (s64))
+    %46:fpr128 = UABALv4i16_v4i32 killed %41, killed %44, killed %45
+    %47:fpr32 = ADDVv4i32v killed %46
+    %48:fpr128 = IMPLICIT_DEF
+    %49:fpr128 = INSERT_SUBREG %48, killed %47, %subreg.ssub
+    %50:gpr32all = COPY %49.ssub
+    $w0 = COPY %50
+    RET_ReallyLR implicit $w0
+
+...


