[llvm] r335850 - [ARM] Parallel DSP Pass

Sjoerd Meijer via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 28 05:55:29 PDT 2018


Author: sjoerdmeijer
Date: Thu Jun 28 05:55:29 2018
New Revision: 335850

URL: http://llvm.org/viewvc/llvm-project?rev=335850&view=rev
Log:
[ARM] Parallel DSP Pass

Armv6 introduced instructions to perform 32-bit SIMD operations. The purpose of
this pass is to do some straightforward IR pattern matching to create ACLE DSP
intrinsics, which map onto these 32-bit SIMD operations.

Currently, only the SMLAD instruction is recognised. This instruction performs
two signed 16-bit multiplications and adds both products to a 32-bit
accumulator. We will follow this up with patches to recognise SMLAD in more
cases, and also to generate other DSP instructions (e.g. SADD16).
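
As an illustration (not part of this patch), the kind of C-level loop that
reduces to this pattern is a dot product of two 16-bit arrays accumulating
into a 32-bit integer; the function and variable names below are hypothetical:

  #include <cstdint>

  // After the i16 loads are sign-extended, the IR for this loop exposes
  // exactly the mul/add reduction chain that the pass tries to match.
  int32_t dot_product(const int16_t *a, const int16_t *b, int n) {
    int32_t acc = 0;
    for (int i = 0; i < n; ++i)
      acc += (int32_t)a[i] * (int32_t)b[i];
    return acc;
  }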

Patch by: Sam Parker and Sjoerd Meijer

Differential Revision: https://reviews.llvm.org/D48128
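
For reference, the ACLE also exposes these operations directly to C/C++
sources on DSP-capable targets; a minimal sketch, assuming the __smlad
intrinsic and the int16x2_t typedef provided by the arm_acle.h header:

  #include <arm_acle.h>
  #include <stdint.h>

  // x and y each pack two 16-bit halfwords into one 32-bit value; __smlad
  // multiplies the corresponding halfwords and adds both products to acc.
  int32_t smlad_step(int16x2_t x, int16x2_t y, int32_t acc) {
    return __smlad(x, y, acc);
  }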

Added:
    llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp
    llvm/trunk/test/CodeGen/ARM/smlad0.ll
    llvm/trunk/test/CodeGen/ARM/smlad1.ll
    llvm/trunk/test/CodeGen/ARM/smlad10.ll
    llvm/trunk/test/CodeGen/ARM/smlad11.ll
    llvm/trunk/test/CodeGen/ARM/smlad12.ll
    llvm/trunk/test/CodeGen/ARM/smlad2.ll
    llvm/trunk/test/CodeGen/ARM/smlad3.ll
    llvm/trunk/test/CodeGen/ARM/smlad4.ll
    llvm/trunk/test/CodeGen/ARM/smlad5.ll
    llvm/trunk/test/CodeGen/ARM/smlad6.ll
    llvm/trunk/test/CodeGen/ARM/smlad7.ll
    llvm/trunk/test/CodeGen/ARM/smlad8.ll
    llvm/trunk/test/CodeGen/ARM/smlad9.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARM.h
    llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp
    llvm/trunk/lib/Target/ARM/CMakeLists.txt
    llvm/trunk/lib/Target/ARM/LLVMBuild.txt

Modified: llvm/trunk/lib/Target/ARM/ARM.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARM.h?rev=335850&r1=335849&r2=335850&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARM.h (original)
+++ llvm/trunk/lib/Target/ARM/ARM.h Thu Jun 28 05:55:29 2018
@@ -15,6 +15,7 @@
 #ifndef LLVM_LIB_TARGET_ARM_ARM_H
 #define LLVM_LIB_TARGET_ARM_ARM_H
 
+#include "llvm/IR/LegacyPassManager.h"
 #include "llvm/Support/CodeGen.h"
 #include <functional>
 #include <vector>
@@ -35,6 +36,8 @@ class MachineInstr;
 class MCInst;
 class PassRegistry;
 
+
+Pass *createARMParallelDSPPass();
 FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
                                CodeGenOpt::Level OptLevel);
 FunctionPass *createA15SDOptimizerPass();
@@ -57,6 +60,8 @@ void computeBlockSize(MachineFunction *M
                       BasicBlockInfo &BBI);
 std::vector<BasicBlockInfo> computeAllBlockSizes(MachineFunction *MF);
 
+
+void initializeARMParallelDSPPass(PassRegistry &);
 void initializeARMLoadStoreOptPass(PassRegistry &);
 void initializeARMPreAllocLoadStoreOptPass(PassRegistry &);
 void initializeARMConstantIslandsPass(PassRegistry &);

Added: llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp?rev=335850&view=auto
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp (added)
+++ llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp Thu Jun 28 05:55:29 2018
@@ -0,0 +1,613 @@
+//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
+/// purpose of this pass is to do some IR pattern matching to create ACLE
+/// DSP intrinsics, which map onto these 32-bit SIMD operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/NoFolder.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "ARM.h"
+#include "ARMSubtarget.h"
+
+using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "parallel-dsp"
+
+namespace {
+  struct ParallelMAC;
+  struct Reduction;
+
+  using ParallelMACList = SmallVector<ParallelMAC, 8>;
+  using ReductionList   = SmallVector<Reduction, 8>;
+  using ValueList       = SmallVector<Value*, 8>;
+  using LoadInstList    = SmallVector<LoadInst*, 8>;
+  using PMACPair        = std::pair<ParallelMAC*,ParallelMAC*>;
+  using PMACPairList    = SmallVector<PMACPair, 8>;
+  using Instructions    = SmallVector<Instruction*,16>;
+  using MemLocList      = SmallVector<MemoryLocation, 4>;
+
+  // 'ParallelMAC' and 'Reduction' are just some bookkeeping data structures.
+  // 'Reduction' contains the phi-node and accumulator statement from where we
+  // start pattern matching, and 'ParallelMAC' the multiplication
+  // instructions that are candidates for parallel execution.
+  struct ParallelMAC {
+    Instruction *Mul;
+    ValueList    VL;        // List of all (narrow) operands of this Mul
+    LoadInstList VecLd;     // List of all load instructions of this Mul
+    MemLocList   MemLocs;   // All memory locations read by this Mul
+
+    ParallelMAC(Instruction *I, ValueList &V) : Mul(I), VL(V) {};
+  };
+
+  struct Reduction {
+    PHINode         *Phi;             // The Phi-node from where we start
+                                      // pattern matching.
+    Instruction     *AccIntAdd;       // The accumulating integer add statement,
+                                      // i.e., the reduction statement.
+
+    Reduction (PHINode *P, Instruction *Acc) : Phi(P), AccIntAdd(Acc) { };
+  };
+
+  class ARMParallelDSP : public LoopPass {
+    ScalarEvolution   *SE;
+    AliasAnalysis     *AA;
+    TargetLibraryInfo *TLI;
+    DominatorTree     *DT;
+    LoopInfo          *LI;
+    Loop              *L;
+    const DataLayout  *DL;
+    Module            *M;
+
+    bool InsertParallelMACs(Reduction &Reduction, PMACPairList &PMACPairs);
+    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, LoadInstList &VecLd);
+    PMACPairList CreateParallelMACPairs(ParallelMACList &Candidates);
+    Instruction *CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
+                                 Instruction *Acc, Instruction *InsertAfter);
+
+    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
+    /// Dual performs two signed 16x16-bit multiplications. It adds the
+    /// products to a 32-bit accumulate operand. Optionally, the instruction can
+    /// exchange the halfwords of the second operand before performing the
+    /// arithmetic.
+    bool MatchSMLAD(Function &F);
+
+  public:
+    static char ID;
+
+    ARMParallelDSP() : LoopPass(ID) { }
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      LoopPass::getAnalysisUsage(AU);
+      AU.addRequired<AssumptionCacheTracker>();
+      AU.addRequired<ScalarEvolutionWrapperPass>();
+      AU.addRequired<AAResultsWrapperPass>();
+      AU.addRequired<TargetLibraryInfoWrapperPass>();
+      AU.addRequired<LoopInfoWrapperPass>();
+      AU.addRequired<DominatorTreeWrapperPass>();
+      AU.addRequired<TargetPassConfig>();
+      AU.addPreserved<LoopInfoWrapperPass>();
+      AU.setPreservesCFG();
+    }
+
+    bool runOnLoop(Loop *TheLoop, LPPassManager &) override {
+      L = TheLoop;
+      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+      LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+      auto &TPC = getAnalysis<TargetPassConfig>();
+
+      BasicBlock *Header = TheLoop->getHeader();
+      if (!Header)
+        return false;
+
+      // TODO: We assume the loop header and latch to be the same block.
+      // This is not a fundamental restriction, but lifting this would just
+      // require more work to do the transformation and then patch up the CFG.
+      if (Header != TheLoop->getLoopLatch()) {
+        LLVM_DEBUG(dbgs() << "The loop header is not the loop latch: not "
+                             "running pass ARMParallelDSP\n");
+        return false;
+      }
+
+      Function &F = *Header->getParent();
+      M = F.getParent();
+      DL = &M->getDataLayout();
+
+      auto &TM = TPC.getTM<TargetMachine>();
+      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);
+
+      if (!ST->allowsUnalignedMem()) {
+        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
+                             "running pass ARMParallelDSP\n");
+        return false;
+      }
+
+      if (!ST->hasDSP()) {
+        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
+                             "ARMParallelDSP\n");
+        return false;
+      }
+
+      LoopAccessInfo LAI(L, SE, TLI, AA, DT, LI);
+      bool Changes = false;
+
+      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n\n");
+      Changes = MatchSMLAD(F);
+      return Changes;
+    }
+  };
+}
+
+template<unsigned BitWidth>
+static bool IsNarrowSequence(Value *V, ValueList &VL) {
+  LLVM_DEBUG(dbgs() << "Is narrow sequence: "; V->dump());
+  ConstantInt *CInt;
+
+  if (match(V, m_ConstantInt(CInt))) {
+    // TODO: if a constant is used, it needs to fit within the bit width.
+    return false;
+  }
+
+  auto *I = dyn_cast<Instruction>(V);
+  if (!I)
+    return false;
+
+  Value *Val, *LHS, *RHS;
+  bool isNarrow = false;
+
+  if (match(V, m_Trunc(m_Value(Val)))) {
+    if (cast<TruncInst>(I)->getDestTy()->getIntegerBitWidth() == BitWidth)
+      isNarrow = IsNarrowSequence<BitWidth>(Val, VL);
+  } else if (match(V, m_Add(m_Value(LHS), m_Value(RHS)))) {
+    // TODO: we need to implement sadd16/sadd8 for this, which would enable
+    // the rewrite for smlad8.ll as well, but it is unsupported for now.
+    isNarrow = false;
+  } else if (match(V, m_ZExtOrSExt(m_Value(Val)))) {
+    if (cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth() == BitWidth)
+      isNarrow = true;
+    else
+      LLVM_DEBUG(dbgs() << "Wrong SrcTy size of CastInst: " <<
+                 cast<CastInst>(I)->getSrcTy()->getIntegerBitWidth());
+
+    if (match(Val, m_Load(m_Value(Val)))) {
+      auto *Ld = dyn_cast<LoadInst>(I->getOperand(0));
+      LLVM_DEBUG(dbgs() << "Found narrow Load:\t"; Ld->dump());
+      VL.push_back(Ld);
+      isNarrow = true;
+    } else if (!isa<Instruction>(I->getOperand(0)))
+      VL.push_back(I->getOperand(0));
+  }
+
+  if (isNarrow) {
+    LLVM_DEBUG(dbgs() << "Found narrow Op:\t"; I->dump());
+    VL.push_back(I);
+  } else
+    LLVM_DEBUG(dbgs() << "Found unsupported Op:\t"; I->dump());
+
+  return isNarrow;
+}
+
+// Element-by-element comparison of Value lists returning true if they are
+// instructions with the same opcode or constants with the same value.
+static bool AreSymmetrical(const ValueList &VL0,
+                           const ValueList &VL1) {
+  if (VL0.size() != VL1.size()) {
+    LLVM_DEBUG(dbgs() << "Muls are mismatching operand list lengths: "
+                      << VL0.size() << " != " << VL1.size() << "\n");
+    return false;
+  }
+
+  const unsigned Pairs = VL0.size();
+  LLVM_DEBUG(dbgs() << "Number of operand pairs: " << Pairs << "\n");
+
+  for (unsigned i = 0; i < Pairs; ++i) {
+    const Value *V0 = VL0[i];
+    const Value *V1 = VL1[i];
+    const auto *Inst0 = dyn_cast<Instruction>(V0);
+    const auto *Inst1 = dyn_cast<Instruction>(V1);
+
+    LLVM_DEBUG(dbgs() << "Pair " << i << ":\n";
+               dbgs() << "mul1: "; V0->dump();
+               dbgs() << "mul2: "; V1->dump());
+
+    if (!Inst0 || !Inst1)
+      return false;
+
+    if (Inst0->isSameOperationAs(Inst1)) {
+      LLVM_DEBUG(dbgs() << "OK: same operation found!\n");
+      continue;
+    }
+
+    const APInt *C0, *C1;
+    if (!(match(V0, m_APInt(C0)) && match(V1, m_APInt(C1)) && *C0 == *C1))
+      return false;
+  }
+
+  LLVM_DEBUG(dbgs() << "OK: found symmetrical operand lists.\n");
+  return true;
+}
+
+bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
+                                        LoadInstList &VecLd) {
+  if (!Ld0 || !Ld1)
+    return false;
+
+  LLVM_DEBUG(dbgs() << "Are consecutive loads:\n";
+    dbgs() << "Ld0:"; Ld0->dump();
+    dbgs() << "Ld1:"; Ld1->dump();
+  );
+
+  if (!Ld0->isSimple() || !Ld1->isSimple()) {
+    LLVM_DEBUG(dbgs() << "No, not touching volatile loads\n");
+    return false;
+  }
+  if (!Ld0->hasOneUse() || !Ld1->hasOneUse()) {
+    LLVM_DEBUG(dbgs() << "No, load has more than one use.\n");
+    return false;
+  }
+  if (isConsecutiveAccess(Ld0, Ld1, *DL, *SE)) {
+    VecLd.push_back(Ld0);
+    VecLd.push_back(Ld1);
+    LLVM_DEBUG(dbgs() << "OK: loads are consecutive.\n");
+    return true;
+  }
+  LLVM_DEBUG(dbgs() << "No, Ld0 and Ld1 aren't consecutive.\n");
+  return false;
+}
+
+PMACPairList
+ARMParallelDSP::CreateParallelMACPairs(ParallelMACList &Candidates) {
+  const unsigned Elems = Candidates.size();
+  PMACPairList PMACPairs;
+
+  if (Elems < 2)
+    return PMACPairs;
+
+  // TODO: for now we simply try to match consecutive pairs i and i+1.
+  // We can compare all elements, but then we need to compare and evaluate
+  // different solutions.
+  for (unsigned i = 0; i < Elems - 1; i += 2) {
+    ParallelMAC &PMul0 = Candidates[i];
+    ParallelMAC &PMul1 = Candidates[i+1];
+    const Instruction *Mul0 = PMul0.Mul;
+    const Instruction *Mul1 = PMul1.Mul;
+
+    if (Mul0 == Mul1)
+      continue;
+
+    LLVM_DEBUG(dbgs() << "\nCheck parallel muls:\n";
+               dbgs() << "- "; Mul0->dump();
+               dbgs() << "- "; Mul1->dump());
+
+    const ValueList &VL0 = PMul0.VL;
+    const ValueList &VL1 = PMul1.VL;
+
+    if (!AreSymmetrical(VL0, VL1))
+      continue;
+
+    LLVM_DEBUG(dbgs() << "OK: mul operands list match:\n");
+    // The first elements of each vector should be loads with sexts. If we
+    // find two pairs of consecutive loads, then these can be transformed into
+    // two wider loads and the users can be replaced with DSP intrinsics.
+    for (unsigned x = 0; x < VL0.size(); x += 4) {
+      auto *Ld0 = dyn_cast<LoadInst>(VL0[x]);
+      auto *Ld1 = dyn_cast<LoadInst>(VL1[x]);
+      auto *Ld2 = dyn_cast<LoadInst>(VL0[x+2]);
+      auto *Ld3 = dyn_cast<LoadInst>(VL1[x+2]);
+
+      LLVM_DEBUG(dbgs() << "Looking at operands " << x << ":\n";
+                 dbgs() << "\t mul1: "; VL0[x]->dump();
+                 dbgs() << "\t mul2: "; VL1[x]->dump();
+                 dbgs() << "and operands " << x + 2 << ":\n";
+                 dbgs() << "\t mul1: "; VL0[x+2]->dump();
+                 dbgs() << "\t mul2: "; VL1[x+2]->dump());
+
+      if (AreSequentialLoads(Ld0, Ld1, Candidates[i].VecLd) &&
+          AreSequentialLoads(Ld2, Ld3, Candidates[i+1].VecLd)) {
+        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
+        PMACPairs.push_back(std::make_pair(&PMul0, &PMul1));
+      }
+    }
+  }
+  return PMACPairs;
+}
+
+bool ARMParallelDSP::InsertParallelMACs(Reduction &Reduction,
+                                        PMACPairList &PMACPairs) {
+  Instruction *Acc = Reduction.Phi;
+  Instruction *InsertAfter = Reduction.AccIntAdd;
+
+  for (auto &Pair : PMACPairs) {
+    LLVM_DEBUG(dbgs() << "Found parallel MACs!!\n";
+               dbgs() << "- "; Pair.first->Mul->dump();
+               dbgs() << "- "; Pair.second->Mul->dump());
+    Acc = CreateSMLADCall(Pair.first->VecLd[0], Pair.second->VecLd[0], Acc,
+                          InsertAfter);
+    InsertAfter = Acc;
+  }
+
+  if (Acc != Reduction.Phi) {
+    LLVM_DEBUG(dbgs() << "Replace Accumulate: "; Acc->dump());
+    Reduction.AccIntAdd->replaceAllUsesWith(Acc);
+    return true;
+  }
+  return false;
+}
+
+static ReductionList MatchReductions(Function &F, Loop *TheLoop,
+                                     BasicBlock *Header) {
+  ReductionList Reductions;
+  RecurrenceDescriptor RecDesc;
+  const bool HasFnNoNaNAttr =
+    F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
+  const BasicBlock *Latch = TheLoop->getLoopLatch();
+
+  // We need a preheader as getIncomingValueForBlock assumes there is one.
+  if (!TheLoop->getLoopPreheader())
+    return Reductions;
+
+  for (PHINode &Phi : Header->phis()) {
+    const auto *Ty = Phi.getType();
+    if (!Ty->isIntegerTy(32))
+      continue;
+
+    const bool IsReduction =
+      RecurrenceDescriptor::AddReductionVar(&Phi,
+                                            RecurrenceDescriptor::RK_IntegerAdd,
+                                            TheLoop, HasFnNoNaNAttr, RecDesc);
+    if (!IsReduction)
+      continue;
+
+    Instruction *Acc =
+        dyn_cast<Instruction>(Phi.getIncomingValueForBlock(Latch));
+    if (!Acc)
+      continue;
+
+    Reductions.push_back(Reduction(&Phi, Acc));
+  }
+
+  LLVM_DEBUG(
+    dbgs() << "\nAccumulating integer additions (reductions) found:\n";
+    for (auto R : Reductions) {
+      dbgs() << "-  "; R.Phi->dump();
+      dbgs() << "-> "; R.AccIntAdd->dump();
+    }
+  );
+  return Reductions;
+}
+
+static void AddCandidateMAC(ParallelMACList &Candidates, const Instruction *Acc,
+                            Value *MulOp0, Value *MulOp1, int MulOpNum) {
+  Instruction *Mul = dyn_cast<Instruction>(Acc->getOperand(MulOpNum));
+  if (!Mul)
+    return;
+  LLVM_DEBUG(dbgs() << "OK, found acc mul:\t"; Mul->dump());
+  ValueList VL;
+  if (IsNarrowSequence<16>(MulOp0, VL) &&
+      IsNarrowSequence<16>(MulOp1, VL)) {
+    LLVM_DEBUG(dbgs() << "OK, found narrow mul: "; Mul->dump());
+    Candidates.push_back(ParallelMAC(Mul, VL));
+  }
+}
+
+static ParallelMACList MatchParallelMACs(Reduction &R) {
+  ParallelMACList Candidates;
+  const Instruction *Acc = R.AccIntAdd;
+  Value *A, *MulOp0, *MulOp1;
+  LLVM_DEBUG(dbgs() << "\n- Analysing:\t"; Acc->dump());
+
+  // Pattern 1: the accumulator is the RHS of the mul.
+  while (match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)),
+                          m_Value(A)))) {
+    AddCandidateMAC(Candidates, Acc, MulOp0, MulOp1, 0);
+    Acc = dyn_cast<Instruction>(A);
+  }
+  // Pattern 2: the accumulator is the LHS of the mul.
+  while (match(Acc, m_Add(m_Value(A),
+                          m_Mul(m_Value(MulOp0), m_Value(MulOp1))))) {
+    AddCandidateMAC(Candidates, Acc, MulOp0, MulOp1, 1);
+    Acc = dyn_cast<Instruction>(A);
+  }
+
+  // The last mul in the chain has a slightly different pattern: the mul is
+  // the first operand.
+  if (match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)), m_Value(A))))
+    AddCandidateMAC(Candidates, Acc, MulOp0, MulOp1, 0);
+
+  // Because we start at the bottom of the chain, and we work our way up,
+  // the muls are added in reverse program order to the list.
+  std::reverse(Candidates.begin(), Candidates.end());
+  return Candidates;
+}
+
+// Collects all instructions that are not part of the MAC chains, which is the
+// set of instructions that can potentially alias with the MAC operands.
+static Instructions AliasCandidates(BasicBlock *Header,
+                                    ParallelMACList &MACCandidates) {
+  Instructions Aliases;
+  auto IsMACCandidate = [] (Instruction *I, ParallelMACList &MACCandidates) {
+    for (auto &MAC : MACCandidates)
+      for (auto *Val : MAC.VL)
+        if (I == MAC.Mul || Val == I)
+          return true;
+    return false;
+  };
+
+  std::for_each(Header->begin(), Header->end(),
+                [&Aliases, &MACCandidates, &IsMACCandidate] (Instruction &I) {
+                  if (I.mayReadOrWriteMemory() &&
+                      !IsMACCandidate(&I, MACCandidates))
+                    Aliases.push_back(&I); });
+  return Aliases;
+}
+
+// This compares all instructions from the "alias candidates" set, i.e., all
+// instructions that are not part of the MAC-chain, with all instructions in
+// the MAC candidate set, to see if instructions are aliased.
+static bool AreAliased(AliasAnalysis *AA, Instructions AliasCandidates,
+                       ParallelMACList &MACCandidates) {
+  LLVM_DEBUG(dbgs() << "Alias checks:\n");
+  for (auto *I : AliasCandidates) {
+    LLVM_DEBUG(dbgs() << "- "; I->dump());
+    for (auto &MAC : MACCandidates) {
+      LLVM_DEBUG(dbgs() << "mul: "; MAC.Mul->dump());
+      assert(MAC.MemLocs.size() >= 2 && "expecting at least 2 memlocs");
+      for (auto &MemLoc : MAC.MemLocs) {
+        if (isModOrRefSet(intersectModRef(AA->getModRefInfo(I, MemLoc),
+                                          ModRefInfo::ModRef))) {
+          LLVM_DEBUG(dbgs() << "Yes, aliases found\n");
+          return true;
+        }
+      }
+    }
+  }
+  LLVM_DEBUG(dbgs() << "OK: no aliases found!\n");
+  return false;
+}
+
+static bool SetMemoryLocations(ParallelMACList &Candidates) {
+  const auto Size = MemoryLocation::UnknownSize;
+  for (auto &C : Candidates) {
+    // A mul has 2 operands, and a narrow op consists of a sext and a load;
+    // thus we expect at least 4 items in this operand value list.
+    if (C.VL.size() < 4) {
+      LLVM_DEBUG(dbgs() << "Operand list too short.\n");
+      return false;
+    }
+
+    for (unsigned i = 0; i < C.VL.size(); i += 4) {
+      auto *LdOp0 = dyn_cast<LoadInst>(C.VL[i]);
+      auto *LdOp1 = dyn_cast<LoadInst>(C.VL[i+2]);
+      if (!LdOp0 || !LdOp1)
+        return false;
+
+      C.MemLocs.push_back(MemoryLocation(LdOp0->getPointerOperand(), Size));
+      C.MemLocs.push_back(MemoryLocation(LdOp1->getPointerOperand(), Size));
+    }
+  }
+  return true;
+}
+
+// Loop Pass that needs to identify integer add/sub reductions of 16-bit vector
+// multiplications.
+// To use SMLAD:
+// 1) we first need to find integer add reduction PHIs,
+// 2) then from the PHI, look for this pattern:
+//
+// acc0 = phi i32 [0, %entry], [%acc1, %loop.body]
+// ld0 = load i16
+// sext0 = sext i16 %ld0 to i32
+// ld1 = load i16
+// sext1 = sext i16 %ld1 to i32
+// mul0 = mul %sext0, %sext1
+// ld2 = load i16
+// sext2 = sext i16 %ld2 to i32
+// ld3 = load i16
+// sext3 = sext i16 %ld3 to i32
+// mul1 = mul i32 %sext2, %sext3
+// add0 = add i32 %mul0, %acc0
+// acc1 = add i32 %add0, %mul1
+//
+// Which can be selected to:
+//
+// ldr.h r0
+// ldr.h r1
+// smlad r2, r0, r1, r2
+//
+// If constants are used instead of loads, these will need to be hoisted
+// out and into a register.
+//
+// If loop invariants are used instead of loads, these need to be packed
+// before the loop begins.
+//
+// Can only be enabled for cores which support unaligned accesses.
+//
+bool ARMParallelDSP::MatchSMLAD(Function &F) {
+  BasicBlock *Header = L->getHeader();
+  LLVM_DEBUG(dbgs() << "= Matching SMLAD =\n";
+             dbgs() << "Header block:\n"; Header->dump();
+             dbgs() << "Loop info:\n\n"; L->dump());
+
+  bool Changed = false;
+  ReductionList Reductions = MatchReductions(F, L, Header);
+
+  for (auto &R : Reductions) {
+    ParallelMACList MACCandidates = MatchParallelMACs(R);
+    if (!SetMemoryLocations(MACCandidates))
+      continue;
+    Instructions Aliases = AliasCandidates(Header, MACCandidates);
+    if (AreAliased(AA, Aliases, MACCandidates))
+      continue;
+    PMACPairList PMACPairs = CreateParallelMACPairs(MACCandidates);
+    Changed = InsertParallelMACs(R, PMACPairs) || Changed;
+  }
+
+  LLVM_DEBUG(if (Changed) { dbgs() << "Header block:\n"; Header->dump(); });
+  return Changed;
+}
+
+static void CreateLoadIns(IRBuilder<NoFolder> &IRB, Instruction *Acc,
+                          LoadInst **VecLd) {
+  const Type *AccTy = Acc->getType();
+  const unsigned AddrSpace = (*VecLd)->getPointerAddressSpace();
+
+  Value *VecPtr = IRB.CreateBitCast((*VecLd)->getPointerOperand(),
+                                    AccTy->getPointerTo(AddrSpace));
+  *VecLd = IRB.CreateAlignedLoad(VecPtr, (*VecLd)->getAlignment());
+}
+
+Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
+                                             Instruction *Acc,
+                                             Instruction *InsertAfter) {
+  LLVM_DEBUG(dbgs() << "Create SMLAD intrinsic using:\n";
+             dbgs() << "- "; VecLd0->dump();
+             dbgs() << "- "; VecLd1->dump();
+             dbgs() << "- "; Acc->dump());
+
+  IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
+                              ++BasicBlock::iterator(InsertAfter));
+
+  // Replace the reduction chain with an intrinsic call
+  CreateLoadIns(Builder, Acc, &VecLd0);
+  CreateLoadIns(Builder, Acc, &VecLd1);
+  Value* Args[] = { VecLd0, VecLd1, Acc };
+  Function *SMLAD = Intrinsic::getDeclaration(M, Intrinsic::arm_smlad);
+  CallInst *Call = Builder.CreateCall(SMLAD, Args);
+  return Call;
+}
+
+Pass *llvm::createARMParallelDSPPass() {
+  return new ARMParallelDSP();
+}
+
+char ARMParallelDSP::ID = 0;
+
+INITIALIZE_PASS_BEGIN(ARMParallelDSP, "parallel-dsp",
+                      "Transform loops to use DSP intrinsics", false, false)
+INITIALIZE_PASS_END(ARMParallelDSP, "parallel-dsp",
+                    "Transform loops to use DSP intrinsics", false, false)

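For reference, a hedged scalar model of what the generated @llvm.arm.smlad
call computes, matching the SMLAD description in the pass above (the function
name is illustrative, and Q-flag overflow behaviour is not modelled):

  #include <cstdint>

  // Each i32 operand packs two signed 16-bit halfwords; both 16x16-bit
  // products are formed and added to the 32-bit accumulator.
  int32_t smlad_model(int32_t x, int32_t y, int32_t acc) {
    int16_t xlo = (int16_t)(x & 0xFFFF), xhi = (int16_t)(x >> 16);
    int16_t ylo = (int16_t)(y & 0xFFFF), yhi = (int16_t)(y >> 16);
    return acc + (int32_t)xlo * ylo + (int32_t)xhi * yhi;
  }
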
Modified: llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp?rev=335850&r1=335849&r2=335850&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp Thu Jun 28 05:55:29 2018
@@ -89,6 +89,7 @@ extern "C" void LLVMInitializeARMTarget(
   initializeGlobalISel(Registry);
   initializeARMLoadStoreOptPass(Registry);
   initializeARMPreAllocLoadStoreOptPass(Registry);
+  initializeARMParallelDSPPass(Registry);
   initializeARMConstantIslandsPass(Registry);
   initializeARMExecutionDomainFixPass(Registry);
   initializeARMExpandPseudoPass(Registry);
@@ -404,6 +405,9 @@ void ARMPassConfig::addIRPasses() {
 }
 
 bool ARMPassConfig::addPreISel() {
+  if (getOptLevel() != CodeGenOpt::None)
+    addPass(createARMParallelDSPPass());
+
   if ((TM->getOptLevel() != CodeGenOpt::None &&
        EnableGlobalMerge == cl::BOU_UNSET) ||
       EnableGlobalMerge == cl::BOU_TRUE) {

Modified: llvm/trunk/lib/Target/ARM/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/CMakeLists.txt?rev=335850&r1=335849&r2=335850&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/ARM/CMakeLists.txt Thu Jun 28 05:55:29 2018
@@ -34,6 +34,7 @@ add_llvm_target(ARMCodeGen
   ARMISelLowering.cpp
   ARMInstrInfo.cpp
   ARMLegalizerInfo.cpp
+  ARMParallelDSP.cpp
   ARMLoadStoreOptimizer.cpp
   ARMMCInstLower.cpp
   ARMMachineFunctionInfo.cpp

Modified: llvm/trunk/lib/Target/ARM/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/LLVMBuild.txt?rev=335850&r1=335849&r2=335850&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/ARM/LLVMBuild.txt Thu Jun 28 05:55:29 2018
@@ -31,5 +31,5 @@ has_jit = 1
 type = Library
 name = ARMCodeGen
 parent = ARM
-required_libraries = ARMAsmPrinter ARMDesc ARMInfo Analysis AsmPrinter CodeGen Core MC Scalar SelectionDAG Support Target GlobalISel ARMUtils
+required_libraries = ARMAsmPrinter ARMDesc ARMInfo Analysis AsmPrinter CodeGen Core MC Scalar SelectionDAG Support Target GlobalISel ARMUtils TransformUtils
 add_to_library_groups = ARM

Added: llvm/trunk/test/CodeGen/ARM/smlad0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad0.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad0.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad0.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,57 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The Cortex-M0 does not support unaligned accesses:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; Check DSP extension:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; CHECK:  %mac1{{\.}}026 = phi i32 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK:  [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK:  [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK:  [[V8]] = call i32 @llvm.arm.smlad(i32 [[V5]], i32 [[V7]], i32 %mac1{{\.}}026)
+;
+; CHECK-UNSUPPORTED-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+  %add11 = add i32 %mul9, %add10
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}

Added: llvm/trunk/test/CodeGen/ARM/smlad1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad1.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad1.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad1.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,50 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+
+; CHECK:  %mac1{{\.}}026 = phi i32 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK:  [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK:  [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK:  [[V8]] = call i32 @llvm.arm.smlad(i32 [[V5]], i32 [[V7]], i32 %mac1{{\.}}026)
+
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+
+; And here the Add is the LHS, the Mul the RHS
+  %add11 = add i32 %add10, %mul9
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad10.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad10.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad10.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,47 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The reduction statement is an i64 type: we only support i32, so check that
+; the rewrite isn't triggered.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i64 @test(i64 %arg, i64* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i64 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+  ret i64 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i64 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i64 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i64 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i64 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i64 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i64
+  %conv4 = sext i16 %0 to i64
+  %mul = mul nsw i64 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i64 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i64
+  %conv8 = sext i16 %1 to i64
+  %mul9 = mul nsw i64 %conv7, %conv8
+  %add10 = add i64 %mul, %mac1.026
+
+  %add11 = add i64 %mul9, %add10
+
+  %exitcond = icmp ne i64 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad11.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad11.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad11.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,74 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; A more complicated chain: 4 mul operations, so we expect 2 smlad calls.
+;
+; CHECK:  %mac1{{\.}}054 = phi i32 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V8:%[0-9]+]] = bitcast i16* %arrayidx8 to i32*
+; CHECK:  [[V9:%[0-9]+]] = load i32, i32* [[V8]], align 2
+; CHECK:  [[V10:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V11:%[0-9]+]] = load i32, i32* [[V10]], align 2
+; CHECK:  [[V12:%[0-9]+]] = call i32 @llvm.arm.smlad(i32 [[V9]], i32 [[V11]], i32 %mac1{{\.}}054)
+; CHECK:  [[V13:%[0-9]+]] = bitcast i16* %arrayidx17 to i32*
+; CHECK:  [[V14:%[0-9]+]] = load i32, i32* [[V13]], align 2
+; CHECK:  [[V15:%[0-9]+]] = bitcast i16* %arrayidx4 to i32*
+; CHECK:  [[V16:%[0-9]+]] = load i32, i32* [[V15]], align 2
+; CHECK:  [[V17:%[0-9]+]] = call i32 @llvm.arm.smlad(i32 [[V14]], i32 [[V16]], i32 [[V12]])
+;
+; And we don't want to see a 3rd smlad:
+;
+; CHECK-NOT: call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp52 = icmp sgt i32 %arg, 0
+  br i1 %cmp52, label %for.body.preheader, label %for.cond.cleanup
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add28, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body.preheader:
+  br label %for.body
+
+for.body:
+  %mac1.054 = phi i32 [ %add28, %for.body ], [ 0, %for.body.preheader ]
+  %i.053 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.053
+  %0 = load i16, i16* %arrayidx, align 2
+  %add1 = or i32 %i.053, 1
+  %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
+  %1 = load i16, i16* %arrayidx2, align 2
+  %add3 = or i32 %i.053, 2
+  %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
+  %2 = load i16, i16* %arrayidx4, align 2
+  %add5 = or i32 %i.053, 3
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
+  %3 = load i16, i16* %arrayidx6, align 2
+  %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.053
+  %4 = load i16, i16* %arrayidx8, align 2
+  %conv = sext i16 %4 to i32
+  %conv9 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv9
+  %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
+  %5 = load i16, i16* %arrayidx11, align 2
+  %conv12 = sext i16 %5 to i32
+  %conv13 = sext i16 %1 to i32
+  %mul14 = mul nsw i32 %conv12, %conv13
+  %arrayidx17 = getelementptr inbounds i16, i16* %arg2, i32 %add3
+  %6 = load i16, i16* %arrayidx17, align 2
+  %conv18 = sext i16 %6 to i32
+  %conv19 = sext i16 %2 to i32
+  %mul20 = mul nsw i32 %conv18, %conv19
+  %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+  %7 = load i16, i16* %arrayidx23, align 2
+  %conv24 = sext i16 %7 to i32
+  %conv25 = sext i16 %3 to i32
+  %mul26 = mul nsw i32 %conv24, %conv25
+  %add15 = add i32 %mul, %mac1.054
+  %add21 = add i32 %add15, %mul14
+  %add27 = add i32 %add21, %mul20
+  %add28 = add i32 %add27, %mul26
+  %add29 = add nuw nsw i32 %i.053, 4
+  %cmp = icmp slt i32 %add29, %arg
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}

Added: llvm/trunk/test/CodeGen/ARM/smlad12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad12.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad12.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad12.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,48 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The loop header is not the loop latch.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+; This is the loop header:
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body2 ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body2 ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body2, label %for.cond.cleanup
+
+; And this is the loop latch:
+for.body2:
+  br label %for.body
+}

Added: llvm/trunk/test/CodeGen/ARM/smlad2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad2.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad2.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad2.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,52 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; Operands of both muls are not symmetrical (see also the comments inlined
+; below); check that the rewrite isn't triggered.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+
+; This zero-extends the 2nd operand of %mul:
+  %conv4 = zext i16 %0 to i32
+
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+
+; And here we only have sign-extensions. Thus, the operands of
+; %mul and %mul9 are not symmetrical:
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %add10, %mul9
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad3.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad3.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad3.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,50 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The loads are not consecutive: check that the rewrite isn't triggered.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+
+; Here we add another constant offset of 2, to make sure the
+; loads to %3 and %2 are not consecutive:
+
+  %add5 = add nuw nsw i32 %i.025, 2
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+  %3 = load i16, i16* %arrayidx6, align 2
+
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %add10, %mul9
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad4.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad4.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad4.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,48 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The loads are not narrow loads: check that the rewrite isn't triggered.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+; Arg2 is now an i32, while Arg3 is still an i16:
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i32* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp22 = icmp sgt i32 %arg, 0
+  br i1 %cmp22, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add9, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %0 = phi i16 [ %1, %for.body ], [ %.pre, %for.body.preheader ]
+  %mac1.024 = phi i32 [ %add9, %for.body ], [ 0, %for.body.preheader ]
+  %i.023 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %add = add nuw nsw i32 %i.023, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %conv = sext i16 %0 to i32
+
+; This is a 'normal' i32 load to %2:
+  %arrayidx3 = getelementptr inbounds i32, i32* %arg2, i32 %i.023
+  %2 = load i32, i32* %arrayidx3, align 4
+
+; This mul now has one operand that is a narrow load, and the other a normal
+; i32 load:
+  %mul = mul nsw i32 %2, %conv
+
+  %add4 = add nuw nsw i32 %i.023, 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %arg2, i32 %add4
+  %3 = load i32, i32* %arrayidx5, align 4
+  %conv6 = sext i16 %1 to i32
+  %mul7 = mul nsw i32 %3, %conv6
+  %add8 = add i32 %mul, %mac1.024
+  %add9 = add i32 %add8, %mul7
+  %exitcond = icmp eq i32 %add, %arg
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}

Added: llvm/trunk/test/CodeGen/ARM/smlad5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad5.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad5.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad5.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,44 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; The loads are volatile loads: check that the rewrite isn't triggered.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load volatile i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load volatile i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load volatile i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load volatile i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %add10, %mul9
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad6.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad6.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad6.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,50 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; Alias check: check that the rewrite isn't triggered when there's a store
+; instruction possibly aliasing any of the mul load operands; the pointer
+; arguments are not annotated with 'restrict'.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+
+; Store inserted here, aliasing with arrayidx, arrayidx1, arrayidx3
+  store i16 42, i16* %arrayidx, align 2
+
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad7.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad7.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad7.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,53 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; Alias check: check that the rewrite isn't triggered when there's a store
+; aliasing one of the mul load operands. Arguments are now annotated with
+; 'noalias'.
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* noalias %arg1, i16* noalias readonly %arg2, i16* noalias readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+
+; Store inserted here, aliasing only with loads from 'arrayidx'.
+  store i16 42, i16* %arrayidx, align 2
+
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+  %add11 = add i32 %mul9, %add10
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlad8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad8.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad8.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad8.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,59 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; Mul with operands that are not simple load and sext/zext chains: this is
+; not yet supported, so the rewrite shouldn't trigger (but we do want to
+; support this soon).
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3, i16* %arg4) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  %gep0 = getelementptr inbounds i16, i16* %arg4, i32 0
+  %gep1 = getelementptr inbounds i16, i16* %arg4, i32 1
+  %.add4 = load i16, i16* %gep0, align 2
+  %.add5 = load i16, i16* %gep1, align 2
+  %.zext4 = zext i16 %.add4 to i32
+  %.zext5 = zext i16 %.add5 to i32
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %add1 = add i32 %conv, %.zext4
+
+; This mul has a more complicated pattern as an operand: %add1 is itself an
+; add of a load, which we don't support for now.
+  %mul = mul nsw i32 %add1, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %add2 = add i32 %conv7, %.zext5
+
+; Same here
+  %mul9 = mul nsw i32 %add2, %conv8
+  %add10 = add i32 %mul, %mac1.026
+
+  %add11 = add i32 %mul9, %add10
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}

Added: llvm/trunk/test/CodeGen/ARM/smlad9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlad9.ll?rev=335850&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlad9.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlad9.ll Thu Jun 28 05:55:29 2018
@@ -0,0 +1,45 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -parallel-dsp -S | FileCheck %s
+;
+; Muls with operands that are constants: not yet supported, so the rewrite
+; should not trigger (but we do want to add this soon).
+;
+; CHECK-NOT:  call i32 @llvm.arm.smlad
+;
+define dso_local i32 @test(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  ret i32 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %v2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %v2 to i32
+
+; RHS operand of this mul is a constant
+  %mul = mul nsw i32 %conv, 43
+
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %v3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %v3 to i32
+
+; And this RHS operand is a constant too.
+  %mul9 = mul nsw i32 %conv7, 42
+
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
