[llvm] r341941 - [ARM] Add smlald support in ARMParallelDSP

Sam Parker via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 11 07:01:22 PDT 2018


Author: sam_parker
Date: Tue Sep 11 07:01:22 2018
New Revision: 341941

URL: http://llvm.org/viewvc/llvm-project?rev=341941&view=rev
Log:
[ARM] Add smlald support in ARMParallelDSP

Search from i64 reducing phis, as well as i32 ones, to allow the
generation of smlald instructions.

Differential Revision: https://reviews.llvm.org/D51101
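
To illustrate the change, here is a condensed sketch (hypothetical value
names, based on the tests added below) of the kind of i64 reduction that
is now matched, and the call it is rewritten to:

  ; An i64 accumulator phi fed by two multiplies of sign-extended i16 loads:
  %acc   = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
  %mul   = mul nsw i64 %conv, %conv4    ; operands are sext i16 ... to i64
  %mul9  = mul nsw i64 %conv7, %conv8
  %add10 = add i64 %mul, %acc
  %add11 = add i64 %mul9, %add10

  ; ...becomes a single parallel MAC on the two packed i32 word loads
  ; (%p0 and %p1 stand for bitcasts of the original i16 pointers):
  %ld0   = load i32, i32* %p0, align 2
  %ld1   = load i32, i32* %p1, align 2
  %add11 = call i64 @llvm.arm.smlald(i32 %ld0, i32 %ld1, i64 %acc)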

Added:
    llvm/trunk/test/CodeGen/ARM/smlald0.ll
    llvm/trunk/test/CodeGen/ARM/smlald1.ll
    llvm/trunk/test/CodeGen/ARM/smlald2.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp

Modified: llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp?rev=341941&r1=341940&r2=341941&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMParallelDSP.cpp Tue Sep 11 07:01:22 2018
@@ -426,7 +426,7 @@ static void MatchReductions(Function &F,
 
   for (PHINode &Phi : Header->phis()) {
     const auto *Ty = Phi.getType();
-    if (!Ty->isIntegerTy(32))
+    if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
       continue;
 
     const bool IsReduction =
@@ -454,9 +454,11 @@ static void MatchReductions(Function &F,
 
 static void AddMACCandidate(OpChainList &Candidates,
                             const Instruction *Acc,
-                            Value *MulOp0, Value *MulOp1, int MulOpNum) {
-  Instruction *Mul = dyn_cast<Instruction>(Acc->getOperand(MulOpNum));
+                            Instruction *Mul,
+                            Value *MulOp0, Value *MulOp1) {
   LLVM_DEBUG(dbgs() << "OK, found acc mul:\t"; Mul->dump());
+  assert(Mul->getOpcode() == Instruction::Mul &&
+         "expected mul instruction");
   ValueList LHS;
   ValueList RHS;
   if (IsNarrowSequence<16>(MulOp0, LHS) &&
@@ -475,20 +477,44 @@ static void MatchParallelMACSequences(Re
   // Pattern 1: the accumulator is the RHS of the add.
   while(match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)),
                          m_Value(A)))){
-    AddMACCandidate(Candidates, Acc, MulOp0, MulOp1, 0);
+    Instruction *Mul = cast<Instruction>(Acc->getOperand(0));
+    AddMACCandidate(Candidates, Acc, Mul, MulOp0, MulOp1);
     Acc = dyn_cast<Instruction>(A);
   }
   // Pattern 2: the accumulator is the LHS of the add.
   while(match(Acc, m_Add(m_Value(A),
                          m_Mul(m_Value(MulOp0), m_Value(MulOp1))))) {
-    AddMACCandidate(Candidates, Acc, MulOp0, MulOp1, 1);
+    Instruction *Mul = cast<Instruction>(Acc->getOperand(1));
+    AddMACCandidate(Candidates, Acc, Mul, MulOp0, MulOp1);
     Acc = dyn_cast<Instruction>(A);
   }
 
   // The last mul in the chain has a slightly different pattern:
   // the mul is the first operand
   if (match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)), m_Value(A))))
-    AddMACCandidate(Candidates, Acc, MulOp0, MulOp1, 0);
+    AddMACCandidate(Candidates, Acc, cast<Instruction>(Acc->getOperand(0)),
+                    MulOp0, MulOp1);
+
+  // Same as above, but SMLALD may perform 32-bit muls, sext the results and
+  // then accumulate.
+  while(match(Acc, m_Add(m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1))),
+                         m_Value(A)))) {
+    Value *Mul = cast<Instruction>(Acc->getOperand(0))->getOperand(0);
+    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
+    Acc = dyn_cast<Instruction>(A);
+  }
+  while(match(Acc, m_Add(m_Value(A),
+                         m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1)))))) {
+    Value *Mul = cast<Instruction>(Acc->getOperand(1))->getOperand(0);
+    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
+    Acc = dyn_cast<Instruction>(A);
+  }
+  if (match(Acc, m_Add(m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1))),
+                       m_Value(A)))) {
+    Value *Mul = cast<Instruction>(
+      cast<Instruction>(Acc)->getOperand(0))->getOperand(0);
+    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
+  }
 
   // Because we start at the bottom of the chain, and we work our way up,
   // the muls are added in reverse program order to the list.
@@ -635,13 +661,12 @@ bool ARMParallelDSP::MatchSMLAD(Function
   return Changed;
 }
 
-static void CreateLoadIns(IRBuilder<NoFolder> &IRB, Instruction *Acc,
-                          LoadInst **VecLd) {
-  const Type *AccTy = Acc->getType();
+static void CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst **VecLd,
+                          const Type *LoadTy) {
   const unsigned AddrSpace = (*VecLd)->getPointerAddressSpace();
 
   Value *VecPtr = IRB.CreateBitCast((*VecLd)->getPointerOperand(),
-                                    AccTy->getPointerTo(AddrSpace));
+                                    LoadTy->getPointerTo(AddrSpace));
   *VecLd = IRB.CreateAlignedLoad(VecPtr, (*VecLd)->getAlignment());
 }
 
@@ -657,10 +682,13 @@ Instruction *ARMParallelDSP::CreateSMLAD
                               ++BasicBlock::iterator(InsertAfter));
 
   // Replace the reduction chain with an intrinsic call
-  CreateLoadIns(Builder, Acc, &VecLd0);
-  CreateLoadIns(Builder, Acc, &VecLd1);
+  const Type *Ty = IntegerType::get(M->getContext(), 32);
+  CreateLoadIns(Builder, &VecLd0, Ty);
+  CreateLoadIns(Builder, &VecLd1, Ty);
   Value* Args[] = { VecLd0, VecLd1, Acc };
-  Function *SMLAD = Intrinsic::getDeclaration(M, Intrinsic::arm_smlad);
+  Function *SMLAD = Acc->getType()->isIntegerTy(32) ?
+    Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
+    Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
   CallInst *Call = Builder.CreateCall(SMLAD, Args);
   NumSMLAD++;
   return Call;

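The intrinsic is now chosen from the accumulator type: an i32 accumulator
keeps using @llvm.arm.smlad, while an i64 accumulator selects
@llvm.arm.smlald. For reference, a sketch of the two declarations (both
take the two packed i32 operands first, then the accumulator):

  declare i32 @llvm.arm.smlad(i32, i32, i32)
  declare i64 @llvm.arm.smlald(i32, i32, i64)

The new matching code also accepts the variant where the multiplies are
performed in i32 and sign-extended to i64 before being accumulated, which
is the shape exercised by smlald2.ll below.
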
Added: llvm/trunk/test/CodeGen/ARM/smlald0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlald0.ll?rev=341941&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlald0.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlald0.ll Tue Sep 11 07:01:22 2018
@@ -0,0 +1,132 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+;
+; The Cortex-M0 does not support unaligned accesses:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; Check DSP extension:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define dso_local i64 @OneReduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @OneReduction
+; CHECK:  %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK:  [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK:  [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK:  [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+; CHECK-UNSUPPORTED-NOT:  call i64 @llvm.arm.smlald
+;
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+  ret i64 %mac1.0.lcssa
+
+for.body:
+; One reduction statement here:
+  %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i64
+  %conv4 = sext i16 %0 to i64
+  %mul = mul nsw i64 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i64
+  %conv8 = sext i16 %1 to i64
+  %mul9 = mul nsw i64 %conv7, %conv8
+  %add10 = add i64 %mul, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+  %add11 = add i64 %mul9, %add10
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define dso_local arm_aapcs_vfpcc i64 @TwoReductions(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @TwoReductions
+;
+; CHECK:  %mac1{{\.}}058 = phi i64 [ [[V10:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  %mac2{{\.}}057 = phi i64 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V10]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac1{{\.}}058)
+; CHECK:  [[V17]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac2{{\.}}057)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+  %cmp55 = icmp sgt i32 %arg, 0
+  br i1 %cmp55, label %for.body.preheader, label %for.cond.cleanup
+
+for.cond.cleanup:
+  %mac2.0.lcssa = phi i64 [ 0, %entry ], [ %add28, %for.body ]
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add16, %for.body ]
+  %add30 = add nsw i64 %mac1.0.lcssa, %mac2.0.lcssa
+  ret i64 %add30
+
+for.body.preheader:
+  br label %for.body
+
+for.body:
+; And two reduction statements here:
+  %mac1.058 = phi i64 [ %add16, %for.body ], [ 0, %for.body.preheader ]
+  %mac2.057 = phi i64 [ %add28, %for.body ], [ 0, %for.body.preheader ]
+
+  %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.056
+  %0 = load i16, i16* %arrayidx, align 2
+  %add1 = or i32 %i.056, 1
+  %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
+  %1 = load i16, i16* %arrayidx2, align 2
+  %add3 = or i32 %i.056, 2
+  %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
+  %2 = load i16, i16* %arrayidx4, align 2
+
+  %add5 = or i32 %i.056, 3
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
+  %3 = load i16, i16* %arrayidx6, align 2
+  %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.056
+  %4 = load i16, i16* %arrayidx8, align 2
+  %conv = sext i16 %4 to i64
+  %conv9 = sext i16 %0 to i64
+  %mul = mul nsw i64 %conv, %conv9
+  %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
+  %5 = load i16, i16* %arrayidx11, align 2
+  %conv12 = sext i16 %5 to i64
+  %conv13 = sext i16 %1 to i64
+  %mul14 = mul nsw i64 %conv12, %conv13
+  %add15 = add i64 %mul, %mac1.058
+  %add16 = add i64 %add15, %mul14
+  %arrayidx18 = getelementptr inbounds i16, i16* %arg2, i32 %add3
+  %6 = load i16, i16* %arrayidx18, align 2
+  %conv19 = sext i16 %6 to i64
+  %conv20 = sext i16 %2 to i64
+  %mul21 = mul nsw i64 %conv19, %conv20
+  %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+  %7 = load i16, i16* %arrayidx23, align 2
+  %conv24 = sext i16 %7 to i64
+  %conv25 = sext i16 %3 to i64
+  %mul26 = mul nsw i64 %conv24, %conv25
+  %add27 = add i64 %mul21, %mac2.057
+  %add28 = add i64 %add27, %mul26
+  %add29 = add nuw nsw i32 %i.056, 4
+  %cmp = icmp slt i32 %add29, %arg
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}

Added: llvm/trunk/test/CodeGen/ARM/smlald1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlald1.ll?rev=341941&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlald1.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlald1.ll Tue Sep 11 07:01:22 2018
@@ -0,0 +1,94 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+
+; CHECK-LABEL: @test1
+; CHECK:  %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK:  [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK:  [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK:  [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+
+define dso_local i64 @test1(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+  ret i64 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i64
+  %conv4 = sext i16 %0 to i64
+  %mul = mul nsw i64 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i64
+  %conv8 = sext i16 %1 to i64
+  %mul9 = mul nsw i64 %conv7, %conv8
+  %add10 = add i64 %mul, %mac1.026
+
+; And here the Add is the LHS, and the Mul the RHS.
+  %add11 = add i64 %add10, %mul9
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; Here we have i8 loads, which we do want to support, but don't handle yet.
+;
+; CHECK-LABEL: @test2
+; CHECK-NOT:   call i64 @llvm.arm.smlad
+;
+define dso_local i64 @test2(i32 %arg, i32* nocapture readnone %arg1, i8* nocapture readonly %arg2, i8* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i8, i8* %arg3, align 2
+  %.pre27 = load i8, i8* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+  ret i64 %mac1.0.lcssa
+
+for.body:
+  %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i8, i8* %arg3, i32 %i.025
+  %0 = load i8, i8* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i8, i8* %arg3, i32 %add
+  %1 = load i8, i8* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i8, i8* %arg2, i32 %i.025
+  %2 = load i8, i8* %arrayidx3, align 2
+  %conv = sext i8 %2 to i64
+  %conv4 = sext i8 %0 to i64
+  %mul = mul nsw i64 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i8, i8* %arg2, i32 %add
+  %3 = load i8, i8* %arrayidx6, align 2
+  %conv7 = sext i8 %3 to i64
+  %conv8 = sext i8 %1 to i64
+  %mul9 = mul nsw i64 %conv7, %conv8
+  %add10 = add i64 %mul, %mac1.026
+  %add11 = add i64 %add10, %mul9
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+

Added: llvm/trunk/test/CodeGen/ARM/smlald2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/smlald2.ll?rev=341941&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/smlald2.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/smlald2.ll Tue Sep 11 07:01:22 2018
@@ -0,0 +1,138 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+;
+; The Cortex-M0 does not support unaligned accesses:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; Check DSP extension:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define dso_local i64 @OneReduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @OneReduction
+; CHECK:  %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK:  [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK:  [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK:  [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK:  [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+; CHECK-UNSUPPORTED-NOT:  call i64 @llvm.arm.smlald
+;
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+  ret i64 %mac1.0.lcssa
+
+for.body:
+; One reduction statement here:
+  %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %sext0 = sext i32 %mul to i64
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %sext1 = sext i32 %mul9 to i64
+  %add10 = add i64 %sext0, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+  %add11 = add i64 %sext1, %add10
+
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define dso_local arm_aapcs_vfpcc i64 @TwoReductions(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @TwoReductions
+;
+; CHECK:  %mac1{{\.}}058 = phi i64 [ [[V10:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  %mac2{{\.}}057 = phi i64 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK:  [[V10]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac1{{\.}}058)
+; CHECK:  [[V17]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac2{{\.}}057)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+  %cmp55 = icmp sgt i32 %arg, 0
+  br i1 %cmp55, label %for.body.preheader, label %for.cond.cleanup
+
+for.cond.cleanup:
+  %mac2.0.lcssa = phi i64 [ 0, %entry ], [ %add28, %for.body ]
+  %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add16, %for.body ]
+  %add30 = add nsw i64 %mac1.0.lcssa, %mac2.0.lcssa
+  ret i64 %add30
+
+for.body.preheader:
+  br label %for.body
+
+for.body:
+; And two reduction statements here:
+  %mac1.058 = phi i64 [ %add16, %for.body ], [ 0, %for.body.preheader ]
+  %mac2.057 = phi i64 [ %add28, %for.body ], [ 0, %for.body.preheader ]
+
+  %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.056
+  %0 = load i16, i16* %arrayidx, align 2
+  %add1 = or i32 %i.056, 1
+  %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
+  %1 = load i16, i16* %arrayidx2, align 2
+  %add3 = or i32 %i.056, 2
+  %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
+  %2 = load i16, i16* %arrayidx4, align 2
+
+  %add5 = or i32 %i.056, 3
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
+  %3 = load i16, i16* %arrayidx6, align 2
+  %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.056
+  %4 = load i16, i16* %arrayidx8, align 2
+  %conv = sext i16 %4 to i32
+  %conv9 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv9
+  %sext0 = sext i32 %mul to i64
+  %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
+  %5 = load i16, i16* %arrayidx11, align 2
+  %conv12 = sext i16 %5 to i32
+  %conv13 = sext i16 %1 to i32
+  %mul14 = mul nsw i32 %conv12, %conv13
+  %sext1 = sext i32 %mul14 to i64
+  %add15 = add i64 %sext0, %mac1.058
+  %add16 = add i64 %add15, %sext1
+  %arrayidx18 = getelementptr inbounds i16, i16* %arg2, i32 %add3
+  %6 = load i16, i16* %arrayidx18, align 2
+  %conv19 = sext i16 %6 to i32
+  %conv20 = sext i16 %2 to i32
+  %mul21 = mul nsw i32 %conv19, %conv20
+  %sext2 = sext i32 %mul21 to i64
+  %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+  %7 = load i16, i16* %arrayidx23, align 2
+  %conv24 = sext i16 %7 to i32
+  %conv25 = sext i16 %3 to i32
+  %mul26 = mul nsw i32 %conv24, %conv25
+  %sext3 = sext i32 %mul26 to i64
+  %add27 = add i64 %sext2, %mac2.057
+  %add28 = add i64 %add27, %sext3
+  %add29 = add nuw nsw i32 %i.056, 4
+  %cmp = icmp slt i32 %add29, %arg
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
