[llvm] r265901 - Loop vectorization with uniform load

Elena Demikhovsky via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 10 09:53:20 PDT 2016


Author: delena
Date: Sun Apr 10 11:53:19 2016
New Revision: 265901

URL: http://llvm.org/viewvc/llvm-project?rev=265901&view=rev
Log:
Loop vectorization with uniform load

The vectorization cost of a uniform load wasn't correctly calculated.
As a result, a simple loop that loads a uniform value wasn't vectorized.

Differential Revision: http://reviews.llvm.org/D18940


Added:
    llvm/trunk/test/Transforms/LoopVectorize/X86/uniform_load.ll
Modified:
    llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp

Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=265901&r1=265900&r2=265901&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Sun Apr 10 11:53:19 2016
@@ -5819,6 +5819,15 @@ LoopVectorizationCostModel::getInstructi
       return TTI.getAddressComputationCost(VectorTy) +
         TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
 
+    if (LI && Legal->isUniform(Ptr)) {
+      // Scalar load + broadcast
+      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
+      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
+                                  Alignment, AS);
+      return Cost + TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast,
+                                       ValTy);
+    }
+
     // For an interleaved access, calculate the total cost of the whole
     // interleave group.
     if (Legal->isAccessInterleaved(I)) {

Added: llvm/trunk/test/Transforms/LoopVectorize/X86/uniform_load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/X86/uniform_load.ll?rev=265901&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/X86/uniform_load.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/X86/uniform_load.ll Sun Apr 10 11:53:19 2016
@@ -0,0 +1,47 @@
+; RUN: opt -basicaa -loop-vectorize -S -mcpu=core-avx2 < %s | FileCheck %s
+
+;float inc = 0.5;
+;void foo(float *A, unsigned N) {
+;
+;  for (unsigned i=0; i<N; i++){
+;    A[i] += inc;
+;  }
+;}
+
+; CHECK-LABEL: foo
+; CHECK: vector.body
+; CHECK: load <8 x float>
+; CHECK: fadd <8 x float>
+; CHECK: store <8 x float>
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@inc = global float 5.000000e-01, align 4
+
+define void @foo(float* nocapture %A, i32 %N) #0 {
+entry:
+  %cmp3 = icmp eq i32 %N, 0
+  br i1 %cmp3, label %for.end, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %0 = load float, float* @inc, align 4
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
+  %1 = load float, float* %arrayidx, align 4
+  %add = fadd float %0, %1
+  store float %add, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp eq i32 %lftr.wideiv, %N
+  br i1 %exitcond, label %for.end.loopexit, label %for.body
+
+for.end.loopexit:                                 ; preds = %for.body
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  ret void
+}




More information about the llvm-commits mailing list