[llvm] r345603 - [LoopVectorizer] Fix for cost values of memory accesses.
Jonas Paulsson via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 30 07:34:15 PDT 2018
Author: jonpa
Date: Tue Oct 30 07:34:15 2018
New Revision: 345603
URL: http://llvm.org/viewvc/llvm-project?rev=345603&view=rev
Log:
[LoopVectorizer] Fix for cost values of memory accesses.
This commit is a combination of two patches:
* "Fix in getScalarizationOverhead()"
If the target returns false in TTI.prefersVectorizedAddressing(), it means the
address registers will not need to be extracted. Therefore, there should
be no operand scalarization overhead for a load instruction.
* "Don't pass the instruction pointer from getMemInstScalarizationCost."
Since VF is always > 1, this is a cost query for an instruction in the
vectorized loop and it should not be evaluated within the scalar
context of the instruction.
Review: Ulrich Weigand, Hal Finkel
https://reviews.llvm.org/D52351
https://reviews.llvm.org/D52417
Added:
llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll
llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll
Modified:
llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=345603&r1=345602&r2=345603&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Tue Oct 30 07:34:15 2018
@@ -2982,6 +2982,10 @@ static unsigned getScalarizationOverhead
!TTI.supportsEfficientVectorElementLoadStore()))
Cost += TTI.getScalarizationOverhead(RetTy, true, false);
+ // Some targets keep addresses scalar.
+ if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
+ return Cost;
+
if (CallInst *CI = dyn_cast<CallInst>(I)) {
SmallVector<const Value *, 4> Operands(CI->arg_operands());
Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
@@ -5372,6 +5376,7 @@ static bool isStrideMul(Instruction *I,
unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
unsigned VF) {
+ assert(VF > 1 && "Scalarization cost of instruction implies vectorization.");
Type *ValTy = getMemInstValueType(I);
auto SE = PSE.getSE();
@@ -5387,9 +5392,11 @@ unsigned LoopVectorizationCostModel::get
// Get the cost of the scalar memory instruction and address computation.
unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
+ // Don't pass *I here, since it is scalar but will actually be part of a
+ // vectorized loop where the user of it is a vectorized instruction.
Cost += VF *
TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
- AS, I);
+ AS);
// Get the overhead of the extractelement and insertelement instructions
// we might create due to scalarization.
Added: llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll?rev=345603&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll Tue Oct 30 07:34:15 2018
@@ -0,0 +1,27 @@
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=2 -debug-only=loop-vectorize \
+; RUN: -disable-output < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+;
+; Check that a scalarized load does not get operands scalarization costs added.
+
+define void @fun(i64* %data, i64 %n, i64 %s, double* %Src) {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %mul = mul nsw i64 %iv, %s
+ %gep = getelementptr inbounds double, double* %Src, i64 %mul
+ %bct = bitcast double* %gep to i64*
+ %ld = load i64, i64* %bct
+ %iv.next = add nuw nsw i64 %iv, 1
+ %cmp110.us = icmp slt i64 %iv.next, %n
+ br i1 %cmp110.us, label %for.body, label %for.end
+
+for.end:
+ ret void
+
+; CHECK: LV: Found an estimated cost of 2 for VF 2 For instruction: %mul = mul nsw i64 %iv, %s
+; CHECK: LV: Found an estimated cost of 2 for VF 2 For instruction: %ld = load i64, i64* %bct
+}
Added: llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll?rev=345603&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/SystemZ/load-scalarization-cost-1.ll Tue Oct 30 07:34:15 2018
@@ -0,0 +1,28 @@
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN: -force-vector-width=4 -debug-only=loop-vectorize \
+; RUN: -enable-interleaved-mem-accesses=false -disable-output < %s 2>&1 \
+; RUN: | FileCheck %s
+; REQUIRES: asserts
+;
+; Check that a scalarized load does not get a zero cost in a vectorized
+; loop. It can only be folded into the add operand in the scalar loop.
+
+define i32 @fun(i64* %data, i64 %n, i64 %s, i32* %Src) {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %acc = phi i32 [ 0, %entry ], [ %acc_next, %for.body ]
+ %gep = getelementptr inbounds i32, i32* %Src, i64 %iv
+ %ld = load i32, i32* %gep
+ %acc_next = add i32 %acc, %ld
+ %iv.next = add nuw nsw i64 %iv, 2
+ %cmp110.us = icmp slt i64 %iv.next, %n
+ br i1 %cmp110.us, label %for.body, label %for.end
+
+for.end:
+ ret i32 %acc_next
+
+; CHECK: Found an estimated cost of 4 for VF 4 For instruction: %ld = load i32, i32* %gep
+}
More information about the llvm-commits
mailing list