[llvm] r209568 - Implement sext(C1 + C2*X) --> sext(C1) + sext(C2*X) and
Michael Zolotukhin
mzolotukhin at apple.com
Sat May 24 08:08:53 PDT 2014
Hi Benjamin,
Thank you for your feedback; I’ll try to address your remarks after the weekend.
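In the meantime, a note for anyone reading along: the guard in the patch
(both constants strictly positive, C1 < C2, and C2 a power of two) is what
makes the split safe. C2*x has its low log2(C2) bits clear, so adding C1
cannot carry into the higher bits, the sign bit of C2*x is unchanged, and
the sign extension distributes over the add. A small standalone check of
that identity at i8 width (illustration only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Sign-extend the low 8 bits of v to 64 bits without relying on
    // implementation-defined narrowing conversions.
    static int64_t sext8(uint64_t v) {
      uint64_t b = v & 0xFF;
      return (b & 0x80) ? static_cast<int64_t>(b) - 256
                        : static_cast<int64_t>(b);
    }

    int main() {
      const uint64_t C1 = 1, C2 = 4;        // 0 < C1 < C2, C2 a power of two
      for (uint64_t x = 0; x < 256; ++x) {  // every i8 bit pattern of x
        uint64_t Mul = (C2 * x) & 0xFF;     // C2*x, wrapped to i8
        uint64_t Add = (C1 + Mul) & 0xFF;   // C1 + C2*x, wrapped to i8
        // sext(C1 + C2*x) == sext(C1) + sext(C2*x)
        assert(sext8(Add) == sext8(C1) + sext8(Mul));
      }
      return 0;
    }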
Best regards,
Michael
On May 24, 2014, at 5:59 PM, Benjamin Kramer <benny.kra at gmail.com> wrote:
>
> On 24.05.2014, at 10:09, Michael Zolotukhin <mzolotukhin at apple.com> wrote:
>
>> Author: mzolotukhin
>> Date: Sat May 24 03:09:57 2014
>> New Revision: 209568
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=209568&view=rev
>> Log:
>> Implement sext(C1 + C2*X) --> sext(C1) + sext(C2*X) and
>> sext{C1,+,C2} --> sext(C1) + sext{0,+,C2} transformation in Scalar
>> Evolution.
>>
>> That helps SLP-vectorizer to recognize consecutive loads/stores.
>>
>> <rdar://problem/14860614>
>>
>> Added:
>> llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
>> Modified:
>> llvm/trunk/lib/Analysis/ScalarEvolution.cpp
>>
>> Modified: llvm/trunk/lib/Analysis/ScalarEvolution.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolution.cpp?rev=209568&r1=209567&r2=209568&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Analysis/ScalarEvolution.cpp (original)
>> +++ llvm/trunk/lib/Analysis/ScalarEvolution.cpp Sat May 24 03:09:57 2014
>> @@ -1201,6 +1201,24 @@ const SCEV *ScalarEvolution::getSignExte
>> return getTruncateOrSignExtend(X, Ty);
>> }
>>
>> + // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
>> + if (auto SA = dyn_cast<SCEVAddExpr>(Op)) {
>> + if (SA->getNumOperands() == 2) {
>> + auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
>> + auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
>> + if (SMul && SC1) {
>> + if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
>> + APInt C1 = SC1->getValue()->getValue();
>> + APInt C2 = SC2->getValue()->getValue();
>
> const APInt & can avoid a potentially expensive copy of the APInt.
>
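> Spelled out (untested, but keeping the names from the patch), that would
> just be:
>
>     // Bind by const reference instead of copying the APInt.
>     const APInt &C1 = SC1->getValue()->getValue();
>     const APInt &C2 = SC2->getValue()->getValue();
>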
>> + APInt CDiff = C2 - C1;
>> + if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
>> + CDiff.isStrictlyPositive() && C2.isPowerOf2())
>
> Why not C1.ult(C2) instead of comparing the subtracted result?
>
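> I.e. (untested sketch), the CDiff temporary could be dropped and the
> condition become:
>
>     if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
>         C1.ult(C2) && C2.isPowerOf2())
>
> With both constants already known to be strictly positive, the unsigned
> compare is equivalent to checking that C2 - C1 is strictly positive, and
> it avoids the extra subtraction.
>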
>> + return getAddExpr(getSignExtendExpr(SC1, Ty),
>> + getSignExtendExpr(SMul, Ty));
>> + }
>> + }
>> + }
>> + }
>> // If the input value is a chrec scev, and we can prove that the value
>> // did not overflow the old, smaller, value, we can sign extend all of the
>> // operands (often constants). This allows analysis of something like
>> @@ -1292,6 +1310,23 @@ const SCEV *ScalarEvolution::getSignExte
>> L, AR->getNoWrapFlags());
>> }
>> }
>> + // If Start and Step are constants, check if we can apply this
>> + // transformation:
>> + // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
>> + auto SC1 = dyn_cast<SCEVConstant>(Start);
>> + auto SC2 = dyn_cast<SCEVConstant>(Step);
>> + if (SC1 && SC2) {
>> + APInt C1 = SC1->getValue()->getValue();
>> + APInt C2 = SC2->getValue()->getValue();
>> + APInt CDiff = C2 - C1;
>
> same here.
>
>> + if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
>> + CDiff.isStrictlyPositive() && C2.isPowerOf2()) {
>> + Start = getSignExtendExpr(Start, Ty);
>> + const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step,
>> + L, AR->getNoWrapFlags());
>> + return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
>> + }
>> + }
>> }
>>
>> // The cast wasn't folded; create an explicit cast node.
>>
>> Added: llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll?rev=209568&view=auto
>> ==============================================================================
>> --- llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll (added)
>> +++ llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll Sat May 24 03:09:57 2014
>> @@ -0,0 +1,175 @@
>> +; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
>> +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
>> +target triple = "x86_64-apple-macosx10.9.0"
>> +
>> +@A = common global [2000 x double] zeroinitializer, align 16
>> +@B = common global [2000 x double] zeroinitializer, align 16
>> +@C = common global [2000 x float] zeroinitializer, align 16
>> +@D = common global [2000 x float] zeroinitializer, align 16
>> +
>> +; Currently SCEV isn't smart enough to figure out that accesses
>> +; A[3*i], A[3*i+1] and A[3*i+2] are consecutive, but in future
>> +; that would hopefully be fixed. For now, check that this isn't
>> +; vectorized.
>> +; CHECK-LABEL: foo_3double
>> +; CHECK-NOT: x double>
>> +; Function Attrs: nounwind ssp uwtable
>> +define void @foo_3double(i32 %u) #0 {
>> +entry:
>> + %u.addr = alloca i32, align 4
>> + store i32 %u, i32* %u.addr, align 4
>> + %mul = mul nsw i32 %u, 3
>> + %idxprom = sext i32 %mul to i64
>> + %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
>> + %0 = load double* %arrayidx, align 8
>> + %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
>> + %1 = load double* %arrayidx4, align 8
>> + %add5 = fadd double %0, %1
>> + store double %add5, double* %arrayidx, align 8
>> + %add11 = add nsw i32 %mul, 1
>> + %idxprom12 = sext i32 %add11 to i64
>> + %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
>> + %2 = load double* %arrayidx13, align 8
>> + %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
>> + %3 = load double* %arrayidx17, align 8
>> + %add18 = fadd double %2, %3
>> + store double %add18, double* %arrayidx13, align 8
>> + %add24 = add nsw i32 %mul, 2
>> + %idxprom25 = sext i32 %add24 to i64
>> + %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
>> + %4 = load double* %arrayidx26, align 8
>> + %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
>> + %5 = load double* %arrayidx30, align 8
>> + %add31 = fadd double %4, %5
>> + store double %add31, double* %arrayidx26, align 8
>> + ret void
>> +}
>> +
>> +; SCEV should be able to tell that accesses A[C1 + C2*i], A[C1 + C2*i], ...
>> +; A[C1 + C2*i] are consecutive, if C2 is a power of 2, and C2 > C1 > 0.
>> +; Thus, the following code should be vectorized.
>> +; CHECK-LABEL: foo_2double
>> +; CHECK: x double>
>> +; Function Attrs: nounwind ssp uwtable
>> +define void @foo_2double(i32 %u) #0 {
>> +entry:
>> + %u.addr = alloca i32, align 4
>> + store i32 %u, i32* %u.addr, align 4
>> + %mul = mul nsw i32 %u, 2
>> + %idxprom = sext i32 %mul to i64
>> + %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
>> + %0 = load double* %arrayidx, align 8
>> + %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
>> + %1 = load double* %arrayidx4, align 8
>> + %add5 = fadd double %0, %1
>> + store double %add5, double* %arrayidx, align 8
>> + %add11 = add nsw i32 %mul, 1
>> + %idxprom12 = sext i32 %add11 to i64
>> + %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
>> + %2 = load double* %arrayidx13, align 8
>> + %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
>> + %3 = load double* %arrayidx17, align 8
>> + %add18 = fadd double %2, %3
>> + store double %add18, double* %arrayidx13, align 8
>> + ret void
>> +}
>> +
>> +; Similar to the previous test, but with different datatype.
>> +; CHECK-LABEL: foo_4float
>> +; CHECK: x float>
>> +; Function Attrs: nounwind ssp uwtable
>> +define void @foo_4float(i32 %u) #0 {
>> +entry:
>> + %u.addr = alloca i32, align 4
>> + store i32 %u, i32* %u.addr, align 4
>> + %mul = mul nsw i32 %u, 4
>> + %idxprom = sext i32 %mul to i64
>> + %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
>> + %0 = load float* %arrayidx, align 4
>> + %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
>> + %1 = load float* %arrayidx4, align 4
>> + %add5 = fadd float %0, %1
>> + store float %add5, float* %arrayidx, align 4
>> + %add11 = add nsw i32 %mul, 1
>> + %idxprom12 = sext i32 %add11 to i64
>> + %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
>> + %2 = load float* %arrayidx13, align 4
>> + %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
>> + %3 = load float* %arrayidx17, align 4
>> + %add18 = fadd float %2, %3
>> + store float %add18, float* %arrayidx13, align 4
>> + %add24 = add nsw i32 %mul, 2
>> + %idxprom25 = sext i32 %add24 to i64
>> + %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
>> + %4 = load float* %arrayidx26, align 4
>> + %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
>> + %5 = load float* %arrayidx30, align 4
>> + %add31 = fadd float %4, %5
>> + store float %add31, float* %arrayidx26, align 4
>> + %add37 = add nsw i32 %mul, 3
>> + %idxprom38 = sext i32 %add37 to i64
>> + %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
>> + %6 = load float* %arrayidx39, align 4
>> + %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
>> + %7 = load float* %arrayidx43, align 4
>> + %add44 = fadd float %6, %7
>> + store float %add44, float* %arrayidx39, align 4
>> + ret void
>> +}
>> +
>> +; Similar to the previous tests, but now we are dealing with AddRec SCEV.
>> +; CHECK-LABEL: foo_loop
>> +; CHECK: x double>
>> +; Function Attrs: nounwind ssp uwtable
>> +define i32 @foo_loop(double* %A, i32 %n) #0 {
>> +entry:
>> + %A.addr = alloca double*, align 8
>> + %n.addr = alloca i32, align 4
>> + %sum = alloca double, align 8
>> + %i = alloca i32, align 4
>> + store double* %A, double** %A.addr, align 8
>> + store i32 %n, i32* %n.addr, align 4
>> + store double 0.000000e+00, double* %sum, align 8
>> + store i32 0, i32* %i, align 4
>> + %cmp1 = icmp slt i32 0, %n
>> + br i1 %cmp1, label %for.body.lr.ph, label %for.end
>> +
>> +for.body.lr.ph: ; preds = %entry
>> + br label %for.body
>> +
>> +for.body: ; preds = %for.body.lr.ph, %for.body
>> + %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
>> + %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
>> + %mul = mul nsw i32 %0, 2
>> + %idxprom = sext i32 %mul to i64
>> + %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
>> + %2 = load double* %arrayidx, align 8
>> + %mul1 = fmul double 7.000000e+00, %2
>> + %add = add nsw i32 %mul, 1
>> + %idxprom3 = sext i32 %add to i64
>> + %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
>> + %3 = load double* %arrayidx4, align 8
>> + %mul5 = fmul double 7.000000e+00, %3
>> + %add6 = fadd double %mul1, %mul5
>> + %add7 = fadd double %1, %add6
>> + store double %add7, double* %sum, align 8
>> + %inc = add nsw i32 %0, 1
>> + store i32 %inc, i32* %i, align 4
>> + %cmp = icmp slt i32 %inc, %n
>> + br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
>> +
>> +for.cond.for.end_crit_edge: ; preds = %for.body
>> + %split = phi double [ %add7, %for.body ]
>> + br label %for.end
>> +
>> +for.end: ; preds = %for.cond.for.end_crit_edge, %entry
>> + %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
>> + %conv = fptosi double %.lcssa to i32
>> + ret i32 %conv
>> +}
>> +
>> +attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
>> +
>> +!llvm.ident = !{!0}
>> +
>> +!0 = metadata !{metadata !"clang version 3.5.0 "}
>>
>>
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits at cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits