<div dir="ltr">I've sent further feedback to the review thread, but please revert this patch. It violates very basic principles of floating point arithmetic and is triggering miscompiles.</div><div class="gmail_extra"><br><div class="gmail_quote">On Fri, Dec 12, 2014 at 4:53 AM, Suyog Sarda <span dir="ltr"><<a href="mailto:suyog.sarda@samsung.com" target="_blank">suyog.sarda@samsung.com</a>></span> wrote:<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: suyog<br>
Date: Fri Dec 12 06:53:44 2014<br>
New Revision: 224119<br>
<br>
URL: <a href="http://llvm.org/viewvc/llvm-project?rev=224119&view=rev" target="_blank">http://llvm.org/viewvc/llvm-project?rev=224119&view=rev</a><br>
Log:<br>
This patch recognizes (+ (+ v0, v1) (+ v2, v3)), reorders them for bundling into vector of loads,<br>
and vectorizes it.<br>
<br>
Test case :<br>
<br>
float hadd(float* a) {<br>
return (a[0] + a[1]) + (a[2] + a[3]);<br>
}<br>
<br>
<br>
AArch64 assembly before patch :<br>
<br>
ldp s0, s1, [x0]<br>
ldp s2, s3, [x0, #8]<br>
fadd s0, s0, s1<br>
fadd s1, s2, s3<br>
fadd s0, s0, s1<br>
ret<br>
<br>
AArch64 assembly after patch :<br>
<br>
ldp d0, d1, [x0]<br>
fadd v0.2s, v0.2s, v1.2s<br>
faddp s0, v0.2s<br>
ret<br>
<br>
Reviewed Link : <a href="http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20141208/248531.html" target="_blank">http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20141208/248531.html</a><br>
<br>
<br>
Added:<br>
llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll<br>
Modified:<br>
llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp<br>
<br>
Modified: llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp?rev=224119&r1=224118&r2=224119&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp?rev=224119&r1=224118&r2=224119&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Vectorize/SLPVectorizer.cpp Fri Dec 12 06:53:44 2014<br>
@@ -439,6 +439,13 @@ public:<br>
/// \returns true if the memory operations A and B are consecutive.<br>
bool isConsecutiveAccess(Value *A, Value *B);<br>
<br>
+ /// For consecutive loads (+(+ v0, v1)(+ v2, v3)), Left had v0 and v2<br>
+ /// while Right had v1 and v3, which prevented bundling them into<br>
+ /// a vector of loads. Reorder them so that Left now has v0 and v1<br>
+ /// while Right has v2 and v3 enabling their bundling into a vector.<br>
+ void reorderIfConsecutiveLoads(SmallVectorImpl<Value *> &Left,<br>
+ SmallVectorImpl<Value *> &Right);<br>
+<br>
/// \brief Perform LICM and CSE on the newly generated gather sequences.<br>
void optimizeGatherSequence();<br>
<br>
@@ -1234,6 +1241,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Val<br>
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {<br>
ValueList Left, Right;<br>
reorderInputsAccordingToOpcode(VL, Left, Right);<br>
+ reorderIfConsecutiveLoads (Left, Right);<br>
buildTree_rec(Left, Depth + 1);<br>
buildTree_rec(Right, Depth + 1);<br>
return;<br>
@@ -1818,6 +1826,19 @@ bool BoUpSLP::isConsecutiveAccess(Value<br>
return X == PtrSCEVB;<br>
}<br>
<br>
+void BoUpSLP::reorderIfConsecutiveLoads(SmallVectorImpl<Value *> &Left,<br>
+ SmallVectorImpl<Value *> &Right) {<br>
+ for (unsigned i = 0, e = Left.size(); i < e - 1; ++i) {<br>
+ if (!isa<LoadInst>(Left[i]) || !isa<LoadInst>(Right[i]))<br>
+ return;<br>
+ if (!(isConsecutiveAccess(Left[i], Right[i]) &&<br>
+ isConsecutiveAccess(Right[i], Left[i + 1])))<br>
+ continue;<br>
+ else<br>
+ std::swap(Left[i + 1], Right[i]);<br>
+ }<br>
+}<br>
+<br>
void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {<br>
Instruction *VL0 = cast<Instruction>(VL[0]);<br>
BasicBlock::iterator NextInst = VL0;<br>
@@ -2048,9 +2069,10 @@ Value *BoUpSLP::vectorizeTree(TreeEntry<br>
case Instruction::Or:<br>
case Instruction::Xor: {<br>
ValueList LHSVL, RHSVL;<br>
- if (isa<BinaryOperator>(VL0) && VL0->isCommutative())<br>
+ if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {<br>
reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);<br>
- else<br>
+ reorderIfConsecutiveLoads(LHSVL, RHSVL);<br>
+ } else<br>
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {<br>
LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));<br>
RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));<br>
<br>
Added: llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll?rev=224119&view=auto" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll?rev=224119&view=auto</a><br>
==============================================================================<br>
--- llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll (added)<br>
+++ llvm/trunk/test/Transforms/SLPVectorizer/AArch64/horizontaladd.ll Fri Dec 12 06:53:44 2014<br>
@@ -0,0 +1,27 @@<br>
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -mcpu=cortex-a57 | FileCheck %s<br>
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"<br>
+target triple = "aarch64--linux-gnu"<br>
+<br>
+; float hadd (float *a) {<br>
+; return (a[0] + a[1]) + (a[2] + a[3]);<br>
+; }<br>
+<br>
+; CHECK-LABEL: @hadd<br>
+; CHECK: load <2 x float>*<br>
+; CHECK: fadd <2 x float><br>
+; CHECK: extractelement <2 x float><br>
+<br>
+define float @hadd(float* nocapture readonly %a) {<br>
+entry:<br>
+ %0 = load float* %a, align 4<br>
+ %arrayidx1 = getelementptr inbounds float* %a, i64 1<br>
+ %1 = load float* %arrayidx1, align 4<br>
+ %add = fadd float %0, %1<br>
+ %arrayidx2 = getelementptr inbounds float* %a, i64 2<br>
+ %2 = load float* %arrayidx2, align 4<br>
+ %arrayidx3 = getelementptr inbounds float* %a, i64 3<br>
+ %3 = load float* %arrayidx3, align 4<br>
+ %add4 = fadd float %2, %3<br>
+ %add5 = fadd float %add, %add4<br>
+ ret float %add5<br>
+}<br>
<br>
<br>
_______________________________________________<br>
llvm-commits mailing list<br>
<a href="mailto:llvm-commits@cs.uiuc.edu">llvm-commits@cs.uiuc.edu</a><br>
<a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits" target="_blank">http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits</a><br>
</blockquote></div></div>