[llvm] r193574 - ARM cost model: Unaligned vectorized double stores are expensive

Arnold Schwaighofer aschwaighofer at apple.com
Mon Oct 28 18:33:57 PDT 2013


Author: arnolds
Date: Mon Oct 28 20:33:57 2013
New Revision: 193574

URL: http://llvm.org/viewvc/llvm-project?rev=193574&view=rev
Log:
ARM cost model: Unaligned vectorized double stores are expensive

Updated a test case that assumed <2 x double> would vectorize; it now uses
<4 x float> instead.

radar://15338229
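
For concreteness, this is the arithmetic the change feeds into the SLP
vectorizer for the pair of double stores in the new test below. A minimal
sketch, not part of the commit: it assumes <2 x double> legalizes to NEON's
v2f64 in one step (so LT.first == 1), and the loads behave the same way.

  // cost_sketch.cpp -- hypothetical illustration, not committed code.
  #include <cstdio>

  int main() {
    // Two scalar double stores: one 1-uop vstr each.
    unsigned ScalarCost = 2 * 1;
    // One unaligned <2 x double> store: LT.first (1) * 4 uops for vst1.
    unsigned VectorCost = 1 * 4;
    printf("scalar=%u vector=%u -> %s\n", ScalarCost, VectorCost,
           VectorCost >= ScalarCost ? "stay scalar" : "vectorize");
    return 0;
  }

Since the vector store costs 4 against 2 for the scalar pair, the
vectorized tree loses and the stores stay scalar.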

Added:
    llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp
    llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll

Modified: llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp?rev=193574&r1=193573&r2=193574&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp Mon Oct 28 20:33:57 2013
@@ -129,6 +129,9 @@ public:
   unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                   OperandValueKind Op1Info = OK_AnyValue,
                                   OperandValueKind Op2Info = OK_AnyValue) const;
+
+  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                           unsigned AddressSpace) const;
   /// @}
 };
 
@@ -540,3 +543,15 @@ unsigned ARMTTI::getArithmeticInstrCost(
   return Cost;
 }
 
+unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                                 unsigned AddressSpace) const {
+  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
+
+  if (Src->isVectorTy() && Alignment != 16 &&
+      Src->getVectorElementType()->isDoubleTy()) {
+    // Unaligned loads/stores are extremely inefficient.
+  // We need 4 uops for vst1/vld1 vs. 1 uop for vldr/vstr.
+    return LT.first * 4;
+  }
+  return LT.first;
+}
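
As a sanity check on the new hook, the standalone sketch below mirrors its
logic outside of LLVM. This is hypothetical code, not part of the commit:
memOpCost and its LTFirst parameter stand in for the real
getTypeLegalizationCost(Src).first, whose assumed values follow NEON type
legalization (v2f64 is legal; <4 x double> splits into two v2f64).

  // memop_cost_sketch.cpp -- hypothetical mirror of ARMTTI::getMemoryOpCost.
  #include <cstdio>

  static unsigned memOpCost(bool VectorOfDouble, unsigned Alignment,
                            unsigned LTFirst) {
    if (VectorOfDouble && Alignment != 16)
      return LTFirst * 4;  // unaligned vst1/vld1: ~4 uops per legalized op
    return LTFirst;        // otherwise: vstr/vldr, 1 uop per legalized op
  }

  int main() {
    printf("%u\n", memOpCost(true, 8, 1));   // <2 x double>, align 8  -> 4
    printf("%u\n", memOpCost(true, 16, 1));  // <2 x double>, align 16 -> 1
    printf("%u\n", memOpCost(true, 8, 2));   // <4 x double>, align 8  -> 8
    return 0;
  }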

Modified: llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll?rev=193574&r1=193573&r2=193574&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll Mon Oct 28 20:33:57 2013
@@ -3,27 +3,27 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 target triple = "thumbv7-apple-ios3.0.0"
 
-;CHECK:foo_F64
-;CHECK: <2 x double>
+;CHECK:foo_F32
+;CHECK: <4 x float>
 ;CHECK:ret
-define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+define float @foo_F32(float* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
   %1 = icmp sgt i32 %n, 0
   br i1 %1, label %.lr.ph, label %._crit_edge
 
 .lr.ph:                                           ; preds = %0, %.lr.ph
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
-  %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
-  %2 = getelementptr inbounds double* %A, i64 %indvars.iv
-  %3 = load double* %2, align 8
-  %4 = fmul fast double %prod.01, %3
+  %prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+  %2 = getelementptr inbounds float* %A, i64 %indvars.iv
+  %3 = load float* %2, align 8
+  %4 = fmul fast float %prod.01, %3
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 
 ._crit_edge:                                      ; preds = %.lr.ph, %0
-  %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
-  ret double %prod.0.lcssa
+  %prod.0.lcssa = phi float [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+  ret float %prod.0.lcssa
 }
 
 ;CHECK:foo_I8

Added: llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll?rev=193574&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll (added)
+++ llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll Mon Oct 28 20:33:57 2013
@@ -0,0 +1,20 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; On Swift, unaligned <2 x double> stores need 4 uops, so it is cheaper
+; to keep them scalar.
+
+; CHECK-LABEL: expensive_double_store
+; CHECK-NOT: load <2 x double>
+; CHECK-NOT: store <2 x double>
+define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
+entry:
+  %0 = load double* %src, align 8
+  store double %0, double* %dst, align 8
+  %arrayidx2 = getelementptr inbounds double* %src, i64 1
+  %1 = load double* %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+  store double %1, double* %arrayidx3, align 8
+  ret void
+}
