[llvm] r205253 - [X86] Adjust cost of FP_TO_UINT v4f64->v4i32 as well

Adam Nemet anemet at apple.com
Mon Mar 31 14:54:48 PDT 2014


Author: anemet
Date: Mon Mar 31 16:54:48 2014
New Revision: 205253

URL: http://llvm.org/viewvc/llvm-project?rev=205253&view=rev
Log:
[X86] Adjust cost of FP_TO_UINT v4f64->v4i32 as well

Pretty obvious follow-on to r205159 to also handle conversion from double
in addition to float.

Fixes <rdar://problem/16373208>

Added:
    llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
Modified:
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=205253&r1=205252&r2=205253&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Mon Mar 31 16:54:48 2014
@@ -528,6 +528,7 @@ unsigned X86TTI::getCastInstrCost(unsign
     // problem is that the inserts form a read-modify-write chain so latency
     // should be factored in too.  Inflating the cost per element by 1.
     { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
+    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
   };
 
   if (ST->hasAVX2()) {

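The new table entry follows the same convention as the v8f32 row above it:
the conversion is scalarized, and the cost per element is inflated by 1 to
account for the read-modify-write chain formed by the inserts. A minimal
standalone sketch of that arithmetic, assuming the per-element total of 4
splits into a base cost of 3 plus the 1-point inflation (only the total and
the "+1" come from the comment in the table):

#include <cstdio>

int main() {
  // v4f64 -> v4i32 FP_TO_UINT has no direct packed instruction on AVX/AVX2,
  // so the cost model prices it as a per-lane scalar conversion plus an
  // insert to rebuild the vector.  The 3-vs-1 split is an illustrative
  // assumption; the table only fixes the per-element total at 4.
  const unsigned NumElts = 4;          // v4f64 -> v4i32
  const unsigned PerEltBase = 3;       // assumed convert + insert base cost
  const unsigned ChainInflation = 1;   // "+1" per element for the RMW chain
  unsigned Cost = NumElts * (PerEltBase + ChainInflation);
  std::printf("estimated FP_TO_UINT v4f64->v4i32 cost: %u\n", Cost); // 16 == 4*4
  return 0;
}

With this entry in place the vectorizer sees the 256-bit conversion as
expensive, which the new test below checks by verifying that no
vpinsrd-based vector rebuild appears in the generated code.
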
Added: llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll?rev=205253&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll Mon Mar 31 16:54:48 2014
@@ -0,0 +1,40 @@
+; RUN: opt < %s -mcpu=core-avx2 -loop-vectorize -S | llc -mcpu=core-avx2 | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx"
+
+@n = global i32 10000, align 4
+@double_array = common global [10000 x double] zeroinitializer, align 16
+@unsigned_array = common global [10000 x i32] zeroinitializer, align 16
+
+; If we need to scalarize the fptoui and then use inserts to build up the
+; vector again, then there is certainly no value in going 256-bit wide.
+; CHECK-NOT: vpinsrd
+
+define void @convert() {
+entry:
+  %0 = load i32* @n, align 4
+  %cmp4 = icmp eq i32 %0, 0
+  br i1 %cmp4, label %for.end, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds [10000 x double]* @double_array, i64 0, i64 %indvars.iv
+  %1 = load double* %arrayidx, align 8
+  %conv = fptoui double %1 to i32
+  %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+  store i32 %conv, i32* %arrayidx2, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %2 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp ult i32 %2, %0
+  br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:                                 ; preds = %for.body
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  ret void
+}

More information about the llvm-commits mailing list