[llvm] r179033 - X86 cost model: Model cost for uitofp and sitofp on SSE2

Arnold Schwaighofer aschwaighofer at apple.com
Mon Apr 8 11:05:48 PDT 2013


Author: arnolds
Date: Mon Apr  8 13:05:48 2013
New Revision: 179033

URL: http://llvm.org/viewvc/llvm-project?rev=179033&view=rev
Log:
X86 cost model: Model cost for uitofp and sitofp on SSE2

The costs are overfitted so that I can still use the legalization factor.

For example, the following kernel has about half the throughput when vectorized compared
to unvectorized when compiled with SSE2. Before this patch we would vectorize it.

unsigned short A[1024];
double B[1024];
void f() {
  int i;
  for (i = 0; i < 1024; ++i) {
    B[i] = (double) A[i];
  }
}

radar://13599001

Added:
    llvm/trunk/test/Analysis/CostModel/X86/sitofp.ll
    llvm/trunk/test/Analysis/CostModel/X86/uitofp.ll
Modified:
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=179033&r1=179032&r2=179033&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Mon Apr  8 13:05:48 2013
@@ -334,12 +334,43 @@ unsigned X86TTI::getCastInstrCost(unsign
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
 
+  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
+  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);
+
+  static const TypeConversionCostTblEntry<MVT> SSE2ConvTbl[] = {
+    // These are somewhat magic numbers justified by looking at the output of
+    // Intel's IACA, running some kernels and making sure when we take
+    // legalization into account the throughput will be overestimated.
+    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+    // There are faster sequences for float conversions.
+    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
+    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
+    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+  };
+
+  if (ST->hasSSE2() && !ST->hasAVX()) {
+    int Idx = ConvertCostTableLookup<MVT>(SSE2ConvTbl,
+                                          array_lengthof(SSE2ConvTbl),
+                                          ISD, LTDest.second, LTSrc.second);
+    if (Idx != -1)
+      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
+  }
+
   EVT SrcTy = TLI->getValueType(Src);
   EVT DstTy = TLI->getValueType(Dst);
 
-  if (!SrcTy.isSimple() || !DstTy.isSimple())
-    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
-
   static const TypeConversionCostTblEntry<MVT> AVXConversionTbl[] = {
     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },

Added: llvm/trunk/test/Analysis/CostModel/X86/sitofp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/sitofp.ll?rev=179033&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/sitofp.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/X86/sitofp.ll Mon Apr  8 13:05:48 2013
@@ -0,0 +1,281 @@
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < %s | FileCheck --check-prefix=SSE2 %s
+
+define <2 x double> @sitofpv2i8v2double(<2 x i8> %a) {
+  ; SSE2: sitofpv2i8v2double
+  ; SSE2: cost of 20 {{.*}} sitofp
+  %1 = sitofp <2 x i8> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i8v4double(<4 x i8> %a) {
+  ; SSE2: sitofpv4i8v4double
+  ; SSE2: cost of 40 {{.*}} sitofp
+  %1 = sitofp <4 x i8> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i8v8double(<8 x i8> %a) {
+  ; SSE2: sitofpv8i8v8double
+  ; SSE2: cost of 80 {{.*}} sitofp
+%1 = sitofp <8 x i8> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i8v16double(<16 x i8> %a) {
+  ; SSE2: sitofpv16i8v16double
+  ; SSE2: cost of 160 {{.*}} sitofp
+  %1 = sitofp <16 x i8> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i8v32double(<32 x i8> %a) {
+  ; SSE2: sitofpv32i8v32double
+  ; SSE2: cost of 320 {{.*}} sitofp
+  %1 = sitofp <32 x i8> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i16v2double(<2 x i16> %a) {
+  ; SSE2: sitofpv2i16v2double
+  ; SSE2: cost of 20 {{.*}} sitofp
+  %1 = sitofp <2 x i16> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i16v4double(<4 x i16> %a) {
+  ; SSE2: sitofpv4i16v4double
+  ; SSE2: cost of 40 {{.*}} sitofp
+  %1 = sitofp <4 x i16> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i16v8double(<8 x i16> %a) {
+  ; SSE2: sitofpv8i16v8double
+  ; SSE2: cost of 80 {{.*}} sitofp
+  %1 = sitofp <8 x i16> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i16v16double(<16 x i16> %a) {
+  ; SSE2: sitofpv16i16v16double
+  ; SSE2: cost of 160 {{.*}} sitofp
+  %1 = sitofp <16 x i16> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i16v32double(<32 x i16> %a) {
+  ; SSE2: sitofpv32i16v32double
+  ; SSE2: cost of 320 {{.*}} sitofp
+  %1 = sitofp <32 x i16> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i32v2double(<2 x i32> %a) {
+  ; SSE2: sitofpv2i32v2double
+  ; SSE2: cost of 20 {{.*}} sitofp
+  %1 = sitofp <2 x i32> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i32v4double(<4 x i32> %a) {
+  ; SSE2: sitofpv4i32v4double
+  ; SSE2: cost of 40 {{.*}} sitofp
+  %1 = sitofp <4 x i32> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i32v8double(<8 x i32> %a) {
+  ; SSE2: sitofpv8i32v8double
+  ; SSE2: cost of 80 {{.*}} sitofp
+  %1 = sitofp <8 x i32> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i32v16double(<16 x i32> %a) {
+  ; SSE2: sitofpv16i32v16double
+  ; SSE2: cost of 160 {{.*}} sitofp
+  %1 = sitofp <16 x i32> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i32v32double(<32 x i32> %a) {
+  ; SSE2: sitofpv32i32v32double
+  ; SSE2: cost of 320 {{.*}} sitofp
+  %1 = sitofp <32 x i32> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i64v2double(<2 x i64> %a) {
+  ; SSE2: sitofpv2i64v2double
+  ; SSE2: cost of 20 {{.*}} sitofp
+  %1 = sitofp <2 x i64> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i64v4double(<4 x i64> %a) {
+  ; SSE2: sitofpv4i64v4double
+  ; SSE2: cost of 40 {{.*}} sitofp
+  %1 = sitofp <4 x i64> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i64v8double(<8 x i64> %a) {
+  %1 = sitofp <8 x i64> %a to <8 x double>
+  ; SSE2: sitofpv8i64v8double
+  ; SSE2: cost of 80 {{.*}} sitofp
+  ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i64v16double(<16 x i64> %a) {
+  ; SSE2: sitofpv16i64v16double
+  ; SSE2: cost of 160 {{.*}} sitofp
+  %1 = sitofp <16 x i64> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i64v32double(<32 x i64> %a) {
+  ; SSE2: sitofpv32i64v32double
+  ; SSE2: cost of 320 {{.*}} sitofp
+  %1 = sitofp <32 x i64> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x float> @sitofpv2i8v2float(<2 x i8> %a) {
+  ; SSE2: sitofpv2i8v2float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <2 x i8> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i8v4float(<4 x i8> %a) {
+  ; SSE2: sitofpv4i8v4float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <4 x i8> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i8v8float(<8 x i8> %a) {
+  ; SSE2: sitofpv8i8v8float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <8 x i8> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i8v16float(<16 x i8> %a) {
+  ; SSE2: sitofpv16i8v16float
+  ; SSE2: cost of 8 {{.*}} sitofp
+  %1 = sitofp <16 x i8> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i8v32float(<32 x i8> %a) {
+  ; SSE2: sitofpv32i8v32float
+  ; SSE2: cost of 16 {{.*}} sitofp
+  %1 = sitofp <32 x i8> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i16v2float(<2 x i16> %a) {
+  ; SSE2: sitofpv2i16v2float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <2 x i16> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i16v4float(<4 x i16> %a) {
+  ; SSE2: sitofpv4i16v4float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <4 x i16> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i16v8float(<8 x i16> %a) {
+  ; SSE2: sitofpv8i16v8float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <8 x i16> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i16v16float(<16 x i16> %a) {
+  ; SSE2: sitofpv16i16v16float
+  ; SSE2: cost of 30 {{.*}} sitofp
+  %1 = sitofp <16 x i16> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i16v32float(<32 x i16> %a) {
+  ; SSE2: sitofpv32i16v32float
+  ; SSE2: cost of 60 {{.*}} sitofp
+  %1 = sitofp <32 x i16> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i32v2float(<2 x i32> %a) {
+  ; SSE2: sitofpv2i32v2float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i32v4float(<4 x i32> %a) {
+  ; SSE2: sitofpv4i32v4float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <4 x i32> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i32v8float(<8 x i32> %a) {
+  ; SSE2: sitofpv8i32v8float
+  ; SSE2: cost of 30 {{.*}} sitofp
+  %1 = sitofp <8 x i32> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i32v16float(<16 x i32> %a) {
+  ; SSE2: sitofpv16i32v16float
+  ; SSE2: cost of 60 {{.*}} sitofp
+  %1 = sitofp <16 x i32> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i32v32float(<32 x i32> %a) {
+  ; SSE2: sitofpv32i32v32float
+  ; SSE2: cost of 120 {{.*}} sitofp
+  %1 = sitofp <32 x i32> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i64v2float(<2 x i64> %a) {
+  ; SSE2: sitofpv2i64v2float
+  ; SSE2: cost of 15 {{.*}} sitofp
+  %1 = sitofp <2 x i64> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i64v4float(<4 x i64> %a) {
+  ; SSE2: sitofpv4i64v4float
+  ; SSE2: cost of 30 {{.*}} sitofp
+  %1 = sitofp <4 x i64> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i64v8float(<8 x i64> %a) {
+  ; SSE2: sitofpv8i64v8float
+  ; SSE2: cost of 60 {{.*}} sitofp
+  %1 = sitofp <8 x i64> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i64v16float(<16 x i64> %a) {
+  ; SSE2: sitofpv16i64v16float
+  ; SSE2: cost of 120 {{.*}} sitofp
+  %1 = sitofp <16 x i64> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i64v32float(<32 x i64> %a) {
+  ; SSE2: sitofpv32i64v32float
+  ; SSE2: cost of 240 {{.*}} sitofp
+  %1 = sitofp <32 x i64> %a to <32 x float>
+  ret <32 x float> %1
+}

Added: llvm/trunk/test/Analysis/CostModel/X86/uitofp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/uitofp.ll?rev=179033&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/uitofp.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/X86/uitofp.ll Mon Apr  8 13:05:48 2013
@@ -0,0 +1,362 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core2 < %s | FileCheck --check-prefix=SSE2-CODEGEN %s
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < %s | FileCheck --check-prefix=SSE2 %s
+
+define <2 x double> @uitofpv2i8v2double(<2 x i8> %a) {
+  ; SSE2: uitofpv2i8v2double
+  ; SSE2: cost of 20 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv2i8v2double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <2 x i8> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i8v4double(<4 x i8> %a) {
+  ; SSE2: uitofpv4i8v4double
+  ; SSE2: cost of 40 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv4i8v4double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <4 x i8> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i8v8double(<8 x i8> %a) {
+  ; SSE2: uitofpv8i8v8double
+  ; SSE2: cost of 80 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv8i8v8double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+%1 = uitofp <8 x i8> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i8v16double(<16 x i8> %a) {
+  ; SSE2: uitofpv16i8v16double
+  ; SSE2: cost of 160 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv16i8v16double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <16 x i8> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i8v32double(<32 x i8> %a) {
+  ; SSE2: uitofpv32i8v32double
+  ; SSE2: cost of 320 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv32i8v32double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <32 x i8> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i16v2double(<2 x i16> %a) {
+  ; SSE2: uitofpv2i16v2double
+  ; SSE2: cost of 20 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv2i16v2double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <2 x i16> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i16v4double(<4 x i16> %a) {
+  ; SSE2: uitofpv4i16v4double
+  ; SSE2: cost of 40 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv4i16v4double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <4 x i16> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i16v8double(<8 x i16> %a) {
+  ; SSE2: uitofpv8i16v8double
+  ; SSE2: cost of 80 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv8i16v8double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <8 x i16> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i16v16double(<16 x i16> %a) {
+  ; SSE2: uitofpv16i16v16double
+  ; SSE2: cost of 160 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv16i16v16double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <16 x i16> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i16v32double(<32 x i16> %a) {
+  ; SSE2: uitofpv32i16v32double
+  ; SSE2: cost of 320 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv32i16v32double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <32 x i16> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i32v2double(<2 x i32> %a) {
+  ; SSE2: uitofpv2i32v2double
+  ; SSE2: cost of 20 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv2i32v2double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <2 x i32> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i32v4double(<4 x i32> %a) {
+  ; SSE2: uitofpv4i32v4double
+  ; SSE2: cost of 40 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv4i32v4double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <4 x i32> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i32v8double(<8 x i32> %a) {
+  ; SSE2: uitofpv8i32v8double
+  ; SSE2: cost of 80 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv8i32v8double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <8 x i32> %a to <8 x double>
+  ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i32v16double(<16 x i32> %a) {
+  ; SSE2: uitofpv16i32v16double
+  ; SSE2: cost of 160 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv16i32v16double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <16 x i32> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i32v32double(<32 x i32> %a) {
+  ; SSE2: uitofpv32i32v32double
+  ; SSE2: cost of 320 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv32i32v32double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <32 x i32> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i64v2double(<2 x i64> %a) {
+  ; SSE2: uitofpv2i64v2double
+  ; SSE2: cost of 20 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv2i64v2double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <2 x i64> %a to <2 x double>
+  ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i64v4double(<4 x i64> %a) {
+  ; SSE2: uitofpv4i64v4double
+  ; SSE2: cost of 40 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv4i64v4double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <4 x i64> %a to <4 x double>
+  ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i64v8double(<8 x i64> %a) {
+  %1 = uitofp <8 x i64> %a to <8 x double>
+  ; SSE2: uitofpv8i64v8double
+  ; SSE2: cost of 80 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv8i64v8double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i64v16double(<16 x i64> %a) {
+  ; SSE2: uitofpv16i64v16double
+  ; SSE2: cost of 160 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv16i64v16double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <16 x i64> %a to <16 x double>
+  ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i64v32double(<32 x i64> %a) {
+  ; SSE2: uitofpv32i64v32double
+  ; SSE2: cost of 320 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv32i64v32double
+  ; SSE2-CODEGEN: movapd  LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <32 x i64> %a to <32 x double>
+  ret <32 x double> %1
+}
+
+define <2 x float> @uitofpv2i8v2float(<2 x i8> %a) {
+  ; SSE2: uitofpv2i8v2float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <2 x i8> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i8v4float(<4 x i8> %a) {
+  ; SSE2: uitofpv4i8v4float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <4 x i8> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i8v8float(<8 x i8> %a) {
+  ; SSE2: uitofpv8i8v8float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <8 x i8> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i8v16float(<16 x i8> %a) {
+  ; SSE2: uitofpv16i8v16float
+  ; SSE2: cost of 8 {{.*}} uitofp
+  %1 = uitofp <16 x i8> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i8v32float(<32 x i8> %a) {
+  ; SSE2: uitofpv32i8v32float
+  ; SSE2: cost of 16 {{.*}} uitofp
+  %1 = uitofp <32 x i8> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i16v2float(<2 x i16> %a) {
+  ; SSE2: uitofpv2i16v2float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <2 x i16> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i16v4float(<4 x i16> %a) {
+  ; SSE2: uitofpv4i16v4float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <4 x i16> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i16v8float(<8 x i16> %a) {
+  ; SSE2: uitofpv8i16v8float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <8 x i16> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i16v16float(<16 x i16> %a) {
+  ; SSE2: uitofpv16i16v16float
+  ; SSE2: cost of 30 {{.*}} uitofp
+  %1 = uitofp <16 x i16> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i16v32float(<32 x i16> %a) {
+  ; SSE2: uitofpv32i16v32float
+  ; SSE2: cost of 60 {{.*}} uitofp
+  %1 = uitofp <32 x i16> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i32v2float(<2 x i32> %a) {
+  ; SSE2: uitofpv2i32v2float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i32v4float(<4 x i32> %a) {
+  ; SSE2: uitofpv4i32v4float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <4 x i32> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i32v8float(<8 x i32> %a) {
+  ; SSE2: uitofpv8i32v8float
+  ; SSE2: cost of 30 {{.*}} uitofp
+  %1 = uitofp <8 x i32> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i32v16float(<16 x i32> %a) {
+  ; SSE2: uitofpv16i32v16float
+  ; SSE2: cost of 60 {{.*}} uitofp
+  %1 = uitofp <16 x i32> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i32v32float(<32 x i32> %a) {
+  ; SSE2: uitofpv32i32v32float
+  ; SSE2: cost of 120 {{.*}} uitofp
+  %1 = uitofp <32 x i32> %a to <32 x float>
+  ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i64v2float(<2 x i64> %a) {
+  ; SSE2: uitofpv2i64v2float
+  ; SSE2: cost of 15 {{.*}} uitofp
+  %1 = uitofp <2 x i64> %a to <2 x float>
+  ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i64v4float(<4 x i64> %a) {
+  ; SSE2: uitofpv4i64v4float
+  ; SSE2: cost of 30 {{.*}} uitofp
+  %1 = uitofp <4 x i64> %a to <4 x float>
+  ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i64v8float(<8 x i64> %a) {
+  ; SSE2: uitofpv8i64v8float
+  ; SSE2: cost of 60 {{.*}} uitofp
+  %1 = uitofp <8 x i64> %a to <8 x float>
+  ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i64v16float(<16 x i64> %a) {
+  ; SSE2: uitofpv16i64v16float
+  ; SSE2: cost of 120 {{.*}} uitofp
+  %1 = uitofp <16 x i64> %a to <16 x float>
+  ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i64v32float(<32 x i64> %a) {
+  ; SSE2: uitofpv32i64v32float
+  ; SSE2: cost of 240 {{.*}} uitofp
+  %1 = uitofp <32 x i64> %a to <32 x float>
+  ret <32 x float> %1
+}





More information about the llvm-commits mailing list