[llvm] r177586 - Correct cost model for vector shift on AVX2

Michael Liao michael.liao at intel.com
Wed Mar 20 15:01:10 PDT 2013


Author: hliao
Date: Wed Mar 20 17:01:10 2013
New Revision: 177586

URL: http://llvm.org/viewvc/llvm-project?rev=177586&view=rev
Log:
Correct cost model for vector shift on AVX2

- After moving the logic that recognizes a vector shift with a scalar
  shift amount from DAG combining into DAG lowering, we declare all
  vector shifts as custom-lowered, even though vector shifts on AVX2 are
  legal. As a result, the cost model needs special tuning to identify
  these legal cases.


Modified:
    llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
    llvm/trunk/test/Analysis/CostModel/X86/arith.ll

Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=177586&r1=177585&r2=177586&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Wed Mar 20 17:01:10 2013
@@ -169,6 +169,29 @@ unsigned X86TTI::getArithmeticInstrCost(
   int ISD = TLI->InstructionOpcodeToISD(Opcode);
   assert(ISD && "Invalid opcode");
 
+  static const CostTblEntry<MVT> AVX2CostTable[] = {
+    // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
+    // customize them to detect the cases where shift amount is a scalar one.
+    { ISD::SHL,     MVT::v4i32,    1 },
+    { ISD::SRL,     MVT::v4i32,    1 },
+    { ISD::SRA,     MVT::v4i32,    1 },
+    { ISD::SHL,     MVT::v8i32,    1 },
+    { ISD::SRL,     MVT::v8i32,    1 },
+    { ISD::SRA,     MVT::v8i32,    1 },
+    { ISD::SHL,     MVT::v2i64,    1 },
+    { ISD::SRL,     MVT::v2i64,    1 },
+    { ISD::SHL,     MVT::v4i64,    1 },
+    { ISD::SRL,     MVT::v4i64,    1 },
+  };
+
+  // Look for AVX2 lowering tricks.
+  if (ST->hasAVX2()) {
+    int Idx = CostTableLookup<MVT>(AVX2CostTable, array_lengthof(AVX2CostTable),
+                                   ISD, LT.second);
+    if (Idx != -1)
+      return LT.first * AVX2CostTable[Idx].Cost;
+  }
+
   static const CostTblEntry<MVT> AVX1CostTable[] = {
     // We don't have to scalarize unsupported ops. We can issue two half-sized
     // operations and we only need to extract the upper YMM half.

Modified: llvm/trunk/test/Analysis/CostModel/X86/arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/arith.ll?rev=177586&r1=177585&r2=177586&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/arith.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/arith.ll Wed Mar 20 17:01:10 2013
@@ -72,3 +72,57 @@ define i32 @fmul(i32 %arg) {
   %B = fmul <8 x float> undef, undef
   ret i32 undef
 }
+
+; AVX: shift
+; AVX2: shift
+define void @shift() {
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A0 = shl <4 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A1 = shl <2 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B0 = lshr <4 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B1 = lshr <2 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} ashr
+  ; AVX2: cost of 1 {{.*}} ashr
+  %C0 = ashr <4 x i32> undef, undef
+  ; AVX: cost of 6 {{.*}} ashr
+  ; AVX2: cost of 6 {{.*}} ashr
+  %C1 = ashr <2 x i64> undef, undef
+
+  ret void
+}
+
+; AVX: avx2shift
+; AVX2: avx2shift
+define void @avx2shift() {
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A0 = shl <8 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} shl
+  ; AVX2: cost of 1 {{.*}} shl
+  %A1 = shl <4 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B0 = lshr <8 x i32> undef, undef
+  ; AVX: cost of 2 {{.*}} lshr
+  ; AVX2: cost of 1 {{.*}} lshr
+  %B1 = lshr <4 x i64> undef, undef
+
+  ; AVX: cost of 2 {{.*}} ashr
+  ; AVX2: cost of 1 {{.*}} ashr
+  %C0 = ashr <8 x i32> undef, undef
+  ; AVX: cost of 12 {{.*}} ashr
+  ; AVX2: cost of 12 {{.*}} ashr
+  %C1 = ashr <4 x i64> undef, undef
+
+  ret void
+}





More information about the llvm-commits mailing list