[PATCH] D27998: [AArch64] Consider all vector types for FeatureSlowMisaligned128Store

Evandro Menezes via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 10 13:38:51 PST 2017


evandro updated this revision to Diff 83859.

Repository:
  rL LLVM

https://reviews.llvm.org/D27998

Files:
  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
  llvm/test/Analysis/CostModel/AArch64/store.ll


Index: llvm/test/Analysis/CostModel/AArch64/store.ll
===================================================================
--- llvm/test/Analysis/CostModel/AArch64/store.ll
+++ llvm/test/Analysis/CostModel/AArch64/store.ll
@@ -1,17 +1,35 @@
-; RUN: opt < %s  -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
-; RUN: opt < %s  -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 ; CHECK-LABEL: getMemoryOpCost
 ; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
 define void @getMemoryOpCost() {
-    ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
+    ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores. On
     ; Cyclone, for example, such stores should be expensive because we don't
     ; split them and misaligned 16b stores have bad performance.
-    ;
+
     ; CHECK: cost of 1 {{.*}} store
     ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
     store <2 x i64> undef, <2 x i64> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <2 x double> undef, <2 x double> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <4 x i32> undef, <4 x i32> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <4 x float> undef, <4 x float> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <8 x i16> undef, <8 x i16> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <8 x half> undef, <8 x half> * undef
+    ; CHECK-NEXT: cost of 1 {{.*}} store
+    ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 {{.*}} store
+    store <16 x i8> undef, <16 x i8> * undef
 
     ; We scalarize the loads/stores because there is no vector register name for
     ; these types (they get extended to v.4h/v.2s).
Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -466,28 +466,27 @@
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
 }
 
-int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                     unsigned Alignment, unsigned AddressSpace) {
-  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
+  auto LT = TLI->getTypeLegalizationCost(DL, Ty);
 
   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
-      Src->isVectorTy() && Alignment != 16 &&
-      Src->getVectorElementType()->isIntegerTy(64)) {
-    // Unaligned stores are extremely inefficient. We don't split
-    // unaligned v2i64 stores because the negative impact that has shown in
-    // practice on inlined memcpy code.
-    // We make v2i64 stores expensive so that we will only vectorize if there
+      LT.second.is128BitVector() && Alignment < 16) {
+    // Unaligned stores are extremely inefficient. We don't split all
+    // unaligned 128-bit stores because of the negative impact that has been
+    // shown in practice on inlined block copy code.
+    // We make such stores expensive so that we will only vectorize if there
     // are 6 other instructions getting vectorized.
-    int AmortizationCost = 6;
+    const int AmortizationCost = 6;
 
     return LT.first * 2 * AmortizationCost;
   }
 
-  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
-      Src->getVectorNumElements() < 8) {
+  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
+      Ty->getVectorNumElements() < 8) {
     // We scalarize the loads/stores because there is no v.4b register and we
     // have to promote the elements to v.4h.
-    unsigned NumVecElts = Src->getVectorNumElements();
+    unsigned NumVecElts = Ty->getVectorNumElements();
     unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
     // We generate 2 instructions per vector element.
     return NumVectorizableInstsToAmortize * NumVecElts * 2;

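For reference, the cost the new test lines expect follows directly from the
changed branch: a store whose legalized type is any 128-bit vector
(LT.second.is128BitVector()), not just v2i64, and whose alignment is below
16 bytes now costs LT.first * 2 * AmortizationCost. Below is a minimal
standalone sketch of that arithmetic, assuming a simplified stand-in for
the TTI hook; the names storeCost, VectorBits, AlignBytes, and
NumLegalParts are hypothetical, not part of LLVM's API.

#include <cassert>

// Sketch of the penalty applied on isMisaligned128StoreSlow() targets.
// NumLegalParts stands in for LT.first, the number of legal registers
// the vector type is split into after legalization.
int storeCost(int VectorBits, int AlignBytes, int NumLegalParts,
              bool SlowMisaligned128Store) {
  if (SlowMisaligned128Store && VectorBits == 128 && AlignBytes < 16) {
    // Make the store expensive enough that vectorization only pays off
    // when about six other instructions are vectorized alongside it.
    const int AmortizationCost = 6;
    return NumLegalParts * 2 * AmortizationCost;
  }
  return NumLegalParts; // simplified baseline; LLVM defers to BaseT here
}

int main() {
  // Matches the test: every 128-bit store (<2 x i64>, <4 x float>, ...)
  // with sub-16-byte alignment costs 1 * 2 * 6 = 12 on slow targets.
  assert(storeCost(128, 8, 1, true) == 12);
  assert(storeCost(128, 8, 1, false) == 1);
  assert(storeCost(128, 16, 1, true) == 1); // aligned stores unaffected
}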
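The second branch is untouched apart from the rename of Src to Ty, but its
arithmetic is easy to misread, so here is the same kind of sketch for the
scalarization cost of small i8 vectors; smallI8VectorStoreCost is a
hypothetical name, not an LLVM function.

#include <cassert>

// Vectors of i8 with fewer than 8 lanes have no v.4b register class, so
// each lane is promoted to v.4h and handled with about two instructions.
int smallI8VectorStoreCost(unsigned NumVecElts) {
  assert(NumVecElts < 8 && "only the sub-8-lane i8 path is modeled");
  // Vectorizing must amortize roughly two instructions per element...
  unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
  // ...and we generate two instructions per element on top of that.
  return NumVectorizableInstsToAmortize * NumVecElts * 2;
}

int main() {
  assert(smallI8VectorStoreCost(2) == 16); // <2 x i8>
  assert(smallI8VectorStoreCost(4) == 64); // <4 x i8>
}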
