[llvm-branch-commits] [llvm] d96c31f - [LV] Allow large RT checks, if they are a fraction of the scalar cost.

Florian Hahn via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Sep 13 10:48:58 PDT 2020


Author: Florian Hahn
Date: 2020-09-13T18:48:41+01:00
New Revision: d96c31f3b74cf84a294132012aa2b9289aa950b4

URL: https://github.com/llvm/llvm-project/commit/d96c31f3b74cf84a294132012aa2b9289aa950b4
DIFF: https://github.com/llvm/llvm-project/commit/d96c31f3b74cf84a294132012aa2b9289aa950b4.diff

LOG: [LV] Allow large RT checks, if they are a fraction of the scalar cost.

Differential Revision: https://reviews.llvm.org/D75981
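
The change lets the vectorizer emit more runtime (RT) memory checks than the
usual threshold allows, provided their one-off cost is a small fraction of the
expected scalar execution cost of the loop. A minimal stand-alone sketch of the
decision, using the names RTCost, ExpectedTC, and ScalarCost from the patch
below (the helper itself is hypothetical; in-tree, the comparison is written
inline in LoopVectorizePass::processLoop):

    // Hypothetical helper restating the new heuristic from the diff.
    bool canIgnoreRTThreshold(unsigned RTCost, unsigned ExpectedTC,
                              float ScalarCost) {
      // Waive the runtime-check count threshold when the checks cost less
      // than 0.5% of the estimated total scalar cost, i.e. the expected
      // trip count times the per-iteration scalar loop cost.
      return RTCost < ExpectedTC * ScalarCost * 0.005f;
    }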

Added: 
    llvm/test/Transforms/LoopVectorize/AArch64/runtime-check-size-based-threshold.ll

Modified: 
    llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
    llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index 46d107128ce1..e5b519bc0928 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -171,7 +171,8 @@ class LoopVectorizationRequirements {
 
   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
 
-  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints);
+  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints,
+                   bool CanIgnoreRTThreshold);
 
 private:
   unsigned NumRuntimePointerChecks = 0;

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 157620c30b98..da88272e9d03 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -242,8 +242,9 @@ void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
   }
 }
 
-bool LoopVectorizationRequirements::doesNotMeet(
-    Function *F, Loop *L, const LoopVectorizeHints &Hints) {
+bool LoopVectorizationRequirements::doesNotMeet(Function *F, Loop *L,
+                                                const LoopVectorizeHints &Hints,
+                                                bool IgnoreRTThreshold) {
   const char *PassName = Hints.vectorizeAnalysisPassName();
   bool Failed = false;
   if (UnsafeAlgebraInst && !Hints.allowReordering()) {
@@ -262,8 +263,12 @@ bool LoopVectorizationRequirements::doesNotMeet(
       NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
   bool ThresholdReached =
       NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
-  if ((ThresholdReached && !Hints.allowReordering()) ||
-      PragmaThresholdReached) {
+  bool DoubleThresholdReached =
+      NumRuntimePointerChecks >
+      2 * VectorizerParams::RuntimeMemoryCheckThreshold;
+  if ((!IgnoreRTThreshold && ((ThresholdReached && !Hints.allowReordering()) ||
+                              PragmaThresholdReached)) ||
+      (DoubleThresholdReached && !Hints.allowReordering())) {
     ORE.emit([&]() {
       return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                 L->getStartLoc(),

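In effect, the new flag waives the single-threshold check but keeps a hard cap
at twice RuntimeMemoryCheckThreshold. A condensed restatement of the condition
above, assuming no vectorize pragma is active and Hints.allowReordering() is
false (the common case; this simplification is mine, not the commit's):

    unsigned T = VectorizerParams::RuntimeMemoryCheckThreshold;
    bool Reject = IgnoreRTThreshold ? NumRuntimePointerChecks > 2 * T
                                    : NumRuntimePointerChecks > T;
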
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f9a0e6f35f50..b4ba9e5f8684 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -411,7 +411,9 @@ static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
 
   return None;
 }
+
 struct GeneratedRTChecks;
+
 namespace llvm {
 /// InnerLoopVectorizer vectorizes loops which contain only one basic
 /// block to a specified vectorization factor (VF).
@@ -1432,9 +1434,6 @@ class LoopVectorizationCostModel {
     Scalars.clear();
   }
 
-private:
-  unsigned NumPredStores = 0;
-
   /// \return An upper bound for the vectorization factor, a power-of-2 larger
   /// than zero. One is returned if vectorization should best be avoided due
   /// to cost.
@@ -1449,16 +1448,21 @@ class LoopVectorizationCostModel {
   /// actually taken place).
   using VectorizationCostTy = std::pair<unsigned, bool>;
 
+  /// Returns the execution time cost of an instruction for a given vector
+  /// width. Vector width of one means scalar.
+  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
+
+  float ScalarCost;
+
+private:
+  unsigned NumPredStores = 0;
+
   /// Returns the expected execution cost. The unit of the cost does
   /// not matter because we use the 'cost' units to compare different
   /// vector widths. The cost that is returned is *not* normalized by
   /// the factor width.
   VectorizationCostTy expectedCost(ElementCount VF);
 
-  /// Returns the execution time cost of an instruction for a given vector
-  /// width. Vector width of one means scalar.
-  VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
-
   /// The cost-computation logic from getInstructionCost which provides
   /// the vector type as an output parameter.
   unsigned getInstructionCost(Instruction *I, ElementCount VF, Type *&VectorTy);
@@ -1705,6 +1709,13 @@ struct GeneratedRTChecks {
     LI->removeBlock(TmpBlock);
   }
 
+  unsigned getCost(LoopVectorizationCostModel &CM) {
+    unsigned RTCheckCost = 0;
+    for (Instruction &I : *TmpBlock)
+      RTCheckCost += CM.getInstructionCost(&I, 1).first;
+    return RTCheckCost;
+  }
+
   ~GeneratedRTChecks() {
     if (!TmpBlock) {
       Cleaner.markResultUsed();
@@ -5511,7 +5522,7 @@ LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount) {
 VectorizationFactor
 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
   float Cost = expectedCost(ElementCount::getFixed(1)).first;
-  const float ScalarCost = Cost;
+  ScalarCost = Cost;
   unsigned Width = 1;
   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
 
@@ -8506,10 +8517,21 @@ bool LoopVectorizePass::processLoop(Loop *L) {
   GeneratedRTChecks Checks(L->getLoopPreheader(), *PSE.getSE(), DT);
   Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate(), LI);
 
+  bool CanIgnoreRTThreshold = false;
+  unsigned RTCost = Checks.getCost(CM);
+  if (ExpectedTC) {
+    // If the expected cost of the runtime checks is a small fraction of the
+    // expected cost of the scalar loop, we can be more aggressive with using
+    // runtime checks.
+    CanIgnoreRTThreshold = RTCost < (*ExpectedTC * CM.ScalarCost * 0.005);
+    LLVM_DEBUG(dbgs() << "LV: Cost of runtime check: " << RTCost << " "
+                      << *ExpectedTC * CM.ScalarCost << "\n");
+  }
+
   // Identify the diagnostic messages that should be produced.
   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
   bool VectorizeLoop = true, InterleaveLoop = true;
-  if (Requirements.doesNotMeet(F, L, Hints)) {
+  if (Requirements.doesNotMeet(F, L, Hints, CanIgnoreRTThreshold)) {
     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                          "requirements.\n");
     Hints.emitRemarkWithHints();

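GeneratedRTChecks::getCost (added above) prices the check block by summing the
scalar cost-model cost of each instruction in it. A worked example with
illustrative numbers, not values pinned by the commit or the test: suppose the
generated checks cost RTCost = 40 and the scalar loop costs ScalarCost = 20
per iteration. Then:

    budget(TC=500) = 500 * 20 * 0.005 = 50   -> 40 < 50: threshold waived
    budget(TC=50)  =  50 * 20 * 0.005 =  5   -> 40 > 5:  normal limit applies

The test added below exercises exactly this trip-count split.
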
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/runtime-check-size-based-threshold.ll b/llvm/test/Transforms/LoopVectorize/AArch64/runtime-check-size-based-threshold.ll
new file mode 100644
index 000000000000..0dfc87d4ff32
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/runtime-check-size-based-threshold.ll
@@ -0,0 +1,156 @@
+; RUN: opt -loop-vectorize -mtriple=arm64-apple-iphoneos -S %s | FileCheck %s
+
+%struct.snork = type <{ i32, i32, i16, [6 x i8], %struct.snork.0, i32, [4 x i8] }>
+%struct.snork.0 = type { [4 x %struct.zot] }
+%struct.zot = type { %struct.baz }
+%struct.baz = type { %struct.pluto }
+%struct.pluto = type { %struct.quux }
+%struct.quux = type { %struct.widget }
+%struct.widget = type { %struct.baz.1* }
+%struct.baz.1 = type { i32 (...)**, %struct.zot.2 }
+%struct.zot.2 = type { %struct.pluto.3 }
+%struct.pluto.3 = type { %struct.bar }
+%struct.bar = type { %struct.barney, %struct.blam.4 }
+%struct.barney = type { %struct.blam }
+%struct.blam = type { i8 }
+%struct.blam.4 = type { i16*, i16*, i16* }
+%struct.foo = type { i32, i16*, i32, i32 }
+%struct.blam.5 = type { i32, i16*, i32, i32 }
+
+; The trip count of the loop in this function is too small to warrant large runtime checks.
+; CHECK-LABEL: define {{.*}} @test_tc_too_small
+; CHECK-NOT: vector.memcheck
+; CHECK-NOT: vector.body
+define void @test_tc_too_small(%struct.snork* nocapture readonly %arg, %struct.foo* nocapture readonly byval(%struct.foo) align 8 %arg1, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg2, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg3) {
+entry:
+  %tmp11 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 0
+  %tmp12 = load i32, i32* %tmp11, align 8
+  %tmp13 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 1
+  %tmp14 = load i16*, i16** %tmp13, align 8
+  %tmp17 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg2, i64 0, i32 1
+  %tmp18 = load i16*, i16** %tmp17, align 8
+  %tmp19 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 0
+  %tmp20 = load i32, i32* %tmp19, align 8
+  %tmp21 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 1
+  %tmp22 = load i16*, i16** %tmp21, align 8
+  %tmp23 = getelementptr inbounds %struct.snork, %struct.snork* %arg, i64 0, i32 1
+  %tmp24 = load i32, i32* %tmp23, align 4
+  %tmp26 = icmp sgt i32 %tmp24, 0
+  %tmp39 = sext i32 %tmp12 to i64
+  %tmp40 = shl nsw i64 %tmp39, 1
+  %tmp41 = sext i32 %tmp20 to i64
+  %tmp42 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp41
+  br label %bb54
+
+bb54:                                             ; preds = %bb54, %bb37
+  %tmp55 = phi i64 [ 0, %entry ], [ %tmp88, %bb54 ]
+  %tmp56 = getelementptr inbounds i16, i16* %tmp18, i64 %tmp55
+  %tmp57 = load i16, i16* %tmp56, align 2
+  %tmp58 = sext i16 %tmp57 to i32
+  %tmp59 = getelementptr inbounds i16, i16* %tmp14, i64 %tmp55
+  %tmp60 = load i16, i16* %tmp59, align 2
+  %tmp61 = sext i16 %tmp60 to i32
+  %tmp62 = mul nsw i32 %tmp61, 11
+  %tmp63 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp39
+  %tmp64 = load i16, i16* %tmp63, align 2
+  %tmp65 = sext i16 %tmp64 to i32
+  %tmp66 = mul nsw i32 %tmp65, -4
+  %tmp67 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp40
+  %tmp68 = load i16, i16* %tmp67, align 2
+  %tmp69 = sext i16 %tmp68 to i32
+  %tmp70 = add nsw i32 %tmp62, 4
+  %tmp71 = add nsw i32 %tmp70, %tmp66
+  %tmp72 = add nsw i32 %tmp71, %tmp69
+  %tmp73 = lshr i32 %tmp72, 3
+  %tmp74 = add nsw i32 %tmp73, %tmp58
+  %tmp75 = lshr i32 %tmp74, 1
+  %tmp76 = mul nsw i32 %tmp61, 5
+  %tmp77 = shl nsw i32 %tmp65, 2
+  %tmp78 = add nsw i32 %tmp76, 4
+  %tmp79 = add nsw i32 %tmp78, %tmp77
+  %tmp80 = sub nsw i32 %tmp79, %tmp69
+  %tmp81 = lshr i32 %tmp80, 3
+  %tmp82 = sub nsw i32 %tmp81, %tmp58
+  %tmp83 = lshr i32 %tmp82, 1
+  %tmp84 = trunc i32 %tmp75 to i16
+  %tmp85 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp55
+  store i16 %tmp84, i16* %tmp85, align 2
+  %tmp86 = trunc i32 %tmp83 to i16
+  %tmp87 = getelementptr inbounds i16, i16* %tmp42, i64 %tmp55
+  store i16 %tmp86, i16* %tmp87, align 2
+  %tmp88 = add nuw nsw i64 %tmp55, 1
+  %tmp89 = icmp ult i64 %tmp55, 50
+  br i1 %tmp89, label %bb54, label %bb90
+
+bb90:                                             ; preds = %bb54, %bb27, %bb
+  ret void
+}
+
+; The trip count of the loop in this function is high enough to warrant large runtime checks.
+; CHECK-LABEL: define {{.*}} @test_tc_big_enough
+; CHECK: vector.memcheck
+; CHECK: vector.body
+define void @test_tc_big_enough(%struct.snork* nocapture readonly %arg, %struct.foo* nocapture readonly byval(%struct.foo) align 8 %arg1, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg2, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg3) {
+entry:
+  %tmp11 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 0
+  %tmp12 = load i32, i32* %tmp11, align 8
+  %tmp13 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 1
+  %tmp14 = load i16*, i16** %tmp13, align 8
+  %tmp17 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg2, i64 0, i32 1
+  %tmp18 = load i16*, i16** %tmp17, align 8
+  %tmp19 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 0
+  %tmp20 = load i32, i32* %tmp19, align 8
+  %tmp21 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 1
+  %tmp22 = load i16*, i16** %tmp21, align 8
+  %tmp23 = getelementptr inbounds %struct.snork, %struct.snork* %arg, i64 0, i32 1
+  %tmp24 = load i32, i32* %tmp23, align 4
+  %tmp26 = icmp sgt i32 %tmp24, 0
+  %tmp39 = sext i32 %tmp12 to i64
+  %tmp40 = shl nsw i64 %tmp39, 1
+  %tmp41 = sext i32 %tmp20 to i64
+  %tmp42 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp41
+  br label %bb54
+
+bb54:                                             ; preds = %bb54, %bb37
+  %tmp55 = phi i64 [ 0, %entry ], [ %tmp88, %bb54 ]
+  %tmp56 = getelementptr inbounds i16, i16* %tmp18, i64 %tmp55
+  %tmp57 = load i16, i16* %tmp56, align 2
+  %tmp58 = sext i16 %tmp57 to i32
+  %tmp59 = getelementptr inbounds i16, i16* %tmp14, i64 %tmp55
+  %tmp60 = load i16, i16* %tmp59, align 2
+  %tmp61 = sext i16 %tmp60 to i32
+  %tmp62 = mul nsw i32 %tmp61, 11
+  %tmp63 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp39
+  %tmp64 = load i16, i16* %tmp63, align 2
+  %tmp65 = sext i16 %tmp64 to i32
+  %tmp66 = mul nsw i32 %tmp65, -4
+  %tmp67 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp40
+  %tmp68 = load i16, i16* %tmp67, align 2
+  %tmp69 = sext i16 %tmp68 to i32
+  %tmp70 = add nsw i32 %tmp62, 4
+  %tmp71 = add nsw i32 %tmp70, %tmp66
+  %tmp72 = add nsw i32 %tmp71, %tmp69
+  %tmp73 = lshr i32 %tmp72, 3
+  %tmp74 = add nsw i32 %tmp73, %tmp58
+  %tmp75 = lshr i32 %tmp74, 1
+  %tmp76 = mul nsw i32 %tmp61, 5
+  %tmp77 = shl nsw i32 %tmp65, 2
+  %tmp78 = add nsw i32 %tmp76, 4
+  %tmp79 = add nsw i32 %tmp78, %tmp77
+  %tmp80 = sub nsw i32 %tmp79, %tmp69
+  %tmp81 = lshr i32 %tmp80, 3
+  %tmp82 = sub nsw i32 %tmp81, %tmp58
+  %tmp83 = lshr i32 %tmp82, 1
+  %tmp84 = trunc i32 %tmp75 to i16
+  %tmp85 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp55
+  store i16 %tmp84, i16* %tmp85, align 2
+  %tmp86 = trunc i32 %tmp83 to i16
+  %tmp87 = getelementptr inbounds i16, i16* %tmp42, i64 %tmp55
+  store i16 %tmp86, i16* %tmp87, align 2
+  %tmp88 = add nuw nsw i64 %tmp55, 1
+  %tmp89 = icmp ult i64 %tmp55, 500
+  br i1 %tmp89, label %bb54, label %bb90
+
+bb90:                                             ; preds = %bb54, %bb27, %bb
+  ret void
+}
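
To reproduce locally, the RUN line at the top of the new test is the whole
recipe; for example, assuming an LLVM build with opt and FileCheck on PATH:

    opt -loop-vectorize -mtriple=arm64-apple-iphoneos -S \
        runtime-check-size-based-threshold.ll | \
      FileCheck runtime-check-size-based-threshold.ll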
