[llvm] r306584 - [AArch64][Falkor] Try to avoid exhausting HW prefetcher resources when unrolling.

Geoff Berry via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 28 11:53:09 PDT 2017


Author: gberry
Date: Wed Jun 28 11:53:09 2017
New Revision: 306584

URL: http://llvm.org/viewvc/llvm-project?rev=306584&view=rev
Log:
[AArch64][Falkor] Try to avoid exhausting HW prefetcher resources when unrolling.

Reviewers: t.p.northover, mcrosier

Subscribers: aemerson, rengolin, javed.absar, kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D34533

Added:
    llvm/trunk/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll
Modified:
    llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp?rev=306584&r1=306583&r2=306584&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp Wed Jun 28 11:53:09 2017
@@ -20,6 +20,9 @@ using namespace llvm;
 
 #define DEBUG_TYPE "aarch64tti"
 
+static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
+                                               cl::init(true), cl::Hidden);
+
 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                          const Function *Callee) const {
   const TargetMachine &TM = getTLI()->getTargetMachine();
@@ -645,6 +648,58 @@ unsigned AArch64TTIImpl::getMaxInterleav
   return ST->getMaxInterleaveFactor();
 }
 
+// For Falkor, we want to avoid having too many strided loads in a loop since
+// that can exhaust the HW prefetcher resources.  We adjust the unroller
+// MaxCount preference below to attempt to ensure unrolling doesn't create too
+// many strided loads.
+static void
+getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                              TargetTransformInfo::UnrollingPreferences &UP) {
+  const int MaxStridedLoads = 7;
+  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
+    int StridedLoads = 0;
+    // FIXME? We could make this more precise by looking at the CFG and
+    // e.g. not counting loads in each side of an if-then-else diamond.
+    for (const auto BB : L->blocks()) {
+      for (auto &I : *BB) {
+        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
+        if (!LMemI)
+          continue;
+
+        Value *PtrValue = LMemI->getPointerOperand();
+        if (L->isLoopInvariant(PtrValue))
+          continue;
+
+        const SCEV *LSCEV = SE.getSCEV(PtrValue);
+        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
+        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
+          continue;
+
+        // FIXME? We could take pairing of unrolled load copies into account
+        // by looking at the AddRec, but we would probably have to limit this
+        // to loops with no stores or other memory optimization barriers.
+        ++StridedLoads;
+        // We've seen enough strided loads that seeing more won't make a
+        // difference.
+        if (StridedLoads > MaxStridedLoads / 2)
+          return StridedLoads;
+      }
+    }
+    return StridedLoads;
+  };
+
+  int StridedLoads = countStridedLoads(L, SE);
+  DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
+               << " strided loads\n");
+  // Pick the largest power of 2 unroll count that won't result in too many
+  // strided loads.
+  if (StridedLoads) {
+    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
+    DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to " << UP.MaxCount
+                 << '\n');
+  }
+}
+
 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                              TTI::UnrollingPreferences &UP) {
   // Enable partial unrolling and runtime unrolling.
@@ -658,6 +713,10 @@ void AArch64TTIImpl::getUnrollingPrefere
 
   // Disable partial & runtime unrolling on -Os.
   UP.PartialOptSizeThreshold = 0;
+
+  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
+      EnableFalkorHWPFUnrollFix)
+    getFalkorUnrollingPreferences(L, SE, UP);
 }
 
 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,

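For reference, the capping arithmetic added above can be sketched as a
standalone program (a hypothetical reimplementation for illustration only;
the in-tree code uses llvm::Log2_32 from llvm/Support/MathExtras.h, which
returns floor(log2(V)) for V > 0, and the GCC/Clang builtin below is assumed
to stand in for it):

  #include <cstdint>
  #include <cstdio>

  // floor(log2(V)) for V > 0, mirroring llvm::Log2_32.
  static unsigned log2_32(uint32_t V) { return 31 - __builtin_clz(V); }

  int main() {
    const int MaxStridedLoads = 7; // same constant as in the patch
    for (int StridedLoads = 1; StridedLoads <= MaxStridedLoads; ++StridedLoads)
      std::printf("%d strided load(s) -> MaxCount = %d\n", StridedLoads,
                  1 << log2_32(MaxStridedLoads / StridedLoads));
    return 0;
  }

This yields MaxCount = 4 for one strided load, 2 for two or three, and 1 for
four or more (countStridedLoads stops counting once it passes
MaxStridedLoads / 2, so anything above three is treated the same), matching
the expectations in the test added below.
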
Added: llvm/trunk/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll?rev=306584&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll (added)
+++ llvm/trunk/test/Transforms/LoopUnroll/AArch64/falkor-prefetch.ll Wed Jun 28 11:53:09 2017
@@ -0,0 +1,169 @@
+; RUN: opt < %s -S -loop-unroll -mtriple aarch64 -mcpu=falkor | FileCheck %s
+; RUN: opt < %s -S -loop-unroll -mtriple aarch64 -mcpu=falkor -enable-falkor-hwpf-unroll-fix=0 | FileCheck %s --check-prefix=NOHWPF
+
+; Check that the loop unroller doesn't exhaust HW prefetcher resources.
+
+; Partially unroll this loop 2 times on Falkor instead of 4.
+; NOHWPF-LABEL: @unroll1(
+; NOHWPF-LABEL: loop:
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: icmp
+; NOHWPF-NEXT: br
+; NOHWPF-LABEL: exit:
+;
+; CHECK-LABEL: @unroll1(
+; CHECK-LABEL: loop:
+; CHECK-NEXT: phi
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br
+; CHECK-LABEL: exit:
+define void @unroll1(i32* %p, i32* %p2) {
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
+
+  %gep = getelementptr inbounds i32, i32* %p, i32 %iv
+  %load = load volatile i32, i32* %gep
+
+  %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
+  %load2 = load volatile i32, i32* %gep2
+
+  %inc = add i32 %iv, 1
+  %exitcnd = icmp uge i32 %inc, 1024
+  br i1 %exitcnd, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Partially unroll this loop 4 times on Falkor instead of 8.
+; NOHWPF-LABEL: @unroll2(
+; NOHWPF-LABEL: loop2:
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: phi
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: getelementptr
+; NOHWPF-NEXT: load
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: add
+; NOHWPF-NEXT: icmp
+; NOHWPF-NEXT: br
+; NOHWPF-LABEL: exit2:
+;
+; CHECK-LABEL: @unroll2(
+; CHECK-LABEL: loop2:
+; CHECK-NEXT: phi
+; CHECK-NEXT: phi
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: getelementptr
+; CHECK-NEXT: load
+; CHECK-NEXT: add
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br
+; CHECK-LABEL: exit2:
+
+define void @unroll2(i32* %p) {
+entry:
+  br label %loop1
+
+loop1:
+  %iv1 = phi i32 [ 0, %entry ], [ %inc1, %loop1.latch ]
+  %outer.sum = phi i32 [ 0, %entry ], [ %sum, %loop1.latch ]
+  br label %loop2.header
+
+loop2.header:
+  br label %loop2
+
+loop2:
+  %iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
+  %sum = phi i32 [ %outer.sum, %loop2.header ], [ %sum.inc, %loop2 ]
+  %gep = getelementptr inbounds i32, i32* %p, i32 %iv2
+  %load = load i32, i32* %gep
+  %sum.inc = add i32 %sum, %load
+  %inc2 = add i32 %iv2, 1
+  %exitcnd2 = icmp uge i32 %inc2, 1024
+  br i1 %exitcnd2, label %exit2, label %loop2
+
+exit2:
+  br label %loop1.latch
+
+loop1.latch:
+  %inc1 = add i32 %iv1, 1
+  %exitcnd1 = icmp uge i32 %inc1, 1024
+  br i1 %exitcnd1, label %exit, label %loop1
+
+exit:
+  ret void
+}
+
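
Tying the test expectations back to the heuristic: @unroll1 contains two
strided loads (%gep and %gep2 both advance with the induction variable), so
MaxCount = 1 << Log2_32(7 / 2) = 2, while the inner loop of @unroll2
contains one (%gep), so MaxCount = 1 << Log2_32(7 / 1) = 4; hence the
unroll factors of 2 and 4 checked above in place of the default 4 and 8.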
