[llvm] r315476 - [NFC] Convert OptimizationRemarkEmitter old emit() calls to new closure

Vivek Pandya via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 11 10:12:59 PDT 2017


Author: vivekvpandya
Date: Wed Oct 11 10:12:59 2017
New Revision: 315476

URL: http://llvm.org/viewvc/llvm-project?rev=315476&view=rev
Log:
[NFC] Convert OptimizationRemarkEmitter old emit() calls to new closure
parameterized emit() calls

Summary: This is not a functional change; it adopts the new emit() API added in r313691.
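
For readers who have not seen the r313691 API, the conversion applied throughout
this patch wraps remark construction in a closure, so the remark object is only
built when the emitter decides that remarks are enabled at all. A minimal
before/after sketch, borrowing the GVN "LoadPRE" remark from one of the hunks
below (ORE, DEBUG_TYPE and LI are the names used in that file):

  // Before: the remark is constructed unconditionally, even when no remark
  // output has been requested.
  ORE->emit(OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
            << "load eliminated by PRE");

  // After: construction is deferred into a lambda that emit() only invokes
  // when some remarks are enabled, so the message is never built otherwise.
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });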

Reviewed By: anemet

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D38285

Modified:
    llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
    llvm/trunk/lib/Analysis/InlineCost.cpp
    llvm/trunk/lib/Analysis/ValueTracking.cpp
    llvm/trunk/lib/CodeGen/MachineOutliner.cpp
    llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
    llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
    llvm/trunk/lib/CodeGen/StackProtector.cpp
    llvm/trunk/lib/Transforms/IPO/Inliner.cpp
    llvm/trunk/lib/Transforms/IPO/PartialInlining.cpp
    llvm/trunk/lib/Transforms/IPO/SampleProfile.cpp
    llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
    llvm/trunk/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
    llvm/trunk/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
    llvm/trunk/lib/Transforms/Scalar/GVN.cpp
    llvm/trunk/lib/Transforms/Scalar/LICM.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopDataPrefetch.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopInterchange.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
    llvm/trunk/lib/Transforms/Scalar/TailRecursionElimination.cpp
    llvm/trunk/lib/Transforms/Utils/LoopUnroll.cpp
    llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
    llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp

Modified: llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h Wed Oct 11 10:12:59 2017
@@ -164,6 +164,21 @@ public:
             .getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
   }
 
+  /// \brief Take a lambda that returns a remark which will be emitted.  Second
+  /// argument is only used to restrict this to functions.
+  template <typename T>
+  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
+    // Avoid building the remark unless we know there are at least *some*
+    // remarks enabled. We can't currently check whether remarks are requested
+    // for the calling pass since that requires actually building the remark.
+
+    if (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
+        MF.getFunction()->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+      auto R = RemarkBuilder();
+      emit(R);
+    }
+  }
+
 private:
   MachineFunction &MF;
 

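The overload above mirrors the IR-level OptimizationRemarkEmitter::emit() added
in r313691: the unused second parameter only exists to restrict the template to
callable arguments, and the remark builder runs only when a diagnostics output
file is configured or some remark kind is enabled. A sketch of a caller,
modeled on the PrologEpilogInserter hunk further down (MORE stands for a
MachineOptimizationRemarkEmitter instance; MF, StackSize and DEBUG_TYPE are
whatever the calling machine pass has in scope):

  MORE.emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction()->getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize)
           << " stack bytes in function";
  });
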
Modified: llvm/trunk/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/InlineCost.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/InlineCost.cpp (original)
+++ llvm/trunk/lib/Analysis/InlineCost.cpp Wed Oct 11 10:12:59 2017
@@ -1440,10 +1440,12 @@ bool CallAnalyzer::analyzeBlock(BasicBlo
     if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
         HasIndirectBr || HasFrameEscape) {
       if (ORE)
-        ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
-                                           CandidateCS.getInstruction())
-                  << NV("Callee", &F)
-                  << " has uninlinable pattern and cost is not fully computed");
+        ORE->emit([&]() {
+          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
+                                          CandidateCS.getInstruction())
+                 << NV("Callee", &F)
+                 << " has uninlinable pattern and cost is not fully computed";
+        });
       return false;
     }
 
@@ -1453,12 +1455,13 @@ bool CallAnalyzer::analyzeBlock(BasicBlo
     if (IsCallerRecursive &&
         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
       if (ORE)
-        ORE->emit(
-            OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
-                                     CandidateCS.getInstruction())
-            << NV("Callee", &F)
-            << " is recursive and allocates too much stack space. Cost is "
-               "not fully computed");
+        ORE->emit([&]() {
+          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
+                                          CandidateCS.getInstruction())
+                 << NV("Callee", &F)
+                 << " is recursive and allocates too much stack space. Cost is "
+                    "not fully computed";
+        });
       return false;
     }
 

Modified: llvm/trunk/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ValueTracking.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp Wed Oct 11 10:12:59 2017
@@ -777,13 +777,15 @@ static void computeKnownBitsFromAssume(c
   if (Known.Zero.intersects(Known.One)) {
     Known.resetAll();
 
-    if (Q.ORE) {
-      auto *CxtI = const_cast<Instruction *>(Q.CxtI);
-      OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
-      Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
-                         "have undefined behavior, or compiler may have "
-                         "internal error.");
-    }
+    if (Q.ORE)
+      Q.ORE->emit([&]() {
+        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
+        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
+                                          CxtI)
+               << "Detected conflicting code assumptions. Program may "
+                  "have undefined behavior, or compiler may have "
+                  "internal error.";
+      });
   }
 }
 

Modified: llvm/trunk/lib/CodeGen/MachineOutliner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineOutliner.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineOutliner.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineOutliner.cpp Wed Oct 11 10:12:59 2017
@@ -939,29 +939,32 @@ MachineOutliner::findCandidates(SuffixTr
       // Emit a remark explaining why we didn't outline this candidate.
       std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator> C =
           RepeatedSequenceLocs[0];
-      MachineOptimizationRemarkEmitter MORE(*(C.first->getMF()), nullptr);
-      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "NotOutliningCheaper",
-                                        C.first->getDebugLoc(),
-                                        C.first->getParent());
-      R << "Did not outline " << NV("Length", StringLen) << " instructions"
-        << " from " << NV("NumOccurrences", RepeatedSequenceLocs.size())
-        << " locations."
-        << " Instructions from outlining all occurrences ("
-        << NV("OutliningCost", OF.getOutliningCost()) << ")"
-        << " >= Unoutlined instruction count ("
-        << NV("NotOutliningCost", StringLen * OF.OccurrenceCount) << ")"
-        << " (Also found at: ";
+      MachineOptimizationRemarkEmitter MORE(
+          *(C.first->getParent()->getParent()), nullptr);
+      MORE.emit([&]() {
+        MachineOptimizationRemarkMissed R(DEBUG_TYPE, "NotOutliningCheaper",
+                                          C.first->getDebugLoc(),
+                                          C.first->getParent());
+        R << "Did not outline " << NV("Length", StringLen) << " instructions"
+          << " from " << NV("NumOccurrences", RepeatedSequenceLocs.size())
+          << " locations."
+          << " Instructions from outlining all occurrences ("
+          << NV("OutliningCost", OF.getOutliningCost()) << ")"
+          << " >= Unoutlined instruction count ("
+          << NV("NotOutliningCost", StringLen * OF.OccurrenceCount) << ")"
+          << " (Also found at: ";
 
-      // Tell the user the other places the candidate was found.
-      for (unsigned i = 1, e = RepeatedSequenceLocs.size(); i < e; i++) {
-        R << NV((Twine("OtherStartLoc") + Twine(i)).str(),
-                RepeatedSequenceLocs[i].first->getDebugLoc());
-        if (i != e - 1)
-          R << ", ";
-      }
+        // Tell the user the other places the candidate was found.
+        for (unsigned i = 1, e = RepeatedSequenceLocs.size(); i < e; i++) {
+          R << NV((Twine("OtherStartLoc") + Twine(i)).str(),
+                  RepeatedSequenceLocs[i].first->getDebugLoc());
+          if (i != e - 1)
+            R << ", ";
+        }
 
-      R << ")";
-      MORE.emit(R);
+        R << ")";
+        return R;
+      });
 
       // Move to the next candidate.
       continue;

Modified: llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp Wed Oct 11 10:12:59 2017
@@ -960,11 +960,12 @@ void PEI::calculateFrameObjectOffsets(Ma
   MFI.setStackSize(StackSize);
   NumBytesStackSpace += StackSize;
 
-  MachineOptimizationRemarkAnalysis R(
-      DEBUG_TYPE, "StackSize", Fn.getFunction()->getSubprogram(), &Fn.front());
-  R << ore::NV("NumStackBytes", StackSize)
-    << " stack bytes in function";
-  ORE->emit(R);
+  ORE->emit([&]() {
+    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
+                                             Fn.getFunction()->getSubprogram(),
+                                             &Fn.front())
+           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
+  });
 }
 
 /// insertPrologEpilogCode - Scan the function for modified callee saved

Modified: llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp Wed Oct 11 10:12:59 2017
@@ -2717,17 +2717,20 @@ void RAGreedy::reportNumberOfSplillsRelo
   if (Reloads || FoldedReloads || Spills || FoldedSpills) {
     using namespace ore;
 
-    MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
-                                      L->getStartLoc(), L->getHeader());
-    if (Spills)
-      R << NV("NumSpills", Spills) << " spills ";
-    if (FoldedSpills)
-      R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
-    if (Reloads)
-      R << NV("NumReloads", Reloads) << " reloads ";
-    if (FoldedReloads)
-      R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
-    ORE->emit(R << "generated in loop");
+    ORE->emit([&]() {
+      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
+                                        L->getStartLoc(), L->getHeader());
+      if (Spills)
+        R << NV("NumSpills", Spills) << " spills ";
+      if (FoldedSpills)
+        R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
+      if (Reloads)
+        R << NV("NumReloads", Reloads) << " reloads ";
+      if (FoldedReloads)
+        R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
+      R << "generated in loop";
+      return R;
+    });
   }
 }
 

Modified: llvm/trunk/lib/CodeGen/StackProtector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/StackProtector.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/StackProtector.cpp (original)
+++ llvm/trunk/lib/CodeGen/StackProtector.cpp Wed Oct 11 10:12:59 2017
@@ -247,10 +247,12 @@ bool StackProtector::RequiresStackProtec
   OptimizationRemarkEmitter ORE(F);
 
   if (F->hasFnAttribute(Attribute::StackProtectReq)) {
-    ORE.emit(OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
+    ORE.emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
              << "Stack protection applied to function "
              << ore::NV("Function", F)
-             << " due to a function attribute or command-line switch");
+             << " due to a function attribute or command-line switch";
+    });
     NeedsProtector = true;
     Strong = true; // Use the same heuristic as strong to determine SSPLayout
   } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
@@ -264,29 +266,31 @@ bool StackProtector::RequiresStackProtec
     for (const Instruction &I : BB) {
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
         if (AI->isArrayAllocation()) {
-          OptimizationRemark Remark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
-                                    &I);
-          Remark
-              << "Stack protection applied to function "
-              << ore::NV("Function", F)
-              << " due to a call to alloca or use of a variable length array";
+          auto RemarkBuilder = [&]() {
+            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
+                                      &I)
+                   << "Stack protection applied to function "
+                   << ore::NV("Function", F)
+                   << " due to a call to alloca or use of a variable length "
+                      "array";
+          };
           if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
             if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
               // A call to alloca with size >= SSPBufferSize requires
               // stack protectors.
               Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
-              ORE.emit(Remark);
+              ORE.emit(RemarkBuilder);
               NeedsProtector = true;
             } else if (Strong) {
               // Require protectors for all alloca calls in strong mode.
               Layout.insert(std::make_pair(AI, SSPLK_SmallArray));
-              ORE.emit(Remark);
+              ORE.emit(RemarkBuilder);
               NeedsProtector = true;
             }
           } else {
             // A call to alloca with a variable size requires protectors.
             Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
-            ORE.emit(Remark);
+            ORE.emit(RemarkBuilder);
             NeedsProtector = true;
           }
           continue;
@@ -296,11 +300,13 @@ bool StackProtector::RequiresStackProtec
         if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
           Layout.insert(std::make_pair(AI, IsLarge ? SSPLK_LargeArray
                                                    : SSPLK_SmallArray));
-          ORE.emit(OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
+          ORE.emit([&]() {
+            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                    << "Stack protection applied to function "
                    << ore::NV("Function", F)
                    << " due to a stack allocated buffer or struct containing a "
-                      "buffer");
+                      "buffer";
+          });
           NeedsProtector = true;
           continue;
         }
@@ -308,11 +314,13 @@ bool StackProtector::RequiresStackProtec
         if (Strong && HasAddressTaken(AI)) {
           ++NumAddrTaken;
           Layout.insert(std::make_pair(AI, SSPLK_AddrOf));
-          ORE.emit(
-              OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken", &I)
-              << "Stack protection applied to function "
-              << ore::NV("Function", F)
-              << " due to the address of a local variable being taken");
+          ORE.emit([&]() {
+            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
+                                      &I)
+                   << "Stack protection applied to function "
+                   << ore::NV("Function", F)
+                   << " due to the address of a local variable being taken";
+          });
           NeedsProtector = true;
         }
       }

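One variation in the StackProtector hunk above is worth calling out: when the
same remark can fire from several branches, the closure is bound to a named
variable once and that variable is handed to each emit() call, so the message
text still lives in a single place. Schematically (condensed from the hunk
above):

  auto RemarkBuilder = [&]() {
    return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray", &I)
           << "Stack protection applied to function " << ore::NV("Function", F)
           << " due to a call to alloca or use of a variable length array";
  };
  // Each protection case that triggers passes the same builder:
  ORE.emit(RemarkBuilder);
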
Modified: llvm/trunk/lib/Transforms/IPO/Inliner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/Inliner.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/Inliner.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/Inliner.cpp Wed Oct 11 10:12:59 2017
@@ -384,11 +384,13 @@ shouldInline(CallSite CS, function_ref<I
     DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction()
                  << " Cost = " << IC.getCost()
                  << ", outer Cost = " << TotalSecondaryCost << '\n');
-    ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts",
+    ORE.emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts",
                                       Call)
              << "Not inlining. Cost of inlining " << NV("Callee", Callee)
              << " increases the cost of inlining " << NV("Caller", Caller)
-             << " in other contexts");
+             << " in other contexts";
+    });
 
     // IC does not bool() to false, so get an InlineCost that will.
     // This will not be inspected to make an error message.
@@ -476,11 +478,13 @@ inlineCallsImpl(CallGraphSCC &SCC, CallG
         if (Function *Callee = CS.getCalledFunction())
           if (Callee->isDeclaration()) {
             using namespace ore;
-            ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
+            ORE.emit([&]() {
+              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                      << NV("Callee", Callee) << " will not be inlined into "
                      << NV("Caller", CS.getCaller())
                      << " because its definition is unavailable"
-                     << setIsVerbose());
+                     << setIsVerbose();
+            });
             continue;
           }
 
@@ -572,27 +576,31 @@ inlineCallsImpl(CallGraphSCC &SCC, CallG
         if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                   InlineHistoryID, InsertLifetime, AARGetter,
                                   ImportedFunctionsStats)) {
-          ORE.emit(
-              OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
-              << NV("Callee", Callee) << " will not be inlined into "
-              << NV("Caller", Caller));
+          ORE.emit([&]() {
+            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
+                                            Block)
+                   << NV("Callee", Callee) << " will not be inlined into "
+                   << NV("Caller", Caller);
+          });
           continue;
         }
         ++NumInlined;
 
-        if (OIC->isAlways())
-          ORE.emit(OptimizationRemark(DEBUG_TYPE, "AlwaysInline", DLoc, Block)
-                   << NV("Callee", Callee) << " inlined into "
-                   << NV("Caller", Caller) << " with cost=always");
-        else
-          ORE.emit([&]() {
-            return OptimizationRemark(DEBUG_TYPE, "Inlined", DLoc, Block)
-                   << NV("Callee", Callee) << " inlined into "
-                   << NV("Caller", Caller)
-                   << " with cost=" << NV("Cost", OIC->getCost())
-                   << " (threshold=" << NV("Threshold", OIC->getThreshold())
-                   << ")";
-          });
+        ORE.emit([&]() {
+          bool AlwaysInline = OIC->isAlways();
+          StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
+          OptimizationRemark R(DEBUG_TYPE, RemarkName, DLoc, Block);
+          R << NV("Callee", Callee) << " inlined into ";
+          R << NV("Caller", Caller);
+          if (AlwaysInline)
+            R << " with cost=always";
+          else {
+            R << " with cost=" << NV("Cost", OIC->getCost());
+            R << " (threshold=" << NV("Threshold", OIC->getThreshold());
+            R << ")";
+          }
+          return R;
+        });
 
         // If inlining this function gave us any new call sites, throw them
         // onto our worklist to process.  They are useful inline candidates.
@@ -915,25 +923,31 @@ PreservedAnalyses InlinerPass::run(LazyC
 
       using namespace ore;
       if (!InlineFunction(CS, IFI)) {
-        ORE.emit(
-            OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
-            << NV("Callee", &Callee) << " will not be inlined into "
-            << NV("Caller", &F));
+        ORE.emit([&]() {
+          return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
+                 << NV("Callee", &Callee) << " will not be inlined into "
+                 << NV("Caller", &F);
+        });
         continue;
       }
       DidInline = true;
       InlinedCallees.insert(&Callee);
 
-      if (OIC->isAlways())
-        ORE.emit(OptimizationRemark(DEBUG_TYPE, "AlwaysInline", DLoc, Block)
-                 << NV("Callee", &Callee) << " inlined into "
-                 << NV("Caller", &F) << " with cost=always");
-      else
-        ORE.emit(
-            OptimizationRemark(DEBUG_TYPE, "Inlined", DLoc, Block)
-            << NV("Callee", &Callee) << " inlined into " << NV("Caller", &F)
-            << " with cost=" << NV("Cost", OIC->getCost())
-            << " (threshold=" << NV("Threshold", OIC->getThreshold()) << ")");
+      ORE.emit([&]() {
+        bool AlwaysInline = OIC->isAlways();
+        StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
+        OptimizationRemark R(DEBUG_TYPE, RemarkName, DLoc, Block);
+        R << NV("Callee", &Callee) << " inlined into ";
+        R << NV("Caller", &F);
+        if (AlwaysInline)
+          R << " with cost=always";
+        else {
+          R << " with cost=" << NV("Cost", OIC->getCost());
+          R << " (threshold=" << NV("Threshold", OIC->getThreshold());
+          R << ")";
+        }
+        return R;
+      });
 
       // Add any new callsites to defined functions to the worklist.
       if (!IFI.InlinedCallSites.empty()) {

Modified: llvm/trunk/lib/Transforms/IPO/PartialInlining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/PartialInlining.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/PartialInlining.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/PartialInlining.cpp Wed Oct 11 10:12:59 2017
@@ -499,26 +499,32 @@ bool PartialInlinerImpl::shouldPartialIn
                                 *GetAssumptionCache, GetBFI, PSI, &ORE);
 
   if (IC.isAlways()) {
-    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AlwaysInline", Call)
+    ORE.emit([&]() {
+      return OptimizationRemarkAnalysis(DEBUG_TYPE, "AlwaysInline", Call)
              << NV("Callee", Cloner.OrigFunc)
-             << " should always be fully inlined, not partially");
+             << " should always be fully inlined, not partially";
+    });
     return false;
   }
 
   if (IC.isNever()) {
-    ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call)
+    ORE.emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call)
              << NV("Callee", Cloner.OrigFunc) << " not partially inlined into "
              << NV("Caller", Caller)
-             << " because it should never be inlined (cost=never)");
+             << " because it should never be inlined (cost=never)";
+    });
     return false;
   }
 
   if (!IC) {
-    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "TooCostly", Call)
+    ORE.emit([&]() {
+      return OptimizationRemarkAnalysis(DEBUG_TYPE, "TooCostly", Call)
              << NV("Callee", Cloner.OrigFunc) << " not partially inlined into "
              << NV("Caller", Caller) << " because too costly to inline (cost="
              << NV("Cost", IC.getCost()) << ", threshold="
-             << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")");
+             << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")";
+    });
     return false;
   }
   const DataLayout &DL = Caller->getParent()->getDataLayout();
@@ -529,23 +535,28 @@ bool PartialInlinerImpl::shouldPartialIn
 
   // Weighted saving is smaller than weighted cost, return false
   if (NormWeightedSavings < WeightedOutliningRcost) {
-    ORE.emit(
-        OptimizationRemarkAnalysis(DEBUG_TYPE, "OutliningCallcostTooHigh", Call)
-        << NV("Callee", Cloner.OrigFunc) << " not partially inlined into "
-        << NV("Caller", Caller) << " runtime overhead (overhead="
-        << NV("Overhead", (unsigned)WeightedOutliningRcost.getFrequency())
-        << ", savings="
-        << NV("Savings", (unsigned)NormWeightedSavings.getFrequency()) << ")"
-        << " of making the outlined call is too high");
+    ORE.emit([&]() {
+      return OptimizationRemarkAnalysis(DEBUG_TYPE, "OutliningCallcostTooHigh",
+                                        Call)
+             << NV("Callee", Cloner.OrigFunc) << " not partially inlined into "
+             << NV("Caller", Caller) << " runtime overhead (overhead="
+             << NV("Overhead", (unsigned)WeightedOutliningRcost.getFrequency())
+             << ", savings="
+             << NV("Savings", (unsigned)NormWeightedSavings.getFrequency())
+             << ")"
+             << " of making the outlined call is too high";
+    });
 
     return false;
   }
 
-  ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "CanBePartiallyInlined", Call)
+  ORE.emit([&]() {
+    return OptimizationRemarkAnalysis(DEBUG_TYPE, "CanBePartiallyInlined", Call)
            << NV("Callee", Cloner.OrigFunc) << " can be partially inlined into "
            << NV("Caller", Caller) << " with cost=" << NV("Cost", IC.getCost())
            << " (threshold="
-           << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")");
+           << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")";
+  });
   return true;
 }
 
@@ -883,13 +894,15 @@ bool PartialInlinerImpl::tryPartialInlin
     DebugLoc DLoc;
     BasicBlock *Block;
     std::tie(DLoc, Block) = getOneDebugLoc(Cloner.ClonedFunc);
-    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "OutlineRegionTooSmall",
+    ORE.emit([&]() {
+      return OptimizationRemarkAnalysis(DEBUG_TYPE, "OutlineRegionTooSmall",
                                         DLoc, Block)
              << ore::NV("Function", Cloner.OrigFunc)
              << " not partially inlined into callers (Original Size = "
              << ore::NV("OutlinedRegionOriginalSize", Cloner.OutlinedRegionCost)
              << ", Size of call sequence to outlined function = "
-             << ore::NV("NewSize", SizeCost) << ")");
+             << ore::NV("NewSize", SizeCost) << ")";
+    });
     return false;
   }
 
@@ -918,10 +931,12 @@ bool PartialInlinerImpl::tryPartialInlin
     if (!shouldPartialInline(CS, Cloner, WeightedRcost, ORE))
       continue;
 
-    ORE.emit(
-        OptimizationRemark(DEBUG_TYPE, "PartiallyInlined", CS.getInstruction())
-        << ore::NV("Callee", Cloner.OrigFunc) << " partially inlined into "
-        << ore::NV("Caller", CS.getCaller()));
+    ORE.emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "PartiallyInlined",
+                                CS.getInstruction())
+             << ore::NV("Callee", Cloner.OrigFunc) << " partially inlined into "
+             << ore::NV("Caller", CS.getCaller());
+    });
 
     InlineFunctionInfo IFI(nullptr, GetAssumptionCache, PSI);
     InlineFunction(CS, IFI);

Modified: llvm/trunk/lib/Transforms/IPO/SampleProfile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/SampleProfile.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/SampleProfile.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/SampleProfile.cpp Wed Oct 11 10:12:59 2017
@@ -528,17 +528,18 @@ ErrorOr<uint64_t> SampleProfileLoader::g
     bool FirstMark =
         CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
     if (FirstMark) {
-      if (Discriminator)
-        ORE->emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AppliedSamples", &Inst)
-                  << "Applied " << ore::NV("NumSamples", *R)
-                  << " samples from profile (offset: "
-                  << ore::NV("LineOffset", LineOffset) << "."
-                  << ore::NV("Discriminator", Discriminator) << ")");
-      else
-        ORE->emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AppliedSamples", &Inst)
-                  << "Applied " << ore::NV("NumSamples", *R)
-                  << " samples from profile (offset: "
-                  << ore::NV("LineOffset", LineOffset) << ")");
+      ORE->emit([&]() {
+        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
+        Remark << "Applied " << ore::NV("NumSamples", *R);
+        Remark << " samples from profile (offset: ";
+        Remark << ore::NV("LineOffset", LineOffset);
+        if (Discriminator) {
+          Remark << ".";
+          Remark << ore::NV("Discriminator", Discriminator);
+        }
+        Remark << ")";
+        return Remark;
+      });
     }
     DEBUG(dbgs() << "    " << DLoc.getLine() << "."
                  << DIL->getBaseDiscriminator() << ":" << Inst
@@ -1324,9 +1325,11 @@ void SampleProfileLoader::propagateWeigh
       DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
       TI->setMetadata(llvm::LLVMContext::MD_prof,
                       MDB.createBranchWeights(Weights));
-      ORE->emit(OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
-                << "most popular destination for conditional branches at "
-                << ore::NV("CondBranchesLoc", BranchLoc));
+      ORE->emit([&]() {
+        return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
+               << "most popular destination for conditional branches at "
+               << ore::NV("CondBranchesLoc", BranchLoc);
+      });
     } else {
       DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
     }

Modified: llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp Wed Oct 11 10:12:59 2017
@@ -277,38 +277,48 @@ ICallPromotionFunc::getPromotionCandidat
 
     if (ICPInvokeOnly && dyn_cast<CallInst>(Inst)) {
       DEBUG(dbgs() << " Not promote: User options.\n");
-      ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
-               << " Not promote: User options");
+      ORE.emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
+               << " Not promote: User options";
+      });
       break;
     }
     if (ICPCallOnly && dyn_cast<InvokeInst>(Inst)) {
       DEBUG(dbgs() << " Not promote: User option.\n");
-      ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
-               << " Not promote: User options");
+      ORE.emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE, "UserOptions", Inst)
+               << " Not promote: User options";
+      });
       break;
     }
     if (ICPCutOff != 0 && NumOfPGOICallPromotion >= ICPCutOff) {
       DEBUG(dbgs() << " Not promote: Cutoff reached.\n");
-      ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "CutOffReached", Inst)
-               << " Not promote: Cutoff reached");
+      ORE.emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE, "CutOffReached", Inst)
+               << " Not promote: Cutoff reached";
+      });
       break;
     }
 
     Function *TargetFunction = Symtab->getFunction(Target);
     if (TargetFunction == nullptr) {
       DEBUG(dbgs() << " Not promote: Cannot find the target\n");
-      ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "UnableToFindTarget", Inst)
-               << "Cannot promote indirect call: target not found");
+      ORE.emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE, "UnableToFindTarget", Inst)
+               << "Cannot promote indirect call: target not found";
+      });
       break;
     }
 
     const char *Reason = nullptr;
     if (!isLegalToPromote(Inst, TargetFunction, &Reason)) {
       using namespace ore;
-      ORE.emit(OptimizationRemarkMissed(DEBUG_TYPE, "UnableToPromote", Inst)
+      ORE.emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE, "UnableToPromote", Inst)
                << "Cannot promote indirect call to "
                << NV("TargetFunction", TargetFunction) << " with count of "
-               << NV("Count", Count) << ": " << Reason);
+               << NV("Count", Count) << ": " << Reason;
+      });
       break;
     }
 
@@ -604,10 +614,12 @@ Instruction *llvm::promoteIndirectCall(I
 
   using namespace ore;
   if (ORE)
-    ORE->emit(OptimizationRemark(DEBUG_TYPE, "Promoted", Inst)
-              << "Promote indirect call to " << NV("DirectCallee", DirectCallee)
-              << " with count " << NV("Count", Count) << " out of "
-              << NV("TotalCount", TotalCount));
+    ORE->emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "Promoted", Inst)
+             << "Promote indirect call to " << NV("DirectCallee", DirectCallee)
+             << " with count " << NV("Count", Count) << " out of "
+             << NV("TotalCount", TotalCount);
+    });
   return NewInst;
 }
 

Modified: llvm/trunk/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/PGOInstrumentation.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/PGOInstrumentation.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/PGOInstrumentation.cpp Wed Oct 11 10:12:59 2017
@@ -1510,8 +1510,10 @@ void setProfMetadata(Module *M, Instruct
     OS.flush();
     Function *F = TI->getParent()->getParent();
     OptimizationRemarkEmitter ORE(F);
-    ORE.emit(OptimizationRemark(DEBUG_TYPE, "pgo-instrumentation", TI)
-             << BrCondStr << " is true with probability : " << BranchProbStr);
+    ORE.emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "pgo-instrumentation", TI)
+             << BrCondStr << " is true with probability : " << BranchProbStr;
+    });
   }
 }
 

Modified: llvm/trunk/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp Wed Oct 11 10:12:59 2017
@@ -382,14 +382,14 @@ bool MemOPSizeOpt::perform(MemIntrinsic
   DEBUG(dbgs() << *DefaultBB << "\n");
   DEBUG(dbgs() << *MergeBB << "\n");
 
-  {
+  ORE.emit([&]() {
     using namespace ore;
-    ORE.emit(OptimizationRemark(DEBUG_TYPE, "memopt-opt", MI)
+    return OptimizationRemark(DEBUG_TYPE, "memopt-opt", MI)
              << "optimized " << NV("Intrinsic", StringRef(getMIName(MI)))
              << " with count " << NV("Count", SumForOpt) << " out of "
              << NV("Total", TotalCount) << " for " << NV("Versions", Version)
-             << " versions");
-  }
+             << " versions";
+  });
 
   return true;
 }

Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Wed Oct 11 10:12:59 2017
@@ -1276,8 +1276,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A
   if (V->getType()->isPtrOrPtrVectorTy())
     MD->invalidateCachedPointerInfo(V);
   markInstructionForDeletion(LI);
-  ORE->emit(OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
-            << "load eliminated by PRE");
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
+           << "load eliminated by PRE";
+  });
   ++NumPRELoad;
   return true;
 }
@@ -1286,10 +1288,12 @@ static void reportLoadElim(LoadInst *LI,
                            OptimizationRemarkEmitter *ORE) {
   using namespace ore;
 
-  ORE->emit(OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
-            << "load of type " << NV("Type", LI->getType()) << " eliminated"
-            << setExtraArgs() << " in favor of "
-            << NV("InfavorOfValue", AvailableValue));
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
+           << "load of type " << NV("Type", LI->getType()) << " eliminated"
+           << setExtraArgs() << " in favor of "
+           << NV("InfavorOfValue", AvailableValue);
+  });
 }
 
 /// Attempt to eliminate a load whose dependencies are

Modified: llvm/trunk/lib/Transforms/Scalar/LICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LICM.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LICM.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LICM.cpp Wed Oct 11 10:12:59 2017
@@ -612,10 +612,12 @@ bool llvm::canSinkOrHoistInst(Instructio
     // Check loop-invariant address because this may also be a sinkable load
     // whose address is not necessarily loop-invariant.
     if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
-      ORE->emit(OptimizationRemarkMissed(
-                    DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
-                << "failed to move load with loop-invariant address "
-                   "because the loop may invalidate its value");
+      ORE->emit([&]() {
+        return OptimizationRemarkMissed(
+                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
+               << "failed to move load with loop-invariant address "
+                  "because the loop may invalidate its value";
+      });
 
     return !Invalidated;
   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
@@ -814,8 +816,10 @@ static bool sink(Instruction &I, const L
                  const LoopSafetyInfo *SafetyInfo,
                  OptimizationRemarkEmitter *ORE) {
   DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
-  ORE->emit(OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
-            << "sinking " << ore::NV("Inst", &I));
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
+           << "sinking " << ore::NV("Inst", &I);
+  });
   bool Changed = false;
   if (isa<LoadInst>(I))
     ++NumMovedLoads;
@@ -887,8 +891,10 @@ static bool hoist(Instruction &I, const
   auto *Preheader = CurLoop->getLoopPreheader();
   DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I
                << "\n");
-  ORE->emit(OptimizationRemark(DEBUG_TYPE, "Hoisted", &I)
-            << "hoisting " << ore::NV("Inst", &I));
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
+                                                         << ore::NV("Inst", &I);
+  });
 
   // Metadata can be dependent on conditions we are hoisting above.
   // Conservatively strip all metadata on the instruction unless we were
@@ -938,10 +944,12 @@ static bool isSafeToExecuteUnconditional
   if (!GuaranteedToExecute) {
     auto *LI = dyn_cast<LoadInst>(&Inst);
     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
-      ORE->emit(OptimizationRemarkMissed(
-                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
-                << "failed to hoist load with loop-invariant address "
-                   "because load is conditionally executed");
+      ORE->emit([&]() {
+        return OptimizationRemarkMissed(
+                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
+               << "failed to hoist load with loop-invariant address "
+                  "because load is conditionally executed";
+      });
   }
 
   return GuaranteedToExecute;
@@ -1257,9 +1265,11 @@ bool llvm::promoteLoopAccessesToScalars(
   // Otherwise, this is safe to promote, lets do it!
   DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
                << '\n');
-  ORE->emit(
-      OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar", LoopUses[0])
-      << "Moving accesses to memory location out of the loop");
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
+                              LoopUses[0])
+           << "Moving accesses to memory location out of the loop";
+  });
   ++NumPromoted;
 
   // Grab a debug location for the inserted loads/stores; given that the

Modified: llvm/trunk/lib/Transforms/Scalar/LoopDataPrefetch.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopDataPrefetch.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopDataPrefetch.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopDataPrefetch.cpp Wed Oct 11 10:12:59 2017
@@ -327,8 +327,10 @@ bool LoopDataPrefetch::runOnLoop(Loop *L
       ++NumPrefetches;
       DEBUG(dbgs() << "  Access: " << *PtrValue << ", SCEV: " << *LSCEV
                    << "\n");
-      ORE->emit(OptimizationRemark(DEBUG_TYPE, "Prefetched", MemI)
-                << "prefetched memory access");
+      ORE->emit([&]() {
+        return OptimizationRemark(DEBUG_TYPE, "Prefetched", MemI)
+               << "prefetched memory access";
+      });
 
       MadeChange = true;
     }

Modified: llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp Wed Oct 11 10:12:59 2017
@@ -755,9 +755,11 @@ public:
 
     ++NumLoopsDistributed;
     // Report the success.
-    ORE->emit(OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
-                                 L->getHeader())
-              << "distributed loop");
+    ORE->emit([&]() {
+      return OptimizationRemark(LDIST_NAME, "Distribute", L->getStartLoc(),
+                                L->getHeader())
+             << "distributed loop";
+    });
     return true;
   }
 
@@ -769,11 +771,13 @@ public:
     DEBUG(dbgs() << "Skipping; " << Message << "\n");
 
     // With Rpass-missed report that distribution failed.
-    ORE->emit(
-        OptimizationRemarkMissed(LDIST_NAME, "NotDistributed", L->getStartLoc(),
-                                 L->getHeader())
-        << "loop not distributed: use -Rpass-analysis=loop-distribute for more "
-           "info");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(LDIST_NAME, "NotDistributed",
+                                      L->getStartLoc(), L->getHeader())
+             << "loop not distributed: use -Rpass-analysis=loop-distribute for "
+                "more "
+                "info";
+    });
 
     // With Rpass-analysis report why.  This is on by default if distribution
     // was requested explicitly.

Modified: llvm/trunk/lib/Transforms/Scalar/LoopInterchange.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopInterchange.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopInterchange.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopInterchange.cpp Wed Oct 11 10:12:59 2017
@@ -596,10 +596,12 @@ struct LoopInterchange : public Function
       return false;
     }
 
-    ORE->emit(OptimizationRemark(DEBUG_TYPE, "Interchanged",
-                                 InnerLoop->getStartLoc(),
-                                 InnerLoop->getHeader())
-              << "Loop interchanged with enclosing loop.");
+    ORE->emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "Interchanged",
+                                InnerLoop->getStartLoc(),
+                                InnerLoop->getHeader())
+             << "Loop interchanged with enclosing loop.";
+    });
 
     LoopInterchangeTransform LIT(OuterLoop, InnerLoop, SE, LI, DT,
                                  LoopNestExit, LIL.hasInnerLoopReduction());
@@ -772,12 +774,13 @@ bool LoopInterchangeLegality::currentLim
   if (!findInductionAndReductions(InnerLoop, Inductions, Reductions)) {
     DEBUG(dbgs() << "Only inner loops with induction or reduction PHI nodes "
                  << "are supported currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "UnsupportedPHIInner",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Only inner loops with induction or reduction PHI nodes can be"
-                 " interchange currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIInner",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Only inner loops with induction or reduction PHI nodes can be"
+                " interchange currently.";
+    });
     return true;
   }
 
@@ -785,12 +788,13 @@ bool LoopInterchangeLegality::currentLim
   if (Inductions.size() != 1) {
     DEBUG(dbgs() << "We currently only support loops with 1 induction variable."
                  << "Failed to interchange due to current limitation\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "MultiInductionInner",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Only inner loops with 1 induction variable can be "
-                 "interchanged currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "MultiInductionInner",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Only inner loops with 1 induction variable can be "
+                "interchanged currently.";
+    });
     return true;
   }
   if (Reductions.size() > 0)
@@ -801,12 +805,13 @@ bool LoopInterchangeLegality::currentLim
   if (!findInductionAndReductions(OuterLoop, Inductions, Reductions)) {
     DEBUG(dbgs() << "Only outer loops with induction or reduction PHI nodes "
                  << "are supported currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "UnsupportedPHIOuter",
-                                       OuterLoop->getStartLoc(),
-                                       OuterLoop->getHeader())
-              << "Only outer loops with induction or reduction PHI nodes can be"
-                 " interchanged currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedPHIOuter",
+                                      OuterLoop->getStartLoc(),
+                                      OuterLoop->getHeader())
+             << "Only outer loops with induction or reduction PHI nodes can be"
+                " interchanged currently.";
+    });
     return true;
   }
 
@@ -815,35 +820,38 @@ bool LoopInterchangeLegality::currentLim
   if (!Reductions.empty()) {
     DEBUG(dbgs() << "Outer loops with reductions are not supported "
                  << "currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "ReductionsOuter",
-                                       OuterLoop->getStartLoc(),
-                                       OuterLoop->getHeader())
-              << "Outer loops with reductions cannot be interchangeed "
-                 "currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "ReductionsOuter",
+                                      OuterLoop->getStartLoc(),
+                                      OuterLoop->getHeader())
+             << "Outer loops with reductions cannot be interchangeed "
+                "currently.";
+    });
     return true;
   }
   // TODO: Currently we handle only loops with 1 induction variable.
   if (Inductions.size() != 1) {
     DEBUG(dbgs() << "Loops with more than 1 induction variables are not "
                  << "supported currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "MultiIndutionOuter",
-                                       OuterLoop->getStartLoc(),
-                                       OuterLoop->getHeader())
-              << "Only outer loops with 1 induction variable can be "
-                 "interchanged currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "MultiIndutionOuter",
+                                      OuterLoop->getStartLoc(),
+                                      OuterLoop->getHeader())
+             << "Only outer loops with 1 induction variable can be "
+                "interchanged currently.";
+    });
     return true;
   }
 
   // TODO: Triangular loops are not handled for now.
   if (!isLoopStructureUnderstood(InnerInductionVar)) {
     DEBUG(dbgs() << "Loop structure not understood by pass\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "UnsupportedStructureInner",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Inner loop structure not understood currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "UnsupportedStructureInner",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Inner loop structure not understood currently.";
+    });
     return true;
   }
 
@@ -852,24 +860,26 @@ bool LoopInterchangeLegality::currentLim
       getLoopLatchExitBlock(OuterLoopLatch, OuterLoopHeader);
   if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, true)) {
     DEBUG(dbgs() << "Can only handle LCSSA PHIs in outer loops currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "NoLCSSAPHIOuter",
-                                       OuterLoop->getStartLoc(),
-                                       OuterLoop->getHeader())
-              << "Only outer loops with LCSSA PHIs can be interchange "
-                 "currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NoLCSSAPHIOuter",
+                                      OuterLoop->getStartLoc(),
+                                      OuterLoop->getHeader())
+             << "Only outer loops with LCSSA PHIs can be interchange "
+                "currently.";
+    });
     return true;
   }
 
   LoopExitBlock = getLoopLatchExitBlock(InnerLoopLatch, InnerLoopHeader);
   if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, false)) {
     DEBUG(dbgs() << "Can only handle LCSSA PHIs in inner loops currently.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "NoLCSSAPHIOuterInner",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Only inner loops with LCSSA PHIs can be interchange "
-                 "currently.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NoLCSSAPHIOuterInner",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Only inner loops with LCSSA PHIs can be interchange "
+                "currently.";
+    });
     return true;
   }
 
@@ -894,11 +904,12 @@ bool LoopInterchangeLegality::currentLim
   if (!InnerIndexVarInc) {
     DEBUG(dbgs() << "Did not find an instruction to increment the induction "
                  << "variable.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "NoIncrementInInner",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "The inner loop does not increment the induction variable.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NoIncrementInInner",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "The inner loop does not increment the induction variable.";
+    });
     return true;
   }
 
@@ -917,12 +928,13 @@ bool LoopInterchangeLegality::currentLim
     if (!I.isIdenticalTo(InnerIndexVarInc)) {
       DEBUG(dbgs() << "Found unsupported instructions between induction "
                    << "variable increment and branch.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "UnsupportedInsBetweenInduction",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Found unsupported instruction between induction variable "
-                 "increment and branch.");
+      ORE->emit([&]() {
+        return OptimizationRemarkMissed(
+                   DEBUG_TYPE, "UnsupportedInsBetweenInduction",
+                   InnerLoop->getStartLoc(), InnerLoop->getHeader())
+               << "Found unsupported instruction between induction variable "
+                  "increment and branch.";
+      });
       return true;
     }
 
@@ -933,11 +945,12 @@ bool LoopInterchangeLegality::currentLim
   // current limitation.
   if (!FoundInduction) {
     DEBUG(dbgs() << "Did not find the induction variable.\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "NoIndutionVariable",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Did not find the induction variable.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NoIndutionVariable",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Did not find the induction variable.";
+    });
     return true;
   }
   return false;
@@ -951,11 +964,12 @@ bool LoopInterchangeLegality::canInterch
     DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId
                  << " and OuterLoopId = " << OuterLoopId
                  << " due to dependence\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "Dependence",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Cannot interchange loops due to dependences.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "Dependence",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Cannot interchange loops due to dependences.";
+    });
     return false;
   }
 
@@ -1003,12 +1017,13 @@ bool LoopInterchangeLegality::canInterch
   // Check if the loops are tightly nested.
   if (!tightlyNested(OuterLoop, InnerLoop)) {
     DEBUG(dbgs() << "Loops not tightly nested\n");
-    ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                       "NotTightlyNested",
-                                       InnerLoop->getStartLoc(),
-                                       InnerLoop->getHeader())
-              << "Cannot interchange loops because they are not tightly "
-                 "nested.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(DEBUG_TYPE, "NotTightlyNested",
+                                      InnerLoop->getStartLoc(),
+                                      InnerLoop->getHeader())
+             << "Cannot interchange loops because they are not tightly "
+                "nested.";
+    });
     return false;
   }
 
@@ -1105,14 +1120,15 @@ bool LoopInterchangeProfitability::isPro
   if (isProfitableForVectorization(InnerLoopId, OuterLoopId, DepMatrix))
     return true;
 
-  ORE->emit(OptimizationRemarkMissed(DEBUG_TYPE,
-                                     "InterchangeNotProfitable",
-                                     InnerLoop->getStartLoc(),
-                                     InnerLoop->getHeader())
-            << "Interchanging loops is too costly (cost="
-            << ore::NV("Cost", Cost) << ", threshold="
-            << ore::NV("Threshold", LoopInterchangeCostThreshold) <<
-            ") and it does not improve parallelism.");
+  ORE->emit([&]() {
+    return OptimizationRemarkMissed(DEBUG_TYPE, "InterchangeNotProfitable",
+                                    InnerLoop->getStartLoc(),
+                                    InnerLoop->getHeader())
+           << "Interchanging loops is too costly (cost="
+           << ore::NV("Cost", Cost) << ", threshold="
+           << ore::NV("Threshold", LoopInterchangeCostThreshold)
+           << ") and it does not improve parallelism.";
+  });
   return false;
 }
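
The LoopInterchange hunks above, like the rest of this patch, all apply the same mechanical rewrite: a remark that used to be constructed eagerly and passed to emit() is now constructed inside a lambda that emit() may or may not invoke. Below is a minimal, self-contained sketch of that shape; Remark and RemarkEmitter are deliberately simplified stand-ins, not LLVM's OptimizationRemark / OptimizationRemarkEmitter classes, and the bool flag is only a placeholder for whatever gating the real emitter performs.

  #include <iostream>
  #include <string>
  #include <utility>

  // Toy remark: a named message built up with operator<<.
  struct Remark {
    std::string Name;
    std::string Msg;
    explicit Remark(std::string N) : Name(std::move(N)) {}
    Remark &operator<<(const std::string &S) { Msg += S; return *this; }
    Remark &operator<<(int V) { Msg += std::to_string(V); return *this; }
  };

  class RemarkEmitter {
    bool Enabled;

  public:
    explicit RemarkEmitter(bool E) : Enabled(E) {}

    // Old-style entry point: the caller has already paid for building R.
    void emit(const Remark &R) {
      if (Enabled)
        std::cout << R.Name << ": " << R.Msg << "\n";
    }

    // New-style entry point: the remark is only built if we decide to emit.
    template <typename BuilderT> void emit(BuilderT Builder) {
      if (Enabled)
        emit(Builder());
    }
  };

  int main() {
    RemarkEmitter Off(false);

    // Old style: the Remark and its message are constructed even though the
    // emitter throws them away.
    Off.emit(Remark("InterchangeNotProfitable") << "cost=" << 42);

    // New style (the form this patch converts call sites to): construction is
    // wrapped in a lambda that the emitter never invokes here.
    Off.emit([&]() {
      return Remark("InterchangeNotProfitable") << "cost=" << 42;
    });

    RemarkEmitter On(true);
    On.emit([&]() {
      return Remark("Interleaved") << "interleaved loop (count: " << 4 << ")";
    });
    return 0;
  }

In the sketch, when Enabled is false the builder is never invoked, so neither the Remark nor its message is ever materialized; that deferral is the point of the closure form the call sites are being moved to.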
 

Modified: llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp Wed Oct 11 10:12:59 2017
@@ -842,11 +842,14 @@ static bool computeUnrollCount(
       }
       if (UP.Count < 2) {
         if (PragmaEnableUnroll)
-          ORE->emit(
-              OptimizationRemarkMissed(DEBUG_TYPE, "UnrollAsDirectedTooLarge",
-                                       L->getStartLoc(), L->getHeader())
-              << "Unable to unroll loop as directed by unroll(enable) pragma "
-                 "because unrolled size is too large.");
+          ORE->emit([&]() {
+            return OptimizationRemarkMissed(DEBUG_TYPE,
+                                            "UnrollAsDirectedTooLarge",
+                                            L->getStartLoc(), L->getHeader())
+                   << "Unable to unroll loop as directed by unroll(enable) "
+                      "pragma "
+                      "because unrolled size is too large.";
+          });
         UP.Count = 0;
       }
     } else {
@@ -856,22 +859,27 @@ static bool computeUnrollCount(
       UP.Count = UP.MaxCount;
     if ((PragmaFullUnroll || PragmaEnableUnroll) && TripCount &&
         UP.Count != TripCount)
-      ORE->emit(
-          OptimizationRemarkMissed(DEBUG_TYPE, "FullUnrollAsDirectedTooLarge",
-                                   L->getStartLoc(), L->getHeader())
-          << "Unable to fully unroll loop as directed by unroll pragma because "
-             "unrolled size is too large.");
+      ORE->emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE,
+                                        "FullUnrollAsDirectedTooLarge",
+                                        L->getStartLoc(), L->getHeader())
+               << "Unable to fully unroll loop as directed by unroll pragma "
+                  "because "
+                  "unrolled size is too large.";
+      });
     return ExplicitUnroll;
   }
   assert(TripCount == 0 &&
          "All cases when TripCount is constant should be covered here.");
   if (PragmaFullUnroll)
-    ORE->emit(
-        OptimizationRemarkMissed(DEBUG_TYPE,
-                                 "CantFullUnrollAsDirectedRuntimeTripCount",
-                                 L->getStartLoc(), L->getHeader())
-        << "Unable to fully unroll loop as directed by unroll(full) pragma "
-           "because loop has a runtime trip count.");
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(
+                 DEBUG_TYPE, "CantFullUnrollAsDirectedRuntimeTripCount",
+                 L->getStartLoc(), L->getHeader())
+             << "Unable to fully unroll loop as directed by unroll(full) "
+                "pragma "
+                "because loop has a runtime trip count.";
+    });
 
   // 6th priority is runtime unrolling.
   // Don't unroll a runtime trip count loop when it is disabled.
@@ -922,17 +930,19 @@ static bool computeUnrollCount(
                  << OrigCount << " to " << UP.Count << ".\n");
     using namespace ore;
     if (PragmaCount > 0 && !UP.AllowRemainder)
-      ORE->emit(
-          OptimizationRemarkMissed(DEBUG_TYPE,
-                                   "DifferentUnrollCountFromDirected",
-                                   L->getStartLoc(), L->getHeader())
-          << "Unable to unroll loop the number of times directed by "
-             "unroll_count pragma because remainder loop is restricted "
-             "(that could architecture specific or because the loop "
-             "contains a convergent instruction) and so must have an unroll "
-             "count that divides the loop trip multiple of "
-          << NV("TripMultiple", TripMultiple) << ".  Unrolling instead "
-          << NV("UnrollCount", UP.Count) << " time(s).");
+      ORE->emit([&]() {
+        return OptimizationRemarkMissed(DEBUG_TYPE,
+                                        "DifferentUnrollCountFromDirected",
+                                        L->getStartLoc(), L->getHeader())
+               << "Unable to unroll loop the number of times directed by "
+                  "unroll_count pragma because remainder loop is restricted "
+                  "(that could architecture specific or because the loop "
+                  "contains a convergent instruction) and so must have an "
+                  "unroll "
+                  "count that divides the loop trip multiple of "
+               << NV("TripMultiple", TripMultiple) << ".  Unrolling instead "
+               << NV("UnrollCount", UP.Count) << " time(s).";
+      });
   }
 
   if (UP.Count > UP.MaxCount)

Modified: llvm/trunk/lib/Transforms/Scalar/TailRecursionElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/TailRecursionElimination.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/TailRecursionElimination.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/TailRecursionElimination.cpp Wed Oct 11 10:12:59 2017
@@ -255,8 +255,10 @@ static bool markTails(Function &F, bool
         }
         if (SafeToTail) {
           using namespace ore;
-          ORE->emit(OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
-                    << "marked as tail call candidate (readnone)");
+          ORE->emit([&]() {
+            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
+                   << "marked as tail call candidate (readnone)";
+          });
           CI->setTailCall();
           Modified = true;
           continue;
@@ -301,8 +303,10 @@ static bool markTails(Function &F, bool
     if (Visited[CI->getParent()] != ESCAPED) {
       // If the escape point was part way through the block, calls after the
       // escape point wouldn't have been put into DeferredTails.
-      ORE->emit(OptimizationRemark(DEBUG_TYPE, "tailcall", CI)
-                << "marked as tail call candidate");
+      ORE->emit([&]() {
+        return OptimizationRemark(DEBUG_TYPE, "tailcall", CI)
+               << "marked as tail call candidate";
+      });
       CI->setTailCall();
       Modified = true;
     } else {
@@ -554,8 +558,10 @@ static bool eliminateRecursiveTailCall(C
   Function *F = BB->getParent();
 
   using namespace ore;
-  ORE->emit(OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
-            << "transforming tail recursion into loop");
+  ORE->emit([&]() {
+    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
+           << "transforming tail recursion into loop";
+  });
 
   // OK! We can transform this tail call.  If this is the first one found,
   // create the new entry block, allowing us to branch back to the old entry.

Modified: llvm/trunk/lib/Transforms/Utils/LoopUnroll.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/LoopUnroll.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/LoopUnroll.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/LoopUnroll.cpp Wed Oct 11 10:12:59 2017
@@ -460,18 +460,22 @@ LoopUnrollResult llvm::UnrollLoop(
   // Report the unrolling decision.
   if (CompletelyUnroll) {
     DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
-          << " with trip count " << TripCount << "!\n");
-    ORE->emit(OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
-                                 L->getHeader())
-              << "completely unrolled loop with "
-              << NV("UnrollCount", TripCount) << " iterations");
+                 << " with trip count " << TripCount << "!\n");
+    ORE->emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
+                                L->getHeader())
+             << "completely unrolled loop with " << NV("UnrollCount", TripCount)
+             << " iterations";
+    });
   } else if (PeelCount) {
     DEBUG(dbgs() << "PEELING loop %" << Header->getName()
                  << " with iteration count " << PeelCount << "!\n");
-    ORE->emit(OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
-                                 L->getHeader())
-              << " peeled loop by " << NV("PeelCount", PeelCount)
-              << " iterations");
+    ORE->emit([&]() {
+      return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
+                                L->getHeader())
+             << " peeled loop by " << NV("PeelCount", PeelCount)
+             << " iterations";
+    });
   } else {
     auto DiagBuilder = [&]() {
       OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),

Modified: llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp Wed Oct 11 10:12:59 2017
@@ -485,8 +485,10 @@ Value *LibCallSimplifier::optimizeString
     uint64_t LenTrue = GetStringLength(SI->getTrueValue(), CharSize);
     uint64_t LenFalse = GetStringLength(SI->getFalseValue(), CharSize);
     if (LenTrue && LenFalse) {
-      ORE.emit(OptimizationRemark("instcombine", "simplify-libcalls", CI)
-               << "folded strlen(select) to select of constants");
+      ORE.emit([&]() {
+        return OptimizationRemark("instcombine", "simplify-libcalls", CI)
+               << "folded strlen(select) to select of constants";
+      });
       return B.CreateSelect(SI->getCondition(),
                             ConstantInt::get(CI->getType(), LenTrue - 1),
                             ConstantInt::get(CI->getType(), LenFalse - 1));

Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=315476&r1=315475&r2=315476&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Wed Oct 11 10:12:59 2017
@@ -1248,25 +1248,29 @@ public:
   /// Dumps all the hint information.
   void emitRemarkWithHints() const {
     using namespace ore;
-    if (Force.Value == LoopVectorizeHints::FK_Disabled)
-      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
+    ORE.emit([&]() {
+      if (Force.Value == LoopVectorizeHints::FK_Disabled)
+        return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                         TheLoop->getStartLoc(),
                                         TheLoop->getHeader())
-               << "loop not vectorized: vectorization is explicitly disabled");
-    else {
-      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
-                                 TheLoop->getStartLoc(), TheLoop->getHeader());
-      R << "loop not vectorized";
-      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
-        R << " (Force=" << NV("Force", true);
-        if (Width.Value != 0)
-          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
-        if (Interleave.Value != 0)
-          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
-        R << ")";
+               << "loop not vectorized: vectorization is explicitly disabled";
+      else {
+        OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
+                                   TheLoop->getStartLoc(),
+                                   TheLoop->getHeader());
+        R << "loop not vectorized";
+        if (Force.Value == LoopVectorizeHints::FK_Enabled) {
+          R << " (Force=" << NV("Force", true);
+          if (Width.Value != 0)
+            R << ", Vector Width=" << NV("VectorWidth", Width.Value);
+          if (Interleave.Value != 0)
+            R << ", Interleave Count="
+              << NV("InterleaveCount", Interleave.Value);
+          R << ")";
+        }
+        return R;
       }
-      ORE.emit(R);
-    }
+    });
   }
 
   unsigned getWidth() const { return Width.Value; }
@@ -2277,12 +2281,14 @@ public:
     const char *PassName = Hints.vectorizeAnalysisPassName();
     bool Failed = false;
     if (UnsafeAlgebraInst && !Hints.allowReordering()) {
-      ORE.emit(
-          OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
-                                              UnsafeAlgebraInst->getDebugLoc(),
-                                              UnsafeAlgebraInst->getParent())
-          << "loop not vectorized: cannot prove it is safe to reorder "
-             "floating-point operations");
+      ORE.emit([&]() {
+        return OptimizationRemarkAnalysisFPCommute(
+                   PassName, "CantReorderFPOps",
+                   UnsafeAlgebraInst->getDebugLoc(),
+                   UnsafeAlgebraInst->getParent())
+               << "loop not vectorized: cannot prove it is safe to reorder "
+                  "floating-point operations";
+      });
       Failed = true;
     }
 
@@ -2293,11 +2299,13 @@ public:
         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
     if ((ThresholdReached && !Hints.allowReordering()) ||
         PragmaThresholdReached) {
-      ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
+      ORE.emit([&]() {
+        return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                   L->getStartLoc(),
                                                   L->getHeader())
                << "loop not vectorized: cannot prove it is safe to reorder "
-                  "memory operations");
+                  "memory operations";
+      });
       DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
       Failed = true;
     }
@@ -5742,9 +5750,10 @@ bool LoopVectorizationLegality::canVecto
   InterleaveInfo.setLAI(LAI);
   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
   if (LAR) {
-    OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
-                                  "loop not vectorized: ", *LAR);
-    ORE->emit(VR);
+    ORE->emit([&]() {
+      return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
+                                        "loop not vectorized: ", *LAR);
+    });
   }
   if (!LAI->canVectorizeMemory())
     return false;
@@ -8573,24 +8582,32 @@ bool LoopVectorizePass::processLoop(Loop
   const char *VAPassName = Hints.vectorizeAnalysisPassName();
   if (!VectorizeLoop && !InterleaveLoop) {
     // Do not vectorize or interleaving the loop.
-    ORE->emit(OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
-                                         L->getStartLoc(), L->getHeader())
-              << VecDiagMsg.second);
-    ORE->emit(OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
-                                         L->getStartLoc(), L->getHeader())
-              << IntDiagMsg.second);
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
+                                      L->getStartLoc(), L->getHeader())
+             << VecDiagMsg.second;
+    });
+    ORE->emit([&]() {
+      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
+                                      L->getStartLoc(), L->getHeader())
+             << IntDiagMsg.second;
+    });
     return false;
   } else if (!VectorizeLoop && InterleaveLoop) {
     DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
-    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
-                                         L->getStartLoc(), L->getHeader())
-              << VecDiagMsg.second);
+    ORE->emit([&]() {
+      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
+                                        L->getStartLoc(), L->getHeader())
+             << VecDiagMsg.second;
+    });
   } else if (VectorizeLoop && !InterleaveLoop) {
     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                  << DebugLocStr << '\n');
-    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
-                                         L->getStartLoc(), L->getHeader())
-              << IntDiagMsg.second);
+    ORE->emit([&]() {
+      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
+                                        L->getStartLoc(), L->getHeader())
+             << IntDiagMsg.second;
+    });
   } else if (VectorizeLoop && InterleaveLoop) {
     DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                  << DebugLocStr << '\n');
@@ -8608,10 +8625,12 @@ bool LoopVectorizePass::processLoop(Loop
                                &CM);
     LVP.executePlan(Unroller, DT);
 
-    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
-                                 L->getHeader())
-              << "interleaved loop (interleaved count: "
-              << NV("InterleaveCount", IC) << ")");
+    ORE->emit([&]() {
+      return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
+                                L->getHeader())
+             << "interleaved loop (interleaved count: "
+             << NV("InterleaveCount", IC) << ")";
+    });
   } else {
     // If we decided that it is *legal* to vectorize the loop, then do it.
     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
@@ -8626,11 +8645,13 @@ bool LoopVectorizePass::processLoop(Loop
       AddRuntimeUnrollDisableMetaData(L);
 
     // Report the vectorization decision.
-    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
-                                 L->getHeader())
-              << "vectorized loop (vectorization width: "
-              << NV("VectorizationFactor", VF.Width)
-              << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
+    ORE->emit([&]() {
+      return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
+                                L->getHeader())
+             << "vectorized loop (vectorization width: "
+             << NV("VectorizationFactor", VF.Width)
+             << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
+    });
   }
 
   // Mark the loop as already vectorized to avoid vectorizing again.
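
One shape worth calling out from the LoopVectorize.cpp change to emitRemarkWithHints() above: when the old code chose between two different remarks, the whole if/else moves inside the builder lambda, and both branches return the same remark type (OptimizationRemarkMissed there) so the lambda has a single deduced return type. A reduced sketch of that structure, again using a toy Remark type rather than LLVM's:

  #include <iostream>
  #include <string>
  #include <utility>

  struct Remark {
    std::string Name;
    std::string Msg;
    explicit Remark(std::string N) : Name(std::move(N)) {}
    Remark &operator<<(const std::string &S) { Msg += S; return *this; }
  };

  template <typename BuilderT> void emit(bool RemarksEnabled, BuilderT Builder) {
    if (!RemarksEnabled)
      return;                       // builder never runs when remarks are off
    Remark R = Builder();
    std::cout << R.Name << ": " << R.Msg << "\n";
  }

  int main() {
    bool ExplicitlyDisabled = true;
    emit(true, [&]() {
      if (ExplicitlyDisabled)
        return Remark("MissedExplicitlyDisabled")
               << "loop not vectorized: vectorization is explicitly disabled";
      // Both branches must produce the same type (Remark here); otherwise the
      // lambda's return type cannot be deduced.
      Remark R("MissedDetails");
      R << "loop not vectorized";
      return R;
    });
    return 0;
  }

If the branches needed genuinely different remark classes, a single lambda with a deduced return type would not compile, so this form only works when one remark type covers both cases.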



