[llvm] r278580 - [LSV] Use OrderedBasicBlock instead of rolling it ourselves. NFC

Justin Lebar via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 12 17:04:08 PDT 2016


Author: jlebar
Date: Fri Aug 12 19:04:08 2016
New Revision: 278580

URL: http://llvm.org/viewvc/llvm-project?rev=278580&view=rev
Log:
[LSV] Use OrderedBasicBlock instead of rolling it ourselves. NFC

Summary:
In getVectorizablePrefix, this is less efficient (because we have to
iterate over the BB twice), but boy is it simpler.  Given how much
trouble we've had here, I think the simplicity gain is worthwhile.

In reorder(), this is actually more efficient: when the two instructions
are in the same BB, DominatorTree::dominates iterates over the BB from the
beginning on every query, whereas OrderedBasicBlock numbers the
instructions lazily and caches the result.
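
As a point of reference, here is a minimal sketch of the idea behind
OrderedBasicBlock: lazily number the instructions of a single basic block
and answer "does A come before B?" from that cached numbering, so repeated
queries don't have to rescan the block from the top the way
DominatorTree::dominates does for two instructions in the same block. The
class SimpleInstrOrder and its comesBefore() helper below are invented for
illustration; this is not the actual OrderedBasicBlock implementation.

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include <cassert>

// Illustrative only: a lazily built intra-block instruction ordering.
class SimpleInstrOrder {
  const llvm::BasicBlock *BB;
  llvm::DenseMap<const llvm::Instruction *, unsigned> Position;
  llvm::BasicBlock::const_iterator NextToNumber;
  unsigned NextPos = 0;

public:
  explicit SimpleInstrOrder(const llvm::BasicBlock *BB)
      : BB(BB), NextToNumber(BB->begin()) {}

  // True if A appears strictly before B in BB. Instructions are numbered
  // on demand, so each query only walks as far into the block as it has
  // to, and later queries reuse the cached positions.
  bool comesBefore(const llvm::Instruction *A, const llvm::Instruction *B) {
    assert(A->getParent() == BB && B->getParent() == BB &&
           "query only makes sense within one basic block");
    while (!Position.count(A) || !Position.count(B)) {
      assert(NextToNumber != BB->end() && "ran off the end of the block");
      Position[&*NextToNumber] = NextPos++;
      ++NextToNumber;
    }
    return Position.lookup(A) < Position.lookup(B);
  }
};

In the patch below, OBB.dominates(A, B) plays roughly the role of
comesBefore(A, B) for the intra-block ordering checks.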

Reviewers: asbirlea

Subscribers: arsenm, llvm-commits, mzolotukhin

Differential Revision: https://reviews.llvm.org/D23472

Modified:
    llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp

Modified: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=278580&r1=278579&r2=278580&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Fri Aug 12 19:04:08 2016
@@ -15,6 +15,7 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/OrderedBasicBlock.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -345,6 +346,7 @@ bool Vectorizer::isConsecutiveAccess(Val
 }
 
 void Vectorizer::reorder(Instruction *I) {
+  OrderedBasicBlock OBB(I->getParent());
   SmallPtrSet<Instruction *, 16> InstructionsToMove;
   SmallVector<Instruction *, 16> Worklist;
 
@@ -357,11 +359,14 @@ void Vectorizer::reorder(Instruction *I)
       if (!IM || IM->getOpcode() == Instruction::PHI)
         continue;
 
-      if (!DT.dominates(IM, I)) {
+      // If IM is in another BB, no need to move it, because this pass only
+      // vectorizes instructions within one BB.
+      if (IM->getParent() != I->getParent())
+        continue;
+
+      if (!OBB.dominates(IM, I)) {
         InstructionsToMove.insert(IM);
         Worklist.push_back(IM);
-        assert(IM->getParent() == IW->getParent() &&
-               "Instructions to move should be in the same basic block");
       }
     }
   }
@@ -433,8 +438,8 @@ Vectorizer::splitOddVectorElts(ArrayRef<
 ArrayRef<Instruction *>
 Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
   // These are in BB order, unlike Chain, which is in address order.
-  SmallVector<std::pair<Instruction *, unsigned>, 16> MemoryInstrs;
-  SmallVector<std::pair<Instruction *, unsigned>, 16> ChainInstrs;
+  SmallVector<Instruction *, 16> MemoryInstrs;
+  SmallVector<Instruction *, 16> ChainInstrs;
 
   bool IsLoadChain = isa<LoadInst>(Chain[0]);
   DEBUG({
@@ -448,14 +453,12 @@ Vectorizer::getVectorizablePrefix(ArrayR
     }
   });
 
-  unsigned InstrIdx = 0;
   for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
-    ++InstrIdx;
     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
       if (!is_contained(Chain, &I))
-        MemoryInstrs.push_back({&I, InstrIdx});
+        MemoryInstrs.push_back(&I);
       else
-        ChainInstrs.push_back({&I, InstrIdx});
+        ChainInstrs.push_back(&I);
     } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
       DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I << '\n');
       break;
@@ -466,16 +469,14 @@ Vectorizer::getVectorizablePrefix(ArrayR
     }
   }
 
+  OrderedBasicBlock OBB(Chain[0]->getParent());
+
   // Loop until we find an instruction in ChainInstrs that we can't vectorize.
-  unsigned ChainInstrIdx, ChainInstrsLen;
-  for (ChainInstrIdx = 0, ChainInstrsLen = ChainInstrs.size();
-       ChainInstrIdx < ChainInstrsLen; ++ChainInstrIdx) {
-    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx].first;
-    unsigned ChainInstrLoc = ChainInstrs[ChainInstrIdx].second;
+  unsigned ChainInstrIdx = 0;
+  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
+    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];
     bool AliasFound = false;
-    for (auto EntryMem : MemoryInstrs) {
-      Instruction *MemInstr = EntryMem.first;
-      unsigned MemInstrLoc = EntryMem.second;
+    for (Instruction *MemInstr : MemoryInstrs) {
       if (isa<LoadInst>(MemInstr) && isa<LoadInst>(ChainInstr))
         continue;
 
@@ -484,12 +485,12 @@ Vectorizer::getVectorizablePrefix(ArrayR
       // vectorize it (the vectorized load is inserted at the location of the
       // first load in the chain).
       if (isa<StoreInst>(MemInstr) && isa<LoadInst>(ChainInstr) &&
-          ChainInstrLoc < MemInstrLoc)
+          OBB.dominates(ChainInstr, MemInstr))
         continue;
 
       // Same case, but in reverse.
       if (isa<LoadInst>(MemInstr) && isa<StoreInst>(ChainInstr) &&
-          ChainInstrLoc > MemInstrLoc)
+          OBB.dominates(MemInstr, ChainInstr))
         continue;
 
       if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
@@ -520,10 +521,7 @@ Vectorizer::getVectorizablePrefix(ArrayR
   unsigned ChainIdx, ChainLen;
   for (ChainIdx = 0, ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
     Instruction *I = Chain[ChainIdx];
-    if (none_of(VectorizableChainInstrs,
-                [I](std::pair<Instruction *, unsigned> CI) {
-                  return I == CI.first;
-                }))
+    if (!is_contained(VectorizableChainInstrs, I))
       break;
   }
   return Chain.slice(0, ChainIdx);



