[llvm] r290394 - Reimplement dependency tracking in the ImplicitNullChecks pass

Sanjoy Das via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 22 16:41:21 PST 2016


Author: sanjoy
Date: Thu Dec 22 18:41:21 2016
New Revision: 290394

URL: http://llvm.org/viewvc/llvm-project?rev=290394&view=rev
Log:
Reimplement dependency tracking in the ImplicitNullChecks pass

Summary:
This change rewrites a core component in the ImplicitNullChecks pass for
greater simplicity since the original design was over-complicated for no
good reason.  Please review this as essentially a new pass.  The change
is almost NFC, and I've added a test case for a scenario that the new
code handles but the old code did not.

The implicit null check pass, at its core, is a code hoisting transform.
It differs from "normal" code transforms in that it speculates
potentially faulting instructions (by design), but a lot of the usual
hazard detection logic (register read-after-write etc.) still applies.
We previously detected hazards by keeping track of registers defined and
used by machine instructions over an instruction range, but that was
unwieldy and did not actually confer any performance benefits.  The
intent was to have linear time complexity over the number of machine
instructions considered, but it ended up being N^2 in practice.
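
As a toy illustration of the kind of hazard involved (plain C++ standing
in for machine instructions; the names here are invented for the example
and are not part of the pass):

  #include <cassert>

  int main() {
    int base = 1;
    int loaded;

    base = 42;      // redefines 'base' -- a hazard for the "load" below
    loaded = base;  // the would-be faulting load reads the redefined value
    assert(loaded == 42);

    // Hoisting 'loaded = base;' above 'base = 42;' would make it read 1
    // instead, so the transform must either give up or hoist 'base = 42;'
    // as well -- the single-dependency case tracked by the new code.
    return 0;
  }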

This new version is more obviously O(N^2) (with N capped to 8 by
default) in hazard detection.  It does not attempt to be clever in
tracking register uses or defs (the previous cleverness here was a
source of bugs).
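
To make the new scan concrete, here is a standalone, simplified C++ sketch
of the pairwise dependence check (not the pass itself: "registers" are
plain integers, regsOverlap collapses to equality, and ToyInst is a made-up
type; the real logic is in canReorder/computeDependence in the diff below):

  #include <cstddef>
  #include <cstdio>
  #include <optional>
  #include <vector>

  struct ToyInst {
    std::vector<unsigned> Regs; // every register operand, def or use alike
  };

  // Conservative pairwise check: A and B may be swapped only if they touch
  // no common register (the real pass asks TRI->regsOverlap instead).
  static bool canReorder(const ToyInst &A, const ToyInst &B) {
    for (unsigned RA : A.Regs)
      for (unsigned RB : B.Regs)
        if (RA == RB)
          return false;
    return true;
  }

  struct DependenceResult {
    bool CanReorder;
    std::optional<std::size_t> PotentialDependence; // index into the block
  };

  // Can MI be moved from after Block to before it, dragging along at most
  // one dependency?  The pass calls this once per candidate instruction,
  // which gives the O(N^2) behaviour mentioned above, with N capped at
  // MaxInstsToConsider (8 by default).
  static DependenceResult computeDependence(const ToyInst &MI,
                                            const std::vector<ToyInst> &Block) {
    std::optional<std::size_t> Dep;
    for (std::size_t I = 0, E = Block.size(); I != E; ++I) {
      if (canReorder(Block[I], MI))
        continue;
      if (!Dep)
        Dep = I;            // first (and only allowed) dependency
      else
        return {false, {}}; // two dependencies -- bail out
    }
    return {true, Dep};
  }

  int main() {
    //   inst0: writes r1, reads r0     <- the single hoistable dependency
    //   inst1: writes r2, reads r3, r4 <- unrelated
    //   load : reads r1                <- candidate faulting load
    std::vector<ToyInst> Block = {{{1, 0}}, {{2, 3, 4}}};
    ToyInst Load{{1}};
    DependenceResult R = computeDependence(Load, Block);
    long DepIdx = R.PotentialDependence ? (long)*R.PotentialDependence : -1;
    std::printf("CanReorder=%d Dep=%ld\n", (int)R.CanReorder, DepIdx);
  }

Built as C++17 this prints "CanReorder=1 Dep=0": the load can be hoisted
provided its single dependency (inst0) is hoisted with it, which is the
scenario the new test case below exercises.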

Once this is checked in, I'll extract out the `IsSuitableMemoryOp` and
`CanHoistLoadInst` lambdas into member functions (they're too complicated
to be inline lambdas) and do some other related NFC cleanups.

Reviewers: reames, anna, atrick

Subscribers: mcrosier, llvm-commits

Differential Revision: https://reviews.llvm.org/D27592

Modified:
    llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp
    llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir

Modified: llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp?rev=290394&r1=290393&r2=290394&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp (original)
+++ llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp Thu Dec 22 18:41:21 2016
@@ -51,6 +51,12 @@ static cl::opt<int> PageSize("imp-null-c
                              cl::desc("The page size of the target in bytes"),
                              cl::init(4096));
 
+static cl::opt<unsigned> MaxInstsToConsider(
+    "imp-null-max-insts-to-consider",
+    cl::desc("The max number of instructions to consider hoisting loads over "
+             "(the algorithm is quadratic over this number)"),
+    cl::init(8));
+
 #define DEBUG_TYPE "implicit-null-checks"
 
 STATISTIC(NumImplicitNullChecks,
@@ -59,6 +65,44 @@ STATISTIC(NumImplicitNullChecks,
 namespace {
 
 class ImplicitNullChecks : public MachineFunctionPass {
+  /// Return true if \c computeDependence can process \p MI.
+  static bool canHandle(const MachineInstr *MI);
+
+  /// Helper function for \c computeDependence.  Return true if \p A
+  /// and \p B do not have any dependences between them, and can be
+  /// re-ordered without changing program semantics.
+  bool canReorder(const MachineInstr *A, const MachineInstr *B);
+
+  /// A data type for representing the result computed by \c
+  /// computeDependence.  States whether it is okay to reorder the
+  /// instruction passed to \c computeDependence with at most one
+  /// dependency.
+  struct DependenceResult {
+    /// Can we actually re-order \p MI with \p Insts (see \c
+    /// computeDependence).
+    bool CanReorder;
+
+    /// If non-None, then an instruction in \p Insts that also must be
+    /// hoisted.
+    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;
+
+    /*implicit*/ DependenceResult(
+        bool CanReorder,
+        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
+        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
+      assert((!PotentialDependence || CanReorder) &&
+             "!CanReorder && PotentialDependence.hasValue() not allowed!");
+    }
+  };
+
+  /// Compute a result for the following question: can \p MI be
+  /// re-ordered from after \p Insts to before them.
+  ///
+  /// \c canHandle should return true for all instructions in \p
+  /// Insts.
+  DependenceResult computeDependence(const MachineInstr *MI,
+                                     ArrayRef<MachineInstr *> Insts);
+
   /// Represents one null check that can be made implicit.
   class NullCheck {
     // The memory operation the null check can be folded into.
@@ -133,173 +177,66 @@ public:
   }
 };
 
-/// \brief Detect re-ordering hazards and dependencies.
-///
-/// This class keeps track of defs and uses, and can be queried if a given
-/// machine instruction can be re-ordered from after the machine instructions
-/// seen so far to before them.
-class HazardDetector {
-  static MachineInstr *getUnknownMI() {
-    return DenseMapInfo<MachineInstr *>::getTombstoneKey();
-  }
-
-  // Maps physical registers to the instruction defining them.  If there has
-  // been more than one def of an specific register, that register is mapped to
-  // getUnknownMI().
-  DenseMap<unsigned, MachineInstr *> RegDefs;
-  DenseSet<unsigned> RegUses;
-  const TargetRegisterInfo &TRI;
-  bool hasSeenClobber;
-  AliasAnalysis &AA;
-
-public:
-  explicit HazardDetector(const TargetRegisterInfo &TRI, AliasAnalysis &AA)
-      : TRI(TRI), hasSeenClobber(false), AA(AA) {}
+}
 
-  /// \brief Make a note of \p MI for later queries to isSafeToHoist.
-  ///
-  /// May clobber this HazardDetector instance.  \see isClobbered.
-  void rememberInstruction(MachineInstr *MI);
+bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
+  if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
+    return false;
+  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
+  (void)IsRegMask;
 
-  /// \brief Return true if it is safe to hoist \p MI from after all the
-  /// instructions seen so far (via rememberInstruction) to before it.  If \p MI
-  /// has one and only one transitive dependency, set \p Dependency to that
-  /// instruction.  If there are more dependencies, return false.
-  bool isSafeToHoist(MachineInstr *MI, MachineInstr *&Dependency);
+  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
+         "Calls were filtered out above!");
 
-  /// \brief Return true if this instance of HazardDetector has been clobbered
-  /// (i.e. has no more useful information).
-  ///
-  /// A HazardDetecter is clobbered when it sees a construct it cannot
-  /// understand, and it would have to return a conservative answer for all
-  /// future queries.  Having a separate clobbered state lets the client code
-  /// bail early, without making queries about all of the future instructions
-  /// (which would have returned the most conservative answer anyway).
-  ///
-  /// Calling rememberInstruction or isSafeToHoist on a clobbered HazardDetector
-  /// is an error.
-  bool isClobbered() { return hasSeenClobber; }
-};
+  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
+  return llvm::all_of(MI->memoperands(), IsUnordered);
 }
 
+ImplicitNullChecks::DependenceResult
+ImplicitNullChecks::computeDependence(const MachineInstr *MI,
+                                      ArrayRef<MachineInstr *> Block) {
+  assert(llvm::all_of(Block, canHandle) && "Check this first!");
+  assert(!llvm::is_contained(Block, MI) && "Block must be exclusive of MI!");
 
-void HazardDetector::rememberInstruction(MachineInstr *MI) {
-  assert(!isClobbered() &&
-         "Don't add instructions to a clobbered hazard detector");
-
-  // There may be readonly calls that we can handle in theory, but for
-  // now we don't bother since we don't handle callee clobbered
-  // registers.
-  if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects()) {
-    hasSeenClobber = true;
-    return;
-  }
+  Optional<ArrayRef<MachineInstr *>::iterator> Dep;
 
-  for (auto *MMO : MI->memoperands()) {
-    // Right now we don't want to worry about LLVM's memory model.
-    if (!MMO->isUnordered()) {
-      hasSeenClobber = true;
-      return;
-    }
-  }
-
-  for (auto &MO : MI->operands()) {
-    if (!MO.isReg() || !MO.getReg())
+  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
+    if (canReorder(*I, MI))
       continue;
 
-    if (MO.isDef()) {
-      auto It = RegDefs.find(MO.getReg());
-      if (It == RegDefs.end())
-        RegDefs.insert({MO.getReg(), MI});
-      else {
-        assert(It->second && "Found null MI?");
-        It->second = getUnknownMI();
-      }
-    } else
-      RegUses.insert(MO.getReg());
+    if (Dep == None) {
+      // Found one possible dependency, keep track of it.
+      Dep = I;
+    } else {
+      // We found two dependencies, so bail out.
+      return {false, None};
+    }
   }
-}
-
-bool HazardDetector::isSafeToHoist(MachineInstr *MI,
-                                   MachineInstr *&Dependency) {
-  assert(!isClobbered() && "isSafeToHoist cannot do anything useful!");
-  Dependency = nullptr;
-
-  // Right now we don't want to worry about LLVM's memory model.  This can be
-  // made more precise later.
-  for (auto *MMO : MI->memoperands())
-    if (!MMO->isUnordered())
-      return false;
-
-  for (auto &MO : MI->operands()) {
-    if (MO.isReg() && MO.getReg()) {
-      for (auto &RegDef : RegDefs) {
-        unsigned Reg = RegDef.first;
-        MachineInstr *MI = RegDef.second;
-        if (!TRI.regsOverlap(Reg, MO.getReg()))
-          continue;
 
-        // We found a write-after-write or read-after-write, see if the
-        // instruction causing this dependency can be hoisted too.
-
-        if (MI == getUnknownMI())
-          // We don't have precise dependency information.
-          return false;
-
-        if (Dependency) {
-          if (Dependency == MI)
-            continue;
-          // We already have one dependency, and we can track only one.
-          return false;
-        }
-
-        // Now check if MI is actually a dependency that can be hoisted.
-
-        // We don't want to track transitive dependencies.  We already know that
-        // MI is the only instruction that defines Reg, but we need to be sure
-        // that it does not use any registers that have been defined (trivially
-        // checked below by ensuring that there are no register uses), and that
-        // it is the only def for every register it defines (otherwise we could
-        // violate a write after write hazard).
-        auto IsMIOperandSafe = [&](MachineOperand &MO) {
-          if (!MO.isReg() || !MO.getReg())
-            return true;
-          if (MO.isUse())
-            return false;
-          assert(MO.isDef() &&
-                 "Register MachineOperands must either be uses or be defs.");
-          assert(RegDefs.count(MO.getReg()) &&
-                 "All defs must be tracked in RegDefs by now!");
-
-          for (unsigned Reg : RegUses)
-            if (TRI.regsOverlap(Reg, MO.getReg()))
-              return false; // We found a write-after-read
-
-          for (auto &OtherDef : RegDefs) {
-            unsigned OtherReg = OtherDef.first;
-            MachineInstr *OtherMI = OtherDef.second;
-            if (OtherMI != MI && TRI.regsOverlap(OtherReg, MO.getReg()))
-              return false;
-          }
+  return {true, Dep};
+}
 
-          return true;
-        };
+bool ImplicitNullChecks::canReorder(const MachineInstr *A,
+                                    const MachineInstr *B) {
+  assert(canHandle(A) && canHandle(B) && "Precondition!");
+
+  // canHandle makes sure that we _can_ correctly analyze the dependencies
+  // between A and B here -- for instance, we should not be dealing with heap
+  // load-store dependencies here.
 
-        if (!all_of(MI->operands(), IsMIOperandSafe))
-          return false;
+  for (auto MOA : A->operands()) {
+    if (!(MOA.isReg() && MOA.getReg()))
+      continue;
 
-        // Now check for speculation safety:
-        bool SawStore = true;
-        if (!MI->isSafeToMove(&AA, SawStore) || MI->mayLoad())
-          return false;
+    unsigned RegA = MOA.getReg();
+    for (auto MOB : B->operands()) {
+      if (!(MOB.isReg() && MOB.getReg()))
+        continue;
 
-        Dependency = MI;
-      }
+      unsigned RegB = MOB.getReg();
 
-      if (MO.isDef())
-        for (unsigned Reg : RegUses)
-          if (TRI.regsOverlap(Reg, MO.getReg()))
-            return false;  // We found a write-after-read
+      if (TRI->regsOverlap(RegA, RegB))
+        return false;
     }
   }
 
@@ -432,63 +369,117 @@ bool ImplicitNullChecks::analyzeBlockFor
   // ptr could be some non-null invalid reference that never gets loaded from
   // because some_cond is always true.
 
-  unsigned PointerReg = MBP.LHS.getReg();
+  const unsigned PointerReg = MBP.LHS.getReg();
 
-  HazardDetector HD(*TRI, *AA);
+  SmallVector<MachineInstr *, 8> InstsSeenSoFar;
 
-  for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
-       ++MII) {
-    MachineInstr &MI = *MII;
-    unsigned BaseReg;
+  // Is \p MI a memory operation that can be used to null check the value in \p
+  // PointerReg?
+  auto IsSuitableMemoryOp = [&](MachineInstr &MI,
+                                ArrayRef<MachineInstr *> PrevInsts) {
     int64_t Offset;
-    MachineInstr *Dependency = nullptr;
-    if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
-      if (MI.mayLoad() && !MI.isPredicable() && BaseReg == PointerReg &&
-          Offset < PageSize && MI.getDesc().getNumDefs() <= 1 &&
-          HD.isSafeToHoist(&MI, Dependency)) {
-
-        auto DependencyOperandIsOk = [&](MachineOperand &MO) {
-          assert(!(MO.isReg() && MO.isUse()) &&
-                 "No transitive dependendencies please!");
-          if (!MO.isReg() || !MO.getReg() || !MO.isDef())
-            return true;
-
-          // Make sure that we won't clobber any live ins to the sibling block
-          // by hoisting Dependency.  For instance, we can't hoist INST to
-          // before the null check (even if it safe, and does not violate any
-          // dependencies in the non_null_block) if %rdx is live in to
-          // _null_block.
-          //
-          //    test %rcx, %rcx
-          //    je _null_block
-          //  _non_null_block:
-          //    %rdx<def> = INST
-          //    ...
-          if (AnyAliasLiveIn(TRI, NullSucc, MO.getReg()))
-            return false;
-
-          // Make sure Dependency isn't re-defining the base register.  Then we
-          // won't get the memory operation on the address we want.
-          if (TRI->regsOverlap(MO.getReg(), BaseReg))
-            return false;
-
-          return true;
-        };
-
-        bool DependencyOperandsAreOk =
-            !Dependency ||
-            all_of(Dependency->operands(), DependencyOperandIsOk);
-
-        if (DependencyOperandsAreOk) {
-          NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
-                                     NullSucc, Dependency);
-          return true;
-        }
-      }
+    unsigned BaseReg;
+
+    if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
+        BaseReg != PointerReg)
+      return false;
+
+    // We want the load to be issued at a sane offset from PointerReg, so that
+    // if PointerReg is null then the load reliably page faults.
+    if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
+      return false;
+
+    // Finally, we need to make sure that the load instruction actually is
+    // loading from PointerReg, and there isn't some re-definition of PointerReg
+    // between the compare and the load.
+    for (auto *PrevMI : PrevInsts)
+      for (auto &PrevMO : PrevMI->operands())
+        if (PrevMO.isReg() && PrevMO.getReg() &&
+            TRI->regsOverlap(PrevMO.getReg(), PointerReg))
+          return false;
+
+    return true;
+  };
+
+  // Return true if \p FaultingMI can be hoisted from after the instructions
+  // in \p InstsSeenSoFar to before them.  Set \p Dependence to a non-null value
+  // if we also need to (and legally can) hoist a dependency.
+  auto CanHoistLoadInst = [&](MachineInstr *FaultingMI,
+                              ArrayRef<MachineInstr *> InstsSeenSoFar,
+                              MachineInstr *&Dependence) {
+    auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
+    if (!DepResult.CanReorder)
+      return false;
+
+    if (!DepResult.PotentialDependence) {
+      Dependence = nullptr;
+      return true;
+    }
+
+    auto DependenceItr = *DepResult.PotentialDependence;
+    auto *DependenceMI = *DependenceItr;
+
+    // We don't want to reason about speculating loads.  Note -- at this point
+    // we should have already filtered out all of the other non-speculatable
+    // things, like calls and stores.
+    assert(canHandle(DependenceMI) && "Should never have reached here!");
+    if (DependenceMI->mayLoad())
+      return false;
+
+    for (auto &DependenceMO : DependenceMI->operands()) {
+      if (!(DependenceMO.isReg() && DependenceMO.getReg()))
+        continue;
+
+      // Make sure that we won't clobber any live ins to the sibling block by
+      // hoisting Dependency.  For instance, we can't hoist INST to before the
+      // null check (even if it is safe, and does not violate any dependencies in
+      // the non_null_block) if %rdx is live in to _null_block.
+      //
+      //    test %rcx, %rcx
+      //    je _null_block
+      //  _non_null_block:
+      //    %rdx<def> = INST
+      //    ...
+      //
+      // This restriction does not apply to the faulting load inst because in
+      // case the pointer loaded from is in the null page, the load will not
+      // semantically execute, and so will not affect machine state.  That is, if the load
+      // was loading into %rax and it faults, the value of %rax should stay the
+      // same as it would have been had the load not executed and we'd have
+      // branched to NullSucc directly.
+      if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
+        return false;
+
+      // The Dependency can't be re-defining the base register -- then we won't
+      // get the memory operation on the address we want.  This is already
+      // checked in \c IsSuitableMemoryOp.
+      assert(!TRI->regsOverlap(DependenceMO.getReg(), PointerReg) &&
+             "Should have been checked before!");
+    }
+
+    auto DepDepResult = computeDependence(
+        DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});
 
-    HD.rememberInstruction(&MI);
-    if (HD.isClobbered())
+    if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
       return false;
+
+    Dependence = DependenceMI;
+    return true;
+  };
+
+  for (auto &MI : *NotNullSucc) {
+    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
+      return false;
+
+    MachineInstr *Dependence;
+    if (IsSuitableMemoryOp(MI, InstsSeenSoFar) &&
+        CanHoistLoadInst(&MI, InstsSeenSoFar, Dependence)) {
+      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
+                                 NullSucc, Dependence);
+      return true;
+    }
+
+    InstsSeenSoFar.push_back(&MI);
   }
 
   return false;
@@ -584,6 +575,7 @@ void ImplicitNullChecks::rewriteNullChec
   }
 }
 
+
 char ImplicitNullChecks::ID = 0;
 char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
 INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",

Modified: llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir?rev=290394&r1=290393&r2=290394&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir (original)
+++ llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir Thu Dec 22 18:41:21 2016
@@ -79,6 +79,24 @@
     ret i32 200
   }
 
+  ;; Positive test
+  define i32 @imp_null_check_with_bitwise_op_4(i32* %x, i32 %val) {
+  entry:
+    br i1 undef, label %is_null, label %not_null, !make.implicit !0
+
+  is_null:
+    ret i32 42
+
+  not_null:
+    br i1 undef, label %ret_100, label %ret_200
+
+  ret_100:
+    ret i32 100
+
+  ret_200:
+    ret i32 200
+  }
+
   declare void @f() readonly
 
   define i32 @no_hoist_across_call(i32* %ptr) {
@@ -278,6 +296,49 @@ body:             |
     CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
     JE_1 %bb.4.ret_100, implicit %eflags
 
+  bb.2.ret_200:
+    %eax = MOV32ri 200
+    RET 0, %eax
+
+  bb.3.is_null:
+    %eax = MOV32ri 42
+    RET 0, %eax
+
+  bb.4.ret_100:
+    %eax = MOV32ri 100
+    RET 0, %eax
+
+...
+---
+name:            imp_null_check_with_bitwise_op_4
+# CHECK-LABEL: name:            imp_null_check_with_bitwise_op_4
+alignment:       4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+# CHECK:  bb.0.entry:
+# CHECK:  %rbx = MOV64rr %rdx
+# CHECK-NEXT:  %rdi = FAULTING_LOAD_OP %bb.3.is_null, 260, killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+
+body:             |
+  bb.0.entry:
+    successors: %bb.3.is_null, %bb.1.not_null
+    liveins: %rsi, %rdi, %rdx
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.3.is_null, implicit %eflags
+
+  bb.1.not_null:
+    successors: %bb.4.ret_100, %bb.2.ret_200
+    liveins: %rsi, %rdi, %rdx
+
+    %rbx  = MOV64rr %rdx
+    %rdi = AND64rm killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+    %rdx = MOV64ri 0
+    CMP64rr killed %rdi, killed %rsi, implicit-def %eflags
+    JE_1 %bb.4.ret_100, implicit %eflags
+
   bb.2.ret_200:
     %eax = MOV32ri 200
     RET 0, %eax



