[llvm] bf22593 - [InferAddressSpaces] Support assumed addrspaces from addrspace predicates.

Michael Liao via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 8 13:52:14 PST 2021


Author: Michael Liao
Date: 2021-11-08T16:51:57-05:00
New Revision: bf225939bc3acf936c962f24423d3bb5ddd4c93f

URL: https://github.com/llvm/llvm-project/commit/bf225939bc3acf936c962f24423d3bb5ddd4c93f
DIFF: https://github.com/llvm/llvm-project/commit/bf225939bc3acf936c962f24423d3bb5ddd4c93f.diff

LOG: [InferAddressSpaces] Support assumed addrspaces from addrspace predicates.

- CUDA cannot associate memory spaces with pointer types. Even though Clang could add extra attributes to specify the address space explicitly on a pointer type, doing so breaks portability between Clang and NVCC.
- This change proposes inferring the address space of a pointer from assumptions built on target-specific address space predicates, such as `__isGlobal` from CUDA. E.g.,

```
  void foo(float *p) {
    __builtin_assume(__isGlobal(p));
    // From there, we could assume p is a global pointer instead of a
    // generic one.
  }
```

This makes the code portable without introducing implementation-specific features.

Note that NVCC has supported `__builtin_assume` since version 11.
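At the IR level, the assumption above lowers to a target-specific addrspace predicate intrinsic feeding `@llvm.assume`, which is the pattern the new tests in this patch exercise. Below is a minimal NVPTX-flavored sketch, simplified from the added `builtin-assumed-addrspace.ll` test; the function name is illustrative:

```
define float @f(float* %p) {
entry:
  %0 = bitcast float* %p to i8*
  ; __isGlobal(p) becomes a call to the target's addrspace predicate.
  %1 = call i1 @llvm.nvvm.isspacep.global(i8* %0)
  tail call void @llvm.assume(i1 %1)
  ; InferAddressSpaces may now rewrite this use of %p, which is dominated
  ; by the assume, via an addrspacecast to float addrspace(1)*.
  %2 = load float, float* %p, align 4
  ret float %2
}

declare void @llvm.assume(i1)
declare i1 @llvm.nvvm.isspacep.global(i8*)
```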

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D112041

Added: 
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
    llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll

Modified: 
    clang/test/CodeGen/thinlto-distributed-newpm.ll
    llvm/include/llvm/Analysis/AssumptionCache.h
    llvm/include/llvm/Analysis/TargetTransformInfo.h
    llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
    llvm/include/llvm/CodeGen/BasicTTIImpl.h
    llvm/include/llvm/Target/TargetMachine.h
    llvm/lib/Analysis/AssumptionCache.cpp
    llvm/lib/Analysis/TargetTransformInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
    llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
    llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
    llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
    llvm/test/Other/loop-pm-invalidation.ll
    llvm/test/Other/new-pass-manager.ll
    llvm/test/Other/new-pm-lto-defaults.ll
    llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
    llvm/test/Transforms/LoopRotate/pr35210.ll
    llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/thinlto-distributed-newpm.ll b/clang/test/CodeGen/thinlto-distributed-newpm.ll
index 8f7fc5e9b8411..87dc19f29e1ba 100644
--- a/clang/test/CodeGen/thinlto-distributed-newpm.ll
+++ b/clang/test/CodeGen/thinlto-distributed-newpm.ll
@@ -47,11 +47,11 @@
 ; CHECK-O: Running pass: PromotePass
 ; CHECK-O: Running analysis: DominatorTreeAnalysis on main
 ; CHECK-O: Running analysis: AssumptionAnalysis on main
+; CHECK-O: Running analysis: TargetIRAnalysis on main
 ; CHECK-O: Running pass: DeadArgumentEliminationPass
 ; CHECK-O: Running pass: InstCombinePass on main
 ; CHECK-O: Running analysis: TargetLibraryAnalysis on main
 ; CHECK-O: Running analysis: OptimizationRemarkEmitterAnalysis on main
-; CHECK-O: Running analysis: TargetIRAnalysis on main
 ; CHECK-O: Running analysis: AAManager on main
 ; CHECK-O: Running analysis: BasicAA on main
 ; CHECK-O: Running analysis: ScopedNoAliasAA on main

diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h
index 51d04bd8cf022..12dd9b04c9323 100644
--- a/llvm/include/llvm/Analysis/AssumptionCache.h
+++ b/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -29,6 +29,7 @@ namespace llvm {
 class AssumeInst;
 class Function;
 class raw_ostream;
+class TargetTransformInfo;
 class Value;
 
 /// A cache of \@llvm.assume calls within a function.
@@ -59,6 +60,8 @@ class AssumptionCache {
   /// We track this to lazily populate our assumptions.
   Function &F;
 
+  TargetTransformInfo *TTI;
+
   /// Vector of weak value handles to calls of the \@llvm.assume
   /// intrinsic.
   SmallVector<ResultElem, 4> AssumeHandles;
@@ -103,7 +106,8 @@ class AssumptionCache {
 public:
   /// Construct an AssumptionCache from a function by scanning all of
   /// its instructions.
-  AssumptionCache(Function &F) : F(F) {}
+  AssumptionCache(Function &F, TargetTransformInfo *TTI = nullptr)
+      : F(F), TTI(TTI) {}
 
   /// This cache is designed to be self-updating and so it should never be
   /// invalidated.
@@ -174,9 +178,7 @@ class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
 public:
   using Result = AssumptionCache;
 
-  AssumptionCache run(Function &F, FunctionAnalysisManager &) {
-    return AssumptionCache(F);
-  }
+  AssumptionCache run(Function &F, FunctionAnalysisManager &);
 };
 
 /// Printer pass for the \c AssumptionAnalysis results.

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index e3cf87612e9c3..4312c2ae0de63 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -30,6 +30,7 @@
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/InstructionCost.h"
 #include <functional>
+#include <utility>
 
 namespace llvm {
 
@@ -389,6 +390,9 @@ class TargetTransformInfo {
 
   unsigned getAssumedAddrSpace(const Value *V) const;
 
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const;
+
   /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
   /// operand index that collectFlatAddressOperands returned for the intrinsic.
@@ -1481,6 +1485,8 @@ class TargetTransformInfo::Concept {
   virtual bool
   canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const = 0;
   virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
+  virtual std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const = 0;
   virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                   Value *OldV,
                                                   Value *NewV) const = 0;
@@ -1824,6 +1830,11 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
     return Impl.getAssumedAddrSpace(V);
   }
 
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const override {
+    return Impl.getPredicatedAddrSpace(V);
+  }
+
   Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                           Value *NewV) const override {
     return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 6f02b88e17db2..707912dd38730 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -24,6 +24,7 @@
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/IR/Type.h"
+#include <utility>
 
 using namespace llvm::PatternMatch;
 
@@ -110,6 +111,11 @@ class TargetTransformInfoImplBase {
 
   unsigned getAssumedAddrSpace(const Value *V) const { return -1; }
 
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const {
+    return std::make_pair(nullptr, -1);
+  }
+
   Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                           Value *NewV) const {
     return nullptr;

diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 8f43caf1eb282..aeefa01e2ff00 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -283,6 +283,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
   }
 
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const {
+    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
+  }
+
   Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                           Value *NewV) const {
     return nullptr;

diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index 9e8853853aa26..cae30ab3a7f3a 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -25,6 +25,7 @@
 #include "llvm/Target/CGPassBuilderOption.h"
 #include "llvm/Target/TargetOptions.h"
 #include <string>
+#include <utility>
 
 namespace llvm {
 
@@ -319,6 +320,18 @@ class TargetMachine {
   /// properties.
   virtual unsigned getAssumedAddrSpace(const Value *V) const { return -1; }
 
+  /// If the specified predicate checks whether a generic pointer falls within
+  /// a specified address space, return that generic pointer and the address
+  /// space being queried.
+  ///
+  /// Such predicates could be specified in @llvm.assume intrinsics for the
+  /// optimizer to assume that the given generic pointer always falls within
+  /// the address space based on that predicate.
+  virtual std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const {
+    return std::make_pair(nullptr, -1);
+  }
+
   /// Get a \c TargetIRAnalysis appropriate for the target.
   ///
   /// This is used to construct the new pass manager's target IR analysis pass,

diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp
index 0d8319ec37b81..3e0214e21ecdf 100644
--- a/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/llvm/lib/Analysis/AssumptionCache.cpp
@@ -16,6 +16,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/InstrTypes.h"
@@ -56,7 +57,7 @@ AssumptionCache::getOrInsertAffectedValues(Value *V) {
 }
 
 static void
-findAffectedValues(CallBase *CI,
+findAffectedValues(CallBase *CI, TargetTransformInfo *TTI,
                    SmallVectorImpl<AssumptionCache::ResultElem> &Affected) {
   // Note: This code must be kept in-sync with the code in
   // computeKnownBitsFromAssume in ValueTracking.
@@ -124,11 +125,19 @@ findAffectedValues(CallBase *CI,
         match(B, m_ConstantInt()))
       AddAffected(X);
   }
+
+  if (TTI) {
+    const Value *Ptr;
+    unsigned AS;
+    std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(Cond);
+    if (Ptr)
+      AddAffected(const_cast<Value *>(Ptr->stripInBoundsOffsets()));
+  }
 }
 
 void AssumptionCache::updateAffectedValues(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
-  findAffectedValues(CI, Affected);
+  findAffectedValues(CI, TTI, Affected);
 
   for (auto &AV : Affected) {
     auto &AVV = getOrInsertAffectedValues(AV.Assume);
@@ -141,7 +150,7 @@ void AssumptionCache::updateAffectedValues(AssumeInst *CI) {
 
 void AssumptionCache::unregisterAssumption(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
-  findAffectedValues(CI, Affected);
+  findAffectedValues(CI, TTI, Affected);
 
   for (auto &AV : Affected) {
     auto AVI = AffectedValues.find_as(AV.Assume);
@@ -248,6 +257,12 @@ void AssumptionCache::registerAssumption(AssumeInst *CI) {
   updateAffectedValues(CI);
 }
 
+AssumptionCache AssumptionAnalysis::run(Function &F,
+                                        FunctionAnalysisManager &FAM) {
+  auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
+  return AssumptionCache(F, &TTI);
+}
+
 AnalysisKey AssumptionAnalysis::Key;
 
 PreservedAnalyses AssumptionPrinterPass::run(Function &F,
@@ -278,10 +293,13 @@ AssumptionCache &AssumptionCacheTracker::getAssumptionCache(Function &F) {
   if (I != AssumptionCaches.end())
     return *I->second;
 
+  auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
+  auto *TTI = TTIWP ? &TTIWP->getTTI(F) : nullptr;
+
   // Ok, build a new cache by scanning the function, insert it and the value
   // handle into our map, and return the newly populated cache.
   auto IP = AssumptionCaches.insert(std::make_pair(
-      FunctionCallbackVH(&F, this), std::make_unique<AssumptionCache>(F)));
+      FunctionCallbackVH(&F, this), std::make_unique<AssumptionCache>(F, TTI)));
   assert(IP.second && "Scanning function already in the map?");
   return *IP.first->second;
 }

diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 3200c72546b75..8c5254dbf6156 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -268,6 +268,11 @@ unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
   return TTIImpl->getAssumedAddrSpace(V);
 }
 
+std::pair<const Value *, unsigned>
+TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
+  return TTIImpl->getPredicatedAddrSpace(V);
+}
+
 Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
     IntrinsicInst *II, Value *OldV, Value *NewV) const {
   return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 3e41ae283df52..de11676279f24 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -36,8 +36,10 @@
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/LegacyPassManager.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/MC/TargetRegistry.h"
 #include "llvm/Passes/PassBuilder.h"
@@ -780,6 +782,33 @@ unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
   return AMDGPUAS::GLOBAL_ADDRESS;
 }
 
+std::pair<const Value *, unsigned>
+AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
+  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::amdgcn_is_shared:
+      return std::make_pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
+    case Intrinsic::amdgcn_is_private:
+      return std::make_pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
+    default:
+      break;
+    }
+    return std::make_pair(nullptr, -1);
+  }
+  // Check the global pointer predicate based on
+  // (!is_shared(p) && !is_private(p)). Note that logical 'and' is commutative
+  // and the order of 'is_shared' and 'is_private' is not significant.
+  Value *Ptr;
+  if (match(
+          const_cast<Value *>(V),
+          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
+                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
+                      m_Deferred(Ptr))))))
+    return std::make_pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
+
+  return std::make_pair(nullptr, -1);
+}
+
 //===----------------------------------------------------------------------===//
 // GCN Target Machine (SI+)
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index fb3ea10b1fd8d..0ff2db2a52d92 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -17,6 +17,7 @@
 #include "GCNSubtarget.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/Target/TargetMachine.h"
+#include <utility>
 
 namespace llvm {
 
@@ -63,6 +64,9 @@ class AMDGPUTargetMachine : public LLVMTargetMachine {
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
 
   unsigned getAssumedAddrSpace(const Value *V) const override;
+
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const override;
 };
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index bca17f9693701..0a1c61a357958 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -23,6 +23,7 @@
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
 #include "llvm/IR/LegacyPassManager.h"
 #include "llvm/MC/TargetRegistry.h"
 #include "llvm/Pass.h"
@@ -240,6 +241,25 @@ NVPTXTargetMachine::getTargetTransformInfo(const Function &F) {
   return TargetTransformInfo(NVPTXTTIImpl(this, F));
 }
 
+std::pair<const Value *, unsigned>
+NVPTXTargetMachine::getPredicatedAddrSpace(const Value *V) const {
+  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::nvvm_isspacep_const:
+      return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_CONST);
+    case Intrinsic::nvvm_isspacep_global:
+      return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_GLOBAL);
+    case Intrinsic::nvvm_isspacep_local:
+      return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_LOCAL);
+    case Intrinsic::nvvm_isspacep_shared:
+      return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_SHARED);
+    default:
+      break;
+    }
+  }
+  return std::make_pair(nullptr, -1);
+}
+
 void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
   if (getOptLevel() == CodeGenOpt::Aggressive)
     addPass(createGVNPass());

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
index 39647eb65c0c2..7a69197abcffb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -16,6 +16,7 @@
 #include "ManagedStringPool.h"
 #include "NVPTXSubtarget.h"
 #include "llvm/Target/TargetMachine.h"
+#include <utility>
 
 namespace llvm {
 
@@ -69,6 +70,9 @@ class NVPTXTargetMachine : public LLVMTargetMachine {
   bool isMachineVerifierClean() const override {
     return false;
   }
+
+  std::pair<const Value *, unsigned>
+  getPredicatedAddrSpace(const Value *V) const override;
 }; // NVPTXTargetMachine.
 
 class NVPTXTargetMachine32 : public NVPTXTargetMachine {

diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 4bbc0da53f814..883d4afff3bd8 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -96,10 +96,13 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstIterator.h"
@@ -115,6 +118,7 @@
 #include "llvm/IR/User.h"
 #include "llvm/IR/Value.h"
 #include "llvm/IR/ValueHandle.h"
+#include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
@@ -146,6 +150,14 @@ static const unsigned UninitializedAddressSpace =
 namespace {
 
 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
+// Unlike ValueToAddrSpaceMapTy, where a new addrspace is inferred on the
+// *def* of a value, PredicatedAddrSpaceMapTy is a map where a new addrspace is
+// inferred on the *use* of a pointer. This map is introduced to infer
+// addrspaces from the addrspace predicate assumptions built from the assume
+// intrinsic. In that scenario, only specific uses (under a valid assumption
+// context) can be inferred to have a new addrspace.
+using PredicatedAddrSpaceMapTy =
+    DenseMap<std::pair<const Value *, const Value *>, unsigned>;
 using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;
 
 class InferAddressSpaces : public FunctionPass {
@@ -160,6 +172,8 @@ class InferAddressSpaces : public FunctionPass {
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
+    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.addRequired<AssumptionCacheTracker>();
     AU.addRequired<TargetTransformInfoWrapperPass>();
   }
 
@@ -167,6 +181,8 @@ class InferAddressSpaces : public FunctionPass {
 };
 
 class InferAddressSpacesImpl {
+  AssumptionCache &AC;
+  DominatorTree *DT = nullptr;
   const TargetTransformInfo *TTI = nullptr;
   const DataLayout *DL = nullptr;
 
@@ -174,21 +190,24 @@ class InferAddressSpacesImpl {
   /// possible.
   unsigned FlatAddrSpace = 0;
 
-  // Returns the new address space of V if updated; otherwise, returns None.
-  Optional<unsigned>
-  updateAddressSpace(const Value &V,
-                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;
+  // Try to update the address space of V. Returns true if V is updated and
+  // false otherwise.
+  bool updateAddressSpace(const Value &V,
+                          ValueToAddrSpaceMapTy &InferredAddrSpace,
+                          PredicatedAddrSpaceMapTy &PredicatedAS) const;
 
   // Tries to infer the specific address space of each address expression in
   // Postorder.
   void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
-                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;
+                          ValueToAddrSpaceMapTy &InferredAddrSpace,
+                          PredicatedAddrSpaceMapTy &PredicatedAS) const;
 
   bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;
 
   Value *cloneInstructionWithNewAddressSpace(
       Instruction *I, unsigned NewAddrSpace,
       const ValueToValueMapTy &ValueWithNewAddrSpace,
+      const PredicatedAddrSpaceMapTy &PredicatedAS,
       SmallVectorImpl<const Use *> *UndefUsesToFix) const;
 
   // Changes the flat address expressions in function F to point to specific
@@ -196,7 +215,8 @@ class InferAddressSpacesImpl {
   // all flat expressions in the use-def graph of function F.
   bool rewriteWithNewAddressSpaces(
       const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
-      const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;
+      const ValueToAddrSpaceMapTy &InferredAddrSpace,
+      const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const;
 
   void appendsFlatAddressExpressionToPostorderStack(
       Value *V, PostorderStackTy &PostorderStack,
@@ -211,14 +231,18 @@ class InferAddressSpacesImpl {
   std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;
 
   Value *cloneValueWithNewAddressSpace(
-    Value *V, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) const;
+      Value *V, unsigned NewAddrSpace,
+      const ValueToValueMapTy &ValueWithNewAddrSpace,
+      const PredicatedAddrSpaceMapTy &PredicatedAS,
+      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
   unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
 
+  unsigned getPredicatedAddrSpace(const Value &V, Value *Opnd) const;
+
 public:
-  InferAddressSpacesImpl(const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
-      : TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
+  InferAddressSpacesImpl(AssumptionCache &AC, DominatorTree *DT,
+                         const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
+      : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
   bool run(Function &F);
 };
 
@@ -232,8 +256,12 @@ void initializeInferAddressSpacesPass(PassRegistry &);
 
 } // end namespace llvm
 
-INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
-                false, false)
+INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
+                    false, false)
 
 // Check whether that's a no-op pointer bitcast using a pair of
 // `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over
@@ -505,6 +533,7 @@ InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
 static Value *operandWithNewAddressSpaceOrCreateUndef(
     const Use &OperandUse, unsigned NewAddrSpace,
     const ValueToValueMapTy &ValueWithNewAddrSpace,
+    const PredicatedAddrSpaceMapTy &PredicatedAS,
     SmallVectorImpl<const Use *> *UndefUsesToFix) {
   Value *Operand = OperandUse.get();
 
@@ -517,6 +546,18 @@ static Value *operandWithNewAddressSpaceOrCreateUndef(
   if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
     return NewOperand;
 
+  Instruction *Inst = cast<Instruction>(OperandUse.getUser());
+  auto I = PredicatedAS.find(std::make_pair(Inst, Operand));
+  if (I != PredicatedAS.end()) {
+    // Insert an addrspacecast on that operand before the user.
+    unsigned NewAS = I->second;
+    Type *NewPtrTy = PointerType::getWithSamePointeeType(
+        cast<PointerType>(Operand->getType()), NewAS);
+    auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy);
+    NewI->insertBefore(Inst);
+    return NewI;
+  }
+
   UndefUsesToFix->push_back(&OperandUse);
   return UndefValue::get(NewPtrTy);
 }
@@ -536,6 +577,7 @@ static Value *operandWithNewAddressSpaceOrCreateUndef(
 Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
     Instruction *I, unsigned NewAddrSpace,
     const ValueToValueMapTy &ValueWithNewAddrSpace,
+    const PredicatedAddrSpaceMapTy &PredicatedAS,
     SmallVectorImpl<const Use *> *UndefUsesToFix) const {
   Type *NewPtrType = PointerType::getWithSamePointeeType(
       cast<PointerType>(I->getType()), NewAddrSpace);
@@ -557,7 +599,7 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
     assert(II->getIntrinsicID() == Intrinsic::ptrmask);
     Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
         II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
-        UndefUsesToFix);
+        PredicatedAS, UndefUsesToFix);
     Value *Rewrite =
         TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
     if (Rewrite) {
@@ -586,7 +628,8 @@ Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
       NewPointerOperands.push_back(nullptr);
     else
       NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
-                                     OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
+          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS,
+          UndefUsesToFix));
   }
 
   switch (I->getOpcode()) {
@@ -723,6 +766,7 @@ static Value *cloneConstantExprWithNewAddressSpace(
 Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
     Value *V, unsigned NewAddrSpace,
     const ValueToValueMapTy &ValueWithNewAddrSpace,
+    const PredicatedAddrSpaceMapTy &PredicatedAS,
     SmallVectorImpl<const Use *> *UndefUsesToFix) const {
   // All values in Postorder are flat address expressions.
   assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
@@ -730,7 +774,7 @@ Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
 
   if (Instruction *I = dyn_cast<Instruction>(V)) {
     Value *NewV = cloneInstructionWithNewAddressSpace(
-      I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
+        I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, UndefUsesToFix);
     if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
       if (NewI->getParent() == nullptr) {
         NewI->insertBefore(I);
@@ -778,46 +822,43 @@ bool InferAddressSpacesImpl::run(Function &F) {
   // Runs a data-flow analysis to refine the address spaces of every expression
   // in Postorder.
   ValueToAddrSpaceMapTy InferredAddrSpace;
-  inferAddressSpaces(Postorder, &InferredAddrSpace);
+  PredicatedAddrSpaceMapTy PredicatedAS;
+  inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);
 
 // Changes the address spaces of the flat address expressions that are inferred
   // to point to a specific address space.
-  return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, &F);
+  return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace,
+                                     PredicatedAS, &F);
 }
 
 // Constants need to be tracked through RAUW to handle cases with nested
 // constant expressions, so wrap values in WeakTrackingVH.
 void InferAddressSpacesImpl::inferAddressSpaces(
     ArrayRef<WeakTrackingVH> Postorder,
-    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
+    ValueToAddrSpaceMapTy &InferredAddrSpace,
+    PredicatedAddrSpaceMapTy &PredicatedAS) const {
   SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
   // Initially, all expressions are in the uninitialized address space.
   for (Value *V : Postorder)
-    (*InferredAddrSpace)[V] = UninitializedAddressSpace;
+    InferredAddrSpace[V] = UninitializedAddressSpace;
 
   while (!Worklist.empty()) {
     Value *V = Worklist.pop_back_val();
 
-    // Tries to update the address space of the stack top according to the
+    // Try to update the address space of the stack top according to the
     // address spaces of its operands.
-    LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
-    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
-    if (!NewAS.hasValue())
+    if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS))
       continue;
-    // If any updates are made, grabs its users to the worklist because
-    // their address spaces can also be possibly updated.
-    LLVM_DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
-    (*InferredAddrSpace)[V] = NewAS.getValue();
 
     for (Value *User : V->users()) {
       // Skip if User is already in the worklist.
       if (Worklist.count(User))
         continue;
 
-      auto Pos = InferredAddrSpace->find(User);
+      auto Pos = InferredAddrSpace.find(User);
       // Our algorithm only updates the address spaces of flat address
       // expressions, which are those in InferredAddrSpace.
-      if (Pos == InferredAddrSpace->end())
+      if (Pos == InferredAddrSpace.end())
         continue;
 
       // Function updateAddressSpace moves the address space down a lattice
@@ -831,10 +872,37 @@ void InferAddressSpacesImpl::inferAddressSpaces(
   }
 }
 
-Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace(
-    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
+unsigned InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &V,
+                                                        Value *Opnd) const {
+  const Instruction *I = dyn_cast<Instruction>(&V);
+  if (!I)
+    return UninitializedAddressSpace;
+
+  Opnd = Opnd->stripInBoundsOffsets();
+  for (auto &AssumeVH : AC.assumptionsFor(Opnd)) {
+    if (!AssumeVH)
+      continue;
+    CallInst *CI = cast<CallInst>(AssumeVH);
+    if (!isValidAssumeForContext(CI, I, DT))
+      continue;
+
+    const Value *Ptr;
+    unsigned AS;
+    std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0));
+    if (Ptr)
+      return AS;
+  }
+
+  return UninitializedAddressSpace;
+}
+
+bool InferAddressSpacesImpl::updateAddressSpace(
+    const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace,
+    PredicatedAddrSpaceMapTy &PredicatedAS) const {
   assert(InferredAddrSpace.count(&V));
 
+  LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << V << '\n');
+
   // The new inferred address space equals the join of the address spaces
   // of all its pointer operands.
   unsigned NewAS = UninitializedAddressSpace;
@@ -860,7 +928,7 @@ Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace(
     // address space is known.
     if ((C1 && Src0AS == UninitializedAddressSpace) ||
         (C0 && Src1AS == UninitializedAddressSpace))
-      return None;
+      return false;
 
     if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
       NewAS = Src1AS;
@@ -877,10 +945,23 @@ Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace(
       // Otherwise, infer the address space from its pointer operands.
       for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
         auto I = InferredAddrSpace.find(PtrOperand);
-        unsigned OperandAS =
-            I != InferredAddrSpace.end()
-                ? I->second
-                : PtrOperand->getType()->getPointerAddressSpace();
+        unsigned OperandAS;
+        if (I == InferredAddrSpace.end()) {
+          OperandAS = PtrOperand->getType()->getPointerAddressSpace();
+          if (OperandAS == FlatAddrSpace) {
+            // Check AC for assumption dominating V.
+            unsigned AS = getPredicatedAddrSpace(V, PtrOperand);
+            if (AS != UninitializedAddressSpace) {
+              LLVM_DEBUG(dbgs()
+                         << "  deduce operand AS from the predicate addrspace "
+                         << AS << '\n');
+              OperandAS = AS;
+              // Record this use with the predicated AS.
+              PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS;
+            }
+          }
+        } else
+          OperandAS = I->second;
 
         // join(flat, *) = flat. So we can break if NewAS is already flat.
         NewAS = joinAddressSpaces(NewAS, OperandAS);
@@ -893,8 +974,13 @@ Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace(
   unsigned OldAS = InferredAddrSpace.lookup(&V);
   assert(OldAS != FlatAddrSpace);
   if (OldAS == NewAS)
-    return None;
-  return NewAS;
+    return false;
+
+  // If any updates are made, add its users to the worklist because their
+  // address spaces can possibly be updated as well.
+  LLVM_DEBUG(dbgs() << "  to " << NewAS << '\n');
+  InferredAddrSpace[&V] = NewAS;
+  return true;
 }
 
 /// \p returns true if \p U is the pointer operand of a memory instruction with
@@ -1025,7 +1111,8 @@ static Value::use_iterator skipToNextUser(Value::use_iterator I,
 
 bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
     const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
-    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
+    const ValueToAddrSpaceMapTy &InferredAddrSpace,
+    const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const {
   // For each address expression to be modified, creates a clone of it with its
   // pointer operands converted to the new address space. Since the pointer
   // operands are converted, the clone is naturally in the new address space by
@@ -1041,8 +1128,9 @@ bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
       continue;
 
     if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
-      Value *New = cloneValueWithNewAddressSpace(
-          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
+      Value *New =
+          cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace,
+                                        PredicatedAS, &UndefUsesToFix);
       if (New)
         ValueWithNewAddrSpace[V] = New;
     }
@@ -1199,7 +1287,10 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
+  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
   return InferAddressSpacesImpl(
+             getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT,
              &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
              FlatAddrSpace)
       .run(F);
@@ -1217,11 +1308,14 @@ InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
 PreservedAnalyses InferAddressSpacesPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
   bool Changed =
-      InferAddressSpacesImpl(&AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
+      InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F),
+                             AM.getCachedResult<DominatorTreeAnalysis>(F),
+                             &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
           .run(F);
   if (Changed) {
     PreservedAnalyses PA;
     PA.preserveSet<CFGAnalyses>();
+    PA.preserve<DominatorTreeAnalysis>();
     return PA;
   }
   return PreservedAnalyses::all();

diff --git a/llvm/test/Other/loop-pm-invalidation.ll b/llvm/test/Other/loop-pm-invalidation.ll
index 691abf7d73b34..ba01837af2223 100644
--- a/llvm/test/Other/loop-pm-invalidation.ll
+++ b/llvm/test/Other/loop-pm-invalidation.ll
@@ -22,6 +22,7 @@ define void @no_loops() {
 ; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
 ; CHECK-LOOP-INV-NEXT: Invalidating analysis: LoopAnalysis
@@ -33,6 +34,7 @@ define void @no_loops() {
 ; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
 ; CHECK-SCEV-INV-NEXT: Running pass: LoopSimplifyPass
@@ -47,11 +49,11 @@ define void @one_loop(i1* %ptr) {
 ; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
 ; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
@@ -70,11 +72,11 @@ define void @one_loop(i1* %ptr) {
 ; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
 ; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
@@ -103,11 +105,11 @@ define void @nested_loops(i1* %ptr) {
 ; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
 ; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
@@ -129,11 +131,11 @@ define void @nested_loops(i1* %ptr) {
 ; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
 ; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
@@ -172,11 +174,11 @@ define void @dead_loop() {
 ; CHECK-LOOP-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-LOOP-INV-NEXT: Running analysis: AAManager
 ; CHECK-LOOP-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-LOOP-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-LOOP-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-LOOP-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-LOOP-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}LoopAnalysis
@@ -195,11 +197,11 @@ define void @dead_loop() {
 ; CHECK-SCEV-INV-NEXT: Running analysis: LoopAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running pass: LCSSAPass
 ; CHECK-SCEV-INV-NEXT: Running analysis: AAManager
 ; CHECK-SCEV-INV-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-SCEV-INV-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-SCEV-INV-NEXT: Running pass: NoOpLoopPass
 ; CHECK-SCEV-INV-NEXT: Running pass: InvalidateAnalysisPass<{{.*}}ScalarEvolutionAnalysis
@@ -216,11 +218,11 @@ define void @dead_loop() {
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: LoopAnalysis
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: LCSSAPass
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: AAManager
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: TargetLibraryAnalysis
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Loop
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: NoOpLoopPass
 ; CHECK-SCEV-INV-AFTER-DELETE-NEXT: Running pass: LoopDeletionPass

diff --git a/llvm/test/Other/new-pass-manager.ll b/llvm/test/Other/new-pass-manager.ll
index 8fa181ed704c0..f9cea34e22226 100644
--- a/llvm/test/Other/new-pass-manager.ll
+++ b/llvm/test/Other/new-pass-manager.ll
@@ -340,6 +340,7 @@
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: LoopAnalysis
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Invalidating analysis: PreservedCFGCheckerAnalysis on foo
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: LCSSAPass
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: PreservedCFGCheckerAnalysis on foo
@@ -350,7 +351,6 @@
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TypeBasedAA
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: OuterAnalysisManagerProxy
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis
-; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}>
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: RepeatedPass
 ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass

diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index a88525bf0762b..adb268cc7b1d9 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -51,6 +51,7 @@
 ; CHECK-O-NEXT: Running analysis: AAManager
 ; CHECK-O-NEXT: Running analysis: BasicAA
 ; CHECK-O1-NEXT: Running analysis: AssumptionAnalysis on foo
+; CHECK-O1-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-O1-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
 ; CHECK-O-NEXT: Running analysis: TypeBasedAA

diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index 74beb8c588a45..b880fb2c0ca86 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -104,20 +104,16 @@
 ; CHECK-O-NEXT: Running analysis: AAManager
 ; CHECK-O-NEXT: Running analysis: BasicAA
 ; CHECK-O-NEXT: Running analysis: AssumptionAnalysis
+; CHECK-O-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-O-NEXT: Running analysis: DominatorTreeAnalysis
 ; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
 ; CHECK-O-NEXT: Running analysis: TypeBasedAA
 ; CHECK-O-NEXT: Running analysis: OuterAnalysisManagerProxy
 ; CHECK-O3-NEXT: Running pass: ArgumentPromotionPass
-; CHECK-O3-NEXT: Running analysis: TargetIRAnalysis
 ; CHECK-O2-NEXT: Running pass: OpenMPOptCGSCCPass
 ; CHECK-O3-NEXT: Running pass: OpenMPOptCGSCCPass
 ; CHECK-O-NEXT: Running pass: SROA
 ; CHECK-O-NEXT: Running pass: EarlyCSEPass
-; CHECK-O1-NEXT: Running analysis: TargetIRAnalysis on foo
-; CHECK-O2-NEXT: Running analysis: TargetIRAnalysis on foo
-; CHECK-Os-NEXT: Running analysis: TargetIRAnalysis on foo
-; CHECK-Oz-NEXT: Running analysis: TargetIRAnalysis on foo
 ; CHECK-O-NEXT: Running analysis: MemorySSAAnalysis
 ; CHECK-O23SZ-NEXT: Running pass: SpeculativeExecutionPass
 ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
new file mode 100644
index 0000000000000..33aa92e6b305e
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
@@ -0,0 +1,93 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -o - %s | FileCheck %s
+
+; CHECK-LABEL: @f0
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
+; CHECK: getelementptr inbounds float, float addrspace(3)*
+; CHECK: load float, float addrspace(3)*
+define float @f0(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.amdgcn.is.shared(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @f1
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(5)*
+; CHECK: getelementptr inbounds float, float addrspace(5)*
+; CHECK: load float, float addrspace(5)*
+define float @f1(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.amdgcn.is.private(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @f2
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(1)*
+; CHECK: getelementptr inbounds float, float addrspace(1)*
+; CHECK: load float, float addrspace(1)*
+define float @f2(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.amdgcn.is.private(i8* %0)
+  %2 = xor i1 %1, -1
+  %3 = call i1 @llvm.amdgcn.is.shared(i8* %0)
+  %4 = xor i1 %3, -1
+  %5 = and i1 %2, %4
+  tail call void @llvm.assume(i1 %5)
+  %6 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %6 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %7 = load float, float* %arrayidx, align 4
+  ret float %7
+}
+
+; CHECK-LABEL: @g0
+; CHECK: if.then:
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
+; CHECK: getelementptr inbounds float, float addrspace(3)*
+; CHECK: load float, float addrspace(3)*
+; CHECK: if.end:
+; CHECK: getelementptr inbounds float, float*
+; CHECK: load float, float*
+define float @g0(i32 %c, float* %p) {
+entry:
+  %tobool.not = icmp eq i32 %c, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.amdgcn.is.shared(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  %add = fadd float %3, 0.
+  br label %if.end
+
+if.end:
+  %s = phi float [ %add, %if.then ], [ 0., %entry ]
+  %4 = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %idxprom2 = zext i32 %4 to i64
+  %arrayidx2 = getelementptr inbounds float, float* %p, i64 %idxprom2
+  %5 = load float, float* %arrayidx2, align 4
+  %add2 = fadd float %s, %5
+  ret float %add2
+}
+
+declare void @llvm.assume(i1)
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture)
+declare i1 @llvm.amdgcn.is.private(i8* nocapture)
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.workitem.id.y()

diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
new file mode 100644
index 0000000000000..5985823ef4482
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
@@ -0,0 +1,107 @@
+; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -infer-address-spaces -o - %s | FileCheck %s
+
+; CHECK-LABEL: @f0
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(4)*
+; CHECK: getelementptr inbounds float, float addrspace(4)*
+; CHECK: load float, float addrspace(4)*
+define float @f0(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.nvvm.isspacep.const(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @f1
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(1)*
+; CHECK: getelementptr inbounds float, float addrspace(1)*
+; CHECK: load float, float addrspace(1)*
+define float @f1(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.nvvm.isspacep.global(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @f2
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(5)*
+; CHECK: getelementptr inbounds float, float addrspace(5)*
+; CHECK: load float, float addrspace(5)*
+define float @f2(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.nvvm.isspacep.local(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @f3
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
+; CHECK: getelementptr inbounds float, float addrspace(3)*
+; CHECK: load float, float addrspace(3)*
+define float @f3(float* %p) {
+entry:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.nvvm.isspacep.shared(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  ret float %3
+}
+
+; CHECK-LABEL: @g0
+; CHECK: if.then:
+; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
+; CHECK: getelementptr inbounds float, float addrspace(3)*
+; CHECK: load float, float addrspace(3)*
+; CHECK: if.end:
+; CHECK: getelementptr inbounds float, float*
+; CHECK: load float, float*
+define float @g0(i32 %c, float* %p) {
+entry:
+  %tobool.not = icmp eq i32 %c, 0
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:
+  %0 = bitcast float* %p to i8*
+  %1 = call i1 @llvm.nvvm.isspacep.shared(i8* %0)
+  tail call void @llvm.assume(i1 %1)
+  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %2 to i64
+  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
+  %3 = load float, float* %arrayidx, align 4
+  %add = fadd float %3, 0.
+  br label %if.end
+
+if.end:
+  %s = phi float [ %add, %if.then ], [ 0., %entry ]
+  %4 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
+  %idxprom2 = zext i32 %4 to i64
+  %arrayidx2 = getelementptr inbounds float, float* %p, i64 %idxprom2
+  %5 = load float, float* %arrayidx2, align 4
+  %add2 = fadd float %s, %5
+  ret float %add2
+}
+
+declare void @llvm.assume(i1)
+declare i1 @llvm.nvvm.isspacep.const(i8*)
+declare i1 @llvm.nvvm.isspacep.global(i8*)
+declare i1 @llvm.nvvm.isspacep.local(i8*)
+declare i1 @llvm.nvvm.isspacep.shared(i8*)
+declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()

diff --git a/llvm/test/Transforms/LoopRotate/pr35210.ll b/llvm/test/Transforms/LoopRotate/pr35210.ll
index 8fbefdc722fa4..739ae73ded61a 100644
--- a/llvm/test/Transforms/LoopRotate/pr35210.ll
+++ b/llvm/test/Transforms/LoopRotate/pr35210.ll
@@ -11,11 +11,11 @@
 ; CHECK-NEXT: Running analysis: LoopAnalysis on f
 ; CHECK-NEXT: Running analysis: DominatorTreeAnalysis on f
 ; CHECK-NEXT: Running analysis: AssumptionAnalysis on f
+; CHECK-NEXT: Running analysis: TargetIRAnalysis on f
 ; CHECK-NEXT: Running pass: LCSSAPass on f
 ; CHECK-NEXT: Running analysis: AAManager on f
 ; CHECK-NEXT: Running analysis: TargetLibraryAnalysis on f
 ; CHECK-NEXT: Running analysis: ScalarEvolutionAnalysis on f
-; CHECK-NEXT: Running analysis: TargetIRAnalysis on f
 ; CHECK-NEXT: Running analysis: InnerAnalysisManagerProxy{{.*}} on f
 ; CHECK-NEXT: Running pass: LoopRotatePass on Loop at depth 1 containing: %bb<header><exiting>,%bb4<latch>
 ; CHECK-NEXT: Folding loop latch bb4 into bb
@@ -29,12 +29,12 @@
 ; MSSA-NEXT: Running analysis: LoopAnalysis on f
 ; MSSA-NEXT: Running analysis: DominatorTreeAnalysis on f
 ; MSSA-NEXT: Running analysis: AssumptionAnalysis on f
+; MSSA-NEXT: Running analysis: TargetIRAnalysis on f
 ; MSSA-NEXT: Running pass: LCSSAPass on f
 ; MSSA-NEXT: Running analysis: MemorySSAAnalysis on f
 ; MSSA-NEXT: Running analysis: AAManager on f
 ; MSSA-NEXT: Running analysis: TargetLibraryAnalysis on f
 ; MSSA-NEXT: Running analysis: ScalarEvolutionAnalysis on f
-; MSSA-NEXT: Running analysis: TargetIRAnalysis on f
 ; MSSA-NEXT: Running analysis: InnerAnalysisManagerProxy{{.*}} on f
 ; MSSA-NEXT: Running pass: LoopRotatePass on Loop at depth 1 containing: %bb<header><exiting>,%bb4<latch>
 ; MSSA-NEXT: Folding loop latch bb4 into bb

diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
index 28ebb8cda01bb..5cceca0940282 100644
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -518,8 +518,7 @@ TEST(AssumeQueryAPI, AssumptionCache) {
   BasicBlock::iterator First = F->begin()->begin();
   BasicBlock::iterator Second = F->begin()->begin();
   Second++;
-  AssumptionCacheTracker ACT;
-  AssumptionCache &AC = ACT.getAssumptionCache(*F);
+  AssumptionCache AC(*F);
   auto AR = AC.assumptionsFor(F->getArg(3));
   ASSERT_EQ(AR.size(), 0u);
   AR = AC.assumptionsFor(F->getArg(1));

