[llvm] [llvm] annotate interfaces in llvm/Analysis for DLL export (PR #136623)

Andrew Rogers via llvm-commits llvm-commits at lists.llvm.org
Tue May 20 10:01:55 PDT 2025


https://github.com/andrurogerz updated https://github.com/llvm/llvm-project/pull/136623

>From 8c57f250a68e94c4991a90013ad8db93b4e25578 Mon Sep 17 00:00:00 2001
From: Andrew Rogers <andrurogerz at gmail.com>
Date: Wed, 14 May 2025 11:19:34 -0700
Subject: [PATCH 1/3] [llvm] IDS auto codemod for Analysis library

---
 llvm/include/llvm/Analysis/AliasAnalysis.h    |  97 ++--
 .../llvm/Analysis/AliasAnalysisEvaluator.h    |   5 +-
 llvm/include/llvm/Analysis/AliasSetTracker.h  |  49 +-
 .../llvm/Analysis/AssumeBundleQueries.h       |  17 +-
 llvm/include/llvm/Analysis/AssumptionCache.h  |  19 +-
 .../llvm/Analysis/BasicAliasAnalysis.h        |  23 +-
 .../llvm/Analysis/BlockFrequencyInfo.h        |  51 +-
 .../llvm/Analysis/BranchProbabilityInfo.h     |  45 +-
 llvm/include/llvm/Analysis/CFG.h              |  21 +-
 llvm/include/llvm/Analysis/CFGPrinter.h       |  21 +-
 llvm/include/llvm/Analysis/CGSCCPassManager.h |  23 +-
 llvm/include/llvm/Analysis/CallGraph.h        |  35 +-
 llvm/include/llvm/Analysis/CallGraphSCCPass.h |  11 +-
 llvm/include/llvm/Analysis/CallPrinter.h      |   9 +-
 llvm/include/llvm/Analysis/CaptureTracking.h  |  21 +-
 llvm/include/llvm/Analysis/CodeMetrics.h      |   7 +-
 llvm/include/llvm/Analysis/ConstantFolding.h  |  57 +--
 llvm/include/llvm/Analysis/ConstraintSystem.h |   7 +-
 llvm/include/llvm/Analysis/CtxProfAnalysis.h  |  47 +-
 llvm/include/llvm/Analysis/DDG.h              |  25 +-
 llvm/include/llvm/Analysis/DXILResource.h     |  75 +--
 llvm/include/llvm/Analysis/DemandedBits.h     |  19 +-
 .../llvm/Analysis/DependenceAnalysis.h        |  47 +-
 llvm/include/llvm/Analysis/DomPrinter.h       |  17 +-
 llvm/include/llvm/Analysis/DomTreeUpdater.h   |   6 +-
 .../llvm/Analysis/EphemeralValuesCache.h      |   5 +-
 .../Analysis/FunctionPropertiesAnalysis.h     |  19 +-
 llvm/include/llvm/Analysis/GlobalsModRef.h    |  25 +-
 llvm/include/llvm/Analysis/HeatUtils.h        |   9 +-
 .../llvm/Analysis/IRSimilarityIdentifier.h    |  71 +--
 llvm/include/llvm/Analysis/IVDescriptors.h    |  37 +-
 llvm/include/llvm/Analysis/InlineAdvisor.h    |  41 +-
 llvm/include/llvm/Analysis/InlineCost.h       |  27 +-
 .../llvm/Analysis/InlineModelFeatureMaps.h    |  13 +-
 llvm/include/llvm/Analysis/InlineOrder.h      |   7 +-
 .../llvm/Analysis/InstSimplifyFolder.h        |   3 +-
 .../Analysis/InstructionPrecedenceTracking.h  |  19 +-
 .../llvm/Analysis/InstructionSimplify.h       |  93 ++--
 .../llvm/Analysis/InteractiveModelRunner.h    |   3 +-
 .../llvm/Analysis/LastRunTrackingAnalysis.h   |   5 +-
 llvm/include/llvm/Analysis/LazyCallGraph.h    |  67 +--
 llvm/include/llvm/Analysis/Loads.h            |  29 +-
 .../llvm/Analysis/LoopAccessAnalysis.h        |  81 ++--
 .../llvm/Analysis/LoopAnalysisManager.h       |   7 +-
 llvm/include/llvm/Analysis/LoopInfo.h         |  53 +--
 llvm/include/llvm/Analysis/LoopNestAnalysis.h |   7 +-
 llvm/include/llvm/Analysis/LoopPass.h         |   9 +-
 llvm/include/llvm/Analysis/MemoryBuiltins.h   |  93 ++--
 llvm/include/llvm/Analysis/MemoryLocation.h   |  27 +-
 .../include/llvm/Analysis/MemoryProfileInfo.h |  29 +-
 llvm/include/llvm/Analysis/MemorySSA.h        |  73 +--
 llvm/include/llvm/Analysis/MemorySSAUpdater.h |  47 +-
 .../llvm/Analysis/ModuleSummaryAnalysis.h     |  15 +-
 llvm/include/llvm/Analysis/MustExecute.h      |  31 +-
 .../llvm/Analysis/NoInferenceModelRunner.h    |   3 +-
 .../llvm/Analysis/OptimizationRemarkEmitter.h |  11 +-
 llvm/include/llvm/Analysis/PHITransAddr.h     |  11 +-
 llvm/include/llvm/Analysis/Passes.h           |   6 +-
 llvm/include/llvm/Analysis/PhiValues.h        |  19 +-
 llvm/include/llvm/Analysis/PostDominators.h   |  15 +-
 .../llvm/Analysis/ProfileSummaryInfo.h        |  37 +-
 llvm/include/llvm/Analysis/RegionPass.h       |   5 +-
 llvm/include/llvm/Analysis/RegionPrinter.h    |  11 +-
 llvm/include/llvm/Analysis/ScalarEvolution.h  | 391 ++++++++--------
 .../Analysis/ScalarEvolutionAliasAnalysis.h   |  11 +-
 .../Analysis/ScalarEvolutionExpressions.h     |  15 +-
 .../Analysis/ScalarEvolutionNormalization.h   |   7 +-
 llvm/include/llvm/Analysis/ScopedNoAliasAA.h  |  15 +-
 llvm/include/llvm/Analysis/SimplifyQuery.h    |   3 +-
 .../llvm/Analysis/StaticDataProfileInfo.h     |   9 +-
 llvm/include/llvm/Analysis/TargetFolder.h     |   3 +-
 .../include/llvm/Analysis/TargetLibraryInfo.h |  51 +-
 .../llvm/Analysis/TargetTransformInfo.h       | 439 +++++++++---------
 llvm/include/llvm/Analysis/TensorSpec.h       |   9 +-
 .../llvm/Analysis/TypeBasedAliasAnalysis.h    |  19 +-
 .../ImportedFunctionsInliningStatistics.h     |   7 +-
 llvm/include/llvm/Analysis/Utils/Local.h      |   2 +-
 .../llvm/Analysis/Utils/TrainingLogger.h      |  11 +-
 llvm/include/llvm/Analysis/ValueLattice.h     |   7 +-
 llvm/include/llvm/Analysis/ValueTracking.h    | 215 ++++-----
 llvm/include/llvm/Analysis/VectorUtils.h      |  77 +--
 llvm/include/llvm/Analysis/WithCache.h        |   3 +-
 82 files changed, 1595 insertions(+), 1516 deletions(-)

diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 16f54c394788d..4f1fdd69af8c4 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -37,6 +37,7 @@
 #ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
 #define LLVM_ANALYSIS_ALIASANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/MemoryLocation.h"
@@ -142,10 +143,10 @@ static_assert(sizeof(AliasResult) == 4,
               "AliasResult size is intended to be 4 bytes!");
 
 /// << operator for AliasResult.
-raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
 
 /// Virtual base class for providers of capture analysis.
-struct CaptureAnalysis {
+struct LLVM_ABI CaptureAnalysis {
   virtual ~CaptureAnalysis() = 0;
 
   /// Check whether Object is not captured before instruction I. If OrAt is
@@ -159,7 +160,7 @@ struct CaptureAnalysis {
 /// Context-free CaptureAnalysis provider, which computes and caches whether an
 /// object is captured in the function at all, but does not distinguish whether
 /// it was captured before or after the context instruction.
-class SimpleCaptureAnalysis final : public CaptureAnalysis {
+class LLVM_ABI SimpleCaptureAnalysis final : public CaptureAnalysis {
   SmallDenseMap<const Value *, bool, 8> IsCapturedCache;
 
 public:
@@ -170,7 +171,7 @@ class SimpleCaptureAnalysis final : public CaptureAnalysis {
 /// Context-sensitive CaptureAnalysis provider, which computes and caches the
 /// earliest common dominator closure of all captures. It provides a good
 /// approximation to a precise "captures before" analysis.
-class EarliestEscapeAnalysis final : public CaptureAnalysis {
+class LLVM_ABI EarliestEscapeAnalysis final : public CaptureAnalysis {
   DominatorTree &DT;
   const LoopInfo *LI;
 
@@ -315,9 +316,9 @@ class AAResults {
 public:
   // Make these results default constructable and movable. We have to spell
   // these out because MSVC won't synthesize them.
-  AAResults(const TargetLibraryInfo &TLI);
-  AAResults(AAResults &&Arg);
-  ~AAResults();
+  LLVM_ABI AAResults(const TargetLibraryInfo &TLI);
+  LLVM_ABI AAResults(AAResults &&Arg);
+  LLVM_ABI ~AAResults();
 
   /// Register a specific AA result.
   template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
@@ -338,7 +339,7 @@ class AAResults {
   ///
   /// The aggregation is invalidated if any of the underlying analyses is
   /// invalidated.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
   //===--------------------------------------------------------------------===//
@@ -349,7 +350,7 @@ class AAResults {
   /// Returns an AliasResult indicating whether the two pointers are aliased to
   /// each other. This is the interface that must be implemented by specific
   /// alias analysis implementations.
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
 
   /// A convenience wrapper around the primary \c alias interface.
   AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
@@ -417,7 +418,7 @@ class AAResults {
   ///
   /// If IgnoreLocals is true, then this method returns NoModRef for memory
   /// that points to a local alloca.
-  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
                                bool IgnoreLocals = false);
 
   /// A convenience wrapper around the primary \c getModRefInfoMask
@@ -431,13 +432,13 @@ class AAResults {
   /// that these bits do not necessarily account for the overall behavior of
   /// the function, but rather only provide additional per-argument
   /// information.
-  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
+  LLVM_ABI ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
 
   /// Return the behavior of the given call site.
-  MemoryEffects getMemoryEffects(const CallBase *Call);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call);
 
   /// Return the behavior when calling the given function.
-  MemoryEffects getMemoryEffects(const Function *F);
+  LLVM_ABI MemoryEffects getMemoryEffects(const Function *F);
 
   /// Checks if the specified call is known to never read or write memory.
   ///
@@ -519,11 +520,11 @@ class AAResults {
 
   /// Return information about whether a call and an instruction may refer to
   /// the same memory locations.
-  ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call);
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call);
 
   /// Return information about whether two instructions may refer to the same
   /// memory locations.
-  ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2);
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2);
 
   /// Return information about whether a particular call site modifies
   /// or reads the specified memory location \p MemLoc before instruction \p I
@@ -548,7 +549,7 @@ class AAResults {
 
   /// Check if it is possible for execution of the specified basic block to
   /// modify the location Loc.
-  bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
+  LLVM_ABI bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
 
   /// A convenience wrapper synthesizing a memory location.
   bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
@@ -561,7 +562,7 @@ class AAResults {
   ///
   /// The instructions to consider are all of the instructions in the range of
   /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
-  bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
+  LLVM_ABI bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
                                  const MemoryLocation &Loc,
                                  const ModRefInfo Mode);
 
@@ -574,42 +575,42 @@ class AAResults {
 
   // CtxI can be nullptr, in which case the query is whether or not the aliasing
   // relationship holds through the entire function.
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI = nullptr);
 
-  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                                bool IgnoreLocals = false);
-  ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2,
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2,
                            AAQueryInfo &AAQIP);
-  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
+  LLVM_ABI ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
                            const MemoryLocation &Loc, AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const Instruction *I,
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I,
                            const std::optional<MemoryLocation> &OptLoc,
                            AAQueryInfo &AAQIP);
-  ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2,
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2,
                            AAQueryInfo &AAQI);
-  ModRefInfo callCapturesBefore(const Instruction *I,
+  LLVM_ABI ModRefInfo callCapturesBefore(const Instruction *I,
                                 const MemoryLocation &MemLoc, DominatorTree *DT,
                                 AAQueryInfo &AAQIP);
-  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
 
 private:
   class Concept;
@@ -708,7 +709,7 @@ using AliasAnalysis = AAResults;
 /// All of these methods model methods by the same name in the \c
 /// AAResults class. Only differences and specifics to how the
 /// implementations are called are documented here.
-class AAResults::Concept {
+class LLVM_ABI AAResults::Concept {
 public:
   virtual ~Concept() = 0;
 
@@ -869,7 +870,7 @@ class AAResultBase {
 };
 
 /// Return true if this pointer is returned by a noalias function.
-bool isNoAliasCall(const Value *V);
+LLVM_ABI bool isNoAliasCall(const Value *V);
 
 /// Return true if this pointer refers to a distinct and identifiable object.
 /// This returns true for:
@@ -878,32 +879,32 @@ bool isNoAliasCall(const Value *V);
 ///    ByVal and NoAlias Arguments
 ///    NoAlias returns (e.g. calls to malloc)
 ///
-bool isIdentifiedObject(const Value *V);
+LLVM_ABI bool isIdentifiedObject(const Value *V);
 
 /// Return true if V is umabigously identified at the function-level.
 /// Different IdentifiedFunctionLocals can't alias.
 /// Further, an IdentifiedFunctionLocal can not alias with any function
 /// arguments other than itself, which is not necessarily true for
 /// IdentifiedObjects.
-bool isIdentifiedFunctionLocal(const Value *V);
+LLVM_ABI bool isIdentifiedFunctionLocal(const Value *V);
 
 /// Return true if we know V to the base address of the corresponding memory
 /// object.  This implies that any address less than V must be out of bounds
 /// for the underlying object.  Note that just being isIdentifiedObject() is
 /// not enough - For example, a negative offset from a noalias argument or call
 /// can be inbounds w.r.t the actual underlying object.
-bool isBaseOfObject(const Value *V);
+LLVM_ABI bool isBaseOfObject(const Value *V);
 
 /// Returns true if the pointer is one which would have been considered an
 /// escape by isNonEscapingLocalObject.
-bool isEscapeSource(const Value *V);
+LLVM_ABI bool isEscapeSource(const Value *V);
 
 /// Return true if Object memory is not visible after an unwind, in the sense
 /// that program semantics cannot depend on Object containing any particular
 /// value on unwind. If the RequiresNoCaptureBeforeUnwind out parameter is set
 /// to true, then the memory is only not visible if the object has not been
 /// captured prior to the unwind. Otherwise it is not visible even if captured.
-bool isNotVisibleOnUnwind(const Value *Object,
+LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object,
                           bool &RequiresNoCaptureBeforeUnwind);
 
 /// Return true if the Object is writable, in the sense that any location based
@@ -917,7 +918,7 @@ bool isNotVisibleOnUnwind(const Value *Object,
 /// using the dereferenceable(N) attribute. It does not necessarily hold for
 /// parts that are only known to be dereferenceable due to the presence of
 /// loads.
-bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly);
+LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly);
 
 /// A manager for alias analyses.
 ///
@@ -950,7 +951,7 @@ class AAManager : public AnalysisInfoMixin<AAManager> {
     ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
   }
 
-  Result run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
 
 private:
   friend AnalysisInfoMixin<AAManager>;
@@ -984,7 +985,7 @@ class AAManager : public AnalysisInfoMixin<AAManager> {
 
 /// A wrapper pass to provide the legacy pass manager access to a suitably
 /// prepared AAResults object.
-class AAResultsWrapperPass : public FunctionPass {
+class LLVM_ABI AAResultsWrapperPass : public FunctionPass {
   std::unique_ptr<AAResults> AAR;
 
 public:
@@ -1007,11 +1008,11 @@ struct ExternalAAWrapperPass : ImmutablePass {
 
   CallbackT CB;
 
-  static char ID;
+  LLVM_ABI static char ID;
 
-  ExternalAAWrapperPass();
+  LLVM_ABI ExternalAAWrapperPass();
 
-  explicit ExternalAAWrapperPass(CallbackT CB, bool RunEarly = false);
+  LLVM_ABI explicit ExternalAAWrapperPass(CallbackT CB, bool RunEarly = false);
 
   /// Flag indicating whether this external AA should run before Basic AA.
   ///
@@ -1042,7 +1043,7 @@ struct ExternalAAWrapperPass : ImmutablePass {
 /// object, and will receive a reference to the function wrapper pass, the
 /// function, and the AAResults object to populate. This should be used when
 /// setting up a custom pass pipeline to inject a hook into the AA results.
-ImmutablePass *createExternalAAWrapperPass(
+LLVM_ABI ImmutablePass *createExternalAAWrapperPass(
     std::function<void(Pass &, Function &, AAResults &)> Callback);
 
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h b/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
index e4f152c232aa6..4303d663faff8 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -24,6 +24,7 @@
 #ifndef LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
 #define LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -47,10 +48,10 @@ class AAEvaluator : public PassInfoMixin<AAEvaluator> {
         ModRefCount(Arg.ModRefCount) {
     Arg.FunctionCount = 0;
   }
-  ~AAEvaluator();
+  LLVM_ABI ~AAEvaluator();
 
   /// Run the pass over the function.
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
 private:
   void runInternal(Function &F, AAResults &AA);
diff --git a/llvm/include/llvm/Analysis/AliasSetTracker.h b/llvm/include/llvm/Analysis/AliasSetTracker.h
index e5817d2409bc6..548c059ada887 100644
--- a/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -18,6 +18,7 @@
 #ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
 #define LLVM_ANALYSIS_ALIASSETTRACKER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/ilist.h"
@@ -113,7 +114,7 @@ class AliasSet : public ilist_node<AliasSet> {
   bool isForwardingAliasSet() const { return Forward; }
 
   /// Merge the specified alias set into this alias set.
-  void mergeSetIn(AliasSet &AS, AliasSetTracker &AST, BatchAAResults &BatchAA);
+  LLVM_ABI void mergeSetIn(AliasSet &AS, AliasSetTracker &AST, BatchAAResults &BatchAA);
 
   // Alias Set iteration - Allow access to all of the memory locations which are
   // part of this alias set.
@@ -127,17 +128,17 @@ class AliasSet : public ilist_node<AliasSet> {
   /// The order matches that of the memory locations, but duplicate pointer
   /// values are omitted.
   using PointerVector = SmallVector<const Value *, 8>;
-  PointerVector getPointers() const;
+  LLVM_ABI PointerVector getPointers() const;
 
-  void print(raw_ostream &OS) const;
-  void dump() const;
+  LLVM_ABI void print(raw_ostream &OS) const;
+  LLVM_ABI void dump() const;
 
 private:
   // Can only be created by AliasSetTracker.
   AliasSet()
       : RefCount(0), AliasAny(false), Access(NoAccess), Alias(SetMustAlias) {}
 
-  void removeFromTracker(AliasSetTracker &AST);
+  LLVM_ABI void removeFromTracker(AliasSetTracker &AST);
 
   void addMemoryLocation(AliasSetTracker &AST, const MemoryLocation &MemLoc,
                          bool KnownMustAlias = false);
@@ -146,10 +147,10 @@ class AliasSet : public ilist_node<AliasSet> {
 public:
   /// If the specified memory location "may" (or must) alias one of the members
   /// in the set return the appropriate AliasResult. Otherwise return NoAlias.
-  AliasResult aliasesMemoryLocation(const MemoryLocation &MemLoc,
+  LLVM_ABI AliasResult aliasesMemoryLocation(const MemoryLocation &MemLoc,
                                     BatchAAResults &AA) const;
 
-  ModRefInfo aliasesUnknownInst(const Instruction *Inst,
+  LLVM_ABI ModRefInfo aliasesUnknownInst(const Instruction *Inst,
                                 BatchAAResults &AA) const;
 };
 
@@ -183,18 +184,18 @@ class AliasSetTracker {
   ///   3. If the instruction aliases multiple sets, merge the sets, and add
   ///      the instruction to the result.
   ///
-  void add(const MemoryLocation &Loc);
-  void add(LoadInst *LI);
-  void add(StoreInst *SI);
-  void add(VAArgInst *VAAI);
-  void add(AnyMemSetInst *MSI);
-  void add(AnyMemTransferInst *MTI);
-  void add(Instruction *I);       // Dispatch to one of the other add methods...
-  void add(BasicBlock &BB);       // Add all instructions in basic block
-  void add(const AliasSetTracker &AST); // Add alias relations from another AST
-  void addUnknown(Instruction *I);
-
-  void clear();
+  LLVM_ABI void add(const MemoryLocation &Loc);
+  LLVM_ABI void add(LoadInst *LI);
+  LLVM_ABI void add(StoreInst *SI);
+  LLVM_ABI void add(VAArgInst *VAAI);
+  LLVM_ABI void add(AnyMemSetInst *MSI);
+  LLVM_ABI void add(AnyMemTransferInst *MTI);
+  LLVM_ABI void add(Instruction *I);       // Dispatch to one of the other add methods...
+  LLVM_ABI void add(BasicBlock &BB);       // Add all instructions in basic block
+  LLVM_ABI void add(const AliasSetTracker &AST); // Add alias relations from another AST
+  LLVM_ABI void addUnknown(Instruction *I);
+
+  LLVM_ABI void clear();
 
   /// Return the alias sets that are active.
   const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
@@ -203,7 +204,7 @@ class AliasSetTracker {
   /// the memory location aliases two or more existing alias sets, will have
   /// the effect of merging those alias sets before the single resulting alias
   /// set is returned.
-  AliasSet &getAliasSetFor(const MemoryLocation &MemLoc);
+  LLVM_ABI AliasSet &getAliasSetFor(const MemoryLocation &MemLoc);
 
   /// Return the underlying alias analysis object used by this tracker.
   BatchAAResults &getAliasAnalysis() const { return AA; }
@@ -217,8 +218,8 @@ class AliasSetTracker {
   iterator begin() { return AliasSets.begin(); }
   iterator end()   { return AliasSets.end(); }
 
-  void print(raw_ostream &OS) const;
-  void dump() const;
+  LLVM_ABI void print(raw_ostream &OS) const;
+  LLVM_ABI void dump() const;
 
 private:
   friend class AliasSet;
@@ -270,8 +271,8 @@ class AliasSetsPrinterPass : public PassInfoMixin<AliasSetsPrinterPass> {
   raw_ostream &OS;
 
 public:
-  explicit AliasSetsPrinterPass(raw_ostream &OS);
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI explicit AliasSetsPrinterPass(raw_ostream &OS);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
diff --git a/llvm/include/llvm/Analysis/AssumeBundleQueries.h b/llvm/include/llvm/Analysis/AssumeBundleQueries.h
index 98e9207e4435a..ddffa4ce8dcc7 100644
--- a/llvm/include/llvm/Analysis/AssumeBundleQueries.h
+++ b/llvm/include/llvm/Analysis/AssumeBundleQueries.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H
 #define LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/IntrinsicInst.h"
 
@@ -38,7 +39,7 @@ enum AssumeBundleArg {
 ///
 /// Return true iff the queried attribute was found.
 /// If ArgVal is set. the argument will be stored to ArgVal.
-bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn, StringRef AttrName,
+LLVM_ABI bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn, StringRef AttrName,
                           uint64_t *ArgVal = nullptr);
 inline bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn,
                                  Attribute::AttrKind Kind,
@@ -86,7 +87,7 @@ using RetainedKnowledgeMap =
 /// many queries are going to be made on the same llvm.assume.
 /// String attributes are not inserted in the map.
 /// If the IR changes the map will be outdated.
-void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result);
+LLVM_ABI void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result);
 
 /// Represent one information held inside an operand bundle of an llvm.assume.
 /// AttrKind is the property that holds.
@@ -120,7 +121,7 @@ struct RetainedKnowledge {
 
 /// Retreive the information help by Assume on the operand at index Idx.
 /// Assume should be an llvm.assume and Idx should be in the operand bundle.
-RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume,
+LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume,
                                                   unsigned Idx);
 
 /// Retreive the information help by the Use U of an llvm.assume. the use should
@@ -141,16 +142,16 @@ constexpr StringRef IgnoreBundleTag = "ignore";
 ///
 /// the argument to the call of llvm.assume may still be useful even if the
 /// function returned true.
-bool isAssumeWithEmptyBundle(const AssumeInst &Assume);
+LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume);
 
 /// Return a valid Knowledge associated to the Use U if its Attribute kind is
 /// in AttrKinds.
-RetainedKnowledge getKnowledgeFromUse(const Use *U,
+LLVM_ABI RetainedKnowledge getKnowledgeFromUse(const Use *U,
                                       ArrayRef<Attribute::AttrKind> AttrKinds);
 
 /// Return a valid Knowledge associated to the Value V if its Attribute kind is
 /// in AttrKinds and it matches the Filter.
-RetainedKnowledge getKnowledgeForValue(
+LLVM_ABI RetainedKnowledge getKnowledgeForValue(
     const Value *V, ArrayRef<Attribute::AttrKind> AttrKinds,
     AssumptionCache &AC,
     function_ref<bool(RetainedKnowledge, Instruction *,
@@ -160,7 +161,7 @@ RetainedKnowledge getKnowledgeForValue(
 /// Return a valid Knowledge associated to the Value V if its Attribute kind is
 /// in AttrKinds and the knowledge is suitable to be used in the context of
 /// CtxI.
-RetainedKnowledge
+LLVM_ABI RetainedKnowledge
 getKnowledgeValidInContext(const Value *V,
                            ArrayRef<Attribute::AttrKind> AttrKinds,
                            AssumptionCache &AC, const Instruction *CtxI,
@@ -168,7 +169,7 @@ getKnowledgeValidInContext(const Value *V,
 
 /// This extracts the Knowledge from an element of an operand bundle.
 /// This is mostly for use in the assume builder.
-RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume,
+LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume,
                                          const CallBase::BundleOpInfo &BOI);
 
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h
index 96ae32da6743a..e20711542647f 100644
--- a/llvm/include/llvm/Analysis/AssumptionCache.h
+++ b/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -15,6 +15,7 @@
 #ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
 #define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
@@ -66,7 +67,7 @@ class AssumptionCache {
   /// intrinsic.
   SmallVector<ResultElem, 4> AssumeHandles;
 
-  class AffectedValueCallbackVH final : public CallbackVH {
+  class LLVM_ABI AffectedValueCallbackVH final : public CallbackVH {
     AssumptionCache *AC;
 
     void deleted() override;
@@ -101,7 +102,7 @@ class AssumptionCache {
   bool Scanned = false;
 
   /// Scan the function for assumptions and add them to the cache.
-  void scanFunction();
+  LLVM_ABI void scanFunction();
 
 public:
   /// Construct an AssumptionCache from a function by scanning all of
@@ -120,15 +121,15 @@ class AssumptionCache {
   ///
   /// The call passed in must be an instruction within this function and must
   /// not already be in the cache.
-  void registerAssumption(AssumeInst *CI);
+  LLVM_ABI void registerAssumption(AssumeInst *CI);
 
   /// Remove an \@llvm.assume intrinsic from this function's cache if it has
   /// been added to the cache earlier.
-  void unregisterAssumption(AssumeInst *CI);
+  LLVM_ABI void unregisterAssumption(AssumeInst *CI);
 
   /// Update the cache of values being affected by this assumption (i.e.
   /// the values about which this assumption provides information).
-  void updateAffectedValues(AssumeInst *CI);
+  LLVM_ABI void updateAffectedValues(AssumeInst *CI);
 
   /// Clear the cache of \@llvm.assume intrinsics for a function.
   ///
@@ -178,7 +179,7 @@ class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
 public:
   using Result = AssumptionCache;
 
-  AssumptionCache run(Function &F, FunctionAnalysisManager &);
+  LLVM_ABI AssumptionCache run(Function &F, FunctionAnalysisManager &);
 };
 
 /// Printer pass for the \c AssumptionAnalysis results.
@@ -188,7 +189,7 @@ class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
 public:
   explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -201,10 +202,10 @@ class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
 /// function is deleted. The nature of the AssumptionCache is that it is not
 /// invalidated by any changes to the function body and so this is sufficient
 /// to be conservatively correct.
-class AssumptionCacheTracker : public ImmutablePass {
+class LLVM_ABI AssumptionCacheTracker : public ImmutablePass {
   /// A callback value handle applied to function objects, which we use to
   /// delete our cache of intrinsics for a function when it is deleted.
-  class FunctionCallbackVH final : public CallbackVH {
+  class LLVM_ABI FunctionCallbackVH final : public CallbackVH {
     AssumptionCacheTracker *ACT;
 
     void deleted() override;
diff --git a/llvm/include/llvm/Analysis/BasicAliasAnalysis.h b/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
index 7eca82729430d..10ec9260a32b8 100644
--- a/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
 #define LLVM_ANALYSIS_BASICALIASANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
@@ -65,16 +66,16 @@ class BasicAAResult : public AAResultBase {
         AC(Arg.AC), DT_(Arg.DT_) {}
 
   /// Handle invalidation events in the new pass manager.
-  bool invalidate(Function &Fn, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &Fn, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI);
 
-  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
 
-  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                            AAQueryInfo &AAQI);
 
   /// Returns a bitmask that should be unconditionally applied to the ModRef
@@ -84,18 +85,18 @@ class BasicAAResult : public AAResultBase {
   ///
   /// If IgnoreLocals is true, then this method returns NoModRef for memory
   /// that points to a local alloca.
-  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                                bool IgnoreLocals = false);
 
   /// Get the location associated with a pointer argument of a callsite.
-  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
+  LLVM_ABI ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
 
   /// Returns the behavior when calling the given call site.
-  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
 
   /// Returns the behavior when calling the given function. For use when the
   /// call site is not known.
-  MemoryEffects getMemoryEffects(const Function *Fn);
+  LLVM_ABI MemoryEffects getMemoryEffects(const Function *Fn);
 
 private:
   struct DecomposedGEP;
@@ -157,11 +158,11 @@ class BasicAA : public AnalysisInfoMixin<BasicAA> {
 public:
   using Result = BasicAAResult;
 
-  BasicAAResult run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI BasicAAResult run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the BasicAAResult object.
-class BasicAAWrapperPass : public FunctionPass {
+class LLVM_ABI BasicAAWrapperPass : public FunctionPass {
   std::unique_ptr<BasicAAResult> Result;
 
   virtual void anchor();
@@ -178,7 +179,7 @@ class BasicAAWrapperPass : public FunctionPass {
   void getAnalysisUsage(AnalysisUsage &AU) const override;
 };
 
-FunctionPass *createBasicAAWrapperPass();
+LLVM_ABI FunctionPass *createBasicAAWrapperPass();
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
index 5191a96440335..1efeb7aa69849 100644
--- a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/BlockFrequency.h"
@@ -40,74 +41,74 @@ class BlockFrequencyInfo {
   std::unique_ptr<ImplType> BFI;
 
 public:
-  BlockFrequencyInfo();
-  BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
+  LLVM_ABI BlockFrequencyInfo();
+  LLVM_ABI BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
                      const LoopInfo &LI);
   BlockFrequencyInfo(const BlockFrequencyInfo &) = delete;
   BlockFrequencyInfo &operator=(const BlockFrequencyInfo &) = delete;
-  BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
-  BlockFrequencyInfo &operator=(BlockFrequencyInfo &&RHS);
-  ~BlockFrequencyInfo();
+  LLVM_ABI BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
+  LLVM_ABI BlockFrequencyInfo &operator=(BlockFrequencyInfo &&RHS);
+  LLVM_ABI ~BlockFrequencyInfo();
 
   /// Handle invalidation explicitly.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &);
 
-  const Function *getFunction() const;
-  const BranchProbabilityInfo *getBPI() const;
-  void view(StringRef = "BlockFrequencyDAGs") const;
+  LLVM_ABI const Function *getFunction() const;
+  LLVM_ABI const BranchProbabilityInfo *getBPI() const;
+  LLVM_ABI void view(StringRef = "BlockFrequencyDAGs") const;
 
   /// getblockFreq - Return block frequency. Return 0 if we don't have the
   /// information. Please note that initial frequency is equal to ENTRY_FREQ. It
   /// means that we should not rely on the value itself, but only on the
   /// comparison to the other block frequencies. We do this to avoid using of
   /// floating points.
-  BlockFrequency getBlockFreq(const BasicBlock *BB) const;
+  LLVM_ABI BlockFrequency getBlockFreq(const BasicBlock *BB) const;
 
   /// Returns the estimated profile count of \p BB.
   /// This computes the relative block frequency of \p BB and multiplies it by
   /// the enclosing function's count (if available) and returns the value.
-  std::optional<uint64_t>
+  LLVM_ABI std::optional<uint64_t>
   getBlockProfileCount(const BasicBlock *BB, bool AllowSynthetic = false) const;
 
   /// Returns the estimated profile count of \p Freq.
   /// This uses the frequency \p Freq and multiplies it by
   /// the enclosing function's count (if available) and returns the value.
-  std::optional<uint64_t> getProfileCountFromFreq(BlockFrequency Freq) const;
+  LLVM_ABI std::optional<uint64_t> getProfileCountFromFreq(BlockFrequency Freq) const;
 
   /// Returns true if \p BB is an irreducible loop header
   /// block. Otherwise false.
-  bool isIrrLoopHeader(const BasicBlock *BB);
+  LLVM_ABI bool isIrrLoopHeader(const BasicBlock *BB);
 
   // Set the frequency of the given basic block.
-  void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq);
+  LLVM_ABI void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq);
 
   /// Set the frequency of \p ReferenceBB to \p Freq and scale the frequencies
   /// of the blocks in \p BlocksToScale such that their frequencies relative
   /// to \p ReferenceBB remain unchanged.
-  void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq,
+  LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq,
                             SmallPtrSetImpl<BasicBlock *> &BlocksToScale);
 
   /// calculate - compute block frequency info for the given function.
-  void calculate(const Function &F, const BranchProbabilityInfo &BPI,
+  LLVM_ABI void calculate(const Function &F, const BranchProbabilityInfo &BPI,
                  const LoopInfo &LI);
 
-  BlockFrequency getEntryFreq() const;
-  void releaseMemory();
-  void print(raw_ostream &OS) const;
+  LLVM_ABI BlockFrequency getEntryFreq() const;
+  LLVM_ABI void releaseMemory();
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   // Compare to the other BFI and verify they match.
-  void verifyMatch(BlockFrequencyInfo &Other) const;
+  LLVM_ABI void verifyMatch(BlockFrequencyInfo &Other) const;
 };
 
 /// Print the block frequency @p Freq relative to the current functions entry
 /// frequency. Returns a Printable object that can be piped via `<<` to a
 /// `raw_ostream`.
-Printable printBlockFreq(const BlockFrequencyInfo &BFI, BlockFrequency Freq);
+LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, BlockFrequency Freq);
 
 /// Convenience function equivalent to calling
 /// `printBlockFreq(BFI, BFI.getBlockFreq(&BB))`.
-Printable printBlockFreq(const BlockFrequencyInfo &BFI, const BasicBlock &BB);
+LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, const BasicBlock &BB);
 
 /// Analysis pass which computes \c BlockFrequencyInfo.
 class BlockFrequencyAnalysis
@@ -121,7 +122,7 @@ class BlockFrequencyAnalysis
   using Result = BlockFrequencyInfo;
 
   /// Run the analysis pass over a function and produce BFI.
-  Result run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Printer pass for the \c BlockFrequencyInfo results.
@@ -132,13 +133,13 @@ class BlockFrequencyPrinterPass
 public:
   explicit BlockFrequencyPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
 /// Legacy analysis pass which computes \c BlockFrequencyInfo.
-class BlockFrequencyInfoWrapperPass : public FunctionPass {
+class LLVM_ABI BlockFrequencyInfoWrapperPass : public FunctionPass {
   BlockFrequencyInfo BFI;
 
 public:
diff --git a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index fbaeac251bc0e..d2a113ac184bd 100644
--- a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
 #define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/ADT/DenseSet.h"
@@ -141,12 +142,12 @@ class BranchProbabilityInfo {
     return *this;
   }
 
-  bool invalidate(Function &, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &);
 
-  void releaseMemory();
+  LLVM_ABI void releaseMemory();
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   /// Get an edge's probability, relative to other out-edges of the Src.
   ///
@@ -154,30 +155,30 @@ class BranchProbabilityInfo {
   /// (0%) and one (100%) of this edge executing, relative to other edges
   /// leaving the 'Src' block. The returned probability is never zero, and can
   /// only be one if the source block has only one successor.
-  BranchProbability getEdgeProbability(const BasicBlock *Src,
+  LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
                                        unsigned IndexInSuccessors) const;
 
   /// Get the probability of going from Src to Dst.
   ///
   /// It returns the sum of all probabilities for edges from Src to Dst.
-  BranchProbability getEdgeProbability(const BasicBlock *Src,
+  LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
                                        const BasicBlock *Dst) const;
 
-  BranchProbability getEdgeProbability(const BasicBlock *Src,
+  LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
                                        const_succ_iterator Dst) const;
 
   /// Test if an edge is hot relative to other out-edges of the Src.
   ///
   /// Check whether this edge out of the source block is 'hot'. We define hot
   /// as having a relative probability > 80%.
-  bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
+  LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
 
   /// Print an edge's probability.
   ///
   /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
   /// then prints that probability to the provided stream. That stream is then
   /// returned.
-  raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
+  LLVM_ABI raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
                                     const BasicBlock *Dst) const;
 
 public:
@@ -186,29 +187,29 @@ class BranchProbabilityInfo {
   /// This allows a pass to explicitly set edge probabilities for a block. It
   /// can be used when updating the CFG to update the branch probability
   /// information.
-  void setEdgeProbability(const BasicBlock *Src,
+  LLVM_ABI void setEdgeProbability(const BasicBlock *Src,
                           const SmallVectorImpl<BranchProbability> &Probs);
 
   /// Copy outgoing edge probabilities from \p Src to \p Dst.
   ///
   /// This allows to keep probabilities unset for the destination if they were
   /// unset for source.
-  void copyEdgeProbabilities(BasicBlock *Src, BasicBlock *Dst);
+  LLVM_ABI void copyEdgeProbabilities(BasicBlock *Src, BasicBlock *Dst);
 
   /// Swap outgoing edges probabilities for \p Src with branch terminator
-  void swapSuccEdgesProbabilities(const BasicBlock *Src);
+  LLVM_ABI void swapSuccEdgesProbabilities(const BasicBlock *Src);
 
   static BranchProbability getBranchProbStackProtector(bool IsLikely) {
     static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
     return IsLikely ? LikelyProb : LikelyProb.getCompl();
   }
 
-  void calculate(const Function &F, const LoopInfo &LI,
+  LLVM_ABI void calculate(const Function &F, const LoopInfo &LI,
                  const TargetLibraryInfo *TLI, DominatorTree *DT,
                  PostDominatorTree *PDT);
 
   /// Forget analysis results for the given basic block.
-  void eraseBlock(const BasicBlock *BB);
+  LLVM_ABI void eraseBlock(const BasicBlock *BB);
 
   // Data structure to track SCCs for handling irreducible loops.
   class SccInfo {
@@ -237,12 +238,12 @@ class BranchProbabilityInfo {
     SccBlockTypeMaps SccBlocks;
 
   public:
-    explicit SccInfo(const Function &F);
+    LLVM_ABI explicit SccInfo(const Function &F);
 
     /// If \p BB belongs to some SCC then ID of that SCC is returned, otherwise
     /// -1 is returned. If \p BB belongs to more than one SCC at the same time
     /// result is undefined.
-    int getSCCNum(const BasicBlock *BB) const;
+    LLVM_ABI int getSCCNum(const BasicBlock *BB) const;
     /// Returns true if \p BB is a 'header' block in SCC with \p SccNum ID,
     /// false otherwise.
     bool isSCCHeader(const BasicBlock *BB, int SccNum) const {
@@ -256,18 +257,18 @@ class BranchProbabilityInfo {
     /// Fills in \p Enters vector with all such blocks that don't belong to
     /// SCC with \p SccNum ID but there is an edge to a block belonging to the
     /// SCC.
-    void getSccEnterBlocks(int SccNum,
+    LLVM_ABI void getSccEnterBlocks(int SccNum,
                            SmallVectorImpl<BasicBlock *> &Enters) const;
     /// Fills in \p Exits vector with all such blocks that don't belong to
     /// SCC with \p SccNum ID but there is an edge from a block belonging to the
     /// SCC.
-    void getSccExitBlocks(int SccNum,
+    LLVM_ABI void getSccExitBlocks(int SccNum,
                           SmallVectorImpl<BasicBlock *> &Exits) const;
 
   private:
     /// Returns \p BB's type according to classification given by SccBlockType
     /// enum. Please note that \p BB must belong to SCC with \p SccNum ID.
-    uint32_t getSccBlockType(const BasicBlock *BB, int SccNum) const;
+    LLVM_ABI uint32_t getSccBlockType(const BasicBlock *BB, int SccNum) const;
     /// Calculates \p BB's type and stores it in internal data structures for
     /// future use. Please note that \p BB must belong to SCC with \p SccNum ID.
     void calculateSccBlockType(const BasicBlock *BB, int SccNum);
@@ -297,7 +298,7 @@ class BranchProbabilityInfo {
   /// Helper class to keep basic block along with its loop data information.
   class LoopBlock {
   public:
-    explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
+    LLVM_ABI explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
                        const SccInfo &SccI);
 
     const BasicBlock *getBlock() const { return BB; }
@@ -432,7 +433,7 @@ class BranchProbabilityAnalysis
   using Result = BranchProbabilityInfo;
 
   /// Run the analysis pass over a function and produce BPI.
-  BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Printer pass for the \c BranchProbabilityAnalysis results.
@@ -443,13 +444,13 @@ class BranchProbabilityPrinterPass
 public:
   explicit BranchProbabilityPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
 /// Legacy analysis pass which computes \c BranchProbabilityInfo.
-class BranchProbabilityInfoWrapperPass : public FunctionPass {
+class LLVM_ABI BranchProbabilityInfoWrapperPass : public FunctionPass {
   BranchProbabilityInfo BPI;
 
 public:
diff --git a/llvm/include/llvm/Analysis/CFG.h b/llvm/include/llvm/Analysis/CFG.h
index 64e2079df9db2..24407d530e9d4 100644
--- a/llvm/include/llvm/Analysis/CFG.h
+++ b/llvm/include/llvm/Analysis/CFG.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_CFG_H
 #define LLVM_ANALYSIS_CFG_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/GraphTraits.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include <utility>
@@ -32,7 +33,7 @@ template <typename T> class SmallVectorImpl;
 /// computing dominators and loop info) analysis.
 ///
 /// The output is added to Result, as pairs of <from,to> edge info.
-void FindFunctionBackedges(
+LLVM_ABI void FindFunctionBackedges(
     const Function &F,
     SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
         Result);
@@ -40,15 +41,15 @@ void FindFunctionBackedges(
 /// Search for the specified successor of basic block BB and return its position
 /// in the terminator instruction's list of successors.  It is an error to call
 /// this with a block that is not a successor.
-unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
+LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
 
 /// Return true if the specified edge is a critical edge. Critical edges are
 /// edges from a block with multiple successors to a block with multiple
 /// predecessors.
 ///
-bool isCriticalEdge(const Instruction *TI, unsigned SuccNum,
+LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum,
                     bool AllowIdenticalEdges = false);
-bool isCriticalEdge(const Instruction *TI, const BasicBlock *Succ,
+LLVM_ABI bool isCriticalEdge(const Instruction *TI, const BasicBlock *Succ,
                     bool AllowIdenticalEdges = false);
 
 /// Determine whether instruction 'To' is reachable from 'From', without passing
@@ -66,7 +67,7 @@ bool isCriticalEdge(const Instruction *TI, const BasicBlock *Succ,
 /// we find a block that dominates the block containing 'To'. DT is most useful
 /// on branchy code but not loops, and LI is most useful on code with loops but
 /// does not help on branchy code outside loops.
-bool isPotentiallyReachable(
+LLVM_ABI bool isPotentiallyReachable(
     const Instruction *From, const Instruction *To,
     const SmallPtrSetImpl<BasicBlock *> *ExclusionSet = nullptr,
     const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
@@ -77,7 +78,7 @@ bool isPotentiallyReachable(
 /// Determine whether there is a path from From to To within a single function.
 /// Returns false only if we can prove that once 'From' has been reached then
 /// 'To' can not be executed. Conservatively returns true.
-bool isPotentiallyReachable(
+LLVM_ABI bool isPotentiallyReachable(
     const BasicBlock *From, const BasicBlock *To,
     const SmallPtrSetImpl<BasicBlock *> *ExclusionSet = nullptr,
     const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
@@ -91,7 +92,7 @@ bool isPotentiallyReachable(
 /// in 'ExclusionSet'. Returns false only if we can prove that once any block
 /// in 'Worklist' has been reached then 'StopBB' can not be executed.
 /// Conservatively returns true.
-bool isPotentiallyReachableFromMany(
+LLVM_ABI bool isPotentiallyReachableFromMany(
     SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
     const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
     const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
@@ -102,7 +103,7 @@ bool isPotentiallyReachableFromMany(
 /// only if we can prove that once any block in 'Worklist' has been reached then
 /// no blocks in 'StopSet' can be executed without passing through any blocks in
 /// 'ExclusionSet'. Conservatively returns true.
-bool isManyPotentiallyReachableFromMany(
+LLVM_ABI bool isManyPotentiallyReachableFromMany(
     SmallVectorImpl<BasicBlock *> &Worklist,
     const SmallPtrSetImpl<const BasicBlock *> &StopSet,
     const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
@@ -191,12 +192,12 @@ bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI) {
 //  - this edge is not a loop exit edge if encountered in a loop (and should
 //    be ignored)
 //  - must not be split for PGO instrumentation, for example.
-bool isPresplitCoroSuspendExitEdge(const BasicBlock &Src,
+LLVM_ABI bool isPresplitCoroSuspendExitEdge(const BasicBlock &Src,
                                    const BasicBlock &Dest);
 
 /// Return true if there is at least a path through which F can return, false if
 /// there is no such path.
-bool canReturn(const Function &F);
+LLVM_ABI bool canReturn(const Function &F);
 } // namespace llvm
 
 #endif
diff --git a/llvm/include/llvm/Analysis/CFGPrinter.h b/llvm/include/llvm/Analysis/CFGPrinter.h
index b844e3f11c4a5..b65dee7e93423 100644
--- a/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -18,6 +18,7 @@
 #ifndef LLVM_ANALYSIS_CFGPRINTER_H
 #define LLVM_ANALYSIS_CFGPRINTER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/HeatUtils.h"
@@ -36,25 +37,25 @@ class ModuleSlotTracker;
 template <class GraphType> struct GraphTraits;
 class CFGViewerPass : public PassInfoMixin<CFGViewerPass> {
 public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 class CFGOnlyViewerPass : public PassInfoMixin<CFGOnlyViewerPass> {
 public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 class CFGPrinterPass : public PassInfoMixin<CFGPrinterPass> {
 public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 class CFGOnlyPrinterPass : public PassInfoMixin<CFGOnlyPrinterPass> {
 public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
@@ -71,9 +72,9 @@ class DOTFuncInfo {
 
 public:
   DOTFuncInfo(const Function *F) : DOTFuncInfo(F, nullptr, nullptr, 0) {}
-  ~DOTFuncInfo();
+  LLVM_ABI ~DOTFuncInfo();
 
-  DOTFuncInfo(const Function *F, const BlockFrequencyInfo *BFI,
+  LLVM_ABI DOTFuncInfo(const Function *F, const BlockFrequencyInfo *BFI,
               const BranchProbabilityInfo *BPI, uint64_t MaxFreq);
 
   const BlockFrequencyInfo *getBFI() const { return BFI; }
@@ -82,7 +83,7 @@ class DOTFuncInfo {
 
   const Function *getFunction() const { return this->F; }
 
-  ModuleSlotTracker *getModuleSlotTracker();
+  LLVM_ABI ModuleSlotTracker *getModuleSlotTracker();
 
   uint64_t getMaxFreq() const { return MaxFreq; }
 
@@ -204,7 +205,7 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
     return SimpleNodeLabelString(Node);
   }
 
-  static std::string getCompleteNodeLabel(
+  LLVM_ABI static std::string getCompleteNodeLabel(
       const BasicBlock *Node, DOTFuncInfo *,
       function_ref<void(raw_string_ostream &, const BasicBlock &)>
           HandleBasicBlock = {},
@@ -325,8 +326,8 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
                         " fontname=\"Courier\"";
     return Attrs;
   }
-  bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
-  void computeDeoptOrUnreachablePaths(const Function *F);
+  LLVM_ABI bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
+  LLVM_ABI void computeDeoptOrUnreachablePaths(const Function *F);
 };
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/CGSCCPassManager.h b/llvm/include/llvm/Analysis/CGSCCPassManager.h
index 15b7f226fd828..a403d2016016e 100644
--- a/llvm/include/llvm/Analysis/CGSCCPassManager.h
+++ b/llvm/include/llvm/Analysis/CGSCCPassManager.h
@@ -88,6 +88,7 @@
 #ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
 #define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/IR/PassManager.h"
@@ -125,7 +126,7 @@ using CGSCCAnalysisManager =
 // See the comments on the definition of the specialization for details on how
 // it differs from the primary template.
 template <>
-PreservedAnalyses
+LLVM_ABI PreservedAnalyses
 PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
             CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                       CGSCCAnalysisManager &AM,
@@ -187,7 +188,7 @@ template <> class CGSCCAnalysisManagerModuleProxy::Result {
   /// Regardless of whether this analysis is marked as preserved, all of the
   /// analyses in the \c CGSCCAnalysisManager are potentially invalidated based
   /// on the set of preserved analyses.
-  bool invalidate(Module &M, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &Inv);
 
 private:
@@ -198,7 +199,7 @@ template <> class CGSCCAnalysisManagerModuleProxy::Result {
 /// Provide a specialized run method for the \c CGSCCAnalysisManagerModuleProxy
 /// so it can pass the lazy call graph to the result.
 template <>
-CGSCCAnalysisManagerModuleProxy::Result
+LLVM_ABI CGSCCAnalysisManagerModuleProxy::Result
 CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM);
 
 // Ensure the \c CGSCCAnalysisManagerModuleProxy is provided as an extern
@@ -335,7 +336,7 @@ class ModuleToPostOrderCGSCCPassAdaptor
   }
 
   /// Runs the CGSCC pass across every SCC in the module.
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   void printPipeline(raw_ostream &OS,
                      function_ref<StringRef(StringRef)> MapClassName2PassName) {
@@ -388,7 +389,7 @@ class FunctionAnalysisManagerCGSCCProxy
       return *FAM;
     }
 
-    bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
+    LLVM_ABI bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
                     CGSCCAnalysisManager::Invalidator &Inv);
 
   private:
@@ -396,7 +397,7 @@ class FunctionAnalysisManagerCGSCCProxy
   };
 
   /// Computes the \c FunctionAnalysisManager and stores it in the result proxy.
-  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);
+  LLVM_ABI Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);
 
 private:
   friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;
@@ -416,7 +417,7 @@ using CGSCCAnalysisManagerFunctionProxy =
 /// routine provides a helper that updates the call graph in those ways
 /// including returning whether any changes were made and populating a CG
 /// update result struct for the overall CGSCC walk.
-LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
+LLVM_ABI LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
     LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
     CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
     FunctionAnalysisManager &FAM);
@@ -427,7 +428,7 @@ LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
 /// routine provides a helper that updates the call graph in those ways
 /// including returning whether any changes were made and populating a CG
 /// update result struct for the overall CGSCC walk.
-LazyCallGraph::SCC &updateCGAndAnalysisManagerForCGSCCPass(
+LLVM_ABI LazyCallGraph::SCC &updateCGAndAnalysisManagerForCGSCCPass(
     LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
     CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
     FunctionAnalysisManager &FAM);
@@ -465,7 +466,7 @@ class CGSCCToFunctionPassAdaptor
   }
 
   /// Runs the function pass across every function in the module.
-  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                         LazyCallGraph &CG, CGSCCUpdateResult &UR);
 
   void printPipeline(raw_ostream &OS,
@@ -519,7 +520,7 @@ createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass,
 class ShouldNotRunFunctionPassesAnalysis
     : public AnalysisInfoMixin<ShouldNotRunFunctionPassesAnalysis> {
 public:
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
   struct Result {};
 
   Result run(Function &F, FunctionAnalysisManager &FAM) { return Result(); }
@@ -551,7 +552,7 @@ class DevirtSCCRepeatedPass : public PassInfoMixin<DevirtSCCRepeatedPass> {
 
   /// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
   /// whenever an indirect call is refined.
-  PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
                         LazyCallGraph &CG, CGSCCUpdateResult &UR);
 
   void printPipeline(raw_ostream &OS,
diff --git a/llvm/include/llvm/Analysis/CallGraph.h b/llvm/include/llvm/Analysis/CallGraph.h
index 7f977db161c20..06e0e40cab280 100644
--- a/llvm/include/llvm/Analysis/CallGraph.h
+++ b/llvm/include/llvm/Analysis/CallGraph.h
@@ -45,6 +45,7 @@
 #ifndef LLVM_ANALYSIS_CALLGRAPH_H
 #define LLVM_ANALYSIS_CALLGRAPH_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
@@ -86,12 +87,12 @@ class CallGraph {
   std::unique_ptr<CallGraphNode> CallsExternalNode;
 
 public:
-  explicit CallGraph(Module &M);
-  CallGraph(CallGraph &&Arg);
-  ~CallGraph();
+  LLVM_ABI explicit CallGraph(Module &M);
+  LLVM_ABI CallGraph(CallGraph &&Arg);
+  LLVM_ABI ~CallGraph();
 
-  void print(raw_ostream &OS) const;
-  void dump() const;
+  LLVM_ABI void print(raw_ostream &OS) const;
+  LLVM_ABI void dump() const;
 
   using iterator = FunctionMapTy::iterator;
   using const_iterator = FunctionMapTy::const_iterator;
@@ -99,7 +100,7 @@ class CallGraph {
   /// Returns the module the call graph corresponds to.
   Module &getModule() const { return M; }
 
-  bool invalidate(Module &, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Module &, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &);
 
   inline iterator begin() { return FunctionMap.begin(); }
@@ -140,18 +141,18 @@ class CallGraph {
   /// destroyed.  This is only valid if the function does not call any other
   /// functions (i.e., there are no edges in its CGN).  The easiest way to do
   /// this is to dropAllReferences before calling this.
-  Function *removeFunctionFromModule(CallGraphNode *CGN);
+  LLVM_ABI Function *removeFunctionFromModule(CallGraphNode *CGN);
 
   /// Similar to operator[], but this will insert a new CallGraphNode for
   /// \c F if one does not already exist.
-  CallGraphNode *getOrInsertFunction(const Function *F);
+  LLVM_ABI CallGraphNode *getOrInsertFunction(const Function *F);
 
   /// Populate \p CGN based on the calls inside the associated function.
-  void populateCallGraphNode(CallGraphNode *CGN);
+  LLVM_ABI void populateCallGraphNode(CallGraphNode *CGN);
 
   /// Add a function to the call graph, and link the node to all of the
   /// functions that it calls.
-  void addToCallGraph(Function *F);
+  LLVM_ABI void addToCallGraph(Function *F);
 };
 
 /// A node in the call graph for a module.
@@ -209,8 +210,8 @@ class CallGraphNode {
   }
 
   /// Print out this call graph node.
-  void dump() const;
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void dump() const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   //===---------------------------------------------------------------------
   // Methods to keep a call graph up to date with a function that has been
@@ -249,13 +250,13 @@ class CallGraphNode {
 
   /// Removes one edge associated with a null callsite from this node to
   /// the specified callee function.
-  void removeOneAbstractEdgeTo(CallGraphNode *Callee);
+  LLVM_ABI void removeOneAbstractEdgeTo(CallGraphNode *Callee);
 
   /// Replaces the edge in the node for the specified call site with a
   /// new one.
   ///
   /// Note that this method takes linear time, so it should be used sparingly.
-  void replaceCallEdge(CallBase &Call, CallBase &NewCall,
+  LLVM_ABI void replaceCallEdge(CallBase &Call, CallBase &NewCall,
                        CallGraphNode *NewNode);
 
 private:
@@ -304,7 +305,7 @@ class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
 public:
   explicit CallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -317,7 +318,7 @@ class CallGraphSCCsPrinterPass
 public:
   explicit CallGraphSCCsPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -329,7 +330,7 @@ class CallGraphSCCsPrinterPass
 /// module pass which runs over a module of IR and produces the call graph. The
 /// call graph interface is entirely a wrapper around a \c CallGraph object
 /// which is stored internally for each module.
-class CallGraphWrapperPass : public ModulePass {
+class LLVM_ABI CallGraphWrapperPass : public ModulePass {
   std::unique_ptr<CallGraph> G;
 
 public:
diff --git a/llvm/include/llvm/Analysis/CallGraphSCCPass.h b/llvm/include/llvm/Analysis/CallGraphSCCPass.h
index e8714bae8f4d9..cd663d5bea5ed 100644
--- a/llvm/include/llvm/Analysis/CallGraphSCCPass.h
+++ b/llvm/include/llvm/Analysis/CallGraphSCCPass.h
@@ -20,6 +20,7 @@
 #ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
 #define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Pass.h"
 #include <vector>
@@ -31,7 +32,7 @@ class CallGraphNode;
 class CallGraphSCC;
 class PMStack;
 
-class CallGraphSCCPass : public Pass {
+class LLVM_ABI CallGraphSCCPass : public Pass {
 public:
   explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}
 
@@ -96,11 +97,11 @@ class CallGraphSCC {
 
   /// ReplaceNode - This informs the SCC and the pass manager that the specified
   /// Old node has been deleted, and New is to be used in its place.
-  void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
+  LLVM_ABI void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
 
   /// DeleteNode - This informs the SCC and the pass manager that the specified
   /// Old node has been deleted.
-  void DeleteNode(CallGraphNode *Old);
+  LLVM_ABI void DeleteNode(CallGraphNode *Old);
 
   using iterator = std::vector<CallGraphNode *>::const_iterator;
 
@@ -110,13 +111,13 @@ class CallGraphSCC {
   const CallGraph &getCallGraph() { return CG; }
 };
 
-void initializeDummyCGSCCPassPass(PassRegistry &);
+LLVM_ABI void initializeDummyCGSCCPassPass(PassRegistry &);
 
 /// This pass is required by interprocedural register allocation. It forces
 /// codegen to follow bottom up order on call graph.
 class DummyCGSCCPass : public CallGraphSCCPass {
 public:
-  static char ID;
+  LLVM_ABI static char ID;
 
   DummyCGSCCPass() : CallGraphSCCPass(ID) {
     PassRegistry &Registry = *PassRegistry::getPassRegistry();
diff --git a/llvm/include/llvm/Analysis/CallPrinter.h b/llvm/include/llvm/Analysis/CallPrinter.h
index 95cb5cc3ca862..91fa7d0410dad 100644
--- a/llvm/include/llvm/Analysis/CallPrinter.h
+++ b/llvm/include/llvm/Analysis/CallPrinter.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_CALLPRINTER_H
 #define LLVM_ANALYSIS_CALLPRINTER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -23,19 +24,19 @@ class ModulePass;
 /// Pass for printing the call graph to a dot file
 class CallGraphDOTPrinterPass : public PassInfoMixin<CallGraphDOTPrinterPass> {
 public:
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 /// Pass for viewing the call graph
 class CallGraphViewerPass : public PassInfoMixin<CallGraphViewerPass> {
 public:
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
-ModulePass *createCallGraphViewerPass();
-ModulePass *createCallGraphDOTPrinterPass();
+LLVM_ABI ModulePass *createCallGraphViewerPass();
+LLVM_ABI ModulePass *createCallGraphDOTPrinterPass();
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/CaptureTracking.h b/llvm/include/llvm/Analysis/CaptureTracking.h
index c0cea8c9fadb7..44fe9232b20f2 100644
--- a/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
 #define LLVM_ANALYSIS_CAPTURETRACKING_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/Support/ModRef.h"
 
@@ -31,7 +32,7 @@ namespace llvm {
   /// getDefaultMaxUsesToExploreForCaptureTracking - Return default value of
   /// the maximal number of uses to explore before giving up. It is used by
   /// PointerMayBeCaptured family analysis.
-  unsigned getDefaultMaxUsesToExploreForCaptureTracking();
+  LLVM_ABI unsigned getDefaultMaxUsesToExploreForCaptureTracking();
 
   /// PointerMayBeCaptured - Return true if this pointer value may be captured
   /// by the enclosing function (which is required to exist).  This routine can
@@ -44,7 +45,7 @@ namespace llvm {
   /// This function only considers captures of the passed value via its def-use
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
-  bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
+  LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
                             unsigned MaxUsesToExplore = 0);
 
   /// Return which components of the pointer may be captured. Only consider
@@ -54,7 +55,7 @@ namespace llvm {
   /// This function only considers captures of the passed value via its def-use
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
-  CaptureComponents PointerMayBeCaptured(
+  LLVM_ABI CaptureComponents PointerMayBeCaptured(
       const Value *V, bool ReturnCaptures, CaptureComponents Mask,
       function_ref<bool(CaptureComponents)> StopFn = capturesAnything,
       unsigned MaxUsesToExplore = 0);
@@ -73,7 +74,7 @@ namespace llvm {
   /// This function only considers captures of the passed value via its def-use
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
-  bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
+  LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
                                   const Instruction *I, const DominatorTree *DT,
                                   bool IncludeI = false,
                                   unsigned MaxUsesToExplore = 0,
@@ -87,7 +88,7 @@ namespace llvm {
   /// This function only considers captures of the passed value via its def-use
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
-  CaptureComponents PointerMayBeCapturedBefore(
+  LLVM_ABI CaptureComponents PointerMayBeCapturedBefore(
       const Value *V, bool ReturnCaptures, const Instruction *I,
       const DominatorTree *DT, bool IncludeI, CaptureComponents Mask,
       function_ref<bool(CaptureComponents)> StopFn = capturesAnything,
@@ -103,7 +104,7 @@ namespace llvm {
   // cycle.
   //
   // Only consider components that are part of \p Mask.
-  Instruction *FindEarliestCapture(const Value *V, Function &F,
+  LLVM_ABI Instruction *FindEarliestCapture(const Value *V, Function &F,
                                    bool ReturnCaptures, const DominatorTree &DT,
                                    CaptureComponents Mask,
                                    unsigned MaxUsesToExplore = 0);
@@ -133,7 +134,7 @@ namespace llvm {
   /// This callback is used in conjunction with PointerMayBeCaptured. In
   /// addition to the interface here, you'll need to provide your own getters
   /// to see whether anything was captured.
-  struct CaptureTracker {
+  struct LLVM_ABI CaptureTracker {
     /// Action returned from captures().
     enum Action {
       /// Stop the traversal.
@@ -178,7 +179,7 @@ namespace llvm {
   ///
   /// \p Base is the starting value of the capture analysis, which is
   /// relevant for address_is_null captures.
-  UseCaptureInfo DetermineUseCaptureKind(const Use &U, const Value *Base);
+  LLVM_ABI UseCaptureInfo DetermineUseCaptureKind(const Use &U, const Value *Base);
 
   /// PointerMayBeCaptured - Visit the value and the values derived from it and
   /// find values which appear to be capturing the pointer value. This feeds
@@ -189,12 +190,12 @@ namespace llvm {
   /// This function only considers captures of the passed value via its def-use
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
-  void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
+  LLVM_ABI void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
                             unsigned MaxUsesToExplore = 0);
 
   /// Returns true if the pointer is to a function-local object that never
   /// escapes from the function.
-  bool isNonEscapingLocalObject(
+  LLVM_ABI bool isNonEscapingLocalObject(
       const Value *V,
       SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr);
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/CodeMetrics.h b/llvm/include/llvm/Analysis/CodeMetrics.h
index a51d923eb44ed..22818e56334eb 100644
--- a/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_CODEMETRICS_H
 #define LLVM_ANALYSIS_CODEMETRICS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/Support/InstructionCost.h"
 
@@ -77,18 +78,18 @@ struct CodeMetrics {
   unsigned NumRets = 0;
 
   /// Add information about a block to the current state.
-  void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
+  LLVM_ABI void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
                          const SmallPtrSetImpl<const Value *> &EphValues,
                          bool PrepareForLTO = false, const Loop *L = nullptr);
 
   /// Collect a loop's ephemeral values (those used only by an assume
   /// or similar intrinsics in the loop).
-  static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
+  LLVM_ABI static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
                                      SmallPtrSetImpl<const Value *> &EphValues);
 
   /// Collect a functions's ephemeral values (those used only by an
   /// assume or similar intrinsics in the function).
-  static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
+  LLVM_ABI static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
                                      SmallPtrSetImpl<const Value *> &EphValues);
 };
 
diff --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
index 706ba0d835cb1..ed71a4ee4bdfd 100644
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -19,6 +19,7 @@
 #ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
 #define LLVM_ANALYSIS_CONSTANTFOLDING_H
 
+#include "llvm/Support/Compiler.h"
 #include <stdint.h>
 
 namespace llvm {
@@ -44,7 +45,7 @@ class Type;
 /// the constant. Because of constantexprs, this function is recursive.
 /// If the global is part of a dso_local_equivalent constant, return it through
 /// `Equiv` if it is provided.
-bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
+LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
                                 const DataLayout &DL,
                                 DSOLocalEquivalent **DSOEquiv = nullptr);
 
@@ -53,13 +54,13 @@ bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
 /// Note that this fails if not all of the operands are constant.  Otherwise,
 /// this function can only fail when attempting to fold instructions like loads
 /// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(const Instruction *I, const DataLayout &DL,
+LLVM_ABI Constant *ConstantFoldInstruction(const Instruction *I, const DataLayout &DL,
                                   const TargetLibraryInfo *TLI = nullptr);
 
 /// ConstantFoldConstant - Fold the constant using the specified DataLayout.
 /// This function always returns a non-null constant: Either the folding result,
 /// or the original constant if further folding is not possible.
-Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
+LLVM_ABI Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                const TargetLibraryInfo *TLI = nullptr);
 
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -74,7 +75,7 @@ Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
 /// all uses of the original operation are replaced by the constant-folded
 /// result. The \p AllowNonDeterministic parameter controls whether this is
 /// allowed.
-Constant *ConstantFoldInstOperands(const Instruction *I,
+LLVM_ABI Constant *ConstantFoldInstOperands(const Instruction *I,
                                    ArrayRef<Constant *> Ops,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI = nullptr,
@@ -84,24 +85,24 @@ Constant *ConstantFoldInstOperands(const Instruction *I,
 /// specified operands. Returns null or a constant expression of the specified
 /// operands on failure.
 /// Denormal inputs may be flushed based on the denormal handling mode.
-Constant *ConstantFoldCompareInstOperands(
+LLVM_ABI Constant *ConstantFoldCompareInstOperands(
     unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL,
     const TargetLibraryInfo *TLI = nullptr, const Instruction *I = nullptr);
 
 /// Attempt to constant fold a unary operation with the specified operand.
 /// Returns null on failure.
-Constant *ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
+LLVM_ABI Constant *ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                      const DataLayout &DL);
 
 /// Attempt to constant fold a binary operation with the specified operands.
 /// Returns null or a constant expression of the specified operands on failure.
-Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
+LLVM_ABI Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                        Constant *RHS, const DataLayout &DL);
 
 /// Attempt to constant fold a floating point binary operation with the
 /// specified operands, applying the denormal handling mod to the operands.
 /// Returns null or a constant expression of the specified operands on failure.
-Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
+LLVM_ABI Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                      Constant *RHS, const DataLayout &DL,
                                      const Instruction *I,
                                      bool AllowNonDeterministic = true);
@@ -114,109 +115,109 @@ Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
 ///
 /// If the calling function's "denormal-fp-math" input mode is "dynamic" for the
 /// floating-point type, returns nullptr for denormal inputs.
-Constant *FlushFPConstant(Constant *Operand, const Instruction *I,
+LLVM_ABI Constant *FlushFPConstant(Constant *Operand, const Instruction *I,
                           bool IsOutput);
 
 /// Attempt to constant fold a select instruction with the specified
 /// operands. The constant result is returned if successful; if not, null is
 /// returned.
-Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
+LLVM_ABI Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
                                         Constant *V2);
 
 /// Attempt to constant fold a cast with the specified operand.  If it
 /// fails, it returns a constant expression of the specified operand.
-Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
+LLVM_ABI Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
                                   const DataLayout &DL);
 
 /// Constant fold a zext, sext or trunc, depending on IsSigned and whether the
 /// DestTy is wider or narrower than C. Returns nullptr on failure.
-Constant *ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned,
+LLVM_ABI Constant *ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned,
                                   const DataLayout &DL);
 
 /// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
 /// instruction with the specified operands and indices.  The constant result is
 /// returned if successful; if not, null is returned.
-Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
+LLVM_ABI Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
                                              ArrayRef<unsigned> Idxs);
 
 /// Attempt to constant fold an extractvalue instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
-Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
+LLVM_ABI Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
                                               ArrayRef<unsigned> Idxs);
 
 /// Attempt to constant fold an insertelement instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
-Constant *ConstantFoldInsertElementInstruction(Constant *Val,
+LLVM_ABI Constant *ConstantFoldInsertElementInstruction(Constant *Val,
                                                Constant *Elt,
                                                Constant *Idx);
 
 /// Attempt to constant fold an extractelement instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
-Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
+LLVM_ABI Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
 
 /// Attempt to constant fold a shufflevector instruction with the
 /// specified operands and mask.  See class ShuffleVectorInst for a description
 /// of the mask representation. The constant result is returned if successful;
 /// if not, null is returned.
-Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
+LLVM_ABI Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
                                                ArrayRef<int> Mask);
 
 /// Extract value of C at the given Offset reinterpreted as Ty. If bits past
 /// the end of C are accessed, they are assumed to be poison.
-Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
+LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
                                     const DataLayout &DL);
 
 /// Extract value of C reinterpreted as Ty. Same as previous API with zero
 /// offset.
-Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
+LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                     const DataLayout &DL);
 
 /// Return the value that a load from C with offset Offset would produce if it
 /// is constant and determinable. If this is not determinable, return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset,
+LLVM_ABI Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset,
                                        const DataLayout &DL);
 
 /// Return the value that a load from C would produce if it is constant and
 /// determinable. If this is not determinable, return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
+LLVM_ABI Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                        const DataLayout &DL);
 
 /// If C is a uniform value where all bits are the same (either all zero, all
 /// ones, all undef or all poison), return the corresponding uniform value in
 /// the new type. If the value is not uniform or the result cannot be
 /// represented, return null.
-Constant *ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
+LLVM_ABI Constant *ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
                                            const DataLayout &DL);
 
 /// canConstantFoldCallTo - Return true if its even possible to fold a call to
 /// the specified function.
-bool canConstantFoldCallTo(const CallBase *Call, const Function *F);
+LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F);
 
 /// ConstantFoldCall - Attempt to constant fold a call to the specified function
 /// with the specified arguments, returning null if unsuccessful.
-Constant *ConstantFoldCall(const CallBase *Call, Function *F,
+LLVM_ABI Constant *ConstantFoldCall(const CallBase *Call, Function *F,
                            ArrayRef<Constant *> Operands,
                            const TargetLibraryInfo *TLI = nullptr,
                            bool AllowNonDeterministic = true);
 
-Constant *ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
+LLVM_ABI Constant *ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
                                       Constant *RHS, Type *Ty,
                                       Instruction *FMFSource);
 
 /// ConstantFoldLoadThroughBitcast - try to cast constant to destination type
 /// returning null if unsuccessful. Can cast pointer to pointer or pointer to
 /// integer and vice versa if their sizes are equal.
-Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
+LLVM_ABI Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                          const DataLayout &DL);
 
 /// Check whether the given call has no side-effects.
 /// Specifically checks for math routines which sometimes set errno.
-bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);
 
-Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
+LLVM_ABI Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/ConstraintSystem.h b/llvm/include/llvm/Analysis/ConstraintSystem.h
index 01eeadb17db9f..ddd378b60ec5d 100644
--- a/llvm/include/llvm/Analysis/ConstraintSystem.h
+++ b/llvm/include/llvm/Analysis/ConstraintSystem.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
 #define LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
@@ -108,7 +109,7 @@ class ConstraintSystem {
   }
 
   /// Returns true if there may be a solution for the constraints in the system.
-  bool mayHaveSolution();
+  LLVM_ABI bool mayHaveSolution();
 
   static SmallVector<int64_t, 8> negate(SmallVector<int64_t, 8> R) {
     // The negated constraint R is obtained by multiplying by -1 and adding 1 to
@@ -143,7 +144,7 @@ class ConstraintSystem {
     return R;
   }
 
-  bool isConditionImplied(SmallVector<int64_t, 8> R) const;
+  LLVM_ABI bool isConditionImplied(SmallVector<int64_t, 8> R) const;
 
   SmallVector<int64_t> getLastConstraint() const {
     assert(!Constraints.empty() && "Constraint system is empty");
@@ -163,7 +164,7 @@ class ConstraintSystem {
   unsigned size() const { return Constraints.size(); }
 
   /// Print the constraints in the system.
-  void dump() const;
+  LLVM_ABI void dump() const;
 };
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/CtxProfAnalysis.h b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
index aa582cfef1ad1..b65c0bd182e58 100644
--- a/llvm/include/llvm/Analysis/CtxProfAnalysis.h
+++ b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_CTXPROFANALYSIS_H
 #define LLVM_ANALYSIS_CTXPROFANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/IR/GlobalValue.h"
 #include "llvm/IR/InstrTypes.h"
@@ -47,7 +48,7 @@ class PGOContextualProfile {
   std::map<GlobalValue::GUID, FunctionInfo> FuncInfo;
 
   /// Get the GUID of this Function if it's defined in this module.
-  GlobalValue::GUID getDefinedFunctionGUID(const Function &F) const;
+  LLVM_ABI GlobalValue::GUID getDefinedFunctionGUID(const Function &F) const;
 
   // This is meant to be constructed from CtxProfAnalysis, which will also set
   // its state piecemeal.
@@ -65,7 +66,7 @@ class PGOContextualProfile {
 
   const PGOCtxProfile &profiles() const { return Profiles; }
 
-  bool isInSpecializedModule() const;
+  LLVM_ABI bool isInSpecializedModule() const;
 
   bool isFunctionKnown(const Function &F) const {
     return getDefinedFunctionGUID(F) != 0;
@@ -101,11 +102,11 @@ class PGOContextualProfile {
   using ConstVisitor = function_ref<void(const PGOCtxProfContext &)>;
   using Visitor = function_ref<void(PGOCtxProfContext &)>;
 
-  void update(Visitor, const Function &F);
-  void visit(ConstVisitor, const Function *F = nullptr) const;
+  LLVM_ABI void update(Visitor, const Function &F);
+  LLVM_ABI void visit(ConstVisitor, const Function *F = nullptr) const;
 
-  const CtxProfFlatProfile flatten() const;
-  const CtxProfFlatIndirectCallProfile flattenVirtCalls() const;
+  LLVM_ABI const CtxProfFlatProfile flatten() const;
+  LLVM_ABI const CtxProfFlatIndirectCallProfile flattenVirtCalls() const;
 
   bool invalidate(Module &, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &) {
@@ -120,25 +121,25 @@ class CtxProfAnalysis : public AnalysisInfoMixin<CtxProfAnalysis> {
   const std::optional<StringRef> Profile;
 
 public:
-  static AnalysisKey Key;
-  explicit CtxProfAnalysis(std::optional<StringRef> Profile = std::nullopt);
+  LLVM_ABI static AnalysisKey Key;
+  LLVM_ABI explicit CtxProfAnalysis(std::optional<StringRef> Profile = std::nullopt);
 
   using Result = PGOContextualProfile;
 
-  PGOContextualProfile run(Module &M, ModuleAnalysisManager &MAM);
+  LLVM_ABI PGOContextualProfile run(Module &M, ModuleAnalysisManager &MAM);
 
   /// Get the instruction instrumenting a callsite, or nullptr if that cannot be
   /// found.
-  static InstrProfCallsite *getCallsiteInstrumentation(CallBase &CB);
+  LLVM_ABI static InstrProfCallsite *getCallsiteInstrumentation(CallBase &CB);
 
   /// Get the instruction instrumenting a BB, or nullptr if not present.
-  static InstrProfIncrementInst *getBBInstrumentation(BasicBlock &BB);
+  LLVM_ABI static InstrProfIncrementInst *getBBInstrumentation(BasicBlock &BB);
 
   /// Get the step instrumentation associated with a `select`
-  static InstrProfIncrementInstStep *getSelectInstrumentation(SelectInst &SI);
+  LLVM_ABI static InstrProfIncrementInstStep *getSelectInstrumentation(SelectInst &SI);
 
   // FIXME: refactor to an advisor model, and separate
-  static void collectIndirectCallPromotionList(
+  LLVM_ABI static void collectIndirectCallPromotionList(
       CallBase &IC, Result &Profile,
       SetVector<std::pair<CallBase *, Function *>> &Candidates);
 };
@@ -147,9 +148,9 @@ class CtxProfAnalysisPrinterPass
     : public PassInfoMixin<CtxProfAnalysisPrinterPass> {
 public:
   enum class PrintMode { Everything, YAML };
-  explicit CtxProfAnalysisPrinterPass(raw_ostream &OS);
+  LLVM_ABI explicit CtxProfAnalysisPrinterPass(raw_ostream &OS);
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
   static bool isRequired() { return true; }
 
 private:
@@ -166,23 +167,23 @@ class ProfileAnnotator {
   std::unique_ptr<ProfileAnnotatorImpl> PImpl;
 
 public:
-  ProfileAnnotator(const Function &F, ArrayRef<uint64_t> RawCounters);
-  uint64_t getBBCount(const BasicBlock &BB) const;
+  LLVM_ABI ProfileAnnotator(const Function &F, ArrayRef<uint64_t> RawCounters);
+  LLVM_ABI uint64_t getBBCount(const BasicBlock &BB) const;
 
   // Finds the true and false counts for the given select instruction. Returns
   // false if the select doesn't have instrumentation or if the count of the
   // parent BB is 0.
-  bool getSelectInstrProfile(SelectInst &SI, uint64_t &TrueCount,
+  LLVM_ABI bool getSelectInstrProfile(SelectInst &SI, uint64_t &TrueCount,
                              uint64_t &FalseCount) const;
   // Clears Profile and populates it with the edge weights, in the same order as
   // they need to appear in the MD_prof metadata. Also computes the max of those
   // weights and returns it in MaxCount. Returns false if:
   //   - the BB has less than 2 successors
   //   - the counts are 0
-  bool getOutgoingBranchWeights(BasicBlock &BB,
+  LLVM_ABI bool getOutgoingBranchWeights(BasicBlock &BB,
                                 SmallVectorImpl<uint64_t> &Profile,
                                 uint64_t &MaxCount) const;
-  ~ProfileAnnotator();
+  LLVM_ABI ~ProfileAnnotator();
 };
 
 /// Assign a GUID to functions as metadata. GUID calculation takes linkage into
@@ -200,10 +201,10 @@ class AssignGUIDPass : public PassInfoMixin<AssignGUIDPass> {
 
   /// Assign a GUID *if* one is not already assigned, as a function metadata named
   /// `GUIDMetadataName`.
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
-  static const char *GUIDMetadataName;
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+  LLVM_ABI static const char *GUIDMetadataName;
   // This should become GlobalValue::getGUID
-  static uint64_t getGUID(const Function &F);
+  LLVM_ABI static uint64_t getGUID(const Function &F);
 };
 
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/DDG.h b/llvm/include/llvm/Analysis/DDG.h
index dfd84a9addb97..d0ec687103adc 100644
--- a/llvm/include/llvm/Analysis/DDG.h
+++ b/llvm/include/llvm/Analysis/DDG.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_DDG_H
 #define LLVM_ANALYSIS_DDG_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DirectedGraph.h"
 #include "llvm/Analysis/DependenceAnalysis.h"
@@ -41,7 +42,7 @@ class LPMUpdater;
 ///    nodes. The root node cannot be part of a pi-block.
 /// 4. Root node is a special node that connects to all components such that
 ///    there is always a path from it to any node in the graph.
-class DDGNode : public DDGNodeBase {
+class LLVM_ABI DDGNode : public DDGNodeBase {
 public:
   using InstructionListType = SmallVectorImpl<Instruction *>;
 
@@ -105,7 +106,7 @@ class RootDDGNode : public DDGNode {
 };
 
 /// Subclass of DDGNode representing single or multi-instruction nodes.
-class SimpleDDGNode : public DDGNode {
+class LLVM_ABI SimpleDDGNode : public DDGNode {
   friend class DDGBuilder;
 
 public:
@@ -167,7 +168,7 @@ class SimpleDDGNode : public DDGNode {
 /// {a -> b}, {b -> c, d}, {c -> a}
 /// the cycle a -> b -> c -> a is abstracted into a pi-block "p" as follows:
 /// {p -> d} with "p" containing: {a -> b}, {b -> c}, {c -> a}
-class PiBlockDDGNode : public DDGNode {
+class LLVM_ABI PiBlockDDGNode : public DDGNode {
 public:
   using PiNodeList = SmallVector<DDGNode *, 4>;
 
@@ -303,7 +304,7 @@ template <typename NodeType> class DependenceGraphInfo {
 using DDGInfo = DependenceGraphInfo<DDGNode>;
 
 /// Data Dependency Graph
-class DataDependenceGraph : public DDGBase, public DDGInfo {
+class LLVM_ABI DataDependenceGraph : public DDGBase, public DDGInfo {
   friend AbstractDependenceGraphBuilder<DataDependenceGraph>;
   friend class DDGBuilder;
 
@@ -343,7 +344,7 @@ class DataDependenceGraph : public DDGBase, public DDGInfo {
 ///
 /// For information about time complexity of the build algorithm see the
 /// comments near the declaration of AbstractDependenceGraphBuilder.
-class DDGBuilder : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
+class LLVM_ABI DDGBuilder : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
 public:
   DDGBuilder(DataDependenceGraph &G, DependenceInfo &D,
              const BasicBlockListType &BBs)
@@ -400,11 +401,11 @@ class DDGBuilder : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
   bool shouldCreatePiBlocks() const final;
 };
 
-raw_ostream &operator<<(raw_ostream &OS, const DDGNode &N);
-raw_ostream &operator<<(raw_ostream &OS, const DDGNode::NodeKind K);
-raw_ostream &operator<<(raw_ostream &OS, const DDGEdge &E);
-raw_ostream &operator<<(raw_ostream &OS, const DDGEdge::EdgeKind K);
-raw_ostream &operator<<(raw_ostream &OS, const DataDependenceGraph &G);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DDGNode &N);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DDGNode::NodeKind K);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DDGEdge &E);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DDGEdge::EdgeKind K);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DataDependenceGraph &G);
 
 //===--------------------------------------------------------------------===//
 // DDG Analysis Passes
@@ -414,7 +415,7 @@ raw_ostream &operator<<(raw_ostream &OS, const DataDependenceGraph &G);
 class DDGAnalysis : public AnalysisInfoMixin<DDGAnalysis> {
 public:
   using Result = std::unique_ptr<DataDependenceGraph>;
-  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
 
 private:
   friend AnalysisInfoMixin<DDGAnalysis>;
@@ -425,7 +426,7 @@ class DDGAnalysis : public AnalysisInfoMixin<DDGAnalysis> {
 class DDGAnalysisPrinterPass : public PassInfoMixin<DDGAnalysisPrinterPass> {
 public:
   explicit DDGAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+  LLVM_ABI PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                         LoopStandardAnalysisResults &AR, LPMUpdater &U);
   static bool isRequired() { return true; }
 
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index b6efd82bb308e..00ae1837020f3 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_DXILRESOURCE_H
 #define LLVM_ANALYSIS_DXILRESOURCE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
@@ -289,40 +290,40 @@ class ResourceTypeInfo {
   dxil::ResourceKind Kind;
 
 public:
-  ResourceTypeInfo(TargetExtType *HandleTy, const dxil::ResourceClass RC,
+  LLVM_ABI ResourceTypeInfo(TargetExtType *HandleTy, const dxil::ResourceClass RC,
                    const dxil::ResourceKind Kind);
   ResourceTypeInfo(TargetExtType *HandleTy)
       : ResourceTypeInfo(HandleTy, {}, dxil::ResourceKind::Invalid) {}
 
   TargetExtType *getHandleTy() const { return HandleTy; }
-  StructType *createElementStruct();
+  LLVM_ABI StructType *createElementStruct();
 
   // Conditions to check before accessing specific views.
-  bool isUAV() const;
-  bool isCBuffer() const;
-  bool isSampler() const;
-  bool isStruct() const;
-  bool isTyped() const;
-  bool isFeedback() const;
-  bool isMultiSample() const;
+  LLVM_ABI bool isUAV() const;
+  LLVM_ABI bool isCBuffer() const;
+  LLVM_ABI bool isSampler() const;
+  LLVM_ABI bool isStruct() const;
+  LLVM_ABI bool isTyped() const;
+  LLVM_ABI bool isFeedback() const;
+  LLVM_ABI bool isMultiSample() const;
 
   // Views into the type.
-  UAVInfo getUAV() const;
-  uint32_t getCBufferSize(const DataLayout &DL) const;
-  dxil::SamplerType getSamplerType() const;
-  StructInfo getStruct(const DataLayout &DL) const;
-  TypedInfo getTyped() const;
-  dxil::SamplerFeedbackType getFeedbackType() const;
-  uint32_t getMultiSampleCount() const;
+  LLVM_ABI UAVInfo getUAV() const;
+  LLVM_ABI uint32_t getCBufferSize(const DataLayout &DL) const;
+  LLVM_ABI dxil::SamplerType getSamplerType() const;
+  LLVM_ABI StructInfo getStruct(const DataLayout &DL) const;
+  LLVM_ABI TypedInfo getTyped() const;
+  LLVM_ABI dxil::SamplerFeedbackType getFeedbackType() const;
+  LLVM_ABI uint32_t getMultiSampleCount() const;
 
   dxil::ResourceClass getResourceClass() const { return RC; }
   dxil::ResourceKind getResourceKind() const { return Kind; }
 
-  bool operator==(const ResourceTypeInfo &RHS) const;
+  LLVM_ABI bool operator==(const ResourceTypeInfo &RHS) const;
   bool operator!=(const ResourceTypeInfo &RHS) const { return !(*this == RHS); }
-  bool operator<(const ResourceTypeInfo &RHS) const;
+  LLVM_ABI bool operator<(const ResourceTypeInfo &RHS) const;
 
-  void print(raw_ostream &OS, const DataLayout &DL) const;
+  LLVM_ABI void print(raw_ostream &OS, const DataLayout &DL) const;
 };
 
 //===----------------------------------------------------------------------===//
@@ -381,10 +382,10 @@ class ResourceInfo {
   StringRef getName() const { return Symbol ? Symbol->getName() : ""; }
 
   bool hasSymbol() const { return Symbol; }
-  GlobalVariable *createSymbol(Module &M, StructType *Ty, StringRef Name = "");
-  MDTuple *getAsMetadata(Module &M, dxil::ResourceTypeInfo &RTI) const;
+  LLVM_ABI GlobalVariable *createSymbol(Module &M, StructType *Ty, StringRef Name = "");
+  LLVM_ABI MDTuple *getAsMetadata(Module &M, dxil::ResourceTypeInfo &RTI) const;
 
-  std::pair<uint32_t, uint32_t>
+  LLVM_ABI std::pair<uint32_t, uint32_t>
   getAnnotateProps(Module &M, dxil::ResourceTypeInfo &RTI) const;
 
   bool operator==(const ResourceInfo &RHS) const {
@@ -396,7 +397,7 @@ class ResourceInfo {
     return Binding < RHS.Binding;
   }
 
-  void print(raw_ostream &OS, dxil::ResourceTypeInfo &RTI,
+  LLVM_ABI void print(raw_ostream &OS, dxil::ResourceTypeInfo &RTI,
              const DataLayout &DL) const;
 };
 
@@ -408,7 +409,7 @@ class DXILResourceTypeMap {
   DenseMap<TargetExtType *, dxil::ResourceTypeInfo> Infos;
 
 public:
-  bool invalidate(Module &M, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &Inv);
 
   dxil::ResourceTypeInfo &operator[](TargetExtType *Ty) {
@@ -436,7 +437,7 @@ class DXILResourceTypeAnalysis
   }
 };
 
-class DXILResourceTypeWrapperPass : public ImmutablePass {
+class LLVM_ABI DXILResourceTypeWrapperPass : public ImmutablePass {
   DXILResourceTypeMap DRTM;
 
   virtual void anchor();
@@ -449,7 +450,7 @@ class DXILResourceTypeWrapperPass : public ImmutablePass {
   const DXILResourceTypeMap &getResourceTypeMap() const { return DRTM; }
 };
 
-ModulePass *createDXILResourceTypeWrapperPassPass();
+LLVM_ABI ModulePass *createDXILResourceTypeWrapperPassPass();
 
 //===----------------------------------------------------------------------===//
 
@@ -555,7 +556,7 @@ class DXILResourceMap {
 
   bool hasInvalidCounterDirection() const { return HasInvalidDirection; }
 
-  void print(raw_ostream &OS, DXILResourceTypeMap &DRTM,
+  LLVM_ABI void print(raw_ostream &OS, DXILResourceTypeMap &DRTM,
              const DataLayout &DL) const;
 
   friend class DXILResourceAnalysis;
@@ -571,7 +572,7 @@ class DXILResourceAnalysis : public AnalysisInfoMixin<DXILResourceAnalysis> {
   using Result = DXILResourceMap;
 
   /// Gather resource info for the module \c M.
-  DXILResourceMap run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI DXILResourceMap run(Module &M, ModuleAnalysisManager &AM);
 };
 
 /// Printer pass for the \c DXILResourceAnalysis results.
@@ -581,12 +582,12 @@ class DXILResourcePrinterPass : public PassInfoMixin<DXILResourcePrinterPass> {
 public:
   explicit DXILResourcePrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
-class DXILResourceWrapperPass : public ModulePass {
+class LLVM_ABI DXILResourceWrapperPass : public ModulePass {
   std::unique_ptr<DXILResourceMap> Map;
   DXILResourceTypeMap *DRTM;
 
@@ -607,7 +608,7 @@ class DXILResourceWrapperPass : public ModulePass {
   void dump() const;
 };
 
-ModulePass *createDXILResourceWrapperPassPass();
+LLVM_ABI ModulePass *createDXILResourceWrapperPassPass();
 
 //===----------------------------------------------------------------------===//
 
@@ -653,14 +654,14 @@ class DXILResourceBindingInfo {
       FreeRanges.emplace_back(0, UINT32_MAX);
     }
     // Size == -1 means unbounded array
-    std::optional<uint32_t> findAvailableBinding(int32_t Size);
+    LLVM_ABI std::optional<uint32_t> findAvailableBinding(int32_t Size);
   };
 
   struct BindingSpaces {
     dxil::ResourceClass RC;
     llvm::SmallVector<RegisterSpace> Spaces;
     BindingSpaces(dxil::ResourceClass RC) : RC(RC) {}
-    RegisterSpace &getOrInsertSpace(uint32_t Space);
+    LLVM_ABI RegisterSpace &getOrInsertSpace(uint32_t Space);
   };
 
 private:
@@ -700,7 +701,7 @@ class DXILResourceBindingInfo {
   }
 
   // Size == -1 means unbounded array
-  std::optional<uint32_t> findAvailableBinding(dxil::ResourceClass RC,
+  LLVM_ABI std::optional<uint32_t> findAvailableBinding(dxil::ResourceClass RC,
                                                uint32_t Space, int32_t Size);
 
   friend class DXILResourceBindingAnalysis;
@@ -716,10 +717,10 @@ class DXILResourceBindingAnalysis
 public:
   using Result = DXILResourceBindingInfo;
 
-  DXILResourceBindingInfo run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI DXILResourceBindingInfo run(Module &M, ModuleAnalysisManager &AM);
 };
 
-class DXILResourceBindingWrapperPass : public ModulePass {
+class LLVM_ABI DXILResourceBindingWrapperPass : public ModulePass {
   std::unique_ptr<DXILResourceBindingInfo> BindingInfo;
 
 public:
@@ -736,7 +737,7 @@ class DXILResourceBindingWrapperPass : public ModulePass {
   void releaseMemory() override;
 };
 
-ModulePass *createDXILResourceBindingWrapperPassPass();
+LLVM_ABI ModulePass *createDXILResourceBindingWrapperPassPass();
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/DemandedBits.h b/llvm/include/llvm/Analysis/DemandedBits.h
index 0475c4ff07ad8..b7886e3a2a212 100644
--- a/llvm/include/llvm/Analysis/DemandedBits.h
+++ b/llvm/include/llvm/Analysis/DemandedBits.h
@@ -21,6 +21,7 @@
 #ifndef LLVM_ANALYSIS_DEMANDEDBITS_H
 #define LLVM_ANALYSIS_DEMANDEDBITS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -51,29 +52,29 @@ class DemandedBits {
   ///
   /// Instructions that do not have integer or vector of integer type are
   /// accepted, but will always produce a mask with all bits set.
-  APInt getDemandedBits(Instruction *I);
+  LLVM_ABI APInt getDemandedBits(Instruction *I);
 
   /// Return the bits demanded from use U.
-  APInt getDemandedBits(Use *U);
+  LLVM_ABI APInt getDemandedBits(Use *U);
 
   /// Return true if, during analysis, I could not be reached.
-  bool isInstructionDead(Instruction *I);
+  LLVM_ABI bool isInstructionDead(Instruction *I);
 
   /// Return whether this use is dead by means of not having any demanded bits.
-  bool isUseDead(Use *U);
+  LLVM_ABI bool isUseDead(Use *U);
 
-  void print(raw_ostream &OS);
+  LLVM_ABI void print(raw_ostream &OS);
 
   /// Compute alive bits of one addition operand from alive output and known
   /// operand bits
-  static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
+  LLVM_ABI static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
                                            const APInt &AOut,
                                            const KnownBits &LHS,
                                            const KnownBits &RHS);
 
   /// Compute alive bits of one subtraction operand from alive output and known
   /// operand bits
-  static APInt determineLiveOperandBitsSub(unsigned OperandNo,
+  LLVM_ABI static APInt determineLiveOperandBitsSub(unsigned OperandNo,
                                            const APInt &AOut,
                                            const KnownBits &LHS,
                                            const KnownBits &RHS);
@@ -111,7 +112,7 @@ class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
 
   /// Run the analysis pass over a function and produce demanded bits
   /// information.
-  DemandedBits run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI DemandedBits run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Printer pass for DemandedBits
@@ -121,7 +122,7 @@ class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
 public:
   explicit DemandedBitsPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
diff --git a/llvm/include/llvm/Analysis/DependenceAnalysis.h b/llvm/include/llvm/Analysis/DependenceAnalysis.h
index 6b715ab62331e..8950f704c8851 100644
--- a/llvm/include/llvm/Analysis/DependenceAnalysis.h
+++ b/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -39,6 +39,7 @@
 #ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
 #define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/Instructions.h"
@@ -67,7 +68,7 @@ namespace llvm {
   /// if successor edges for its source instruction. These sets are represented
   /// as singly-linked lists, with the "next" fields stored in the dependence
   /// itself.
-  class Dependence {
+  class LLVM_ABI Dependence {
   protected:
     Dependence(Dependence &&) = default;
     Dependence &operator=(Dependence &&) = default;
@@ -228,7 +229,7 @@ namespace llvm {
   /// (for output, flow, and anti dependences), the dependence implies an
   /// ordering, where the source must precede the destination; in contrast,
   /// input dependences are unordered.
-  class FullDependence final : public Dependence {
+  class LLVM_ABI FullDependence final : public Dependence {
   public:
     FullDependence(Instruction *Source, Instruction *Destination,
                    const SCEVUnionPredicate &Assumes,
@@ -303,7 +304,7 @@ namespace llvm {
         : AA(AA), SE(SE), LI(LI), F(F) {}
 
     /// Handle transitive invalidation when the cached analysis results go away.
-    bool invalidate(Function &F, const PreservedAnalyses &PA,
+    LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                     FunctionAnalysisManager::Invalidator &Inv);
 
     /// depends - Tests for a dependence between the Src and Dst instructions.
@@ -313,7 +314,7 @@ namespace llvm {
     /// solved at compilation time. By default UnderRuntimeAssumptions is false
     /// for a safe approximation of the dependence relation that does not
     /// require runtime checks.
-    std::unique_ptr<Dependence> depends(Instruction *Src, Instruction *Dst,
+    LLVM_ABI std::unique_ptr<Dependence> depends(Instruction *Src, Instruction *Dst,
                                         bool UnderRuntimeAssumptions = false);
 
     /// getSplitIteration - Give a dependence that's splittable at some
@@ -356,13 +357,13 @@ namespace llvm {
     ///
     /// breaks the dependence and allows us to vectorize/parallelize
     /// both loops.
-    const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
+    LLVM_ABI const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
 
     Function *getFunction() const { return F; }
 
     /// getRuntimeAssumptions - Returns all the runtime assumptions under which
     /// the dependence test is valid.
-    SCEVUnionPredicate getRuntimeAssumptions() const;
+    LLVM_ABI SCEVUnionPredicate getRuntimeAssumptions() const;
 
   private:
     AAResults *AA;
@@ -442,50 +443,50 @@ namespace llvm {
 
       /// getX - If constraint is a point <X, Y>, returns X.
       /// Otherwise assert.
-      const SCEV *getX() const;
+      LLVM_ABI const SCEV *getX() const;
 
       /// getY - If constraint is a point <X, Y>, returns Y.
       /// Otherwise assert.
-      const SCEV *getY() const;
+      LLVM_ABI const SCEV *getY() const;
 
       /// getA - If constraint is a line AX + BY = C, returns A.
       /// Otherwise assert.
-      const SCEV *getA() const;
+      LLVM_ABI const SCEV *getA() const;
 
       /// getB - If constraint is a line AX + BY = C, returns B.
       /// Otherwise assert.
-      const SCEV *getB() const;
+      LLVM_ABI const SCEV *getB() const;
 
       /// getC - If constraint is a line AX + BY = C, returns C.
       /// Otherwise assert.
-      const SCEV *getC() const;
+      LLVM_ABI const SCEV *getC() const;
 
       /// getD - If constraint is a distance, returns D.
       /// Otherwise assert.
-      const SCEV *getD() const;
+      LLVM_ABI const SCEV *getD() const;
 
       /// getAssociatedLoop - Returns the loop associated with this constraint.
-      const Loop *getAssociatedLoop() const;
+      LLVM_ABI const Loop *getAssociatedLoop() const;
 
       /// setPoint - Change a constraint to Point.
-      void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
+      LLVM_ABI void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
 
       /// setLine - Change a constraint to Line.
-      void setLine(const SCEV *A, const SCEV *B,
+      LLVM_ABI void setLine(const SCEV *A, const SCEV *B,
                    const SCEV *C, const Loop *CurrentLoop);
 
       /// setDistance - Change a constraint to Distance.
-      void setDistance(const SCEV *D, const Loop *CurrentLoop);
+      LLVM_ABI void setDistance(const SCEV *D, const Loop *CurrentLoop);
 
       /// setEmpty - Change a constraint to Empty.
-      void setEmpty();
+      LLVM_ABI void setEmpty();
 
       /// setAny - Change a constraint to Any.
-      void setAny(ScalarEvolution *SE);
+      LLVM_ABI void setAny(ScalarEvolution *SE);
 
       /// dump - For debugging purposes. Dumps the constraint
       /// out to OS.
-      void dump(raw_ostream &OS) const;
+      LLVM_ABI void dump(raw_ostream &OS) const;
     };
 
     /// establishNestingLevels - Examines the loop nesting of the Src and Dst
@@ -989,7 +990,7 @@ namespace llvm {
   class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
   public:
     typedef DependenceInfo Result;
-    Result run(Function &F, FunctionAnalysisManager &FAM);
+    LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
 
   private:
     static AnalysisKey Key;
@@ -1003,7 +1004,7 @@ namespace llvm {
                                   bool NormalizeResults = false)
         : OS(OS), NormalizeResults(NormalizeResults) {}
 
-    PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+    LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
 
     static bool isRequired() { return true; }
 
@@ -1013,7 +1014,7 @@ namespace llvm {
   }; // class DependenceAnalysisPrinterPass
 
   /// Legacy pass manager pass to access dependence information
-  class DependenceAnalysisWrapperPass : public FunctionPass {
+  class LLVM_ABI DependenceAnalysisWrapperPass : public FunctionPass {
   public:
     static char ID; // Class identification, replacement for typeinfo
     DependenceAnalysisWrapperPass();
@@ -1030,7 +1031,7 @@ namespace llvm {
 
   /// createDependenceAnalysisPass - This creates an instance of the
   /// DependenceAnalysis wrapper pass.
-  FunctionPass *createDependenceAnalysisWrapperPass();
+  LLVM_ABI FunctionPass *createDependenceAnalysisWrapperPass();
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/DomPrinter.h b/llvm/include/llvm/Analysis/DomPrinter.h
index 80fdfcd56c366..e01317ff57b75 100644
--- a/llvm/include/llvm/Analysis/DomPrinter.h
+++ b/llvm/include/llvm/Analysis/DomPrinter.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_DOMPRINTER_H
 #define LLVM_ANALYSIS_DOMPRINTER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/DOTGraphTraitsPass.h"
 #include "llvm/Analysis/PostDominators.h"
 #include "llvm/IR/Dominators.h"
@@ -119,14 +120,14 @@ struct PostDomOnlyPrinter final
 
 namespace llvm {
   class FunctionPass;
-  FunctionPass *createDomPrinterWrapperPassPass();
-  FunctionPass *createDomOnlyPrinterWrapperPassPass();
-  FunctionPass *createDomViewerWrapperPassPass();
-  FunctionPass *createDomOnlyViewerWrapperPassPass();
-  FunctionPass *createPostDomPrinterWrapperPassPass();
-  FunctionPass *createPostDomOnlyPrinterWrapperPassPass();
-  FunctionPass *createPostDomViewerWrapperPassPass();
-  FunctionPass *createPostDomOnlyViewerWrapperPassPass();
+  LLVM_ABI FunctionPass *createDomPrinterWrapperPassPass();
+  LLVM_ABI FunctionPass *createDomOnlyPrinterWrapperPassPass();
+  LLVM_ABI FunctionPass *createDomViewerWrapperPassPass();
+  LLVM_ABI FunctionPass *createDomOnlyViewerWrapperPassPass();
+  LLVM_ABI FunctionPass *createPostDomPrinterWrapperPassPass();
+  LLVM_ABI FunctionPass *createPostDomOnlyPrinterWrapperPassPass();
+  LLVM_ABI FunctionPass *createPostDomViewerWrapperPassPass();
+  LLVM_ABI FunctionPass *createPostDomOnlyViewerWrapperPassPass();
 } // End llvm namespace
 
 #endif
diff --git a/llvm/include/llvm/Analysis/DomTreeUpdater.h b/llvm/include/llvm/Analysis/DomTreeUpdater.h
index 0386262ba2b65..f13db43a67977 100644
--- a/llvm/include/llvm/Analysis/DomTreeUpdater.h
+++ b/llvm/include/llvm/Analysis/DomTreeUpdater.h
@@ -66,7 +66,7 @@ class DomTreeUpdater
   /// all available trees are up-to-date. Assert if any instruction of DelBB is
   /// modified while awaiting deletion. When both DT and PDT are nullptrs, DelBB
   /// will be queued until flush() is called.
-  void deleteBB(BasicBlock *DelBB);
+  LLVM_ABI void deleteBB(BasicBlock *DelBB);
 
   /// Delete DelBB. DelBB will be removed from its Parent and
   /// erased from available trees if it exists. Then the callback will
@@ -76,13 +76,13 @@ class DomTreeUpdater
   /// all available trees are up-to-date. Assert if any instruction of DelBB is
   /// modified while awaiting deletion. Multiple callbacks can be queued for one
   /// DelBB under Lazy UpdateStrategy.
-  void callbackDeleteBB(BasicBlock *DelBB,
+  LLVM_ABI void callbackDeleteBB(BasicBlock *DelBB,
                         std::function<void(BasicBlock *)> Callback);
 
   ///@}
 
   /// Debug method to help view the internal state of this class.
-  LLVM_DUMP_METHOD void dump() const;
+  LLVM_ABI LLVM_DUMP_METHOD void dump() const;
 
 private:
   class CallBackOnDeletion final : public CallbackVH {
diff --git a/llvm/include/llvm/Analysis/EphemeralValuesCache.h b/llvm/include/llvm/Analysis/EphemeralValuesCache.h
index 2b50de9d22259..4d136490535ed 100644
--- a/llvm/include/llvm/Analysis/EphemeralValuesCache.h
+++ b/llvm/include/llvm/Analysis/EphemeralValuesCache.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_EPHEMERALVALUESCACHE_H
 #define LLVM_ANALYSIS_EPHEMERALVALUESCACHE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/PassManager.h"
 
@@ -30,7 +31,7 @@ class EphemeralValuesCache {
   AssumptionCache &AC;
   bool Collected = false;
 
-  void collectEphemeralValues();
+  LLVM_ABI void collectEphemeralValues();
 
 public:
   EphemeralValuesCache(Function &F, AssumptionCache &AC) : F(F), AC(AC) {}
@@ -52,7 +53,7 @@ class EphemeralValuesAnalysis
 
 public:
   using Result = EphemeralValuesCache;
-  Result run(Function &F, FunctionAnalysisManager &FAM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &FAM);
 };
 
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
index af72f6e0f90b1..2b5631265780f 100644
--- a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
+++ b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
 #define LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManager.h"
@@ -32,11 +33,11 @@ class FunctionPropertiesInfo {
   void reIncludeBB(const BasicBlock &BB);
 
 public:
-  static FunctionPropertiesInfo
+  LLVM_ABI static FunctionPropertiesInfo
   getFunctionPropertiesInfo(const Function &F, const DominatorTree &DT,
                             const LoopInfo &LI);
 
-  static FunctionPropertiesInfo
+  LLVM_ABI static FunctionPropertiesInfo
   getFunctionPropertiesInfo(Function &F, FunctionAnalysisManager &FAM);
 
   bool operator==(const FunctionPropertiesInfo &FPI) const {
@@ -47,7 +48,7 @@ class FunctionPropertiesInfo {
     return !(*this == FPI);
   }
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   /// Number of basic blocks
   int64_t BasicBlockCount = 0;
@@ -143,11 +144,11 @@ class FunctionPropertiesAnalysis
     : public AnalysisInfoMixin<FunctionPropertiesAnalysis> {
 
 public:
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   using Result = const FunctionPropertiesInfo;
 
-  FunctionPropertiesInfo run(Function &F, FunctionAnalysisManager &FAM);
+  LLVM_ABI FunctionPropertiesInfo run(Function &F, FunctionAnalysisManager &FAM);
 };
 
 /// Printer pass for the FunctionPropertiesAnalysis results.
@@ -158,7 +159,7 @@ class FunctionPropertiesPrinterPass
 public:
   explicit FunctionPropertiesPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -171,9 +172,9 @@ class FunctionPropertiesPrinterPass
 /// inlining.
 class FunctionPropertiesUpdater {
 public:
-  FunctionPropertiesUpdater(FunctionPropertiesInfo &FPI, CallBase &CB);
+  LLVM_ABI FunctionPropertiesUpdater(FunctionPropertiesInfo &FPI, CallBase &CB);
 
-  void finish(FunctionAnalysisManager &FAM) const;
+  LLVM_ABI void finish(FunctionAnalysisManager &FAM) const;
   bool finishAndTest(FunctionAnalysisManager &FAM) const {
     finish(FAM);
     return isUpdateValid(Caller, FPI, FAM);
@@ -184,7 +185,7 @@ class FunctionPropertiesUpdater {
   BasicBlock &CallSiteBB;
   Function &Caller;
 
-  static bool isUpdateValid(Function &F, const FunctionPropertiesInfo &FPI,
+  LLVM_ABI static bool isUpdateValid(Function &F, const FunctionPropertiesInfo &FPI,
                             FunctionAnalysisManager &FAM);
 
   DominatorTree &getUpdatedDominatorTree(FunctionAnalysisManager &FAM) const;
diff --git a/llvm/include/llvm/Analysis/GlobalsModRef.h b/llvm/include/llvm/Analysis/GlobalsModRef.h
index 36a95e095aaa5..519da4cfe0272 100644
--- a/llvm/include/llvm/Analysis/GlobalsModRef.h
+++ b/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_GLOBALSMODREF_H
 #define LLVM_ANALYSIS_GLOBALSMODREF_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
@@ -56,7 +57,7 @@ class GlobalsAAResult : public AAResultBase {
   DenseMap<const Function *, unsigned> FunctionToSCCMap;
 
   /// Handle to clear this analysis on deletion of values.
-  struct DeletionCallbackHandle final : CallbackVH {
+  struct LLVM_ABI DeletionCallbackHandle final : CallbackVH {
     GlobalsAAResult *GAR;
     std::list<DeletionCallbackHandle>::iterator I;
 
@@ -79,13 +80,13 @@ class GlobalsAAResult : public AAResultBase {
   friend struct RecomputeGlobalsAAPass;
 
 public:
-  GlobalsAAResult(GlobalsAAResult &&Arg);
-  ~GlobalsAAResult();
+  LLVM_ABI GlobalsAAResult(GlobalsAAResult &&Arg);
+  LLVM_ABI ~GlobalsAAResult();
 
-  bool invalidate(Module &M, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &);
 
-  static GlobalsAAResult
+  LLVM_ABI static GlobalsAAResult
   analyzeModule(Module &M,
                 std::function<const TargetLibraryInfo &(Function &F)> GetTLI,
                 CallGraph &CG);
@@ -93,18 +94,18 @@ class GlobalsAAResult : public AAResultBase {
   //------------------------------------------------
   // Implement the AliasAnalysis API
   //
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI);
 
   using AAResultBase::getModRefInfo;
-  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
 
   using AAResultBase::getMemoryEffects;
   /// getMemoryEffects - Return the behavior of the specified function if
   /// called from the specified call site.  The call site may be null in which
   /// case the most generic behavior of this function should be returned.
-  MemoryEffects getMemoryEffects(const Function *F);
+  LLVM_ABI MemoryEffects getMemoryEffects(const Function *F);
 
 private:
   FunctionInfo *getFunctionInfo(const Function *F);
@@ -132,15 +133,15 @@ class GlobalsAA : public AnalysisInfoMixin<GlobalsAA> {
 public:
   typedef GlobalsAAResult Result;
 
-  GlobalsAAResult run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI GlobalsAAResult run(Module &M, ModuleAnalysisManager &AM);
 };
 
 struct RecomputeGlobalsAAPass : PassInfoMixin<RecomputeGlobalsAAPass> {
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the GlobalsAAResult object.
-class GlobalsAAWrapperPass : public ModulePass {
+class LLVM_ABI GlobalsAAWrapperPass : public ModulePass {
   std::unique_ptr<GlobalsAAResult> Result;
 
 public:
@@ -161,7 +162,7 @@ class GlobalsAAWrapperPass : public ModulePass {
 // createGlobalsAAWrapperPass - This pass provides alias and mod/ref info for
 // global values that do not have their addresses taken.
 //
-ModulePass *createGlobalsAAWrapperPass();
+LLVM_ABI ModulePass *createGlobalsAAWrapperPass();
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/HeatUtils.h b/llvm/include/llvm/Analysis/HeatUtils.h
index 9ecbbaf318da9..38266fbf8c44b 100644
--- a/llvm/include/llvm/Analysis/HeatUtils.h
+++ b/llvm/include/llvm/Analysis/HeatUtils.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_HEATUTILS_H
 #define LLVM_ANALYSIS_HEATUTILS_H
 
+#include "llvm/Support/Compiler.h"
 #include <cstdint>
 #include <string>
 
@@ -22,17 +23,17 @@ class BlockFrequencyInfo;
 class Function;
 
 // Returns number of calls of calledFunction by callerFunction.
-uint64_t
+LLVM_ABI uint64_t
 getNumOfCalls(Function &callerFunction, Function &calledFunction);
 
 // Returns the maximum frequency of a BB in a function.
-uint64_t getMaxFreq(const Function &F, const BlockFrequencyInfo *BFI);
+LLVM_ABI uint64_t getMaxFreq(const Function &F, const BlockFrequencyInfo *BFI);
 
 // Calculates heat color based on current and maximum frequencies.
-std::string getHeatColor(uint64_t freq, uint64_t maxFreq);
+LLVM_ABI std::string getHeatColor(uint64_t freq, uint64_t maxFreq);
 
 // Calculates heat color based on percent of "hotness".
-std::string getHeatColor(double percent);
+LLVM_ABI std::string getHeatColor(double percent);
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
index eb1827850567d..19b29afb4f6a6 100644
--- a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
+++ b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
@@ -49,6 +49,7 @@
 #ifndef LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
 #define LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
@@ -168,27 +169,27 @@ struct IRInstructionData
   /// operands. This extra information allows for similarity matching to make
   /// assertions that allow for more flexibility when checking for whether an
   /// Instruction performs the same operation.
-  IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
-  IRInstructionData(IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData(IRInstructionDataList &IDL);
 
   /// Fills data stuctures for IRInstructionData when it is constructed from a
   // reference or a pointer.
-  void initializeInstruction();
+  LLVM_ABI void initializeInstruction();
 
   /// Get the predicate that the compare instruction is using for hashing the
   /// instruction. the IRInstructionData must be wrapping a CmpInst.
-  CmpInst::Predicate getPredicate() const;
+  LLVM_ABI CmpInst::Predicate getPredicate() const;
 
   /// Get the callee name that the call instruction is using for hashing the
   /// instruction. The IRInstructionData must be wrapping a CallInst.
-  StringRef getCalleeName() const;
+  LLVM_ABI StringRef getCalleeName() const;
 
   /// A function that swaps the predicates to their less than form if they are
   /// in a greater than form. Otherwise, the predicate is unchanged.
   ///
   /// \param CI - The comparison operation to find a consistent preidcate for.
   /// \return the consistent comparison predicate. 
-  static CmpInst::Predicate predicateForConsistency(CmpInst *CI);
+  LLVM_ABI static CmpInst::Predicate predicateForConsistency(CmpInst *CI);
 
   /// For an IRInstructionData containing a branch, finds the
   /// relative distances from the source basic block to the target by taking
@@ -197,7 +198,7 @@ struct IRInstructionData
   ///
   /// \param BasicBlockToInteger - The mapping of basic blocks to their location
   /// in the module.
-  void
+  LLVM_ABI void
   setBranchSuccessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);
 
   /// For an IRInstructionData containing a CallInst, set the function name
@@ -213,7 +214,7 @@ struct IRInstructionData
   ///
   /// \param MatchByName - A flag to mark whether we are using the called
   /// function name as a differentiating parameter.
-  void setCalleeName(bool MatchByName = true);
+  LLVM_ABI void setCalleeName(bool MatchByName = true);
 
   /// For an IRInstructionData containing a PHINode, finds the
   /// relative distances from the incoming basic block to the current block by
@@ -222,13 +223,13 @@ struct IRInstructionData
   ///
   /// \param BasicBlockToInteger - The mapping of basic blocks to their location
   /// in the module.
-  void
+  LLVM_ABI void
   setPHIPredecessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);
 
   /// Get the BasicBlock based operands for PHINodes and BranchInsts.
   ///
   /// \returns A list of relevant BasicBlocks.
-  ArrayRef<Value *> getBlockOperVals();
+  LLVM_ABI ArrayRef<Value *> getBlockOperVals();
 
   /// Hashes \p Value based on its opcode, types, and operand types.
   /// Two IRInstructionData instances produce the same hash when they perform
@@ -309,7 +310,7 @@ struct IRInstructionDataList
 /// \param B - The second IRInstructionData class to compare
 /// \returns true if \p A and \p B are similar enough to be mapped to the same
 /// value.
-bool isClose(const IRInstructionData &A, const IRInstructionData &B);
+LLVM_ABI bool isClose(const IRInstructionData &A, const IRInstructionData &B);
 
 struct IRInstructionDataTraits : DenseMapInfo<IRInstructionData *> {
   static inline IRInstructionData *getEmptyKey() { return nullptr; }
@@ -427,7 +428,7 @@ struct IRInstructionMapper {
   /// \param IDL - The InstructionDataList that the IRInstructionData is
   /// inserted into.
   /// \returns An allocated IRInstructionData struct.
-  IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
+  LLVM_ABI IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
                                                IRInstructionDataList &IDL);
 
   /// Get an empty allocated IRInstructionData struct using the
@@ -436,12 +437,12 @@ struct IRInstructionMapper {
   /// \param IDL - The InstructionDataList that the IRInstructionData is
   /// inserted into.
   /// \returns An allocated IRInstructionData struct.
-  IRInstructionData *allocateIRInstructionData(IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData *allocateIRInstructionData(IRInstructionDataList &IDL);
 
   /// Get an allocated IRInstructionDataList object using the IDLAllocator.
   ///
   /// \returns An allocated IRInstructionDataList object.
-  IRInstructionDataList *allocateIRInstructionDataList();
+  LLVM_ABI IRInstructionDataList *allocateIRInstructionDataList();
 
   IRInstructionDataList *IDL = nullptr;
 
@@ -470,7 +471,7 @@ struct IRInstructionMapper {
   /// \param [in] BB - The BasicBlock to be mapped to integers.
   /// \param [in,out] InstrList - Vector of IRInstructionData to append to.
   /// \param [in,out] IntegerMapping - Vector of unsigned integers to append to.
-  void convertToUnsignedVec(BasicBlock &BB,
+  LLVM_ABI void convertToUnsignedVec(BasicBlock &BB,
                             std::vector<IRInstructionData *> &InstrList,
                             std::vector<unsigned> &IntegerMapping);
 
@@ -481,7 +482,7 @@ struct IRInstructionMapper {
   /// append to.
   /// \param [in,out] InstrListForBB - Vector of InstructionData to append to.
   /// \returns The integer \p It was mapped to.
-  unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
+  LLVM_ABI unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
                               std::vector<unsigned> &IntegerMappingForBB,
                               std::vector<IRInstructionData *> &InstrListForBB);
 
@@ -494,7 +495,7 @@ struct IRInstructionMapper {
   /// \param End - true if creating a dummy IRInstructionData at the end of a
   /// basic block.
   /// \returns The integer \p It was mapped to.
-  unsigned mapToIllegalUnsigned(
+  LLVM_ABI unsigned mapToIllegalUnsigned(
       BasicBlock::iterator &It, std::vector<unsigned> &IntegerMappingForBB,
       std::vector<IRInstructionData *> &InstrListForBB, bool End = false);
 
@@ -682,7 +683,7 @@ class IRSimilarityCandidate {
   /// \param Len - The length of the region.
   /// \param FirstInstIt - The starting IRInstructionData of the region.
   /// \param LastInstIt - The ending IRInstructionData of the region.
-  IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
+  LLVM_ABI IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
                         IRInstructionData *FirstInstIt,
                         IRInstructionData *LastInstIt);
 
@@ -690,14 +691,14 @@ class IRSimilarityCandidate {
   /// \param B - The second IRInstructionCandidate to compare.
   /// \returns True when every IRInstructionData in \p A is similar to every
   /// IRInstructionData in \p B.
-  static bool isSimilar(const IRSimilarityCandidate &A,
+  LLVM_ABI static bool isSimilar(const IRSimilarityCandidate &A,
                         const IRSimilarityCandidate &B);
 
   /// \param [in] A - The first IRInstructionCandidate to compare.
   /// \param [in] B - The second IRInstructionCandidate to compare.
   /// \returns True when every IRInstructionData in \p A is structurally similar
   /// to \p B.
-  static bool compareStructure(const IRSimilarityCandidate &A,
+  LLVM_ABI static bool compareStructure(const IRSimilarityCandidate &A,
                                const IRSimilarityCandidate &B);
 
   /// \param [in] A - The first IRInstructionCandidate to compare.
@@ -708,7 +709,7 @@ class IRSimilarityCandidate {
   /// candidate \p B to candidate \A.
   /// \returns True when every IRInstructionData in \p A is structurally similar
   /// to \p B.
-  static bool
+  LLVM_ABI static bool
   compareStructure(const IRSimilarityCandidate &A,
                    const IRSimilarityCandidate &B,
                    DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingA,
@@ -750,7 +751,7 @@ class IRSimilarityCandidate {
   /// \param B - The second IRInstructionCandidate, operand values, and current
   /// operand mappings to compare.
   /// \returns true if the IRSimilarityCandidates operands are compatible.
-  static bool compareNonCommutativeOperandMapping(OperandMapping A,
+  LLVM_ABI static bool compareNonCommutativeOperandMapping(OperandMapping A,
                                                   OperandMapping B);
 
   /// Compare the operands in \p A and \p B and check that the current mapping
@@ -762,7 +763,7 @@ class IRSimilarityCandidate {
   /// \param B - The second IRInstructionCandidate, operand values, and current
   /// operand mappings to compare.
   /// \returns true if the IRSimilarityCandidates operands are compatible.
-  static bool compareCommutativeOperandMapping(OperandMapping A,
+  LLVM_ABI static bool compareCommutativeOperandMapping(OperandMapping A,
                                                OperandMapping B);
 
   /// Compare the GVN of the assignment value in corresponding instructions in
@@ -778,7 +779,7 @@ class IRSimilarityCandidate {
   /// \param [in,out] ValueNumberMappingB - A mapping of value numbers from 
   /// candidate \p B to candidate \A.
   /// \returns true if the IRSimilarityCandidates assignments are compatible.
-  static bool compareAssignmentMapping(
+  LLVM_ABI static bool compareAssignmentMapping(
       const unsigned InstValA, const unsigned &InstValB,
       DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingA,
       DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingB);
@@ -818,7 +819,7 @@ class IRSimilarityCandidate {
   /// \param B - The second IRInstructionCandidate, relative location value,
   /// and incoming block.
   /// \returns true if the relative locations match.
-  static bool checkRelativeLocations(RelativeLocMapping A,
+  LLVM_ABI static bool checkRelativeLocations(RelativeLocMapping A,
                                      RelativeLocMapping B);
 
   /// Create a mapping from the value numbering to a different separate set of
@@ -828,7 +829,7 @@ class IRSimilarityCandidate {
   ///
   /// \param [in, out] CurrCand - The IRSimilarityCandidate to create a
   /// canonical numbering for.
-  static void createCanonicalMappingFor(IRSimilarityCandidate &CurrCand);
+  LLVM_ABI static void createCanonicalMappingFor(IRSimilarityCandidate &CurrCand);
 
   /// Create a mapping for the value numbering of the calling
   /// IRSimilarityCandidate, to a different separate set of numbers, based on
@@ -843,7 +844,7 @@ class IRSimilarityCandidate {
   /// to \p SourceCand.
   /// \param FromSourceMapping - The mapping of value numbers from \p SoureCand
   /// to this candidate.
-  void createCanonicalRelationFrom(
+  LLVM_ABI void createCanonicalRelationFrom(
       IRSimilarityCandidate &SourceCand,
       DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
       DenseMap<unsigned, DenseSet<unsigned>> &FromSourceMapping);
@@ -866,7 +867,7 @@ class IRSimilarityCandidate {
   /// to \p SourceCand.
   /// \param FromSourceMapping - The mapping of value numbers from \p SoureCand
   /// to this candidate.
-  void createCanonicalRelationFrom(
+  LLVM_ABI void createCanonicalRelationFrom(
       IRSimilarityCandidate &SourceCand,
       DenseMap<unsigned, unsigned> &OneToOne,
       DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
@@ -886,7 +887,7 @@ class IRSimilarityCandidate {
   /// \p SourceCand.
   /// \param TargetCandLarge -  The IRSimilarityCandidate fully containing
   /// this Candidate.
-  void createCanonicalRelationFrom(
+  LLVM_ABI void createCanonicalRelationFrom(
       IRSimilarityCandidate &SourceCand,
       IRSimilarityCandidate &SourceCandLarge,
       IRSimilarityCandidate &TargetCandLarge);
@@ -918,7 +919,7 @@ class IRSimilarityCandidate {
   ///
   /// \returns true if the IRSimilarityCandidates do not have overlapping
   /// instructions.
-  static bool overlap(const IRSimilarityCandidate &A,
+  LLVM_ABI static bool overlap(const IRSimilarityCandidate &A,
                       const IRSimilarityCandidate &B);
 
   /// \returns the number of instructions in this Candidate.
@@ -1088,7 +1089,7 @@ class IRSimilarityIdentifier {
   //
   // \param [in] Modules - the modules to analyze.
   // \returns The groups of similarity ranges found in the modules.
-  SimilarityGroupList &
+  LLVM_ABI SimilarityGroupList &
   findSimilarity(ArrayRef<std::unique_ptr<Module>> Modules);
 
   // Find the IRSimilarityCandidates in the given Module grouped by structural
@@ -1096,7 +1097,7 @@ class IRSimilarityIdentifier {
   //
   // \param [in] M - the module to analyze.
   // \returns The groups of similarity ranges found in the module.
-  SimilarityGroupList &findSimilarity(Module &M);
+  LLVM_ABI SimilarityGroupList &findSimilarity(Module &M);
 
   // Clears \ref SimilarityCandidates if it is already filled by a previous run.
   void resetSimilarityCandidates() {
@@ -1156,7 +1157,7 @@ class IRSimilarityIdentifier {
 
 /// An analysis pass based on legacy pass manager that runs and returns
 /// IRSimilarityIdentifier run on the Module.
-class IRSimilarityIdentifierWrapperPass : public ModulePass {
+class LLVM_ABI IRSimilarityIdentifierWrapperPass : public ModulePass {
   std::unique_ptr<IRSimilarity::IRSimilarityIdentifier> IRSI;
 
 public:
@@ -1180,7 +1181,7 @@ class IRSimilarityAnalysis : public AnalysisInfoMixin<IRSimilarityAnalysis> {
 public:
   typedef IRSimilarity::IRSimilarityIdentifier Result;
 
-  Result run(Module &M, ModuleAnalysisManager &);
+  LLVM_ABI Result run(Module &M, ModuleAnalysisManager &);
 
 private:
   friend AnalysisInfoMixin<IRSimilarityAnalysis>;
@@ -1194,7 +1195,7 @@ class IRSimilarityAnalysisPrinterPass
 
 public:
   explicit IRSimilarityAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
diff --git a/llvm/include/llvm/Analysis/IVDescriptors.h b/llvm/include/llvm/Analysis/IVDescriptors.h
index 140edff13a67f..fe4003aa054a5 100644
--- a/llvm/include/llvm/Analysis/IVDescriptors.h
+++ b/llvm/include/llvm/Analysis/IVDescriptors.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_IVDESCRIPTORS_H
 #define LLVM_ANALYSIS_IVDESCRIPTORS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/IntrinsicInst.h"
@@ -131,24 +132,24 @@ class RecurrenceDescriptor {
   /// advances the instruction pointer 'I' from the compare instruction to the
   /// select instruction and stores this pointer in 'PatternLastInst' member of
   /// the returned struct.
-  static InstDesc isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I,
+  LLVM_ABI static InstDesc isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I,
                                     RecurKind Kind, InstDesc &Prev,
                                     FastMathFlags FuncFMF, ScalarEvolution *SE);
 
   /// Returns true if instruction I has multiple uses in Insts
-  static bool hasMultipleUsesOf(Instruction *I,
+  LLVM_ABI static bool hasMultipleUsesOf(Instruction *I,
                                 SmallPtrSetImpl<Instruction *> &Insts,
                                 unsigned MaxNumUses);
 
   /// Returns true if all uses of the instruction I is within the Set.
-  static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+  LLVM_ABI static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
 
   /// Returns a struct describing if the instruction is a llvm.(s/u)(min/max),
   /// llvm.minnum/maxnum or a Select(ICmp(X, Y), X, Y) pair of instructions
   /// corresponding to a min(X, Y) or max(X, Y), matching the recurrence kind \p
   /// Kind. \p Prev specifies the description of an already processed select
   /// instruction, so its corresponding cmp can be matched to it.
-  static InstDesc isMinMaxPattern(Instruction *I, RecurKind Kind,
+  LLVM_ABI static InstDesc isMinMaxPattern(Instruction *I, RecurKind Kind,
                                   const InstDesc &Prev);
 
   /// Returns a struct describing whether the instruction is either a
@@ -157,7 +158,7 @@ class RecurrenceDescriptor {
   /// where one of (X, Y) is a loop invariant integer and the other is a PHI
   /// value. \p Prev specifies the description of an already processed select
   /// instruction, so its corresponding cmp can be matched to it.
-  static InstDesc isAnyOfPattern(Loop *Loop, PHINode *OrigPhi, Instruction *I,
+  LLVM_ABI static InstDesc isAnyOfPattern(Loop *Loop, PHINode *OrigPhi, Instruction *I,
                                  InstDesc &Prev);
 
   /// Returns a struct describing whether the instruction is either a
@@ -167,21 +168,21 @@ class RecurrenceDescriptor {
   /// other is a PHI value.
   // TODO: Support non-monotonic variable. FindLast does not need be restricted
   // to increasing loop induction variables.
-  static InstDesc isFindLastIVPattern(Loop *TheLoop, PHINode *OrigPhi,
+  LLVM_ABI static InstDesc isFindLastIVPattern(Loop *TheLoop, PHINode *OrigPhi,
                                       Instruction *I, ScalarEvolution &SE);
 
   /// Returns a struct describing if the instruction is a
   /// Select(FCmp(X, Y), (Z = X op PHINode), PHINode) instruction pattern.
-  static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
+  LLVM_ABI static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
 
   /// Returns the opcode corresponding to the RecurrenceKind.
-  static unsigned getOpcode(RecurKind Kind);
+  LLVM_ABI static unsigned getOpcode(RecurKind Kind);
 
   /// Returns true if Phi is a reduction of type Kind and adds it to the
   /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
   /// non-null, the minimal bit width needed to compute the reduction will be
   /// computed.
-  static bool
+  LLVM_ABI static bool
   AddReductionVar(PHINode *Phi, RecurKind Kind, Loop *TheLoop,
                   FastMathFlags FuncFMF, RecurrenceDescriptor &RedDes,
                   DemandedBits *DB = nullptr, AssumptionCache *AC = nullptr,
@@ -192,7 +193,7 @@ class RecurrenceDescriptor {
   /// non-null, the minimal bit width needed to compute the reduction will be
   /// computed. If \p SE is non-null, store instructions to loop invariant
   /// addresses are processed.
-  static bool
+  LLVM_ABI static bool
   isReductionPHI(PHINode *Phi, Loop *TheLoop, RecurrenceDescriptor &RedDes,
                  DemandedBits *DB = nullptr, AssumptionCache *AC = nullptr,
                  DominatorTree *DT = nullptr, ScalarEvolution *SE = nullptr);
@@ -206,7 +207,7 @@ class RecurrenceDescriptor {
   /// recurrence and so on). Note that this function optimistically assumes that
   /// uses of the recurrence can be re-ordered if necessary and users need to
   /// check and perform the re-ordering.
-  static bool isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
+  LLVM_ABI static bool isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
                                      DominatorTree *DT);
 
   RecurKind getRecurrenceKind() const { return Kind; }
@@ -227,10 +228,10 @@ class RecurrenceDescriptor {
   Instruction *getExactFPMathInst() const { return ExactFPMathInst; }
 
   /// Returns true if the recurrence kind is an integer kind.
-  static bool isIntegerRecurrenceKind(RecurKind Kind);
+  LLVM_ABI static bool isIntegerRecurrenceKind(RecurKind Kind);
 
   /// Returns true if the recurrence kind is a floating point kind.
-  static bool isFloatingPointRecurrenceKind(RecurKind Kind);
+  LLVM_ABI static bool isFloatingPointRecurrenceKind(RecurKind Kind);
 
   /// Returns true if the recurrence kind is an integer min/max kind.
   static bool isIntMinMaxRecurrenceKind(RecurKind Kind) {
@@ -292,7 +293,7 @@ class RecurrenceDescriptor {
 
   /// Attempts to find a chain of operations from Phi to LoopExitInst that can
   /// be treated as a set of reductions instructions for in-loop reductions.
-  SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
+  LLVM_ABI SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
                                                     Loop *L) const;
 
   /// Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
@@ -352,7 +353,7 @@ class InductionDescriptor {
   InductionKind getKind() const { return IK; }
   const SCEV *getStep() const { return Step; }
   BinaryOperator *getInductionBinOp() const { return InductionBinOp; }
-  ConstantInt *getConstIntStepValue() const;
+  LLVM_ABI ConstantInt *getConstIntStepValue() const;
 
   /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
   /// induction, the induction descriptor \p D will contain the data describing
@@ -363,7 +364,7 @@ class InductionDescriptor {
   /// analysis, it can be passed through \p Expr. If the def-use chain
   /// associated with the phi includes casts (that we know we can ignore
   /// under proper runtime checks), they are passed through \p CastsToIgnore.
-  static bool
+  LLVM_ABI static bool
   isInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
                  InductionDescriptor &D, const SCEV *Expr = nullptr,
                  SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);
@@ -371,7 +372,7 @@ class InductionDescriptor {
   /// Returns true if \p Phi is a floating point induction in the loop \p L.
   /// If \p Phi is an induction, the induction descriptor \p D will contain
   /// the data describing this induction.
-  static bool isFPInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
+  LLVM_ABI static bool isFPInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
                                InductionDescriptor &D);
 
   /// Returns true if \p Phi is a loop \p L induction, in the context associated
@@ -380,7 +381,7 @@ class InductionDescriptor {
   /// induction.
   /// If \p Phi is an induction, \p D will contain the data describing this
   /// induction.
-  static bool isInductionPHI(PHINode *Phi, const Loop *L,
+  LLVM_ABI static bool isInductionPHI(PHINode *Phi, const Loop *L,
                              PredicatedScalarEvolution &PSE,
                              InductionDescriptor &D, bool Assume = false);
 
diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
index 18fb7377ff667..d635e341d26d2 100644
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_INLINEADVISOR_H
 #define LLVM_ANALYSIS_INLINEADVISOR_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/CGSCCPassManager.h"
 #include "llvm/Analysis/InlineCost.h"
 #include "llvm/Analysis/LazyCallGraph.h"
@@ -61,7 +62,7 @@ struct InlineContext {
   InlinePass Pass;
 };
 
-std::string AnnotateInlinePassName(InlineContext IC);
+LLVM_ABI std::string AnnotateInlinePassName(InlineContext IC);
 
 class InlineAdvisor;
 /// Capture state between an inlining decision having had been made, and
@@ -73,7 +74,7 @@ class InlineAdvisor;
 /// obligations.
 class InlineAdvice {
 public:
-  InlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
+  LLVM_ABI InlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
                OptimizationRemarkEmitter &ORE, bool IsInliningRecommended);
 
   InlineAdvice(InlineAdvice &&) = delete;
@@ -87,12 +88,12 @@ class InlineAdvice {
   /// behavior by implementing the corresponding record*Impl.
   ///
   /// Call after inlining succeeded, and did not result in deleting the callee.
-  void recordInlining();
+  LLVM_ABI void recordInlining();
 
   /// Call after inlining succeeded, and results in the callee being
   /// delete-able, meaning, it has no more users, and will be cleaned up
   /// subsequently.
-  void recordInliningWithCalleeDeleted();
+  LLVM_ABI void recordInliningWithCalleeDeleted();
 
   /// Call after the decision for a call site was to not inline.
   void recordUnsuccessfulInlining(const InlineResult &Result) {
@@ -140,7 +141,7 @@ class InlineAdvice {
   bool Recorded = false;
 };
 
-class DefaultInlineAdvice : public InlineAdvice {
+class LLVM_ABI DefaultInlineAdvice : public InlineAdvice {
 public:
   DefaultInlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
                       std::optional<InlineCost> OIC,
@@ -160,7 +161,7 @@ class DefaultInlineAdvice : public InlineAdvice {
 };
 
 /// Interface for deciding whether to inline a call site or not.
-class InlineAdvisor {
+class LLVM_ABI InlineAdvisor {
 public:
   InlineAdvisor(InlineAdvisor &&) = delete;
   virtual ~InlineAdvisor();
@@ -226,7 +227,7 @@ class InlineAdvisor {
 /// The default (manual heuristics) implementation of the InlineAdvisor. This
 /// implementation does not need to keep state between inliner pass runs, and is
 /// reusable as-is for inliner pass test scenarios, as well as for regular use.
-class DefaultInlineAdvisor : public InlineAdvisor {
+class LLVM_ABI DefaultInlineAdvisor : public InlineAdvisor {
 public:
   DefaultInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
                        InlineParams Params, InlineContext IC)
@@ -279,7 +280,7 @@ class DefaultInlineAdvisor : public InlineAdvisor {
 class PluginInlineAdvisorAnalysis
     : public AnalysisInfoMixin<PluginInlineAdvisorAnalysis> {
 public:
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   typedef InlineAdvisor *(*AdvisorFactory)(Module &M,
                                            FunctionAnalysisManager &FAM,
@@ -306,7 +307,7 @@ class PluginInlineAdvisorAnalysis
 /// needs to capture state right before inlining commences over a module.
 class InlineAdvisorAnalysis : public AnalysisInfoMixin<InlineAdvisorAnalysis> {
 public:
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
   InlineAdvisorAnalysis() = default;
   struct Result {
     Result(Module &M, ModuleAnalysisManager &MAM) : M(M), MAM(MAM) {}
@@ -317,7 +318,7 @@ class InlineAdvisorAnalysis : public AnalysisInfoMixin<InlineAdvisorAnalysis> {
       auto PAC = PA.getChecker<InlineAdvisorAnalysis>();
       return !PAC.preservedWhenStateless();
     }
-    bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
+    LLVM_ABI bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
                    const ReplayInlinerSettings &ReplaySettings,
                    InlineContext IC);
     InlineAdvisor *getAdvisor() const { return Advisor.get(); }
@@ -339,18 +340,18 @@ class InlineAdvisorAnalysisPrinterPass
 public:
   explicit InlineAdvisorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 
-  PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
                         LazyCallGraph &CG, CGSCCUpdateResult &UR);
   static bool isRequired() { return true; }
 };
 
-std::unique_ptr<InlineAdvisor>
+LLVM_ABI std::unique_ptr<InlineAdvisor>
 getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                       std::function<bool(CallBase &)> GetDefaultAdvice);
 
-std::unique_ptr<InlineAdvisor>
+LLVM_ABI std::unique_ptr<InlineAdvisor>
 getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                           std::function<bool(CallBase &)> GetDefaultAdvice);
 
@@ -361,32 +362,32 @@ getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
 /// CallSite. If we return the cost, we will emit an optimisation remark later
 /// using that cost, so we won't do so from this function. Return std::nullopt
 /// if inlining should not be attempted.
-std::optional<InlineCost>
+LLVM_ABI std::optional<InlineCost>
 shouldInline(CallBase &CB, TargetTransformInfo &CalleeTTI,
              function_ref<InlineCost(CallBase &CB)> GetInlineCost,
              OptimizationRemarkEmitter &ORE, bool EnableDeferral = true);
 
 /// Emit ORE message.
-void emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
+LLVM_ABI void emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
                      const BasicBlock *Block, const Function &Callee,
                      const Function &Caller, bool IsMandatory,
                      function_ref<void(OptimizationRemark &)> ExtraContext = {},
                      const char *PassName = nullptr);
 
 /// Emit ORE message based in cost (default heuristic).
-void emitInlinedIntoBasedOnCost(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
+LLVM_ABI void emitInlinedIntoBasedOnCost(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
                                 const BasicBlock *Block, const Function &Callee,
                                 const Function &Caller, const InlineCost &IC,
                                 bool ForProfileContext = false,
                                 const char *PassName = nullptr);
 
 /// Add location info to ORE message.
-void addLocationToRemarks(OptimizationRemark &Remark, DebugLoc DLoc);
+LLVM_ABI void addLocationToRemarks(OptimizationRemark &Remark, DebugLoc DLoc);
 
 /// Set the inline-remark attribute.
-void setInlineRemark(CallBase &CB, StringRef Message);
+LLVM_ABI void setInlineRemark(CallBase &CB, StringRef Message);
 
 /// Utility for extracting the inline cost message to a string.
-std::string inlineCostStr(const InlineCost &IC);
+LLVM_ABI std::string inlineCostStr(const InlineCost &IC);
 } // namespace llvm
 #endif // LLVM_ANALYSIS_INLINEADVISOR_H
diff --git a/llvm/include/llvm/Analysis/InlineCost.h b/llvm/include/llvm/Analysis/InlineCost.h
index 90ee75773957a..1dc00db103cf1 100644
--- a/llvm/include/llvm/Analysis/InlineCost.h
+++ b/llvm/include/llvm/Analysis/InlineCost.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_INLINECOST_H
 #define LLVM_ANALYSIS_INLINECOST_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/STLFunctionalExtras.h"
 #include "llvm/Analysis/InlineModelFeatureMaps.h"
@@ -45,7 +46,7 @@ const int OptMinSizeThreshold = 5;
 const int OptAggressiveThreshold = 250;
 
 // Various magic constants used to adjust heuristics.
-int getInstrCost();
+LLVM_ABI int getInstrCost();
 const int IndirectCallThreshold = 100;
 const int LoopPenalty = 25;
 const int ColdccPenalty = 2000;
@@ -239,16 +240,16 @@ struct InlineParams {
   std::optional<bool> AllowRecursiveCall = false;
 };
 
-std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);
+LLVM_ABI std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);
 
 /// Generate the parameters to tune the inline cost analysis based only on the
 /// commandline options.
-InlineParams getInlineParams();
+LLVM_ABI InlineParams getInlineParams();
 
 /// Generate the parameters to tune the inline cost analysis based on command
 /// line options. If -inline-threshold option is not explicitly passed,
 /// \p Threshold is used as the default threshold.
-InlineParams getInlineParams(int Threshold);
+LLVM_ABI InlineParams getInlineParams(int Threshold);
 
 /// Generate the parameters to tune the inline cost analysis based on command
 /// line options. If -inline-threshold option is not explicitly passed,
@@ -256,11 +257,11 @@ InlineParams getInlineParams(int Threshold);
 /// An \p OptLevel value above 3 is considered an aggressive optimization mode.
 /// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
 /// the -Oz flag.
-InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
+LLVM_ABI InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
 
 /// Return the cost associated with a callsite, including parameter passing
 /// and the call/return instruction.
-int getCallsiteCost(const TargetTransformInfo &TTI, const CallBase &Call,
+LLVM_ABI int getCallsiteCost(const TargetTransformInfo &TTI, const CallBase &Call,
                     const DataLayout &DL);
 
 /// Get an InlineCost object representing the cost of inlining this
@@ -274,7 +275,7 @@ int getCallsiteCost(const TargetTransformInfo &TTI, const CallBase &Call,
 ///
 /// Also note that calling this function *dynamically* computes the cost of
 /// inlining the callsite. It is an expensive, heavyweight call.
-InlineCost getInlineCost(
+LLVM_ABI InlineCost getInlineCost(
     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
@@ -288,7 +289,7 @@ InlineCost getInlineCost(
 /// pointer. This behaves exactly as the version with no explicit callee
 /// parameter in all other respects.
 //
-InlineCost getInlineCost(
+LLVM_ABI InlineCost getInlineCost(
     CallBase &Call, Function *Callee, const InlineParams &Params,
     TargetTransformInfo &CalleeTTI,
     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
@@ -304,7 +305,7 @@ InlineCost getInlineCost(
 /// directives or incompatibilities detectable without needing callee traversal.
 /// Otherwise returns std::nullopt, meaning that inlining should be decided
 /// based on other criteria (e.g. cost modeling).
-std::optional<InlineResult> getAttributeBasedInliningDecision(
+LLVM_ABI std::optional<InlineResult> getAttributeBasedInliningDecision(
     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
     function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
 
@@ -316,7 +317,7 @@ std::optional<InlineResult> getAttributeBasedInliningDecision(
 /// returns:
 /// - std::nullopt, if the inlining cannot happen (is illegal)
 /// - an integer, representing the cost.
-std::optional<int> getInliningCostEstimate(
+LLVM_ABI std::optional<int> getInliningCostEstimate(
     CallBase &Call, TargetTransformInfo &CalleeTTI,
     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
     function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
@@ -326,7 +327,7 @@ std::optional<int> getInliningCostEstimate(
 
 /// Get the expanded cost features. The features are returned unconditionally,
 /// even if inlining is impossible.
-std::optional<InlineCostFeatures> getInliningCostFeatures(
+LLVM_ABI std::optional<InlineCostFeatures> getInliningCostFeatures(
     CallBase &Call, TargetTransformInfo &CalleeTTI,
     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
     function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
@@ -335,7 +336,7 @@ std::optional<InlineCostFeatures> getInliningCostFeatures(
     OptimizationRemarkEmitter *ORE = nullptr);
 
 /// Minimal filter to detect invalid constructs for inlining.
-InlineResult isInlineViable(Function &Callee);
+LLVM_ABI InlineResult isInlineViable(Function &Callee);
 
 // This pass is used to annotate instructions during the inline process for
 // debugging and analysis. The main purpose of the pass is to see and test
@@ -346,7 +347,7 @@ struct InlineCostAnnotationPrinterPass
 
 public:
   explicit InlineCostAnnotationPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
   static bool isRequired() { return true; }
 };
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
index acb6c21e48038..5e36946fd8db1 100644
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/TensorSpec.h"
 
 #include <array>
@@ -153,13 +154,13 @@ inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
 constexpr size_t NumberOfFeatures =
     static_cast<size_t>(FeatureIndex::NumberOfFeatures);
 
-extern const std::vector<TensorSpec> FeatureMap;
+LLVM_ABI extern const std::vector<TensorSpec> FeatureMap;
 
-extern const char *const DecisionName;
-extern const TensorSpec InlineDecisionSpec;
-extern const char *const DefaultDecisionName;
-extern const TensorSpec DefaultDecisionSpec;
-extern const char *const RewardName;
+LLVM_ABI extern const char *const DecisionName;
+LLVM_ABI extern const TensorSpec InlineDecisionSpec;
+LLVM_ABI extern const char *const DefaultDecisionName;
+LLVM_ABI extern const TensorSpec DefaultDecisionSpec;
+LLVM_ABI extern const char *const RewardName;
 
 using InlineFeatures = std::vector<int64_t>;
 
diff --git a/llvm/include/llvm/Analysis/InlineOrder.h b/llvm/include/llvm/Analysis/InlineOrder.h
index 498cef314b5c3..7f2bfe8b32ac2 100644
--- a/llvm/include/llvm/Analysis/InlineOrder.h
+++ b/llvm/include/llvm/Analysis/InlineOrder.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_INLINEORDER_H
 #define LLVM_ANALYSIS_INLINEORDER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/InlineCost.h"
 #include <utility>
 
@@ -31,11 +32,11 @@ template <typename T> class InlineOrder {
   bool empty() { return !size(); }
 };
 
-std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
+LLVM_ABI std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
 getDefaultInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
                       ModuleAnalysisManager &MAM, Module &M);
 
-std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
+LLVM_ABI std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
 getInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
                ModuleAnalysisManager &MAM, Module &M);
 
@@ -51,7 +52,7 @@ getInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
 class PluginInlineOrderAnalysis
     : public AnalysisInfoMixin<PluginInlineOrderAnalysis> {
 public:
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   typedef std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> (
       *InlineOrderFactory)(FunctionAnalysisManager &FAM,
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index d4ae4dcc918cf..5df1eb47fa632 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -19,6 +19,7 @@
 #ifndef LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
 #define LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/TargetFolder.h"
@@ -32,7 +33,7 @@ class Constant;
 /// InstSimplifyFolder - Use InstructionSimplify to fold operations to existing
 /// values. Also applies target-specific constant folding when not using
 /// InstructionSimplify.
-class InstSimplifyFolder final : public IRBuilderFolder {
+class LLVM_ABI InstSimplifyFolder final : public IRBuilderFolder {
   TargetFolder ConstFolder;
   SimplifyQuery SQ;
 
diff --git a/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h b/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
index 7c17120375f9c..bafffaaa73dc5 100644
--- a/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
+++ b/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
@@ -20,6 +20,7 @@
 #ifndef LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
 #define LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 
 namespace llvm {
@@ -48,15 +49,15 @@ class InstructionPrecedenceTracking {
 protected:
   /// Returns the topmost special instruction from the block \p BB. Returns
   /// nullptr if there is no special instructions in the block.
-  const Instruction *getFirstSpecialInstruction(const BasicBlock *BB);
+  LLVM_ABI const Instruction *getFirstSpecialInstruction(const BasicBlock *BB);
 
   /// Returns true iff at least one instruction from the basic block \p BB is
   /// special.
-  bool hasSpecialInstructions(const BasicBlock *BB);
+  LLVM_ABI bool hasSpecialInstructions(const BasicBlock *BB);
 
   /// Returns true iff the first special instruction of \p Insn's block exists
   /// and dominates \p Insn.
-  bool isPreceededBySpecialInstruction(const Instruction *Insn);
+  LLVM_ABI bool isPreceededBySpecialInstruction(const Instruction *Insn);
 
   /// A predicate that defines whether or not the instruction \p Insn is
   /// considered special and needs to be tracked. Implementing this method in
@@ -71,19 +72,19 @@ class InstructionPrecedenceTracking {
   /// Notifies this tracking that we are going to insert a new instruction \p
   /// Inst to the basic block \p BB. It makes all necessary updates to internal
   /// caches to keep them consistent.
-  void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
+  LLVM_ABI void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
 
   /// Notifies this tracking that we are going to remove the instruction \p Inst
   /// It makes all necessary updates to internal caches to keep them consistent.
-  void removeInstruction(const Instruction *Inst);
+  LLVM_ABI void removeInstruction(const Instruction *Inst);
 
   /// Notifies this tracking that we are going to replace all uses of \p Inst.
   /// It makes all necessary updates to internal caches to keep them consistent.
   /// Should typically be called before a RAUW.
-  void removeUsersOf(const Instruction *Inst);
+  LLVM_ABI void removeUsersOf(const Instruction *Inst);
 
   /// Invalidates all information from this tracking.
-  void clear();
+  LLVM_ABI void clear();
 };
 
 /// This class allows to keep track on instructions with implicit control flow.
@@ -93,7 +94,7 @@ class InstructionPrecedenceTracking {
 /// is reached, then we need to make sure that there is no implicit control flow
 /// instruction (ICFI) preceding it. For example, this check is required if we
 /// perform PRE moving non-speculable instruction to other place.
-class ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
+class LLVM_ABI ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
 public:
   /// Returns the topmost instruction with implicit control flow from the given
   /// basic block. Returns nullptr if there is no such instructions in the block.
@@ -115,7 +116,7 @@ class ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
   bool isSpecialInstruction(const Instruction *Insn) const override;
 };
 
-class MemoryWriteTracking : public InstructionPrecedenceTracking {
+class LLVM_ABI MemoryWriteTracking : public InstructionPrecedenceTracking {
 public:
   /// Returns the topmost instruction that may write memory from the given
   /// basic block. Returns nullptr if there is no such instructions in the block.
diff --git a/llvm/include/llvm/Analysis/InstructionSimplify.h b/llvm/include/llvm/Analysis/InstructionSimplify.h
index fa291eeef198b..2cae0bb2a75f0 100644
--- a/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -31,6 +31,7 @@
 #ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 #define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/SimplifyQuery.h"
 #include "llvm/IR/FPEnv.h"
 
@@ -58,51 +59,51 @@ class Value;
 // Please use the SimplifyQuery versions in new code.
 
 /// Given operands for an Add, fold the result or return null.
-Value *simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
+LLVM_ABI Value *simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                        const SimplifyQuery &Q);
 
 /// Given operands for a Sub, fold the result or return null.
-Value *simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
+LLVM_ABI Value *simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                        const SimplifyQuery &Q);
 
 /// Given operands for a Mul, fold the result or return null.
-Value *simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
+LLVM_ABI Value *simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                        const SimplifyQuery &Q);
 
 /// Given operands for an SDiv, fold the result or return null.
-Value *simplifySDivInst(Value *LHS, Value *RHS, bool IsExact,
+LLVM_ABI Value *simplifySDivInst(Value *LHS, Value *RHS, bool IsExact,
                         const SimplifyQuery &Q);
 
 /// Given operands for a UDiv, fold the result or return null.
-Value *simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact,
+LLVM_ABI Value *simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact,
                         const SimplifyQuery &Q);
 
 /// Given operands for an SRem, fold the result or return null.
-Value *simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operands for a URem, fold the result or return null.
-Value *simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operand for an FNeg, fold the result or return null.
-Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);
 
 
 /// Given operands for an FAdd, fold the result or return null.
-Value *
+LLVM_ABI Value *
 simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                  const SimplifyQuery &Q,
                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                  RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for an FSub, fold the result or return null.
-Value *
+LLVM_ABI Value *
 simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                  const SimplifyQuery &Q,
                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                  RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for an FMul, fold the result or return null.
-Value *
+LLVM_ABI Value *
 simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                  const SimplifyQuery &Q,
                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
@@ -112,117 +113,117 @@ simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
 /// null. In contrast to simplifyFMulInst, this function will not perform
 /// simplifications whose unrounded results differ when rounded to the argument
 /// type.
-Value *simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
+LLVM_ABI Value *simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
                        const SimplifyQuery &Q,
                        fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                        RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for an FDiv, fold the result or return null.
-Value *
+LLVM_ABI Value *
 simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                  const SimplifyQuery &Q,
                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                  RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for an FRem, fold the result or return null.
-Value *
+LLVM_ABI Value *
 simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                  const SimplifyQuery &Q,
                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                  RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for a Shl, fold the result or return null.
-Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
+LLVM_ABI Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                        const SimplifyQuery &Q);
 
 /// Given operands for a LShr, fold the result or return null.
-Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
+LLVM_ABI Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                         const SimplifyQuery &Q);
 
 /// Given operands for a AShr, fold the result or return nulll.
-Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
+LLVM_ABI Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                         const SimplifyQuery &Q);
 
 /// Given operands for an And, fold the result or return null.
-Value *simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operands for an Or, fold the result or return null.
-Value *simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operands for an Xor, fold the result or return null.
-Value *simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operands for an ICmpInst, fold the result or return null.
-Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
+LLVM_ABI Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
                         const SimplifyQuery &Q);
 
 /// Given operands for an FCmpInst, fold the result or return null.
-Value *simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
+LLVM_ABI Value *simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                         FastMathFlags FMF, const SimplifyQuery &Q);
 
 /// Given operands for a SelectInst, fold the result or return null.
-Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+LLVM_ABI Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                           const SimplifyQuery &Q);
 
 /// Given operands for a GetElementPtrInst, fold the result or return null.
-Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
+LLVM_ABI Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
                        GEPNoWrapFlags NW, const SimplifyQuery &Q);
 
 /// Given operands for an InsertValueInst, fold the result or return null.
-Value *simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+LLVM_ABI Value *simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                                const SimplifyQuery &Q);
 
 /// Given operands for an InsertElement, fold the result or return null.
-Value *simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
+LLVM_ABI Value *simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
                                  const SimplifyQuery &Q);
 
 /// Given operands for an ExtractValueInst, fold the result or return null.
-Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+LLVM_ABI Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                 const SimplifyQuery &Q);
 
 /// Given operands for an ExtractElementInst, fold the result or return null.
-Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
+LLVM_ABI Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
                                   const SimplifyQuery &Q);
 
 /// Given operands for a CastInst, fold the result or return null.
-Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
+LLVM_ABI Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
                         const SimplifyQuery &Q);
 
 /// Given operands for a BinaryIntrinsic, fold the result or return null.
-Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0,
+LLVM_ABI Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0,
                                Value *Op1, const SimplifyQuery &Q,
                                const CallBase *Call);
 
 /// Given operands for a ShuffleVectorInst, fold the result or return null.
 /// See class ShuffleVectorInst for a description of the mask representation.
-Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
+LLVM_ABI Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
                                  Type *RetTy, const SimplifyQuery &Q);
 
 //=== Helper functions for higher up the class hierarchy.
 
 /// Given operands for a CmpInst, fold the result or return null.
-Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
+LLVM_ABI Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                        const SimplifyQuery &Q);
 
 /// Given operand for a UnaryOperator, fold the result or return null.
-Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);
 
 /// Given operand for a UnaryOperator, fold the result or return null.
 /// Try to use FastMathFlags when folding the result.
-Value *simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
+LLVM_ABI Value *simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
                     const SimplifyQuery &Q);
 
 /// Given operands for a BinaryOperator, fold the result or return null.
-Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+LLVM_ABI Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                      const SimplifyQuery &Q);
 
 /// Given operands for a BinaryOperator, fold the result or return null.
 /// Try to use FastMathFlags when folding the result.
-Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
+LLVM_ABI Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
                      const SimplifyQuery &Q);
 
 /// Given a callsite, callee, and arguments, fold the result or return null.
-Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
+LLVM_ABI Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
                     const SimplifyQuery &Q);
 
 /// Given a constrained FP intrinsic call, tries to compute its simplified
@@ -232,23 +233,23 @@ Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
 /// simplification succeeds that the intrinsic is side effect free. As a result,
 /// successful simplification can be used to delete the intrinsic not just
 /// replace its result.
-Value *simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);
 
 /// Given an operand for a Freeze, see if we can fold the result.
 /// If not, this returns null.
-Value *simplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
 
 /// Given a load instruction and its pointer operand, fold the result or return
 /// null.
-Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q);
 
 /// See if we can compute a simplified version of this instruction. If not,
 /// return null.
-Value *simplifyInstruction(Instruction *I, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyInstruction(Instruction *I, const SimplifyQuery &Q);
 
 /// Like \p simplifyInstruction but the operands of \p I are replaced with
 /// \p NewOps. Returns a simplified value, or null if none was found.
-Value *
+LLVM_ABI Value *
 simplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
                                 const SimplifyQuery &Q);
 
@@ -261,7 +262,7 @@ simplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
 /// If DropFlags is passed, then the replacement result is only valid if
 /// poison-generating flags/metadata on those instructions are dropped. This
 /// is only useful in conjunction with AllowRefinement=false.
-Value *
+LLVM_ABI Value *
 simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                        const SimplifyQuery &Q, bool AllowRefinement,
                        SmallVectorImpl<Instruction *> *DropFlags = nullptr);
@@ -275,7 +276,7 @@ simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
 /// are added to it.
 ///
 /// The function returns true if any simplifications were performed.
-bool replaceAndRecursivelySimplify(
+LLVM_ABI bool replaceAndRecursivelySimplify(
     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI = nullptr,
     const DominatorTree *DT = nullptr, AssumptionCache *AC = nullptr,
     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr);
@@ -283,11 +284,11 @@ bool replaceAndRecursivelySimplify(
 // These helper functions return a SimplifyQuery structure that contains as
 // many of the optional analysis we use as are currently valid.  This is the
 // strongly preferred way of constructing SimplifyQuery in passes.
-const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
+LLVM_ABI const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
 template <class T, class... TArgs>
 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
                                          Function &);
-const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
+LLVM_ABI const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
                                          const DataLayout &);
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/InteractiveModelRunner.h b/llvm/include/llvm/Analysis/InteractiveModelRunner.h
index 798a249b9e430..e431f653612eb 100644
--- a/llvm/include/llvm/Analysis/InteractiveModelRunner.h
+++ b/llvm/include/llvm/Analysis/InteractiveModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
 #define LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Analysis/Utils/TrainingLogger.h"
@@ -35,7 +36,7 @@ namespace llvm {
 /// the compiler - i.e. the "Inbound" - and then the "Outbound", to avoid
 /// deadlock. This is because the compiler first tries to open the inbound
 /// (which will hang until there's a writer on the other end).
-class InteractiveModelRunner : public MLModelRunner {
+class LLVM_ABI InteractiveModelRunner : public MLModelRunner {
 public:
   InteractiveModelRunner(LLVMContext &Ctx,
                          const std::vector<TensorSpec> &Inputs,
diff --git a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
index ef68bbfb47c8e..bcda85108f107 100644
--- a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
+++ b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
@@ -30,6 +30,7 @@
 #ifndef LLVM_ANALYSIS_LASTRUNTRACKINGANALYSIS_H
 #define LLVM_ANALYSIS_LASTRUNTRACKINGANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/PassManager.h"
 #include <functional>
@@ -80,8 +81,8 @@ class LastRunTrackingInfo {
   }
 
 private:
-  bool shouldSkipImpl(PassID ID, OptionPtr Ptr) const;
-  void updateImpl(PassID ID, bool Changed, CompatibilityCheckFn CheckFn);
+  LLVM_ABI bool shouldSkipImpl(PassID ID, OptionPtr Ptr) const;
+  LLVM_ABI void updateImpl(PassID ID, bool Changed, CompatibilityCheckFn CheckFn);
 
   DenseMap<PassID, CompatibilityCheckFn> TrackedPasses;
 };
diff --git a/llvm/include/llvm/Analysis/LazyCallGraph.h b/llvm/include/llvm/Analysis/LazyCallGraph.h
index 289e9c3990bcc..5bd853af0a182 100644
--- a/llvm/include/llvm/Analysis/LazyCallGraph.h
+++ b/llvm/include/llvm/Analysis/LazyCallGraph.h
@@ -34,6 +34,7 @@
 #ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
 #define LLVM_ANALYSIS_LAZYCALLGRAPH_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/Any.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
@@ -382,7 +383,7 @@ class LazyCallGraph {
     Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}
 
     /// Implementation of the scan when populating.
-    EdgeSequence &populateSlow();
+    LLVM_ABI EdgeSequence &populateSlow();
 
     /// Internal helper to directly replace the function with a new one.
     ///
@@ -621,14 +622,14 @@ class LazyCallGraph {
     ///
     /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
     /// expensive.
-    bool isParentOf(const RefSCC &RC) const;
+    LLVM_ABI bool isParentOf(const RefSCC &RC) const;
 
     /// Test if this RefSCC is an ancestor of \a RC.
     ///
     /// CAUTION: This method walks the directed graph of edges as far as
     /// necessary to find a possible path to the argument. In the worst case
     /// this may walk the entire graph and can be extremely expensive.
-    bool isAncestorOf(const RefSCC &RC) const;
+    LLVM_ABI bool isAncestorOf(const RefSCC &RC) const;
 
     /// Test if this RefSCC is a child of \a RC.
     ///
@@ -683,7 +684,7 @@ class LazyCallGraph {
     /// position within this RefSCC's postorder list. Any SCCs merged are
     /// merged into the TargetN's SCC in order to preserve reachability analyses
     /// which took place on that SCC.
-    bool switchInternalEdgeToCall(
+    LLVM_ABI bool switchInternalEdgeToCall(
         Node &SourceN, Node &TargetN,
         function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
 
@@ -693,7 +694,7 @@ class LazyCallGraph {
     /// If SourceN and TargetN in separate SCCs within this RefSCC, changing
     /// the call edge between them to a ref edge is a trivial operation that
     /// does not require any structural changes to the call graph.
-    void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
+    LLVM_ABI void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);
 
     /// Make an existing internal call edge within a single SCC into a ref
     /// edge.
@@ -713,20 +714,20 @@ class LazyCallGraph {
     ///
     /// Note that if SourceN and TargetN are in separate SCCs, the simpler
     /// routine `switchTrivialInternalEdgeToRef` should be used instead.
-    iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
+    LLVM_ABI iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
                                                      Node &TargetN);
 
     /// Make an existing outgoing ref edge into a call edge.
     ///
     /// Note that this is trivial as there are no cyclic impacts and there
     /// remains a reference edge.
-    void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
+    LLVM_ABI void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);
 
     /// Make an existing outgoing call edge into a ref edge.
     ///
     /// This is trivial as there are no cyclic impacts and there remains
     /// a reference edge.
-    void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
+    LLVM_ABI void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);
 
     /// Insert a ref edge from one node in this RefSCC to another in this
     /// RefSCC.
@@ -740,7 +741,7 @@ class LazyCallGraph {
     /// should be to first insert the necessary ref edge, and then to switch it
     /// to a call edge if needed and handle any invalidation that results. See
     /// the \c switchInternalEdgeToCall routine for details.
-    void insertInternalRefEdge(Node &SourceN, Node &TargetN);
+    LLVM_ABI void insertInternalRefEdge(Node &SourceN, Node &TargetN);
 
     /// Insert an edge whose parent is in this RefSCC and child is in some
     /// child RefSCC.
@@ -748,7 +749,7 @@ class LazyCallGraph {
     /// There must be an existing path from the \p SourceN to the \p TargetN.
     /// This operation is inexpensive and does not change the set of SCCs and
     /// RefSCCs in the graph.
-    void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
+    LLVM_ABI void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
 
     /// Insert an edge whose source is in a descendant RefSCC and target is in
     /// this RefSCC.
@@ -775,7 +776,7 @@ class LazyCallGraph {
     /// FIXME: We could possibly optimize this quite a bit for cases where the
     /// caller and callee are very nearby in the graph. See comments in the
     /// implementation for details, but that use case might impact users.
-    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
+    LLVM_ABI SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
                                                    Node &TargetN);
 
     /// Remove an edge whose source is in this RefSCC and target is *not*.
@@ -788,7 +789,7 @@ class LazyCallGraph {
     /// This operation does not change the cyclic structure of the graph and so
     /// is very inexpensive. It may change the connectivity graph of the SCCs
     /// though, so be careful calling this while iterating over them.
-    void removeOutgoingEdge(Node &SourceN, Node &TargetN);
+    LLVM_ABI void removeOutgoingEdge(Node &SourceN, Node &TargetN);
 
     /// Remove a list of ref edges which are entirely within this RefSCC.
     ///
@@ -828,7 +829,7 @@ class LazyCallGraph {
     /// effort has been made to minimize the overhead of common cases such as
     /// self-edges and edge removals which result in a spanning tree with no
     /// more cycles.
-    [[nodiscard]] SmallVector<RefSCC *, 1>
+    [[nodiscard]] LLVM_ABI SmallVector<RefSCC *, 1>
     removeInternalRefEdges(ArrayRef<std::pair<Node *, Node *>> Edges);
 
     /// A convenience wrapper around the above to handle trivial cases of
@@ -840,7 +841,7 @@ class LazyCallGraph {
     ///
     /// To further make calling this convenient, it also handles inserting
     /// already existing edges.
-    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
+    LLVM_ABI void insertTrivialCallEdge(Node &SourceN, Node &TargetN);
 
     /// A convenience wrapper around the above to handle trivial cases of
     /// inserting a new ref edge.
@@ -851,7 +852,7 @@ class LazyCallGraph {
     ///
     /// To further make calling this convenient, it also handles inserting
     /// already existing edges.
-    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
+    LLVM_ABI void insertTrivialRefEdge(Node &SourceN, Node &TargetN);
 
     /// Directly replace a node's function with a new function.
     ///
@@ -862,7 +863,7 @@ class LazyCallGraph {
     /// It requires that the old function in the provided node have zero uses
     /// and the new function must have calls and references to it establishing
     /// an equivalent graph.
-    void replaceNodeFunction(Node &N, Function &NewF);
+    LLVM_ABI void replaceNodeFunction(Node &N, Function &NewF);
 
     ///@}
   };
@@ -934,24 +935,24 @@ class LazyCallGraph {
   /// This sets up the graph and computes all of the entry points of the graph.
   /// No function definitions are scanned until their nodes in the graph are
   /// requested during traversal.
-  LazyCallGraph(Module &M,
+  LLVM_ABI LazyCallGraph(Module &M,
                 function_ref<TargetLibraryInfo &(Function &)> GetTLI);
 
-  LazyCallGraph(LazyCallGraph &&G);
-  LazyCallGraph &operator=(LazyCallGraph &&RHS);
+  LLVM_ABI LazyCallGraph(LazyCallGraph &&G);
+  LLVM_ABI LazyCallGraph &operator=(LazyCallGraph &&RHS);
 
 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
   /// Verify that every RefSCC is valid.
   void verify();
 #endif
 
-  bool invalidate(Module &, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Module &, const PreservedAnalyses &PA,
                   ModuleAnalysisManager::Invalidator &);
 
   EdgeSequence::iterator begin() { return EntryEdges.begin(); }
   EdgeSequence::iterator end() { return EntryEdges.end(); }
 
-  void buildRefSCCs();
+  LLVM_ABI void buildRefSCCs();
 
   postorder_ref_scc_iterator postorder_ref_scc_begin() {
     if (!EntryEdges.empty())
@@ -1029,7 +1030,7 @@ class LazyCallGraph {
   /// below.
 
   /// Update the call graph after inserting a new edge.
-  void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
+  LLVM_ABI void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
 
   /// Update the call graph after inserting a new edge.
   void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
@@ -1037,7 +1038,7 @@ class LazyCallGraph {
   }
 
   /// Update the call graph after deleting an edge.
-  void removeEdge(Node &SourceN, Node &TargetN);
+  LLVM_ABI void removeEdge(Node &SourceN, Node &TargetN);
 
   /// Update the call graph after deleting an edge.
   void removeEdge(Function &Source, Function &Target) {
@@ -1058,13 +1059,13 @@ class LazyCallGraph {
   /// These functions should have already been passed to markDeadFunction().
   /// This is done as a batch to prevent compile time blowup as a result of
   /// handling a single function at a time.
-  void removeDeadFunctions(ArrayRef<Function *> DeadFs);
+  LLVM_ABI void removeDeadFunctions(ArrayRef<Function *> DeadFs);
 
   /// Mark a function as dead to be removed later by removeDeadFunctions().
   ///
   /// The function body should have no incoming or outgoing call or ref edges.
   /// For example, a function with a single "unreachable" instruction.
-  void markDeadFunction(Function &F);
+  LLVM_ABI void markDeadFunction(Function &F);
 
   /// Add a new function split/outlined from an existing function.
   ///
@@ -1077,7 +1078,7 @@ class LazyCallGraph {
   /// The new function may also reference the original function.
   /// It may end up in a parent SCC in the case that the original function's
   /// edge to the new function is a ref edge, and the edge back is a call edge.
-  void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
+  LLVM_ABI void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
 
   /// Add new ref-recursive functions split/outlined from an existing function.
   ///
@@ -1087,7 +1088,7 @@ class LazyCallGraph {
   ///
   /// The original function must reference (not call) all new functions.
   /// All new functions must reference (not call) each other.
-  void addSplitRefRecursiveFunctions(Function &OriginalFunction,
+  LLVM_ABI void addSplitRefRecursiveFunctions(Function &OriginalFunction,
                                      ArrayRef<Function *> NewFunctions);
 
   ///@}
@@ -1106,7 +1107,7 @@ class LazyCallGraph {
   /// updates that set with every constant visited.
   ///
   /// For each defined function, calls \p Callback with that function.
-  static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
+  LLVM_ABI static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
                               SmallPtrSetImpl<Constant *> &Visited,
                               function_ref<void(Function &)> Callback);
 
@@ -1153,7 +1154,7 @@ class LazyCallGraph {
 
   /// Helper to insert a new function, with an already looked-up entry in
   /// the NodeMap.
-  Node &insertInto(Function &F, Node *&MappedN);
+  LLVM_ABI Node &insertInto(Function &F, Node *&MappedN);
 
   /// Helper to initialize a new node created outside of creating SCCs and add
   /// it to the NodeMap if necessary. For example, useful when a function is
@@ -1287,9 +1288,9 @@ class LazyCallGraphPrinterPass
   raw_ostream &OS;
 
 public:
-  explicit LazyCallGraphPrinterPass(raw_ostream &OS);
+  LLVM_ABI explicit LazyCallGraphPrinterPass(raw_ostream &OS);
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -1302,9 +1303,9 @@ class LazyCallGraphDOTPrinterPass
   raw_ostream &OS;
 
 public:
-  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);
+  LLVM_ABI explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);
 
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index 639070c07897b..c44cc9810881d 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_LOADS_H
 #define LLVM_ANALYSIS_LOADS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/Support/CommandLine.h"
 
@@ -34,7 +35,7 @@ class TargetLibraryInfo;
 /// Return true if this is always a dereferenceable pointer. If the context
 /// instruction is specified perform context-sensitive analysis and return true
 /// if the pointer is dereferenceable at the specified instruction.
-bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
+LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
                               const Instruction *CtxI = nullptr,
                               AssumptionCache *AC = nullptr,
                               const DominatorTree *DT = nullptr,
@@ -44,7 +45,7 @@ bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
 /// greater or equal than requested. If the context instruction is specified
 /// performs context-sensitive analysis and returns true if the pointer is
 /// dereferenceable at the specified instruction.
-bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
+LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                         Align Alignment, const DataLayout &DL,
                                         const Instruction *CtxI = nullptr,
                                         AssumptionCache *AC = nullptr,
@@ -55,7 +56,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
 /// greater or equal than requested. If the context instruction is specified
 /// performs context-sensitive analysis and returns true if the pointer is
 /// dereferenceable at the specified instruction.
-bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
+LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                         const APInt &Size, const DataLayout &DL,
                                         const Instruction *CtxI = nullptr,
                                         AssumptionCache *AC = nullptr,
@@ -70,7 +71,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
 /// If it is not obviously safe to load from the specified pointer, we do a
 /// quick local scan of the basic block containing ScanFrom, to determine if
 /// the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
+LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
                                  const DataLayout &DL, Instruction *ScanFrom,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
@@ -83,14 +84,14 @@ bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
 /// that required by the header itself and could be hoisted into the header
 /// if desired.)  This is more powerful than the variants above when the
 /// address loaded from is analyzeable by SCEV.
-bool isDereferenceableAndAlignedInLoop(
+LLVM_ABI bool isDereferenceableAndAlignedInLoop(
     LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
     AssumptionCache *AC = nullptr,
     SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
 
 /// Return true if the loop \p L cannot fault on any iteration and only
 /// contains read-only memory accesses.
-bool isDereferenceableReadOnlyLoop(
+LLVM_ABI bool isDereferenceableReadOnlyLoop(
     Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
     SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
 
@@ -102,7 +103,7 @@ bool isDereferenceableReadOnlyLoop(
 /// If it is not obviously safe to load from the specified pointer, we do a
 /// quick local scan of the basic block containing ScanFrom, to determine if
 /// the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
+LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                  const DataLayout &DL, Instruction *ScanFrom,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
@@ -113,11 +114,11 @@ bool isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
 /// dereferenceability and alignment must be proven separately.  Note: This
 /// is only needed for raw reasoning; if you use the interface below
 /// (isSafeToSpeculativelyExecute), this is handled internally.
-bool mustSuppressSpeculation(const LoadInst &LI);
+LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI);
 
 /// The default number of maximum instructions to scan in the block, used by
 /// FindAvailableLoadedValue().
-extern cl::opt<unsigned> DefMaxInstsToScan;
+LLVM_ABI extern cl::opt<unsigned> DefMaxInstsToScan;
 
 /// Scan backwards to see if we have the value of the given load available
 /// locally within a small number of instructions.
@@ -143,7 +144,7 @@ extern cl::opt<unsigned> DefMaxInstsToScan;
 /// location in memory, as opposed to the value operand of a store.
 ///
 /// \returns The found value, or nullptr if no value is found.
-Value *FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
+LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                 BasicBlock::iterator &ScanFrom,
                                 unsigned MaxInstsToScan = DefMaxInstsToScan,
                                 BatchAAResults *AA = nullptr,
@@ -154,7 +155,7 @@ Value *FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
 /// FindAvailableLoadedValue() for the case where we are not interested in
 /// finding the closest clobbering instruction if no available load is found.
 /// This overload cannot be used to scan across multiple blocks.
-Value *FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
+LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                 bool *IsLoadCSE,
                                 unsigned MaxInstsToScan = DefMaxInstsToScan);
 
@@ -181,7 +182,7 @@ Value *FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
 /// location in memory, as opposed to the value operand of a store.
 ///
 /// \returns The found value, or nullptr if no value is found.
-Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy,
+LLVM_ABI Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy,
                                  bool AtLeastAtomic, BasicBlock *ScanBB,
                                  BasicBlock::iterator &ScanFrom,
                                  unsigned MaxInstsToScan, BatchAAResults *AA,
@@ -194,9 +195,9 @@ Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy,
 /// instructions, as well as when we are replacing with a null pointer.
 /// Additionally it also allows replacement of pointers when both pointers have
 /// the same underlying object.
-bool canReplacePointersIfEqual(const Value *From, const Value *To,
+LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To,
                                const DataLayout &DL);
-bool canReplacePointersInUseIfEqual(const Use &U, const Value *To,
+LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                     const DataLayout &DL);
 }
 
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index fea2ede8b5ab4..78515e3701461 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
 #define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/EquivalenceClasses.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/DiagnosticInfo.h"
@@ -32,25 +33,25 @@ class TargetTransformInfo;
 /// Loop Access Analysis.
 struct VectorizerParams {
   /// Maximum SIMD width.
-  static const unsigned MaxVectorWidth;
+  LLVM_ABI static const unsigned MaxVectorWidth;
 
   /// VF as overridden by the user.
-  static unsigned VectorizationFactor;
+  LLVM_ABI static unsigned VectorizationFactor;
   /// Interleave factor as overridden by the user.
-  static unsigned VectorizationInterleave;
+  LLVM_ABI static unsigned VectorizationInterleave;
   /// True if force-vector-interleave was specified by the user.
-  static bool isInterleaveForced();
+  LLVM_ABI static bool isInterleaveForced();
 
   /// \When performing memory disambiguation checks at runtime do not
   /// make more than this number of comparisons.
-  static unsigned RuntimeMemoryCheckThreshold;
+  LLVM_ABI static unsigned RuntimeMemoryCheckThreshold;
 
   // When creating runtime checks for nested loops, where possible try to
   // write the checks in a form that allows them to be easily hoisted out of
   // the outermost loop. For example, we can do this by expanding the range of
   // addresses considered to include the entire nested loop so that they are
   // loop invariant.
-  static bool HoistRuntimeChecks;
+  LLVM_ABI static bool HoistRuntimeChecks;
 };
 
 /// Checks memory dependences among accesses to the same underlying
@@ -144,7 +145,7 @@ class MemoryDepChecker {
     };
 
     /// String version of the types.
-    static const char *DepName[];
+    LLVM_ABI static const char *DepName[];
 
     /// Index of the source of the dependence in the InstMap vector.
     unsigned Source;
@@ -162,19 +163,19 @@ class MemoryDepChecker {
     Instruction *getDestination(const MemoryDepChecker &DepChecker) const;
 
     /// Dependence types that don't prevent vectorization.
-    static VectorizationSafetyStatus isSafeForVectorization(DepType Type);
+    LLVM_ABI static VectorizationSafetyStatus isSafeForVectorization(DepType Type);
 
     /// Lexically forward dependence.
-    bool isForward() const;
+    LLVM_ABI bool isForward() const;
     /// Lexically backward dependence.
-    bool isBackward() const;
+    LLVM_ABI bool isBackward() const;
 
     /// May be a lexically backward dependence type (includes Unknown).
-    bool isPossiblyBackward() const;
+    LLVM_ABI bool isPossiblyBackward() const;
 
     /// Print the dependence.  \p Instr is used to map the instruction
     /// indices to instructions.
-    void print(raw_ostream &OS, unsigned Depth,
+    LLVM_ABI void print(raw_ostream &OS, unsigned Depth,
                const SmallVectorImpl<Instruction *> &Instrs) const;
   };
 
@@ -186,16 +187,16 @@ class MemoryDepChecker {
 
   /// Register the location (instructions are given increasing numbers)
   /// of a write access.
-  void addAccess(StoreInst *SI);
+  LLVM_ABI void addAccess(StoreInst *SI);
 
   /// Register the location (instructions are given increasing numbers)
   /// of a write access.
-  void addAccess(LoadInst *LI);
+  LLVM_ABI void addAccess(LoadInst *LI);
 
   /// Check whether the dependencies between the accesses are safe.
   ///
   /// Only checks sets with elements in \p CheckDeps.
-  bool areDepsSafe(const DepCandidates &AccessSets,
+  LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets,
                    const MemAccessInfoList &CheckDeps);
 
   /// No memory dependence was encountered that would inhibit
@@ -265,7 +266,7 @@ class MemoryDepChecker {
   }
 
   /// Find the set of instructions that read or write via \p Ptr.
-  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
+  LLVM_ABI SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                          bool isWrite) const;
 
   /// Return the program order indices for the access location (Ptr, IsWrite).
@@ -432,7 +433,7 @@ class RuntimePointerChecking;
 struct RuntimeCheckingPtrGroup {
   /// Create a new pointer checking group containing a single
   /// pointer, with index \p Index in RtCheck.
-  RuntimeCheckingPtrGroup(unsigned Index,
+  LLVM_ABI RuntimeCheckingPtrGroup(unsigned Index,
                           const RuntimePointerChecking &RtCheck);
 
   /// Tries to add the pointer recorded in RtCheck at index
@@ -440,8 +441,8 @@ struct RuntimeCheckingPtrGroup {
   /// to a checking group if we will still be able to get
   /// the upper and lower bounds of the check. Returns true in case
   /// of success, false otherwise.
-  bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck);
-  bool addPointer(unsigned Index, const SCEV *Start, const SCEV *End,
+  LLVM_ABI bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck);
+  LLVM_ABI bool addPointer(unsigned Index, const SCEV *Start, const SCEV *End,
                   unsigned AS, bool NeedsFreeze, ScalarEvolution &SE);
 
   /// The SCEV expression which represents the upper bound of all the
@@ -529,7 +530,7 @@ class RuntimePointerChecking {
   /// according to the assumptions that we've made during the analysis.
   /// The method might also version the pointer stride according to \p Strides,
   /// and add new predicates to \p PSE.
-  void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy,
+  LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy,
               bool WritePtr, unsigned DepSetId, unsigned ASId,
               PredicatedScalarEvolution &PSE, bool NeedsFreeze);
 
@@ -538,7 +539,7 @@ class RuntimePointerChecking {
 
   /// Generate the checks and store it.  This also performs the grouping
   /// of pointers to reduce the number of memchecks necessary.
-  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
+  LLVM_ABI void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
                       bool UseDependencies);
 
   /// Returns the checks that generateChecks created. They can be used to ensure
@@ -560,7 +561,7 @@ class RuntimePointerChecking {
 
   /// Decide if we need to add a check between two groups of pointers,
   /// according to needsChecking.
-  bool needsChecking(const RuntimeCheckingPtrGroup &M,
+  LLVM_ABI bool needsChecking(const RuntimeCheckingPtrGroup &M,
                      const RuntimeCheckingPtrGroup &N) const;
 
   /// Returns the number of run-time checks required according to
@@ -568,10 +569,10 @@ class RuntimePointerChecking {
   unsigned getNumberOfChecks() const { return Checks.size(); }
 
   /// Print the list run-time memory checks necessary.
-  void print(raw_ostream &OS, unsigned Depth = 0) const;
+  LLVM_ABI void print(raw_ostream &OS, unsigned Depth = 0) const;
 
   /// Print \p Checks.
-  void printChecks(raw_ostream &OS,
+  LLVM_ABI void printChecks(raw_ostream &OS,
                    const SmallVectorImpl<RuntimePointerCheck> &Checks,
                    unsigned Depth = 0) const;
 
@@ -588,13 +589,13 @@ class RuntimePointerChecking {
   ///
   /// \p PtrToPartition contains the partition number for pointers (-1 if the
   /// pointer belongs to multiple partitions).
-  static bool
+  LLVM_ABI static bool
   arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
                              unsigned PtrIdx1, unsigned PtrIdx2);
 
   /// Decide whether we need to issue a run-time check for pointer at
   /// index \p I and \p J to prove their independence.
-  bool needsChecking(unsigned I, unsigned J) const;
+  LLVM_ABI bool needsChecking(unsigned I, unsigned J) const;
 
   /// Return PointerInfo for pointer at index \p PtrIdx.
   const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
@@ -660,7 +661,7 @@ class RuntimePointerChecking {
 /// PSE must be emitted in order for the results of this analysis to be valid.
 class LoopAccessInfo {
 public:
-  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI,
+  LLVM_ABI LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI,
                  const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT,
                  LoopInfo *LI);
 
@@ -689,11 +690,11 @@ class LoopAccessInfo {
 
   /// Return true if the block BB needs to be predicated in order for the loop
   /// to be vectorized.
-  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
+  LLVM_ABI static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                     DominatorTree *DT);
 
   /// Returns true if value \p V is loop invariant.
-  bool isInvariant(Value *V) const;
+  LLVM_ABI bool isInvariant(Value *V) const;
 
   unsigned getNumStores() const { return NumStores; }
   unsigned getNumLoads() const { return NumLoads;}
@@ -720,7 +721,7 @@ class LoopAccessInfo {
   }
 
   /// Print the information about the memory accesses in the loop.
-  void print(raw_ostream &OS, unsigned Depth = 0) const;
+  LLVM_ABI void print(raw_ostream &OS, unsigned Depth = 0) const;
 
   /// Return true if the loop has memory dependence involving two stores to an
   /// invariant address, else return false.
@@ -822,7 +823,7 @@ class LoopAccessInfo {
 ///
 /// \p PtrToStride provides the mapping between the pointer value and its
 /// stride as collected by LoopVectorizationLegality::collectStridedAccess.
-const SCEV *
+LLVM_ABI const SCEV *
 replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                           const DenseMap<Value *, const SCEV *> &PtrToStride,
                           Value *Ptr);
@@ -842,7 +843,7 @@ replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
 /// Note that the analysis results are defined if-and-only-if the original
 /// memory access was defined.  If that access was dead, or UB, then the
 /// result of this function is undefined.
-std::optional<int64_t>
+LLVM_ABI std::optional<int64_t>
 getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
              const Loop *Lp,
              const DenseMap<Value *, const SCEV *> &StridesMap = DenseMap<Value *, const SCEV *>(),
@@ -853,7 +854,7 @@ getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
 /// is a simple API that does not depend on the analysis pass.
 /// \param StrictCheck Ensure that the calculated distance matches the
 /// type-based one after all the bitcasts removal in the provided pointers.
-std::optional<int64_t>
+LLVM_ABI std::optional<int64_t>
 getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB,
                 const DataLayout &DL, ScalarEvolution &SE,
                 bool StrictCheck = false, bool CheckType = true);
@@ -868,13 +869,13 @@ getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB,
 /// sorted indices in \p SortedIndices as a[i+0], a[i+1], a[i+4], a[i+7] and
 /// saves the mask for actual memory accesses in program order in
 /// \p SortedIndices as <1,2,0,3>
-bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, const DataLayout &DL,
+LLVM_ABI bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, const DataLayout &DL,
                      ScalarEvolution &SE,
                      SmallVectorImpl<unsigned> &SortedIndices);
 
 /// Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
-bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
+LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                          ScalarEvolution &SE, bool CheckType = true);
 
 /// Calculate Start and End points of memory access.
@@ -890,7 +891,7 @@ bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
 ///
 /// There is no conflict when the intervals are disjoint:
 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
-std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
+LLVM_ABI std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
     const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *MaxBECount,
     ScalarEvolution *SE,
     DenseMap<std::pair<const SCEV *, Type *>,
@@ -914,11 +915,11 @@ class LoopAccessInfoManager {
                         const TargetLibraryInfo *TLI)
       : SE(SE), AA(AA), DT(DT), LI(LI), TTI(TTI), TLI(TLI) {}
 
-  const LoopAccessInfo &getInfo(Loop &L);
+  LLVM_ABI const LoopAccessInfo &getInfo(Loop &L);
 
-  void clear();
+  LLVM_ABI void clear();
 
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 };
 
@@ -937,7 +938,7 @@ class LoopAccessAnalysis
 public:
   typedef LoopAccessInfoManager Result;
 
-  Result run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
 };
 
 inline Instruction *MemoryDepChecker::Dependence::getSource(
diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index a760ea98d7cfb..e6cdb9e0727e4 100644
--- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -29,6 +29,7 @@
 #ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
 #define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
 
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
@@ -131,7 +132,7 @@ template <> class LoopAnalysisManagerFunctionProxy::Result {
   /// If the necessary loop infrastructure is not preserved, this will forcibly
   /// clear all of the cached analysis results that are keyed on the \c
   /// LoopInfo for this function.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
 private:
@@ -143,7 +144,7 @@ template <> class LoopAnalysisManagerFunctionProxy::Result {
 /// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
 /// so it can pass the \c LoopInfo to the result.
 template <>
-LoopAnalysisManagerFunctionProxy::Result
+LLVM_ABI LoopAnalysisManagerFunctionProxy::Result
 LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
 
 // Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
@@ -158,7 +159,7 @@ typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
     FunctionAnalysisManagerLoopProxy;
 
 /// Returns the minimum set of Analyses that all loop passes must preserve.
-PreservedAnalyses getLoopPassPreservedAnalyses();
+LLVM_ABI PreservedAnalyses getLoopPassPreservedAnalyses();
 }
 
 #endif // LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
diff --git a/llvm/include/llvm/Analysis/LoopInfo.h b/llvm/include/llvm/Analysis/LoopInfo.h
index 814c61bf4c350..4ca07c8a2746e 100644
--- a/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/llvm/include/llvm/Analysis/LoopInfo.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_LOOPINFO_H
 #define LLVM_ANALYSIS_LOOPINFO_H
 
 #include "llvm/ADT/GraphTraits.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
@@ -157,7 +158,7 @@ class LLVM_ABI Loop : public LoopBase<BasicBlock, Loop> {
     /// - the final value of the induction variable can be found
     ///
     /// Else std::nullopt.
-    static std::optional<Loop::LoopBounds>
+    LLVM_ABI static std::optional<Loop::LoopBounds>
     getBounds(const Loop &L, PHINode &IndVar, ScalarEvolution &SE);
 
     /// Get the initial value of the loop induction variable.
@@ -206,7 +207,7 @@ class LLVM_ABI Loop : public LoopBase<BasicBlock, Loop> {
     /// The predicate would be sgt if both (1) and (2) are satisfied.
     /// getCanonicalPredicate() returns sgt for this example.
     /// Note: The IR is not changed.
-    ICmpInst::Predicate getCanonicalPredicate() const;
+    LLVM_ABI ICmpInst::Predicate getCanonicalPredicate() const;
 
     /// An enum for the direction of the loop
     /// - for (int i = 0; i < ub; ++i)  --> Increasing
@@ -215,7 +216,7 @@ class LLVM_ABI Loop : public LoopBase<BasicBlock, Loop> {
     enum class Direction { Increasing, Decreasing, Unknown };
 
     /// Get the direction of the loop.
-    Direction getDirection() const;
+    LLVM_ABI Direction getDirection() const;
 
   private:
     LoopBounds(const Loop &Loop, Value &I, Instruction &SI, Value *SV, Value &F,
@@ -414,7 +415,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
 
 public:
   LoopInfo() = default;
-  explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);
+  LLVM_ABI explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);
 
   LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
   LoopInfo &operator=(LoopInfo &&RHS) {
@@ -423,7 +424,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
   }
 
   /// Handle invalidation explicitly.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &);
 
   // Most of the public interface is provided via LoopInfoBase.
@@ -432,7 +433,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
   /// the loop forest and parent loops for each block so that \c L is no longer
   /// referenced, but does not actually delete \c L immediately. The pointer
   /// will remain valid until this LoopInfo's memory is released.
-  void erase(Loop *L);
+  LLVM_ABI void erase(Loop *L);
 
   /// Returns true if replacing From with To everywhere is guaranteed to
   /// preserve LCSSA form.
@@ -532,7 +533,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
   // to be inserted at the beginning of the block.  Note that V is assumed to
   // dominate ExitBB, and ExitBB must be the exit block of some loop.  The
   // IR is assumed to be in LCSSA form before the planned insertion.
-  bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
+  LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
                                          const BasicBlock *ExitBB) const;
 };
 
@@ -541,7 +542,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
 /// The flag enables checks which are expensive and are disabled by default
 /// unless the `EXPENSIVE_CHECKS` macro is defined.  The `-verify-loop-info`
 /// flag allows the checks to be enabled selectively without re-compilation.
-extern bool VerifyLoopInfo;
+LLVM_ABI extern bool VerifyLoopInfo;
 
 // Allow clients to walk the list of nested loops...
 template <> struct GraphTraits<const Loop *> {
@@ -570,7 +571,7 @@ class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
 public:
   typedef LoopInfo Result;
 
-  LoopInfo run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI LoopInfo run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Printer pass for the \c LoopAnalysis results.
@@ -579,18 +580,18 @@ class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
 
 public:
   explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 /// Verifier pass for the \c LoopAnalysis results.
 struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 /// The legacy pass manager's analysis pass to compute loop information.
-class LoopInfoWrapperPass : public FunctionPass {
+class LLVM_ABI LoopInfoWrapperPass : public FunctionPass {
   LoopInfo LI;
 
 public:
@@ -614,56 +615,56 @@ class LoopInfoWrapperPass : public FunctionPass {
 };
 
 /// Function to print a loop's contents as LLVM's text IR assembly.
-void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
+LLVM_ABI void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
 
 /// Find and return the loop attribute node for the attribute @p Name in
 /// @p LoopID. Return nullptr if there is no such attribute.
-MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);
+LLVM_ABI MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);
 
 /// Find string metadata for a loop.
 ///
 /// Returns the MDNode where the first operand is the metadata's name. The
 /// following operands are the metadata's values. If no metadata with @p Name is
 /// found, return nullptr.
-MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
+LLVM_ABI MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
 
-std::optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
-                                                 StringRef Name);
+LLVM_ABI std::optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
+                                                          StringRef Name);
 
 /// Returns true if Name is applied to TheLoop and enabled.
-bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
+LLVM_ABI bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
 
 /// Find named metadata for a loop with an integer value.
-std::optional<int> getOptionalIntLoopAttribute(const Loop *TheLoop,
-                                               StringRef Name);
+LLVM_ABI std::optional<int> getOptionalIntLoopAttribute(const Loop *TheLoop,
+                                                        StringRef Name);
 
 /// Find named metadata for a loop with an integer value. Return \p Default if
 /// not set.
-int getIntLoopAttribute(const Loop *TheLoop, StringRef Name, int Default = 0);
+LLVM_ABI int getIntLoopAttribute(const Loop *TheLoop, StringRef Name, int Default = 0);
 
 /// Find string metadata for loop
 ///
 /// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
 /// operand or null otherwise.  If the string metadata is not found return
 /// Optional's not-a-value.
-std::optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
-                                                           StringRef Name);
+LLVM_ABI std::optional<const MDOperand *>
+findStringMetadataForLoop(const Loop *TheLoop, StringRef Name);
 
 /// Find the convergence heart of the loop.
-CallBase *getLoopConvergenceHeart(const Loop *TheLoop);
+LLVM_ABI CallBase *getLoopConvergenceHeart(const Loop *TheLoop);
 
 /// Look for the loop attribute that requires progress within the loop.
 /// Note: Most consumers probably want "isMustProgress" which checks
 /// the containing function attribute too.
-bool hasMustProgress(const Loop *L);
+LLVM_ABI bool hasMustProgress(const Loop *L);
 
 /// Return true if this loop can be assumed to make progress.  (i.e. can't
 /// be infinite without side effects without also being undefined)
-bool isMustProgress(const Loop *L);
+LLVM_ABI bool isMustProgress(const Loop *L);
 
 /// Return true if this loop can be assumed to run for a finite number of
 /// iterations.
-bool isFinite(const Loop *L);
+LLVM_ABI bool isFinite(const Loop *L);
 
 /// Return whether an MDNode might represent an access group.
 ///
@@ -672,7 +673,7 @@ bool isFinite(const Loop *L);
 /// MDNodes are designed immutable -- would require creating a new MDNode). Note
 /// that this is not a sufficient condition: not every distinct and empty NDNode
 /// is representing an access group.
-bool isValidAsAccessGroup(MDNode *AccGroup);
+LLVM_ABI bool isValidAsAccessGroup(MDNode *AccGroup);
 
 /// Create a new LoopID after the loop has been transformed.
 ///
@@ -689,7 +690,7 @@ bool isValidAsAccessGroup(MDNode *AccGroup);
 /// @param AddAttrs       Add these loop attributes to the new LoopID.
 ///
 /// @return A new LoopID that can be applied using Loop::setLoopID().
-llvm::MDNode *
+LLVM_ABI llvm::MDNode *
 makePostTransformationMetadata(llvm::LLVMContext &Context, MDNode *OrigLoopID,
                                llvm::ArrayRef<llvm::StringRef> RemovePrefixes,
                                llvm::ArrayRef<llvm::MDNode *> AddAttrs);
diff --git a/llvm/include/llvm/Analysis/LoopNestAnalysis.h b/llvm/include/llvm/Analysis/LoopNestAnalysis.h
index 22d5cb6ca15f5..be14aadd626ef 100644
--- a/llvm/include/llvm/Analysis/LoopNestAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopNestAnalysis.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_LOOPNESTANALYSIS_H
 #define LLVM_ANALYSIS_LOOPNESTANALYSIS_H
 
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Support/Compiler.h"
@@ -195,7 +196,7 @@ class LLVM_ABI LoopNest {
                                                     ScalarEvolution &SE);
 };
 
-raw_ostream &operator<<(raw_ostream &, const LoopNest &);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &, const LoopNest &);
 
 /// This analysis provides information for a loop nest. The analysis runs on
 /// demand and can be initiated via AM.getResult<LoopNestAnalysis>.
@@ -205,7 +206,7 @@ class LoopNestAnalysis : public AnalysisInfoMixin<LoopNestAnalysis> {
 
 public:
   using Result = LoopNest;
-  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
 };
 
 /// Printer pass for the \c LoopNest results.
@@ -215,7 +216,7 @@ class LoopNestPrinterPass : public PassInfoMixin<LoopNestPrinterPass> {
 public:
   explicit LoopNestPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+  LLVM_ABI PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                         LoopStandardAnalysisResults &AR, LPMUpdater &U);
 
   static bool isRequired() { return true; }
diff --git a/llvm/include/llvm/Analysis/LoopPass.h b/llvm/include/llvm/Analysis/LoopPass.h
index c5f08d0ae8af6..20fd3b10bd442 100644
--- a/llvm/include/llvm/Analysis/LoopPass.h
+++ b/llvm/include/llvm/Analysis/LoopPass.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_LOOPPASS_H
 #define LLVM_ANALYSIS_LOOPPASS_H
 
 #include "llvm/IR/LegacyPassManagers.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <deque>
@@ -25,7 +26,7 @@ class LoopInfo;
 class LPPassManager;
 class Function;
 
-class LoopPass : public Pass {
+class LLVM_ABI LoopPass : public Pass {
 public:
   explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}
 
@@ -73,7 +74,7 @@ class LoopPass : public Pass {
   bool skipLoop(const Loop *L) const;
 };
 
-class LPPassManager : public FunctionPass, public PMDataManager {
+class LLVM_ABI LPPassManager : public FunctionPass, public PMDataManager {
 public:
   static char ID;
   explicit LPPassManager();
@@ -122,8 +123,8 @@ class LPPassManager : public FunctionPass, public PMDataManager {
 // LPPassManager to check if current pass preserves LCSSA form, and if it does
 // pass manager calls lcssa verification for the current loop.
 struct LCSSAVerificationPass : public FunctionPass {
-  static char ID;
-  LCSSAVerificationPass();
+  LLVM_ABI static char ID;
+  LLVM_ABI LCSSAVerificationPass();
 
   bool runOnFunction(Function &F) override { return false; }
 
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 3e2f79550d91d..1199ae650338c 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
 #define LLVM_ANALYSIS_MEMORYBUILTINS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -52,38 +53,38 @@ class Value;
 /// Tests if a value is a call or invoke to a library function that
 /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
 /// like).
-bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI);
-bool isAllocationFn(const Value *V,
+LLVM_ABI bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isAllocationFn(const Value *V,
                     function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory via new.
-bool isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory similar to malloc or calloc.
-bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory (either malloc, calloc, or strdup like).
-bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
 
 /// Tests if a function is a call or invoke to a library function that
 /// reallocates memory (e.g., realloc).
-bool isReallocLikeFn(const Function *F);
+LLVM_ABI bool isReallocLikeFn(const Function *F);
 
 /// If this is a call to a realloc function, return the reallocated operand.
-Value *getReallocatedOperand(const CallBase *CB);
+LLVM_ABI Value *getReallocatedOperand(const CallBase *CB);
 
 //===----------------------------------------------------------------------===//
 //  free Call Utility Functions.
 //
 
 /// isLibFreeFunction - Returns true if the function is a builtin free()
-bool isLibFreeFunction(const Function *F, const LibFunc TLIFn);
+LLVM_ABI bool isLibFreeFunction(const Function *F, const LibFunc TLIFn);
 
 /// If this if a call to a free function, return the freed operand.
-Value *getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI);
+LLVM_ABI Value *getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI);
 
 //===----------------------------------------------------------------------===//
 //  Properties of allocation functions
@@ -98,20 +99,20 @@ Value *getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI);
 /// Note: *Removable* really does mean removable; it does not mean observable.
 /// A language (e.g. C++) can allow removing allocations without allowing
 /// insertion or speculative execution of allocation routines.
-bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI);
 
 /// Gets the alignment argument for an aligned_alloc-like function, using either
 /// built-in knowledge based on fuction names/signatures or allocalign
 /// attributes. Note: the Value returned may not indicate a valid alignment, per
 /// the definition of the allocalign attribute.
-Value *getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI);
+LLVM_ABI Value *getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI);
 
 /// Return the size of the requested allocation. With a trivial mapper, this is
 /// similar to calling getObjectSize(..., Exact), but without looking through
 /// calls that return their argument. A mapper function can be used to replace
 /// one Value* (operand to the allocation) with another. This is useful when
 /// doing abstract interpretation.
-std::optional<APInt> getAllocSize(
+LLVM_ABI std::optional<APInt> getAllocSize(
     const CallBase *CB, const TargetLibraryInfo *TLI,
     function_ref<const Value *(const Value *)> Mapper = [](const Value *V) {
       return V;
@@ -120,14 +121,14 @@ std::optional<APInt> getAllocSize(
 /// If this is a call to an allocation function that initializes memory to a
 /// fixed value, return said value in the requested type.  Otherwise, return
 /// nullptr.
-Constant *getInitialValueOfAllocation(const Value *V,
-                                      const TargetLibraryInfo *TLI,
-                                      Type *Ty);
+LLVM_ABI Constant *getInitialValueOfAllocation(const Value *V,
+                                               const TargetLibraryInfo *TLI,
+                                               Type *Ty);
 
 /// If a function is part of an allocation family (e.g.
 /// malloc/realloc/calloc/free), return the identifier for its family
 /// of functions.
-std::optional<StringRef> getAllocationFamily(const Value *I,
-                                             const TargetLibraryInfo *TLI);
+LLVM_ABI std::optional<StringRef>
+getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI);
 
 //===----------------------------------------------------------------------===//
@@ -172,16 +173,16 @@ struct ObjectSizeOpts {
 /// WARNING: The object size returned is the allocation size.  This does not
 /// imply dereferenceability at site of use since the object may be freeed in
 /// between.
-bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
+LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                    const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
 
 /// Try to turn a call to \@llvm.objectsize into an integer value of the given
 /// Type. Returns null on failure. If MustSucceed is true, this function will
 /// not return null, and may return conservative values governed by the second
 /// argument of the call to objectsize.
-Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
+LLVM_ABI Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, bool MustSucceed);
-Value *lowerObjectSizeCall(
+LLVM_ABI Value *lowerObjectSizeCall(
     IntrinsicInst *ObjectSize, const DataLayout &DL,
     const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
     SmallVectorImpl<Instruction *> *InsertedInstructions = nullptr);
@@ -264,27 +265,27 @@ class ObjectSizeOffsetVisitor
   static OffsetSpan unknown() { return OffsetSpan(); }
 
 public:
-  ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
+  LLVM_ABI ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
                           LLVMContext &Context, ObjectSizeOpts Options = {});
 
-  SizeOffsetAPInt compute(Value *V);
+  LLVM_ABI SizeOffsetAPInt compute(Value *V);
 
   // These are "private", except they can't actually be made private. Only
   // compute() should be used by external users.
-  OffsetSpan visitAllocaInst(AllocaInst &I);
-  OffsetSpan visitArgument(Argument &A);
-  OffsetSpan visitCallBase(CallBase &CB);
-  OffsetSpan visitConstantPointerNull(ConstantPointerNull &);
-  OffsetSpan visitExtractElementInst(ExtractElementInst &I);
-  OffsetSpan visitExtractValueInst(ExtractValueInst &I);
-  OffsetSpan visitGlobalAlias(GlobalAlias &GA);
-  OffsetSpan visitGlobalVariable(GlobalVariable &GV);
-  OffsetSpan visitIntToPtrInst(IntToPtrInst &);
-  OffsetSpan visitLoadInst(LoadInst &I);
-  OffsetSpan visitPHINode(PHINode &);
-  OffsetSpan visitSelectInst(SelectInst &I);
-  OffsetSpan visitUndefValue(UndefValue &);
-  OffsetSpan visitInstruction(Instruction &I);
+  LLVM_ABI OffsetSpan visitAllocaInst(AllocaInst &I);
+  LLVM_ABI OffsetSpan visitArgument(Argument &A);
+  LLVM_ABI OffsetSpan visitCallBase(CallBase &CB);
+  LLVM_ABI OffsetSpan visitConstantPointerNull(ConstantPointerNull &);
+  LLVM_ABI OffsetSpan visitExtractElementInst(ExtractElementInst &I);
+  LLVM_ABI OffsetSpan visitExtractValueInst(ExtractValueInst &I);
+  LLVM_ABI OffsetSpan visitGlobalAlias(GlobalAlias &GA);
+  LLVM_ABI OffsetSpan visitGlobalVariable(GlobalVariable &GV);
+  LLVM_ABI OffsetSpan visitIntToPtrInst(IntToPtrInst &);
+  LLVM_ABI OffsetSpan visitLoadInst(LoadInst &I);
+  LLVM_ABI OffsetSpan visitPHINode(PHINode &);
+  LLVM_ABI OffsetSpan visitSelectInst(SelectInst &I);
+  LLVM_ABI OffsetSpan visitUndefValue(UndefValue &);
+  LLVM_ABI OffsetSpan visitInstruction(Instruction &I);
 
 private:
   OffsetSpan
@@ -304,7 +305,7 @@ struct SizeOffsetWeakTrackingVH;
 struct SizeOffsetValue : public SizeOffsetType<Value *, SizeOffsetValue> {
   SizeOffsetValue() : SizeOffsetType(nullptr, nullptr) {}
   SizeOffsetValue(Value *Size, Value *Offset) : SizeOffsetType(Size, Offset) {}
-  SizeOffsetValue(const SizeOffsetWeakTrackingVH &SOT);
+  LLVM_ABI SizeOffsetValue(const SizeOffsetWeakTrackingVH &SOT);
 
   static bool known(Value *V) { return V != nullptr; }
 };
@@ -345,24 +346,24 @@ class ObjectSizeOffsetEvaluator
   SizeOffsetValue compute_(Value *V);
 
 public:
-  ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
+  LLVM_ABI ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
                             LLVMContext &Context, ObjectSizeOpts EvalOpts = {});
 
   static SizeOffsetValue unknown() { return SizeOffsetValue(); }
 
-  SizeOffsetValue compute(Value *V);
+  LLVM_ABI SizeOffsetValue compute(Value *V);
 
   // The individual instruction visitors should be treated as private.
-  SizeOffsetValue visitAllocaInst(AllocaInst &I);
-  SizeOffsetValue visitCallBase(CallBase &CB);
-  SizeOffsetValue visitExtractElementInst(ExtractElementInst &I);
-  SizeOffsetValue visitExtractValueInst(ExtractValueInst &I);
-  SizeOffsetValue visitGEPOperator(GEPOperator &GEP);
-  SizeOffsetValue visitIntToPtrInst(IntToPtrInst &);
-  SizeOffsetValue visitLoadInst(LoadInst &I);
-  SizeOffsetValue visitPHINode(PHINode &PHI);
-  SizeOffsetValue visitSelectInst(SelectInst &I);
-  SizeOffsetValue visitInstruction(Instruction &I);
+  LLVM_ABI SizeOffsetValue visitAllocaInst(AllocaInst &I);
+  LLVM_ABI SizeOffsetValue visitCallBase(CallBase &CB);
+  LLVM_ABI SizeOffsetValue visitExtractElementInst(ExtractElementInst &I);
+  LLVM_ABI SizeOffsetValue visitExtractValueInst(ExtractValueInst &I);
+  LLVM_ABI SizeOffsetValue visitGEPOperator(GEPOperator &GEP);
+  LLVM_ABI SizeOffsetValue visitIntToPtrInst(IntToPtrInst &);
+  LLVM_ABI SizeOffsetValue visitLoadInst(LoadInst &I);
+  LLVM_ABI SizeOffsetValue visitPHINode(PHINode &PHI);
+  LLVM_ABI SizeOffsetValue visitSelectInst(SelectInst &I);
+  LLVM_ABI SizeOffsetValue visitInstruction(Instruction &I);
 };
 
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 2de56018b49b5..1c6b158d01bc1 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -15,6 +15,7 @@
 #ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
 #define LLVM_ANALYSIS_MEMORYLOCATION_H
 
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/IR/Metadata.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/TypeSize.h"
@@ -191,7 +192,7 @@ class LocationSize {
   // - values that don't exist against values that do, and
   // - precise values to imprecise values
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   // Returns an opaque value that represents this LocationSize. Cannot be
   // reliably converted back into a LocationSize.
@@ -239,29 +240,29 @@ class MemoryLocation {
 
   /// Return a location with information about the memory reference by the given
   /// instruction.
-  static MemoryLocation get(const LoadInst *LI);
-  static MemoryLocation get(const StoreInst *SI);
-  static MemoryLocation get(const VAArgInst *VI);
-  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
-  static MemoryLocation get(const AtomicRMWInst *RMWI);
+  LLVM_ABI static MemoryLocation get(const LoadInst *LI);
+  LLVM_ABI static MemoryLocation get(const StoreInst *SI);
+  LLVM_ABI static MemoryLocation get(const VAArgInst *VI);
+  LLVM_ABI static MemoryLocation get(const AtomicCmpXchgInst *CXI);
+  LLVM_ABI static MemoryLocation get(const AtomicRMWInst *RMWI);
   static MemoryLocation get(const Instruction *Inst) {
     return *MemoryLocation::getOrNone(Inst);
   }
-  static std::optional<MemoryLocation> getOrNone(const Instruction *Inst);
+  LLVM_ABI static std::optional<MemoryLocation> getOrNone(const Instruction *Inst);
 
   /// Return a location representing the source of a memory transfer.
-  static MemoryLocation getForSource(const MemTransferInst *MTI);
-  static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
+  LLVM_ABI static MemoryLocation getForSource(const MemTransferInst *MTI);
+  LLVM_ABI static MemoryLocation getForSource(const AnyMemTransferInst *MTI);
 
   /// Return a location representing the destination of a memory set or
   /// transfer.
-  static MemoryLocation getForDest(const MemIntrinsic *MI);
-  static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
-  static std::optional<MemoryLocation> getForDest(const CallBase *CI,
+  LLVM_ABI static MemoryLocation getForDest(const MemIntrinsic *MI);
+  LLVM_ABI static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
+  LLVM_ABI static std::optional<MemoryLocation> getForDest(const CallBase *CI,
                                                   const TargetLibraryInfo &TLI);
 
   /// Return a location representing a particular argument of a call.
-  static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
+  LLVM_ABI static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                        const TargetLibraryInfo *TLI);
   static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                        const TargetLibraryInfo &TLI) {
diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
index 9fcb81a0a1b4c..93bcc47454bb3 100644
--- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_MEMORYPROFILEINFO_H
 #define LLVM_ANALYSIS_MEMORYPROFILEINFO_H
 
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Metadata.h"
 #include "llvm/ProfileData/MemProfCommon.h"
+#include "llvm/Support/Compiler.h"
@@ -22,29 +23,29 @@ namespace llvm {
 namespace memprof {
 
 /// Return the allocation type for a given set of memory profile values.
-AllocationType getAllocType(uint64_t TotalLifetimeAccessDensity,
+LLVM_ABI AllocationType getAllocType(uint64_t TotalLifetimeAccessDensity,
                             uint64_t AllocCount, uint64_t TotalLifetime);
 
 /// Build callstack metadata from the provided list of call stack ids. Returns
 /// the resulting metadata node.
-MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack, LLVMContext &Ctx);
+LLVM_ABI MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack, LLVMContext &Ctx);
 
 /// Build metadata from the provided list of full stack id and profiled size, to
 /// use when reporting of hinted sizes is enabled.
-MDNode *buildContextSizeMetadata(ArrayRef<ContextTotalSize> ContextSizeInfo,
-                                 LLVMContext &Ctx);
+LLVM_ABI MDNode *buildContextSizeMetadata(
+    ArrayRef<ContextTotalSize> ContextSizeInfo, LLVMContext &Ctx);
 
 /// Returns the stack node from an MIB metadata node.
-MDNode *getMIBStackNode(const MDNode *MIB);
+LLVM_ABI MDNode *getMIBStackNode(const MDNode *MIB);
 
 /// Returns the allocation type from an MIB metadata node.
-AllocationType getMIBAllocType(const MDNode *MIB);
+LLVM_ABI AllocationType getMIBAllocType(const MDNode *MIB);
 
 /// Returns the string to use in attributes with the given type.
-std::string getAllocTypeAttributeString(AllocationType Type);
+LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type);
 
 /// True if the AllocTypes bitmask contains just a single type.
-bool hasSingleAllocType(uint8_t AllocTypes);
+LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes);
 
 /// Class to build a trie of call stack contexts for a particular profiled
 /// allocation call, along with their associated allocation types.
@@ -118,12 +119,12 @@ class CallStackTrie {
   /// matching via a debug location hash), expected to be in order from the
   /// allocation call down to the bottom of the call stack (i.e. callee to
   /// caller order).
-  void addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds,
+  LLVM_ABI void addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds,
                     std::vector<ContextTotalSize> ContextSizeInfo = {});
 
   /// Add the call stack context along with its allocation type from the MIB
   /// metadata to the Trie.
-  void addCallStack(MDNode *MIB);
+  LLVM_ABI void addCallStack(MDNode *MIB);
 
   /// Build and attach the minimal necessary MIB metadata. If the alloc has a
   /// single allocation type, add a function attribute instead. The reason for
@@ -132,12 +133,12 @@ class CallStackTrie {
   /// cloning or another optimization to distinguish the allocation types,
   /// which is lower overhead and more direct than maintaining this metadata.
   /// Returns true if memprof metadata attached, false if not (attribute added).
-  bool buildAndAttachMIBMetadata(CallBase *CI);
+  LLVM_ABI bool buildAndAttachMIBMetadata(CallBase *CI);
 
   /// Add an attribute for the given allocation type to the call instruction.
   /// If hinted by reporting is enabled, a message is emitted with the given
   /// descriptor used to identify the category of single allocation type.
-  void addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
+  LLVM_ABI void addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
                                    StringRef Descriptor);
 };
 
@@ -216,11 +217,11 @@ CallStack<NodeT, IteratorT>::beginAfterSharedPrefix(const CallStack &Other) {
 
 /// Specializations for iterating through IR metadata stack contexts.
 template <>
-CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
+LLVM_ABI CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
     const MDNode *N, bool End);
 template <>
-uint64_t CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
-template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const;
+LLVM_ABI uint64_t CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
+template <> LLVM_ABI uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const;
 
 } // end namespace memprof
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index 09fc34af60dc3..c73e119351e99 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -85,6 +85,7 @@
 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
 #define LLVM_ANALYSIS_MEMORYSSA_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -160,8 +161,8 @@ class MemoryAccess
 
   BasicBlock *getBlock() const { return Block; }
 
-  void print(raw_ostream &OS) const;
-  void dump() const;
+  LLVM_ABI void print(raw_ostream &OS) const;
+  LLVM_ABI void dump() const;
 
   /// The user iterators for a memory access
   using iterator = user_iterator;
@@ -323,7 +324,7 @@ class MemoryUse final : public MemoryUseOrDef {
     return MA->getValueID() == MemoryUseVal;
   }
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   void setOptimized(MemoryAccess *DMA) {
     OptimizedID = DMA->getID();
@@ -406,7 +407,7 @@ class MemoryDef final : public MemoryUseOrDef {
     setOperand(1, nullptr);
   }
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   unsigned getID() const { return ID; }
 
@@ -628,7 +629,7 @@ class MemoryPhi final : public MemoryAccess {
     return V->getValueID() == MemoryPhiVal;
   }
 
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   unsigned getID() const { return ID; }
 
@@ -700,17 +701,17 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
 /// accesses.
 class MemorySSA {
 public:
-  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
-  MemorySSA(Loop &, AliasAnalysis *, DominatorTree *);
+  LLVM_ABI MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
+  LLVM_ABI MemorySSA(Loop &, AliasAnalysis *, DominatorTree *);
 
   // MemorySSA must remain where it's constructed; Walkers it creates store
   // pointers to it.
   MemorySSA(MemorySSA &&) = delete;
 
-  ~MemorySSA();
+  LLVM_ABI ~MemorySSA();
 
-  MemorySSAWalker *getWalker();
-  MemorySSAWalker *getSkipSelfWalker();
+  LLVM_ABI MemorySSAWalker *getWalker();
+  LLVM_ABI MemorySSAWalker *getSkipSelfWalker();
 
   /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
   /// access associated with it. If passed a basic block gets the memory phi
@@ -726,8 +727,8 @@ class MemorySSA {
 
   DominatorTree &getDomTree() const { return *DT; }
 
-  void dump() const;
-  void print(raw_ostream &) const;
+  LLVM_ABI void dump() const;
+  LLVM_ABI void print(raw_ostream &) const;
 
   /// Return true if \p MA represents the live on entry value
   ///
@@ -770,20 +771,20 @@ class MemorySSA {
 
   /// Given two memory accesses in the same basic block, determine
   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
-  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
+  LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
 
   /// Given two memory accesses in potentially different blocks,
   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
-  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
+  LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
 
   /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
   /// dominates Use \p B.
-  bool dominates(const MemoryAccess *A, const Use &B) const;
+  LLVM_ABI bool dominates(const MemoryAccess *A, const Use &B) const;
 
   enum class VerificationLevel { Fast, Full };
   /// Verify that MemorySSA is self consistent (IE definitions dominate
   /// all uses, uses appear in the right places).  This is used by unit tests.
-  void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
+  LLVM_ABI void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
 
   /// Used in various insertion functions to specify whether we are talking
   /// about the beginning or end of a block.
@@ -794,7 +795,7 @@ class MemorySSA {
   /// not happened yet for this MemorySSA instance. This should be done if you
   /// plan to query the clobbering access for most uses, or if you walk the
   /// def-use chain of uses.
-  void ensureOptimizedUses();
+  LLVM_ABI void ensureOptimizedUses();
 
   AliasAnalysis &getAA() { return *AA; }
 
@@ -824,8 +825,8 @@ class MemorySSA {
   // machinsations.  They do not always leave the IR in a correct state, and
   // relies on the updater to fixup what it breaks, so it is not public.
 
-  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
-  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
+  LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
+  LLVM_ABI void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
 
   // Rename the dominator tree branch rooted at BB.
   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
@@ -833,13 +834,13 @@ class MemorySSA {
     renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
   }
 
-  void removeFromLookups(MemoryAccess *);
-  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
-  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
+  LLVM_ABI void removeFromLookups(MemoryAccess *);
+  LLVM_ABI void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
+  LLVM_ABI void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                                InsertionPlace);
-  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
+  LLVM_ABI void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                              AccessList::iterator);
-  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
+  LLVM_ABI MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                       const MemoryUseOrDef *Template = nullptr,
                                       bool CreationMustSucceed = true);
 
@@ -867,7 +868,7 @@ class MemorySSA {
   void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
-  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
+  LLVM_ABI void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                   SmallPtrSetImpl<BasicBlock *> &Visited,
                   bool SkipVisited = false, bool RenameAllUses = false);
   AccessList *getOrCreateAccessList(const BasicBlock *);
@@ -910,7 +911,7 @@ class MemorySSA {
 /// The checks which this flag enables is exensive and disabled by default
 /// unless `EXPENSIVE_CHECKS` is defined.  The flag `-verify-memoryssa` can be
 /// used to selectively enable the verification without re-compilation.
-extern bool VerifyMemorySSA;
+LLVM_ABI extern bool VerifyMemorySSA;
 
 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
 class MemorySSAUtil {
@@ -919,7 +920,7 @@ class MemorySSAUtil {
   friend class MemorySSAWalker;
 
   // This function should not be used by new passes.
-  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
+  LLVM_ABI static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                   AliasAnalysis &AA);
 };
 
@@ -941,11 +942,11 @@ class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
 
     std::unique_ptr<MemorySSA> MSSA;
 
-    bool invalidate(Function &F, const PreservedAnalyses &PA,
+    LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                     FunctionAnalysisManager::Invalidator &Inv);
   };
 
-  Result run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Printer pass for \c MemorySSA.
@@ -957,7 +958,7 @@ class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
   explicit MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
       : OS(OS), EnsureOptimizedUses(EnsureOptimizedUses) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
@@ -970,19 +971,19 @@ class MemorySSAWalkerPrinterPass
 public:
   explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
 /// Verifier pass for \c MemorySSA.
 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 /// Legacy analysis pass which computes \c MemorySSA.
-class MemorySSAWrapperPass : public FunctionPass {
+class LLVM_ABI MemorySSAWrapperPass : public FunctionPass {
 public:
   MemorySSAWrapperPass();
 
@@ -1015,7 +1016,7 @@ class MemorySSAWrapperPass : public FunctionPass {
 /// standardized interface to getting and using that info.
 class MemorySSAWalker {
 public:
-  MemorySSAWalker(MemorySSA *);
+  LLVM_ABI MemorySSAWalker(MemorySSA *);
   virtual ~MemorySSAWalker() = default;
 
   using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
@@ -1100,7 +1101,7 @@ class MemorySSAWalker {
 
 /// A MemorySSAWalker that does no alias queries, or anything else. It
 /// simply returns the links as they were constructed by the builder.
-class DoNothingMemorySSAWalker final : public MemorySSAWalker {
+class LLVM_ABI DoNothingMemorySSAWalker final : public MemorySSAWalker {
 public:
   // Keep the overrides below from hiding the Instruction overload of
   // getClobberingMemoryAccess.
@@ -1261,7 +1262,7 @@ class upward_defs_iterator
   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
   /// loop. In particular, this guarantees that it only references a single
   /// MemoryLocation during execution of the containing function.
-  bool IsGuaranteedLoopInvariant(const Value *Ptr) const;
+  LLVM_ABI bool IsGuaranteedLoopInvariant(const Value *Ptr) const;
 
   void fillInCurrentPair() {
     CurrentPair.first = *DefIterator;
diff --git a/llvm/include/llvm/Analysis/MemorySSAUpdater.h b/llvm/include/llvm/Analysis/MemorySSAUpdater.h
index b8e08f4b7842f..31f61b0d45e95 100644
--- a/llvm/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/llvm/include/llvm/Analysis/MemorySSAUpdater.h
@@ -31,6 +31,7 @@
 #ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
 #define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -82,52 +83,52 @@ class MemorySSAUpdater {
   /// if (foo) { store b }
   /// load a
   /// Where a mayalias b, *does* require RenameUses be set to true.
-  void insertDef(MemoryDef *Def, bool RenameUses = false);
-  void insertUse(MemoryUse *Use, bool RenameUses = false);
+  LLVM_ABI void insertDef(MemoryDef *Def, bool RenameUses = false);
+  LLVM_ABI void insertUse(MemoryUse *Use, bool RenameUses = false);
   /// Update the MemoryPhi in `To` following an edge deletion between `From` and
   /// `To`. If `To` becomes unreachable, a call to removeBlocks should be made.
-  void removeEdge(BasicBlock *From, BasicBlock *To);
+  LLVM_ABI void removeEdge(BasicBlock *From, BasicBlock *To);
   /// Update the MemoryPhi in `To` to have a single incoming edge from `From`,
   /// following a CFG change that replaced multiple edges (switch) with a direct
   /// branch.
-  void removeDuplicatePhiEdgesBetween(const BasicBlock *From,
+  LLVM_ABI void removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                       const BasicBlock *To);
   /// Update MemorySSA when inserting a unique backedge block for a loop.
-  void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
+  LLVM_ABI void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
                                                   BasicBlock *LoopPreheader,
                                                   BasicBlock *BackedgeBlock);
   /// Update MemorySSA after a loop was cloned, given the blocks in RPO order,
   /// the exit blocks and a 1:1 mapping of all blocks and instructions
   /// cloned. This involves duplicating all defs and uses in the cloned blocks
   /// Updating phi nodes in exit block successors is done separately.
-  void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
+  LLVM_ABI void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                            ArrayRef<BasicBlock *> ExitBlocks,
                            const ValueToValueMapTy &VM,
                            bool IgnoreIncomingWithNoClones = false);
   // Block BB was fully or partially cloned into its predecessor P1. Map
   // contains the 1:1 mapping of instructions cloned and VM[BB]=P1.
-  void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1,
+  LLVM_ABI void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1,
                                     const ValueToValueMapTy &VM);
   /// Update phi nodes in exit block successors following cloning. Exit blocks
   /// that were not cloned don't have additional predecessors added.
-  void updateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
+  LLVM_ABI void updateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
                                      const ValueToValueMapTy &VMap,
                                      DominatorTree &DT);
-  void updateExitBlocksForClonedLoop(
+  LLVM_ABI void updateExitBlocksForClonedLoop(
       ArrayRef<BasicBlock *> ExitBlocks,
       ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT);
 
   /// Apply CFG updates, analogous with the DT edge updates. By default, the
   /// DT is assumed to be already up to date. If UpdateDTFirst is true, first
   /// update the DT with the same updates.
-  void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
+  LLVM_ABI void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
                     bool UpdateDTFirst = false);
   /// Apply CFG insert updates, analogous with the DT edge updates.
-  void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
+  LLVM_ABI void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
 
-  void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
-  void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
-  void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
+  LLVM_ABI void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
+  LLVM_ABI void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
+  LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                    MemorySSA::InsertionPlace Where);
   /// `From` block was spliced into `From` and `To`. There is a CFG edge from
   /// `From` to `To`. Move all accesses from `From` to `To` starting at
@@ -142,7 +143,7 @@ class MemorySSAUpdater {
   /// |      |        |------|  <- Start
   /// |      |        |  To  |
   /// |------|        |------|
-  void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
+  LLVM_ABI void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
                                 Instruction *Start);
   /// `From` block was merged into `To`. There is a CFG edge from `To` to
   /// `From`.`To` still branches to `From`, but all instructions were moved and
@@ -158,14 +159,14 @@ class MemorySSAUpdater {
   /// |------|        |      |  <- Start
   /// | From |        |      |
   /// |------|        |------|
-  void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
+  LLVM_ABI void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                Instruction *Start);
   /// A new empty BasicBlock (New) now branches directly to Old. Some of
   /// Old's predecessors (Preds) are now branching to New instead of Old.
   /// If New is the only predecessor, move Old's Phi, if present, to New.
   /// Otherwise, add a new Phi in New with appropriate incoming values, and
   /// update the incoming values in Old's Phi node too, if present.
-  void wireOldPredecessorsToNewImmediatePredecessor(
+  LLVM_ABI void wireOldPredecessorsToNewImmediatePredecessor(
       BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
       bool IdenticalEdgesWereMerged = true);
   // The below are utility functions. Other than creation of accesses to pass
@@ -188,7 +189,7 @@ class MemorySSAUpdater {
   ///
   /// Note: If a MemoryAccess already exists for I, this function will make it
   /// inaccessible and it *must* have removeMemoryAccess called on it.
-  MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
+  LLVM_ABI MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
                                        const BasicBlock *BB,
                                        MemorySSA::InsertionPlace Point,
                                        bool CreationMustSucceed = true);
@@ -196,13 +197,13 @@ class MemorySSAUpdater {
   /// Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
   ///
   /// See createMemoryAccessInBB() for usage details.
-  MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
+  LLVM_ABI MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
                                            MemoryAccess *Definition,
                                            MemoryUseOrDef *InsertPt);
   /// Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
   ///
   /// See createMemoryAccessInBB() for usage details.
-  MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
+  LLVM_ABI MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
                                           MemoryAccess *Definition,
                                           MemoryAccess *InsertPt);
 
@@ -212,7 +213,7 @@ class MemorySSAUpdater {
   /// associated with it is erased from the program.  For example, if a store or
   /// load is simply erased (not replaced), removeMemoryAccess should be called
   /// on the MemoryAccess for that store/load.
-  void removeMemoryAccess(MemoryAccess *, bool OptimizePhis = false);
+  LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis = false);
 
   /// Remove MemoryAccess for a given instruction, if a MemoryAccess exists.
   /// This should be called when an instruction (load/store) is deleted from
@@ -229,12 +230,12 @@ class MemorySSAUpdater {
   /// Deleted blocks still have successor info, but their predecessor edges and
   /// Phi nodes may already be updated. Instructions in DeadBlocks should be
   /// deleted after this call.
-  void removeBlocks(const SmallSetVector<BasicBlock *, 8> &DeadBlocks);
+  LLVM_ABI void removeBlocks(const SmallSetVector<BasicBlock *, 8> &DeadBlocks);
 
   /// Instruction I will be changed to an unreachable. Remove all accesses in
   /// I's block that follow I (inclusive), and update the Phis in the blocks'
   /// successors.
-  void changeToUnreachable(const Instruction *I);
+  LLVM_ABI void changeToUnreachable(const Instruction *I);
 
   /// Get handle on MemorySSA.
   MemorySSA* getMemorySSA() const { return MSSA; }
diff --git a/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h b/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
index e36dea58cec45..62fb4a7544973 100644
--- a/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
+++ b/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
 #define LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
 
 #include "llvm/IR/ModuleSummaryIndex.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
@@ -33,7 +34,7 @@ class StackSafetyInfo;
 /// BlockFrequencyInfo for a given function, that can be provided via
 /// a std::function callback. Otherwise, this routine will manually construct
 /// that information.
-ModuleSummaryIndex buildModuleSummaryIndex(
+LLVM_ABI ModuleSummaryIndex buildModuleSummaryIndex(
     const Module &M,
     std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
     ProfileSummaryInfo *PSI,
@@ -50,11 +51,11 @@ class ModuleSummaryIndexAnalysis
 public:
   using Result = ModuleSummaryIndex;
 
-  Result run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI Result run(Module &M, ModuleAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the ModuleSummaryIndex object.
-class ModuleSummaryIndexWrapperPass : public ModulePass {
+class LLVM_ABI ModuleSummaryIndexWrapperPass : public ModulePass {
   std::optional<ModuleSummaryIndex> Index;
 
 public:
@@ -76,10 +77,10 @@ class ModuleSummaryIndexWrapperPass : public ModulePass {
 // createModuleSummaryIndexWrapperPass - This pass builds a ModuleSummaryIndex
 // object for the module, to be written to bitcode or LLVM assembly.
 //
-ModulePass *createModuleSummaryIndexWrapperPass();
+LLVM_ABI ModulePass *createModuleSummaryIndexWrapperPass();
 
 /// Legacy wrapper pass to provide the ModuleSummaryIndex object.
-class ImmutableModuleSummaryIndexWrapperPass : public ImmutablePass {
+class LLVM_ABI ImmutableModuleSummaryIndexWrapperPass : public ImmutablePass {
   const ModuleSummaryIndex *Index;
 
 public:
@@ -96,12 +97,12 @@ class ImmutableModuleSummaryIndexWrapperPass : public ImmutablePass {
 // ImmutableModuleSummaryIndexWrapperPass - This pass wrap provided
 // ModuleSummaryIndex object for the module, to be used by other passes.
 //
-ImmutablePass *
+LLVM_ABI ImmutablePass *
 createImmutableModuleSummaryIndexWrapperPass(const ModuleSummaryIndex *Index);
 
 /// Returns true if the instruction could have memprof metadata, used to ensure
 /// consistency between summary analysis and the ThinLTO backend processing.
-bool mayHaveMemprofSummary(const CallBase *CB);
+LLVM_ABI bool mayHaveMemprofSummary(const CallBase *CB);
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/MustExecute.h b/llvm/include/llvm/Analysis/MustExecute.h
index 8ac3c5eb653cd..0211864099f53 100644
--- a/llvm/include/llvm/Analysis/MustExecute.h
+++ b/llvm/include/llvm/Analysis/MustExecute.h
@@ -23,6 +23,7 @@
 #ifndef LLVM_ANALYSIS_MUSTEXECUTE_H
 #define LLVM_ANALYSIS_MUSTEXECUTE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
@@ -62,14 +63,14 @@ class LoopSafetyInfo {
 
 protected:
   /// Computes block colors.
-  void computeBlockColors(const Loop *CurLoop);
+  LLVM_ABI void computeBlockColors(const Loop *CurLoop);
 
 public:
   /// Returns block colors map that is used to update funclet operand bundles.
-  const DenseMap<BasicBlock *, ColorVector> &getBlockColors() const;
+  LLVM_ABI const DenseMap<BasicBlock *, ColorVector> &getBlockColors() const;
 
   /// Copy colors of block \p Old into the block \p New.
-  void copyColors(BasicBlock *New, BasicBlock *Old);
+  LLVM_ABI void copyColors(BasicBlock *New, BasicBlock *Old);
 
   /// Returns true iff the block \p BB potentially may throw exception. It can
   /// be false-positive in cases when we want to avoid complex analysis.
@@ -81,7 +82,7 @@ class LoopSafetyInfo {
 
   /// Return true if we must reach the block \p BB under assumption that the
   /// loop \p CurLoop is entered.
-  bool allLoopPathsLeadToBlock(const Loop *CurLoop, const BasicBlock *BB,
+  LLVM_ABI bool allLoopPathsLeadToBlock(const Loop *CurLoop, const BasicBlock *BB,
                                const DominatorTree *DT) const;
 
   /// Computes safety information for a loop checks loop body & header for
@@ -106,7 +107,7 @@ class LoopSafetyInfo {
 /// Simple and conservative implementation of LoopSafetyInfo that can give
 /// false-positive answers to its queries in order to avoid complicated
 /// analysis.
-class SimpleLoopSafetyInfo: public LoopSafetyInfo {
+class LLVM_ABI SimpleLoopSafetyInfo: public LoopSafetyInfo {
   bool MayThrow = false;       // The current loop contains an instruction which
                                // may throw.
   bool HeaderMayThrow = false; // Same as previous, but specific to loop header
@@ -128,7 +129,7 @@ class SimpleLoopSafetyInfo: public LoopSafetyInfo {
 /// that should be invalidated by calling the methods insertInstructionTo and
 /// removeInstruction whenever we modify a basic block's contents by adding or
 /// removing instructions.
-class ICFLoopSafetyInfo: public LoopSafetyInfo {
+class LLVM_ABI ICFLoopSafetyInfo: public LoopSafetyInfo {
   bool MayThrow = false;       // The current loop contains an instruction which
                                // may throw.
   // Contains information about implicit control flow in this loop's blocks.
@@ -168,7 +169,7 @@ class ICFLoopSafetyInfo: public LoopSafetyInfo {
   void removeInstruction(const Instruction *Inst);
 };
 
-bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI);
+LLVM_ABI bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI);
 
 struct MustBeExecutedContextExplorer;
 
@@ -338,7 +339,7 @@ struct MustBeExecutedIterator {
       DenseSet<PointerIntPair<const Instruction *, 1, ExplorationDirection>>;
 
   /// Private constructors.
-  MustBeExecutedIterator(ExplorerTy &Explorer, const Instruction *I);
+  LLVM_ABI MustBeExecutedIterator(ExplorerTy &Explorer, const Instruction *I);
 
   /// Reset the iterator to its initial state pointing at \p I.
   void reset(const Instruction *I);
@@ -350,7 +351,7 @@ struct MustBeExecutedIterator {
   ///
   /// \return The next instruction in the must be executed context, or nullptr
   ///         if none was found.
-  const Instruction *advance();
+  LLVM_ABI const Instruction *advance();
 
   /// A set to track the visited instructions in order to deal with endless
   /// loops and recursion.
@@ -490,7 +491,7 @@ struct MustBeExecutedContextExplorer {
   ///                        executed context.
   /// \param PP              The program point for which the next instruction
   ///                        that is guaranteed to execute is determined.
-  const Instruction *
+  LLVM_ABI const Instruction *
   getMustBeExecutedNextInstruction(MustBeExecutedIterator &It,
                                    const Instruction *PP);
   /// Return the previous instr. that is guaranteed to be executed before \p PP.
@@ -499,15 +500,15 @@ struct MustBeExecutedContextExplorer {
   ///                        executed context.
   /// \param PP              The program point for which the previous instr.
   ///                        that is guaranteed to execute is determined.
-  const Instruction *
+  LLVM_ABI const Instruction *
   getMustBeExecutedPrevInstruction(MustBeExecutedIterator &It,
                                    const Instruction *PP);
 
   /// Find the next join point from \p InitBB in forward direction.
-  const BasicBlock *findForwardJoinPoint(const BasicBlock *InitBB);
+  LLVM_ABI const BasicBlock *findForwardJoinPoint(const BasicBlock *InitBB);
 
   /// Find the next join point from \p InitBB in backward direction.
-  const BasicBlock *findBackwardJoinPoint(const BasicBlock *InitBB);
+  LLVM_ABI const BasicBlock *findBackwardJoinPoint(const BasicBlock *InitBB);
 
   /// Parameter that limit the performed exploration. See the constructor for
   /// their meaning.
@@ -545,7 +546,7 @@ class MustExecutePrinterPass : public PassInfoMixin<MustExecutePrinterPass> {
 
 public:
   MustExecutePrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
@@ -555,7 +556,7 @@ class MustBeExecutedContextPrinterPass
 
 public:
   MustBeExecutedContextPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
index 035fb2234b245..fdfde7d385df0 100644
--- a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 #define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Support/Compiler.h"
 namespace llvm {
 class TensorSpec;
@@ -19,7 +20,7 @@ class TensorSpec;
 /// 'run'.
 class NoInferenceModelRunner : public MLModelRunner {
 public:
-  NoInferenceModelRunner(LLVMContext &Ctx,
+  LLVM_ABI NoInferenceModelRunner(LLVMContext &Ctx,
                          const std::vector<TensorSpec> &Inputs);
 
   static bool classof(const MLModelRunner *R) {
diff --git a/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h b/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
index 64e6442bb92f8..a34404ede7268 100644
--- a/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
+++ b/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
 #define LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/Function.h"
@@ -45,7 +46,7 @@ class OptimizationRemarkEmitter {
   /// operation since BFI and all its required analyses are computed.  This is
   /// for example useful for CGSCC passes that can't use function analyses
   /// passes in the old PM.
-  OptimizationRemarkEmitter(const Function *F);
+  LLVM_ABI OptimizationRemarkEmitter(const Function *F);
 
   OptimizationRemarkEmitter(OptimizationRemarkEmitter &&Arg)
       : F(Arg.F), BFI(Arg.BFI) {}
@@ -57,7 +58,7 @@ class OptimizationRemarkEmitter {
   }
 
   /// Handle invalidation events in the new pass manager.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
   /// Return true iff at least *some* remarks are enabled.
@@ -68,7 +69,7 @@ class OptimizationRemarkEmitter {
 
   /// Output the remark via the diagnostic handler and to the
   /// optimization record file.
-  void emit(DiagnosticInfoOptimizationBase &OptDiag);
+  LLVM_ABI void emit(DiagnosticInfoOptimizationBase &OptDiag);
   /// Also allow r-value for OptDiag to allow emitting a temporarily-constructed
   /// diagnostic.
   void emit(DiagnosticInfoOptimizationBase &&OptDiag) { emit(OptDiag); }
@@ -145,7 +146,7 @@ using setExtraArgs = DiagnosticInfoOptimizationBase::setExtraArgs;
 /// Note that this pass shouldn't generally be marked as preserved by other
 /// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
 /// could be freed.
-class OptimizationRemarkEmitterWrapperPass : public FunctionPass {
+class LLVM_ABI OptimizationRemarkEmitterWrapperPass : public FunctionPass {
   std::unique_ptr<OptimizationRemarkEmitter> ORE;
 
 public:
@@ -173,7 +174,7 @@ class OptimizationRemarkEmitterAnalysis
   typedef OptimizationRemarkEmitter Result;
 
   /// Run the analysis pass over a function and produce BFI.
-  Result run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
 };
 } // namespace llvm
 #endif // LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
diff --git a/llvm/include/llvm/Analysis/PHITransAddr.h b/llvm/include/llvm/Analysis/PHITransAddr.h
index de9c3c4fd2921..2c46887b1346a 100644
--- a/llvm/include/llvm/Analysis/PHITransAddr.h
+++ b/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_PHITRANSADDR_H
 #define LLVM_ANALYSIS_PHITRANSADDR_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/Instruction.h"
 
@@ -70,12 +71,12 @@ class PHITransAddr {
   /// isPotentiallyPHITranslatable - If this needs PHI translation, return true
   /// if we have some hope of doing it.  This should be used as a filter to
   /// avoid calling PHITranslateValue in hopeless situations.
-  bool isPotentiallyPHITranslatable() const;
+  LLVM_ABI bool isPotentiallyPHITranslatable() const;
 
   /// translateValue - PHI translate the current address up the CFG from
   /// CurBB to Pred, updating our state to reflect any needed changes.  If
   /// 'MustDominate' is true, the translated value must dominate PredBB.
-  Value *translateValue(BasicBlock *CurBB, BasicBlock *PredBB,
+  LLVM_ABI Value *translateValue(BasicBlock *CurBB, BasicBlock *PredBB,
                         const DominatorTree *DT, bool MustDominate);
 
   /// translateWithInsertion - PHI translate this value into the specified
@@ -85,16 +86,16 @@ class PHITransAddr {
   /// All newly created instructions are added to the NewInsts list.  This
   /// returns null on failure.
   ///
-  Value *translateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
+  LLVM_ABI Value *translateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
                                 const DominatorTree &DT,
                                 SmallVectorImpl<Instruction *> &NewInsts);
 
-  void dump() const;
+  LLVM_ABI void dump() const;
 
   /// verify - Check internal consistency of this data structure.  If the
   /// structure is valid, it returns true.  If invalid, it prints errors and
   /// returns false.
-  bool verify() const;
+  LLVM_ABI bool verify() const;
 
 private:
   Value *translateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB,
diff --git a/llvm/include/llvm/Analysis/Passes.h b/llvm/include/llvm/Analysis/Passes.h
index 06077fd0d03e4..52b798c4d2607 100644
--- a/llvm/include/llvm/Analysis/Passes.h
+++ b/llvm/include/llvm/Analysis/Passes.h
@@ -23,21 +23,21 @@ namespace llvm {
   //
   /// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
   /// pass.
-  FunctionPass *createLazyValueInfoPass();
+  LLVM_ABI FunctionPass *createLazyValueInfoPass();
 
   //===--------------------------------------------------------------------===//
   //
   // createDependenceAnalysisWrapperPass - This creates an instance of the
   // DependenceAnalysisWrapper pass.
   //
-  FunctionPass *createDependenceAnalysisWrapperPass();
+  LLVM_ABI FunctionPass *createDependenceAnalysisWrapperPass();
 
   //===--------------------------------------------------------------------===//
   //
   // createRegionInfoPass - This pass finds all single entry single exit regions
   // in a function and builds the region hierarchy.
   //
-  FunctionPass *createRegionInfoPass();
+  LLVM_ABI FunctionPass *createRegionInfoPass();
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/PhiValues.h b/llvm/include/llvm/Analysis/PhiValues.h
index a749af30be9e3..c690da072d31e 100644
--- a/llvm/include/llvm/Analysis/PhiValues.h
+++ b/llvm/include/llvm/Analysis/PhiValues.h
@@ -19,6 +19,7 @@
 #ifndef LLVM_ANALYSIS_PHIVALUES_H
 #define LLVM_ANALYSIS_PHIVALUES_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/SetVector.h"
@@ -48,7 +49,7 @@ class PhiValues {
   ///
   /// This returns the cached value if PN has previously been processed,
   /// otherwise it processes it first.
-  const ValueSet &getValuesForPhi(const PHINode *PN);
+  LLVM_ABI const ValueSet &getValuesForPhi(const PHINode *PN);
 
   /// Notify PhiValues that the cached information using V is no longer valid
   ///
@@ -56,16 +57,16 @@ class PhiValues {
   /// (and the phis that use that phi) become invalid. A user of PhiValues has
   /// to notify it of this by calling invalidateValue on either the operand or
   /// the phi, which will then clear the relevant cached information.
-  void invalidateValue(const Value *V);
+  LLVM_ABI void invalidateValue(const Value *V);
 
   /// Free the memory used by this class.
-  void releaseMemory();
+  LLVM_ABI void releaseMemory();
 
   /// Print out the values currently in the cache.
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   /// Handle invalidation events in the new pass manager.
-  bool invalidate(Function &, const PreservedAnalyses &,
+  LLVM_ABI bool invalidate(Function &, const PreservedAnalyses &,
                   FunctionAnalysisManager::Invalidator &);
 
 private:
@@ -87,7 +88,7 @@ class PhiValues {
   /// A CallbackVH to notify PhiValues when a value is deleted or replaced, so
   /// that the cached information for that value can be cleared to avoid
   /// dangling pointers to invalid values.
-  class PhiValuesCallbackVH final : public CallbackVH {
+  class LLVM_ABI PhiValuesCallbackVH final : public CallbackVH {
     PhiValues *PV;
     void deleted() override;
     void allUsesReplacedWith(Value *New) override;
@@ -118,7 +119,7 @@ class PhiValuesAnalysis : public AnalysisInfoMixin<PhiValuesAnalysis> {
 
 public:
   using Result = PhiValues;
-  PhiValues run(Function &F, FunctionAnalysisManager &);
+  LLVM_ABI PhiValues run(Function &F, FunctionAnalysisManager &);
 };
 
 /// A pass for printing the PhiValues for a function.
@@ -131,12 +132,12 @@ class PhiValuesPrinterPass : public PassInfoMixin<PhiValuesPrinterPass> {
 
 public:
   explicit PhiValuesPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
 /// Wrapper pass for the legacy pass manager
-class PhiValuesWrapperPass : public FunctionPass {
+class LLVM_ABI PhiValuesWrapperPass : public FunctionPass {
   std::unique_ptr<PhiValues> Result;
 
 public:
diff --git a/llvm/include/llvm/Analysis/PostDominators.h b/llvm/include/llvm/Analysis/PostDominators.h
index 92e30f82501c1..e354a63ccb62f 100644
--- a/llvm/include/llvm/Analysis/PostDominators.h
+++ b/llvm/include/llvm/Analysis/PostDominators.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
 #define LLVM_ANALYSIS_POSTDOMINATORS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManager.h"
@@ -32,7 +33,7 @@ class PostDominatorTree : public PostDomTreeBase<BasicBlock> {
   PostDominatorTree() = default;
   explicit PostDominatorTree(Function &F) { recalculate(F); }
   /// Handle invalidation explicitly.
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &);
 
   // Ensure base-class overloads are visible.
@@ -40,7 +41,7 @@ class PostDominatorTree : public PostDomTreeBase<BasicBlock> {
 
   /// Return true if \p I1 dominates \p I2. This checks if \p I2 comes before
   /// \p I1 if they belongs to the same basic block.
-  bool dominates(const Instruction *I1, const Instruction *I2) const;
+  LLVM_ABI bool dominates(const Instruction *I1, const Instruction *I2) const;
 };
 
 /// Analysis pass which computes a \c PostDominatorTree.
@@ -56,7 +57,7 @@ class PostDominatorTreeAnalysis
 
   /// Run the analysis pass over a function and produce a post dominator
   ///        tree.
-  PostDominatorTree run(Function &F, FunctionAnalysisManager &);
+  LLVM_ABI PostDominatorTree run(Function &F, FunctionAnalysisManager &);
 };
 
 /// Printer pass for the \c PostDominatorTree.
@@ -65,14 +66,14 @@ class PostDominatorTreePrinterPass
   raw_ostream &OS;
 
 public:
-  explicit PostDominatorTreePrinterPass(raw_ostream &OS);
+  LLVM_ABI explicit PostDominatorTreePrinterPass(raw_ostream &OS);
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
-struct PostDominatorTreeWrapperPass : public FunctionPass {
+struct LLVM_ABI PostDominatorTreeWrapperPass : public FunctionPass {
   static char ID; // Pass identification, replacement for typeid
 
   PostDominatorTree DT;
@@ -95,7 +96,7 @@ struct PostDominatorTreeWrapperPass : public FunctionPass {
   void print(raw_ostream &OS, const Module*) const override;
 };
 
-FunctionPass* createPostDomTree();
+LLVM_ABI FunctionPass* createPostDomTree();
 
 template <> struct GraphTraits<PostDominatorTree*>
   : public GraphTraits<DomTreeNode*> {
diff --git a/llvm/include/llvm/Analysis/ProfileSummaryInfo.h b/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
index f9ea8d71316d9..04cc53642fd47 100644
--- a/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_PROFILESUMMARYINFO_H
 #define LLVM_ANALYSIS_PROFILESUMMARYINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
@@ -65,7 +66,7 @@ class ProfileSummaryInfo {
 
   /// If a summary is provided as argument, use that. Otherwise,
   /// if the `Summary` member is null, attempt to refresh.
-  void refresh(std::unique_ptr<ProfileSummary> &&Other = nullptr);
+  LLVM_ABI void refresh(std::unique_ptr<ProfileSummary> &&Other = nullptr);
 
   /// Returns true if profile summary is available.
   bool hasProfileSummary() const { return Summary != nullptr; }
@@ -100,15 +101,15 @@ class ProfileSummaryInfo {
   }
 
   /// Returns the profile count for \p CallInst.
-  std::optional<uint64_t> getProfileCount(const CallBase &CallInst,
+  LLVM_ABI std::optional<uint64_t> getProfileCount(const CallBase &CallInst,
                                           BlockFrequencyInfo *BFI,
                                           bool AllowSynthetic = false) const;
   /// Returns true if module \c M has partial-profile sample profile.
-  bool hasPartialSampleProfile() const;
+  LLVM_ABI bool hasPartialSampleProfile() const;
   /// Returns true if the working set size of the code is considered huge.
-  bool hasHugeWorkingSetSize() const;
+  LLVM_ABI bool hasHugeWorkingSetSize() const;
   /// Returns true if the working set size of the code is considered large.
-  bool hasLargeWorkingSetSize() const;
+  LLVM_ABI bool hasLargeWorkingSetSize() const;
   /// Returns true if \p F has hot function entry. If it returns false, it
   /// either means it is not hot or it is unknown whether it is hot or not (for
   /// example, no profile data is available).
@@ -141,7 +142,7 @@ class ProfileSummaryInfo {
     return false;
   }
   /// Returns true if \p F has cold function entry.
-  bool isFunctionEntryCold(const Function *F) const;
+  LLVM_ABI bool isFunctionEntryCold(const Function *F) const;
   /// Returns true if \p F contains only cold code.
   template <typename FuncT, typename BFIT>
   bool isFunctionColdInCallGraph(const FuncT *F, BFIT &BFI) const {
@@ -161,7 +162,7 @@ class ProfileSummaryInfo {
     return true;
   }
   /// Returns true if the hotness of \p F is unknown.
-  bool isFunctionHotnessUnknown(const Function &F) const;
+  LLVM_ABI bool isFunctionHotnessUnknown(const Function &F) const;
   /// Returns true if \p F contains hot code with regard to a given hot
   /// percentile cutoff value.
   template <typename FuncT, typename BFIT>
@@ -179,19 +180,19 @@ class ProfileSummaryInfo {
         PercentileCutoff, F, BFI);
   }
   /// Returns true if count \p C is considered hot.
-  bool isHotCount(uint64_t C) const;
+  LLVM_ABI bool isHotCount(uint64_t C) const;
   /// Returns true if count \p C is considered cold.
-  bool isColdCount(uint64_t C) const;
+  LLVM_ABI bool isColdCount(uint64_t C) const;
   /// Returns true if count \p C is considered hot with regard to a given
   /// hot percentile cutoff value.
   /// PercentileCutoff is encoded as a 6 digit decimal fixed point number, where
   /// the first two digits are the whole part. E.g. 995000 for 99.5 percentile.
-  bool isHotCountNthPercentile(int PercentileCutoff, uint64_t C) const;
+  LLVM_ABI bool isHotCountNthPercentile(int PercentileCutoff, uint64_t C) const;
   /// Returns true if count \p C is considered cold with regard to a given
   /// cold percentile cutoff value.
   /// PercentileCutoff is encoded as a 6 digit decimal fixed point number, where
   /// the first two digits are the whole part. E.g. 995000 for 99.5 percentile.
-  bool isColdCountNthPercentile(int PercentileCutoff, uint64_t C) const;
+  LLVM_ABI bool isColdCountNthPercentile(int PercentileCutoff, uint64_t C) const;
 
   /// Returns true if BasicBlock \p BB is considered hot.
   template <typename BBType, typename BFIT>
@@ -244,15 +245,15 @@ class ProfileSummaryInfo {
                                                       BlockFreq, BFI);
   }
   /// Returns true if the call site \p CB is considered hot.
-  bool isHotCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
+  LLVM_ABI bool isHotCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
   /// Returns true if call site \p CB is considered cold.
-  bool isColdCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
+  LLVM_ABI bool isColdCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
   /// Returns HotCountThreshold if set. Recompute HotCountThreshold
   /// if not set.
-  uint64_t getOrCompHotCountThreshold() const;
+  LLVM_ABI uint64_t getOrCompHotCountThreshold() const;
   /// Returns ColdCountThreshold if set. Recompute HotCountThreshold
   /// if not set.
-  uint64_t getOrCompColdCountThreshold() const;
+  LLVM_ABI uint64_t getOrCompColdCountThreshold() const;
   /// Returns HotCountThreshold if set.
   uint64_t getHotCountThreshold() const {
     return HotCountThreshold.value_or(0);
@@ -351,7 +352,7 @@ ProfileSummaryInfo::getEntryCount<MachineFunction>(
     const MachineFunction *F) const;
 
 /// An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
-class ProfileSummaryInfoWrapperPass : public ImmutablePass {
+class LLVM_ABI ProfileSummaryInfoWrapperPass : public ImmutablePass {
   std::unique_ptr<ProfileSummaryInfo> PSI;
 
 public:
@@ -374,7 +375,7 @@ class ProfileSummaryAnalysis
 public:
   typedef ProfileSummaryInfo Result;
 
-  Result run(Module &M, ModuleAnalysisManager &);
+  LLVM_ABI Result run(Module &M, ModuleAnalysisManager &);
 
 private:
   friend AnalysisInfoMixin<ProfileSummaryAnalysis>;
@@ -388,7 +389,7 @@ class ProfileSummaryPrinterPass
 
 public:
   explicit ProfileSummaryPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
diff --git a/llvm/include/llvm/Analysis/RegionPass.h b/llvm/include/llvm/Analysis/RegionPass.h
index dd5e6a1a3b249..e4e0431426b0a 100644
--- a/llvm/include/llvm/Analysis/RegionPass.h
+++ b/llvm/include/llvm/Analysis/RegionPass.h
@@ -15,6 +15,7 @@
 #ifndef LLVM_ANALYSIS_REGIONPASS_H
 #define LLVM_ANALYSIS_REGIONPASS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/LegacyPassManagers.h"
 #include "llvm/Pass.h"
 #include <deque>
@@ -29,7 +30,7 @@ class RegionInfo;
 /// A pass that runs on each Region in a function.
 ///
 /// RegionPass is managed by RGPassManager.
-class RegionPass : public Pass {
+class LLVM_ABI RegionPass : public Pass {
 public:
   explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}
 
@@ -84,7 +85,7 @@ class RegionPass : public Pass {
 };
 
 /// The pass manager to schedule RegionPasses.
-class RGPassManager : public FunctionPass, public PMDataManager {
+class LLVM_ABI RGPassManager : public FunctionPass, public PMDataManager {
   std::deque<Region*> RQ;
   RegionInfo *RI;
   Region *CurrentRegion;
diff --git a/llvm/include/llvm/Analysis/RegionPrinter.h b/llvm/include/llvm/Analysis/RegionPrinter.h
index 814d085baf0ad..3a1d11d8fd4bc 100644
--- a/llvm/include/llvm/Analysis/RegionPrinter.h
+++ b/llvm/include/llvm/Analysis/RegionPrinter.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_REGIONPRINTER_H
 #define LLVM_ANALYSIS_REGIONPRINTER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/DOTGraphTraits.h"
 
 namespace llvm {
@@ -22,16 +23,16 @@ namespace llvm {
   class RegionInfo;
   class RegionNode;
 
-  FunctionPass *createRegionViewerPass();
-  FunctionPass *createRegionOnlyViewerPass();
-  FunctionPass *createRegionPrinterPass();
-  FunctionPass *createRegionOnlyPrinterPass();
+  LLVM_ABI FunctionPass *createRegionViewerPass();
+  LLVM_ABI FunctionPass *createRegionOnlyViewerPass();
+  LLVM_ABI FunctionPass *createRegionPrinterPass();
+  LLVM_ABI FunctionPass *createRegionOnlyPrinterPass();
 
   template <>
   struct DOTGraphTraits<RegionNode *> : public DefaultDOTGraphTraits {
     DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
 
-    std::string getNodeLabel(RegionNode *Node, RegionNode *Graph);
+    LLVM_ABI std::string getNodeLabel(RegionNode *Node, RegionNode *Graph);
   };
 
 #ifndef NDEBUG
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 339bdfeb4956a..54806dac125e8 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -20,6 +20,7 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
 #define LLVM_ANALYSIS_SCALAREVOLUTION_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
@@ -63,7 +64,7 @@ class TargetLibraryInfo;
 class Type;
 enum SCEVTypes : unsigned short;
 
-extern bool VerifySCEV;
+LLVM_ABI extern bool VerifySCEV;
 
 /// This class represents an analyzed expression in the program.  These are
 /// opaque objects that the client is not allowed to do much with directly.
@@ -140,22 +141,22 @@ class SCEV : public FoldingSetNode {
   SCEVTypes getSCEVType() const { return SCEVType; }
 
   /// Return the LLVM type of this SCEV expression.
-  Type *getType() const;
+  LLVM_ABI Type *getType() const;
 
   /// Return operands of this SCEV expression.
-  ArrayRef<const SCEV *> operands() const;
+  LLVM_ABI ArrayRef<const SCEV *> operands() const;
 
   /// Return true if the expression is a constant zero.
-  bool isZero() const;
+  LLVM_ABI bool isZero() const;
 
   /// Return true if the expression is a constant one.
-  bool isOne() const;
+  LLVM_ABI bool isOne() const;
 
   /// Return true if the expression is a constant all-ones value.
-  bool isAllOnesValue() const;
+  LLVM_ABI bool isAllOnesValue() const;
 
   /// Return true if the specified scev is negated, but not a constant.
-  bool isNonConstantNegative() const;
+  LLVM_ABI bool isNonConstantNegative() const;
 
   // Returns estimated size of the mathematical expression represented by this
   // SCEV. The rules of its calculation are following:
@@ -172,10 +173,10 @@ class SCEV : public FoldingSetNode {
 
   /// Print out the internal representation of this scalar to the specified
   /// stream.  This should really only be used for debugging purposes.
-  void print(raw_ostream &OS) const;
+  LLVM_ABI void print(raw_ostream &OS) const;
 
   /// This method is used for debugging.
-  void dump() const;
+  LLVM_ABI void dump() const;
 };
 
 // Specialize FoldingSetTrait for SCEV to avoid needing to compute
@@ -203,10 +204,10 @@ inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
 /// traversal loop, you will get one of these.  None of the standard SCEV
 /// operations are valid on this class, it is just a marker.
 struct SCEVCouldNotCompute : public SCEV {
-  SCEVCouldNotCompute();
+  LLVM_ABI SCEVCouldNotCompute();
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
-  static bool classof(const SCEV *S);
+  LLVM_ABI static bool classof(const SCEV *S);
 };
 
 /// This class represents an assumption made using SCEV expressions which can
@@ -228,7 +229,7 @@ class SCEVPredicate : public FoldingSetNode {
   SCEVPredicate &operator=(const SCEVPredicate &) = default;
 
 public:
-  SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
+  LLVM_ABI SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
 
   SCEVPredicateKind getKind() const { return Kind; }
 
@@ -274,7 +275,7 @@ struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
 
 /// This class represents an assumption that the expression LHS Pred RHS
 /// evaluates to true, and this can be checked at run-time.
-class SCEVComparePredicate final : public SCEVPredicate {
+class LLVM_ABI SCEVComparePredicate final : public SCEVPredicate {
   /// We assume that LHS Pred RHS is true.
   const ICmpInst::Predicate Pred;
   const SCEV *LHS;
@@ -314,7 +315,7 @@ class SCEVComparePredicate final : public SCEVPredicate {
 /// predicated backedge taken count of X, we only guarantee that {0,+,1} has
 /// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
 /// have more than X iterations.
-class SCEVWrapPredicate final : public SCEVPredicate {
+class LLVM_ABI SCEVWrapPredicate final : public SCEVPredicate {
 public:
   /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
   /// for FlagNUSW. The increment is considered to be signed, and a + b
@@ -409,7 +410,7 @@ class SCEVWrapPredicate final : public SCEVPredicate {
 ///
 /// NB! Unlike other SCEVPredicate sub-classes this class does not live in the
 /// ScalarEvolution::Preds folding set.  This is why the \c add function is sound.
-class SCEVUnionPredicate final : public SCEVPredicate {
+class LLVM_ABI SCEVUnionPredicate final : public SCEVPredicate {
 private:
   using PredicateMap =
       DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;
@@ -481,10 +482,10 @@ class ScalarEvolution {
     return TestFlags == maskFlags(Flags, TestFlags);
   };
 
-  ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
+  LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
                   DominatorTree &DT, LoopInfo &LI);
-  ScalarEvolution(ScalarEvolution &&Arg);
-  ~ScalarEvolution();
+  LLVM_ABI ScalarEvolution(ScalarEvolution &&Arg);
+  LLVM_ABI ~ScalarEvolution();
 
   LLVMContext &getContext() const { return F.getContext(); }
 
@@ -492,19 +493,19 @@ class ScalarEvolution {
   /// framework. This primarily includes integer types, and it can optionally
   /// include pointer types if the ScalarEvolution class has access to
   /// target-specific information.
-  bool isSCEVable(Type *Ty) const;
+  LLVM_ABI bool isSCEVable(Type *Ty) const;
 
   /// Return the size in bits of the specified type, for which isSCEVable must
   /// return true.
-  uint64_t getTypeSizeInBits(Type *Ty) const;
+  LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const;
 
   /// Return a type with the same bitwidth as the given type and which
   /// represents how SCEV will treat the given type, for which isSCEVable must
   /// return true. For pointer types, this is the pointer-sized integer type.
-  Type *getEffectiveSCEVType(Type *Ty) const;
+  LLVM_ABI Type *getEffectiveSCEVType(Type *Ty) const;
 
   // Returns a wider type among {Ty1, Ty2}.
-  Type *getWiderType(Type *Ty1, Type *Ty2) const;
+  LLVM_ABI Type *getWiderType(Type *Ty1, Type *Ty2) const;
 
   /// Return true if there exists a point in the program at which both
   /// A and B could be operands to the same instruction.
@@ -520,16 +521,16 @@ class ScalarEvolution {
   ///     loop { v2 = load @global2; }
   /// }
   /// No SCEV with operand V1, and v2 can exist in this program.
-  bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B);
+  LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B);
 
   /// Return true if the SCEV is a scAddRecExpr or it contains
   /// scAddRecExpr. The result will be cached in HasRecMap.
-  bool containsAddRecurrence(const SCEV *S);
+  LLVM_ABI bool containsAddRecurrence(const SCEV *S);
 
   /// Is operation \p BinOp between \p LHS and \p RHS provably does not have
   /// a signed/unsigned overflow (\p Signed)? If \p CtxI is specified, the
   /// no-overflow fact should be true in the context of this instruction.
-  bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
+  LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
                        const SCEV *LHS, const SCEV *RHS,
                        const Instruction *CtxI = nullptr);
 
@@ -538,43 +539,43 @@ class ScalarEvolution {
   /// Does not mutate the original instruction. Returns std::nullopt if it could
   /// not deduce more precise flags than the instruction already has, otherwise
   /// returns proven flags.
-  std::optional<SCEV::NoWrapFlags>
+  LLVM_ABI std::optional<SCEV::NoWrapFlags>
   getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO);
 
   /// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
-  void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
+  LLVM_ABI void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);
 
   /// Return true if the SCEV expression contains an undef value.
-  bool containsUndefs(const SCEV *S) const;
+  LLVM_ABI bool containsUndefs(const SCEV *S) const;
 
   /// Return true if the SCEV expression contains a Value that has been
   /// optimised out and is now a nullptr.
-  bool containsErasedValue(const SCEV *S) const;
+  LLVM_ABI bool containsErasedValue(const SCEV *S) const;
 
   /// Return a SCEV expression for the full generality of the specified
   /// expression.
-  const SCEV *getSCEV(Value *V);
+  LLVM_ABI const SCEV *getSCEV(Value *V);
 
   /// Return an existing SCEV for V if there is one, otherwise return nullptr.
-  const SCEV *getExistingSCEV(Value *V);
-
-  const SCEV *getConstant(ConstantInt *V);
-  const SCEV *getConstant(const APInt &Val);
-  const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
-  const SCEV *getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth = 0);
-  const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
-  const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
-  const SCEV *getVScale(Type *Ty);
-  const SCEV *getElementCount(Type *Ty, ElementCount EC);
-  const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
-  const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
+  LLVM_ABI const SCEV *getExistingSCEV(Value *V);
+
+  LLVM_ABI const SCEV *getConstant(ConstantInt *V);
+  LLVM_ABI const SCEV *getConstant(const APInt &Val);
+  LLVM_ABI const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+  LLVM_ABI const SCEV *getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
+  LLVM_ABI const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getVScale(Type *Ty);
+  LLVM_ABI const SCEV *getElementCount(Type *Ty, ElementCount EC);
+  LLVM_ABI const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
                                     unsigned Depth = 0);
-  const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
-  const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
+  LLVM_ABI const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
                                     unsigned Depth = 0);
-  const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
-  const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
-  const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+  LLVM_ABI const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
+  LLVM_ABI const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
+  LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0);
   const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
@@ -589,7 +590,7 @@ class ScalarEvolution {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
     return getAddExpr(Ops, Flags, Depth);
   }
-  const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+  LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0);
   const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
@@ -604,12 +605,12 @@ class ScalarEvolution {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
     return getMulExpr(Ops, Flags, Depth);
   }
-  const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
+  LLVM_ABI const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
                             SCEV::NoWrapFlags Flags);
-  const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+  LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                             const Loop *L, SCEV::NoWrapFlags Flags);
   const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
                             const Loop *L, SCEV::NoWrapFlags Flags) {
@@ -621,7 +622,7 @@ class ScalarEvolution {
   /// Predicates. If successful return these <AddRecExpr, Predicates>;
   /// The function is intended to be called from PSCEV (the caller will decide
   /// whether to actually add the predicates and carry out the rewrites).
-  std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+  LLVM_ABI std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
   createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
 
   /// Returns an expression for a GEP
@@ -629,25 +630,25 @@ class ScalarEvolution {
   /// \p GEP The GEP. The indices contained in the GEP itself are ignored,
   /// instead we use IndexExprs.
   /// \p IndexExprs The expressions for the indices.
-  const SCEV *getGEPExpr(GEPOperator *GEP,
+  LLVM_ABI const SCEV *getGEPExpr(GEPOperator *GEP,
                          const SmallVectorImpl<const SCEV *> &IndexExprs);
-  const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
-  const SCEV *getMinMaxExpr(SCEVTypes Kind,
+  LLVM_ABI const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
+  LLVM_ABI const SCEV *getMinMaxExpr(SCEVTypes Kind,
                             SmallVectorImpl<const SCEV *> &Operands);
-  const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
+  LLVM_ABI const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
                                       SmallVectorImpl<const SCEV *> &Operands);
-  const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
-  const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
-  const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
-  const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
-  const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+  LLVM_ABI const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+  LLVM_ABI const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
+  LLVM_ABI const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
                           bool Sequential = false);
-  const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
+  LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
                           bool Sequential = false);
-  const SCEV *getUnknown(Value *V);
-  const SCEV *getCouldNotCompute();
+  LLVM_ABI const SCEV *getUnknown(Value *V);
+  LLVM_ABI const SCEV *getCouldNotCompute();
 
   /// Return a SCEV for the constant 0 of a specific type.
   const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
@@ -667,23 +668,23 @@ class ScalarEvolution {
   }
 
   /// Return an expression for a TypeSize.
-  const SCEV *getSizeOfExpr(Type *IntTy, TypeSize Size);
+  LLVM_ABI const SCEV *getSizeOfExpr(Type *IntTy, TypeSize Size);
 
   /// Return an expression for the alloc size of AllocTy that is type IntTy
-  const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
+  LLVM_ABI const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
 
   /// Return an expression for the store size of StoreTy that is type IntTy
-  const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
+  LLVM_ABI const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
 
   /// Return an expression for offsetof on the given field with type IntTy
-  const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+  LLVM_ABI const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
 
   /// Return the SCEV object corresponding to -V.
-  const SCEV *getNegativeSCEV(const SCEV *V,
+  LLVM_ABI const SCEV *getNegativeSCEV(const SCEV *V,
                               SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
 
   /// Return the SCEV object corresponding to ~V.
-  const SCEV *getNotSCEV(const SCEV *V);
+  LLVM_ABI const SCEV *getNotSCEV(const SCEV *V);
 
   /// Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
   ///
@@ -692,7 +693,7 @@ class ScalarEvolution {
   /// To compute the difference between two unrelated pointers, you can
   /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
   /// types that support it.
-  const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                            SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                            unsigned Depth = 0);
 
@@ -704,59 +705,59 @@ class ScalarEvolution {
   /// umin(N, 1) + floor((N - umin(N, 1)) / D)
   ///
   /// A denominator of zero or poison is handled the same way as getUDivExpr().
-  const SCEV *getUDivCeilSCEV(const SCEV *N, const SCEV *D);
+  LLVM_ABI const SCEV *getUDivCeilSCEV(const SCEV *N, const SCEV *D);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is zero extended.
-  const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
+  LLVM_ABI const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                       unsigned Depth = 0);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is sign extended.
-  const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
+  LLVM_ABI const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                       unsigned Depth = 0);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is zero extended.  The
   /// conversion must not be narrowing.
-  const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
+  LLVM_ABI const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is sign extended.  The
   /// conversion must not be narrowing.
-  const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
+  LLVM_ABI const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type. If the type must be extended, it is extended with
   /// unspecified bits. The conversion must not be narrowing.
-  const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
+  LLVM_ABI const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  The conversion must not be widening.
-  const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
+  LLVM_ABI const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umax operation with them.
-  const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umin operation with them.
-  const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
                                          bool Sequential = false);
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umin operation with them. N-ary function.
-  const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
                                          bool Sequential = false);
 
   /// Transitively follow the chain of pointer-type operands until reaching a
   /// SCEV that does not have a single pointer operand. This returns a
   /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
   /// cases do exist.
-  const SCEV *getPointerBase(const SCEV *V);
+  LLVM_ABI const SCEV *getPointerBase(const SCEV *V);
 
   /// Compute an expression equivalent to S - getPointerBase(S).
-  const SCEV *removePointerBase(const SCEV *S);
+  LLVM_ABI const SCEV *removePointerBase(const SCEV *S);
 
   /// Return a SCEV expression for the specified value at the specified scope
   /// in the program.  The L value specifies a loop nest to evaluate the
@@ -768,30 +769,30 @@ class ScalarEvolution {
   ///
   /// In the case that a relevant loop exit value cannot be computed, the
   /// original value V is returned.
-  const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
+  LLVM_ABI const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
 
   /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
-  const SCEV *getSCEVAtScope(Value *V, const Loop *L);
+  LLVM_ABI const SCEV *getSCEVAtScope(Value *V, const Loop *L);
 
   /// Test whether entry to the loop is protected by a conditional between LHS
   /// and RHS.  This is used to help avoid max expressions in loop trip
   /// counts, and to eliminate casts.
-  bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred,
+  LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred,
                                 const SCEV *LHS, const SCEV *RHS);
 
   /// Test whether entry to the basic block is protected by a conditional
   /// between LHS and RHS.
-  bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred,
+  LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred,
                                       const SCEV *LHS, const SCEV *RHS);
 
   /// Test whether the backedge of the loop is protected by a conditional
   /// between LHS and RHS.  This is used to eliminate casts.
-  bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred,
+  LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred,
                                    const SCEV *LHS, const SCEV *RHS);
 
   /// A version of getTripCountFromExitCount below which always picks an
   /// evaluation type which can not result in overflow.
-  const SCEV *getTripCountFromExitCount(const SCEV *ExitCount);
+  LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount);
 
   /// Convert from an "exit count" (i.e. "backedge taken count") to a "trip
   /// count".  A "trip count" is the number of times the header of the loop
@@ -800,14 +801,14 @@ class ScalarEvolution {
   /// expression can overflow if ExitCount = UINT_MAX.  If EvalTy is not wide
   /// enough to hold the result without overflow, result unsigned wraps with
   /// 2s-complement semantics.  ex: EC = 255 (i8), TC = 0 (i8)
-  const SCEV *getTripCountFromExitCount(const SCEV *ExitCount, Type *EvalTy,
+  LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount, Type *EvalTy,
                                         const Loop *L);
 
   /// Returns the exact trip count of the loop if we can compute it, and
   /// the result is a small constant.  '0' is used to represent an unknown
   /// or non-constant trip count.  Note that a trip count is simply one more
   /// than the backedge taken count for the loop.
-  unsigned getSmallConstantTripCount(const Loop *L);
+  LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L);
 
   /// Return the exact trip count for this loop if we exit through ExitingBlock.
   /// '0' is used to represent an unknown or non-constant trip count.  Note
@@ -818,14 +819,14 @@ class ScalarEvolution {
   /// before taking the branch. For loops with multiple exits, it may not be
   /// the number times that the loop header executes if the loop exits
   /// prematurely via another branch.
-  unsigned getSmallConstantTripCount(const Loop *L,
+  LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L,
                                      const BasicBlock *ExitingBlock);
 
   /// Returns the upper bound of the loop trip count as a normal unsigned
   /// value.
   /// Returns 0 if the trip count is unknown, not constant or requires
   /// SCEV predicates and \p Predicates is nullptr.
-  unsigned getSmallConstantMaxTripCount(
+  LLVM_ABI unsigned getSmallConstantMaxTripCount(
       const Loop *L,
       SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
 
@@ -835,13 +836,13 @@ class ScalarEvolution {
   /// unknown or not guaranteed to be the multiple of a constant., Will also
   /// return 1 if the trip count is very large (>= 2^32).
   /// Note that the argument is an exit count for loop L, NOT a trip count.
-  unsigned getSmallConstantTripMultiple(const Loop *L,
+  LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L,
                                         const SCEV *ExitCount);
 
   /// Returns the largest constant divisor of the trip count of the
   /// loop.  Will return 1 if no trip count could be computed, or if a
   /// divisor could not be found.
-  unsigned getSmallConstantTripMultiple(const Loop *L);
+  LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L);
 
   /// Returns the largest constant divisor of the trip count of this loop as a
   /// normal unsigned value, if possible. This means that the actual trip
@@ -849,7 +850,7 @@ class ScalarEvolution {
   /// count could very well be zero as well!). As explained in the comments
   /// for getSmallConstantTripCount, this assumes that control exits the loop
   /// via ExitingBlock.
-  unsigned getSmallConstantTripMultiple(const Loop *L,
+  LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L,
                                         const BasicBlock *ExitingBlock);
 
   /// The terms "backedge taken count" and "exit count" are used
@@ -871,12 +872,12 @@ class ScalarEvolution {
   /// getBackedgeTakenCount.  The loop is guaranteed to exit (via *some* exit)
   /// before the backedge is executed (ExitCount + 1) times.  Note that there
   /// is no guarantee about *which* exit is taken on the exiting iteration.
-  const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
+  LLVM_ABI const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
                            ExitCountKind Kind = Exact);
 
   /// Same as above except this uses the predicated backedge taken info and
   /// may require predicates.
-  const SCEV *
+  LLVM_ABI const SCEV *
   getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock,
                          SmallVectorImpl<const SCEVPredicate *> *Predicates,
                          ExitCountKind Kind = Exact);
@@ -891,13 +892,13 @@ class ScalarEvolution {
   /// Note that it is not valid to call this method on a loop without a
   /// loop-invariant backedge-taken count (see
   /// hasLoopInvariantBackedgeTakenCount).
-  const SCEV *getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);
+  LLVM_ABI const SCEV *getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);
 
   /// Similar to getBackedgeTakenCount, except it will add a set of
   /// SCEV predicates to Predicates that are required to be true in order for
   /// the answer to be correct. Predicates can be checked with run-time
   /// checks and can be used to perform loop versioning.
-  const SCEV *getPredicatedBackedgeTakenCount(
+  LLVM_ABI const SCEV *getPredicatedBackedgeTakenCount(
       const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
 
   /// When successful, this returns a SCEVConstant that is greater than or equal
@@ -912,7 +913,7 @@ class ScalarEvolution {
   /// SCEV predicates to Predicates that are required to be true in order for
   /// the answer to be correct. Predicates can be checked with run-time
   /// checks and can be used to perform loop versioning.
-  const SCEV *getPredicatedConstantMaxBackedgeTakenCount(
+  LLVM_ABI const SCEV *getPredicatedConstantMaxBackedgeTakenCount(
       const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
 
   /// When successful, this returns a SCEV that is greater than or equal
@@ -927,70 +928,70 @@ class ScalarEvolution {
   /// SCEV predicates to Predicates that are required to be true in order for
   /// the answer to be correct. Predicates can be checked with run-time
   /// checks and can be used to perform loop versioning.
-  const SCEV *getPredicatedSymbolicMaxBackedgeTakenCount(
+  LLVM_ABI const SCEV *getPredicatedSymbolicMaxBackedgeTakenCount(
       const Loop *L, SmallVectorImpl<const SCEVPredicate *> &Predicates);
 
   /// Return true if the backedge taken count is either the value returned by
   /// getConstantMaxBackedgeTakenCount or zero.
-  bool isBackedgeTakenCountMaxOrZero(const Loop *L);
+  LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L);
 
   /// Return true if the specified loop has an analyzable loop-invariant
   /// backedge-taken count.
-  bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
+  LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
 
   // This method should be called by the client when it made any change that
   // would invalidate SCEV's answers, and the client wants to remove all loop
   // information held internally by ScalarEvolution. This is intended to be used
   // when the alternative to forget a loop is too expensive (i.e. large loop
   // bodies).
-  void forgetAllLoops();
+  LLVM_ABI void forgetAllLoops();
 
   /// This method should be called by the client when it has changed a loop in
   /// a way that may effect ScalarEvolution's ability to compute a trip count,
   /// or if the loop is deleted.  This call is potentially expensive for large
   /// loop bodies.
-  void forgetLoop(const Loop *L);
+  LLVM_ABI void forgetLoop(const Loop *L);
 
   // This method invokes forgetLoop for the outermost loop of the given loop
   // \p L, making ScalarEvolution forget about all this subtree. This needs to
   // be done whenever we make a transform that may affect the parameters of the
   // outer loop, such as exit counts for branches.
-  void forgetTopmostLoop(const Loop *L);
+  LLVM_ABI void forgetTopmostLoop(const Loop *L);
 
   /// This method should be called by the client when it has changed a value
   /// in a way that may effect its value, or which may disconnect it from a
   /// def-use chain linking it to a loop.
-  void forgetValue(Value *V);
+  LLVM_ABI void forgetValue(Value *V);
 
   /// Forget LCSSA phi node V of loop L to which a new predecessor was added,
   /// such that it may no longer be trivial.
-  void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V);
+  LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V);
 
   /// Called when the client has changed the disposition of values in
   /// this loop.
   ///
   /// We don't have a way to invalidate per-loop dispositions. Clear and
   /// recompute is simpler.
-  void forgetLoopDispositions();
+  LLVM_ABI void forgetLoopDispositions();
 
   /// Called when the client has changed the disposition of values in
   /// a loop or block.
   ///
   /// We don't have a way to invalidate per-loop/per-block dispositions. Clear
   /// and recompute is simpler.
-  void forgetBlockAndLoopDispositions(Value *V = nullptr);
+  LLVM_ABI void forgetBlockAndLoopDispositions(Value *V = nullptr);
 
   /// Determine the minimum number of zero bits that S is guaranteed to end in
   /// (at every loop iteration).  It is, at the same time, the minimum number
   /// of times S is divisible by 2.  For example, given {4,+,8} it returns 2.
   /// If S is guaranteed to be 0, it returns the bitwidth of S.
-  uint32_t getMinTrailingZeros(const SCEV *S);
+  LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S);
 
   /// Returns the max constant multiple of S.
-  APInt getConstantMultiple(const SCEV *S);
+  LLVM_ABI APInt getConstantMultiple(const SCEV *S);
 
   // Returns the max constant multiple of S. If S is exactly 0, return 1.
-  APInt getNonZeroConstantMultiple(const SCEV *S);
+  LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S);
 
   /// Determine the unsigned range for a particular SCEV.
   /// NOTE: This returns a copy of the reference returned by getRangeRef.
@@ -1025,30 +1026,30 @@ class ScalarEvolution {
   }
 
   /// Test if the given expression is known to be negative.
-  bool isKnownNegative(const SCEV *S);
+  LLVM_ABI bool isKnownNegative(const SCEV *S);
 
   /// Test if the given expression is known to be positive.
-  bool isKnownPositive(const SCEV *S);
+  LLVM_ABI bool isKnownPositive(const SCEV *S);
 
   /// Test if the given expression is known to be non-negative.
-  bool isKnownNonNegative(const SCEV *S);
+  LLVM_ABI bool isKnownNonNegative(const SCEV *S);
 
   /// Test if the given expression is known to be non-positive.
-  bool isKnownNonPositive(const SCEV *S);
+  LLVM_ABI bool isKnownNonPositive(const SCEV *S);
 
   /// Test if the given expression is known to be non-zero.
-  bool isKnownNonZero(const SCEV *S);
+  LLVM_ABI bool isKnownNonZero(const SCEV *S);
 
   /// Test if the given expression is known to be a power of 2.  OrNegative
   /// allows matching negative power of 2s, and OrZero allows matching 0.
-  bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero = false,
+  LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero = false,
                               bool OrNegative = false);
 
   /// Check that \p S is a multiple of \p M. When \p S is an AddRecExpr, \p S is
   /// a multiple of \p M if \p S starts with a multiple of \p M and at every
   /// iteration step \p S only adds multiples of \p M. \p Assumptions records
   /// the runtime predicates under which \p S is a multiple of \p M.
-  bool isKnownMultipleOf(const SCEV *S, uint64_t M,
+  LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M,
                          SmallVectorImpl<const SCEVPredicate *> &Assumptions);
 
   /// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
@@ -1067,7 +1068,7 @@ class ScalarEvolution {
   /// 0 (initial value) for the first element and to {1, +, 1}<L1> (post
   /// increment value) for the second one. In both cases AddRec expression
   /// related to L2 remains the same.
-  std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
+  LLVM_ABI std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
                                                                 const SCEV *S);
 
   /// We'd like to check the predicate on every iteration of the most dominated
@@ -1088,33 +1089,33 @@ class ScalarEvolution {
   ///    so we can assert on that.
   /// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
   ///                   isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
-  bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
 
   /// Test if the given expression is known to satisfy the condition described
   /// by Pred, LHS, and RHS.
-  bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
 
   /// Check whether the condition described by Pred, LHS, and RHS is true or
   /// false. If we know it, return the evaluation of this condition. If neither
   /// is proved, return std::nullopt.
-  std::optional<bool> evaluatePredicate(CmpPredicate Pred, const SCEV *LHS,
+  LLVM_ABI std::optional<bool> evaluatePredicate(CmpPredicate Pred, const SCEV *LHS,
                                         const SCEV *RHS);
 
   /// Test if the given expression is known to satisfy the condition described
   /// by Pred, LHS, and RHS in the given Context.
-  bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
                           const Instruction *CtxI);
 
   /// Check whether the condition described by Pred, LHS, and RHS is true or
   /// false in the given \p Context. If we know it, return the evaluation of
   /// this condition. If neither is proved, return std::nullopt.
-  std::optional<bool> evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS,
+  LLVM_ABI std::optional<bool> evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS,
                                           const SCEV *RHS,
                                           const Instruction *CtxI);
 
   /// Test if the condition described by Pred, LHS, RHS is known to be true on
   /// every iteration of the loop of the recurrency LHS.
-  bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS,
+  LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS,
                                const SCEV *RHS);
 
   /// Information about the number of loop iterations for which a loop exit's
@@ -1138,13 +1139,13 @@ class ScalarEvolution {
     /// Construct either an exact exit limit from a constant, or an unknown
     /// one from a SCEVCouldNotCompute.  No other types of SCEVs are allowed
     /// as arguments and asserts enforce that internally.
-    /*implicit*/ ExitLimit(const SCEV *E);
+    /*implicit*/ LLVM_ABI ExitLimit(const SCEV *E);
 
-    ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
+    LLVM_ABI ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
               const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
               ArrayRef<ArrayRef<const SCEVPredicate *>> PredLists = {});
 
-    ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
+    LLVM_ABI ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
               const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
               ArrayRef<const SCEVPredicate *> PredList);
 
@@ -1171,7 +1172,7 @@ class ScalarEvolution {
   ///
   /// If \p AllowPredicates is set, this call will try to use a minimal set of
   /// SCEV predicates in order to return an exact answer.
-  ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
+  LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsOnlyExit,
                                      bool AllowPredicates = false);
 
@@ -1190,7 +1191,7 @@ class ScalarEvolution {
   /// Some(MonotonicallyIncreasing) and Some(MonotonicallyDecreasing)
   /// respectively. If we could not prove either of these facts, returns
   /// std::nullopt.
-  std::optional<MonotonicPredicateType>
+  LLVM_ABI std::optional<MonotonicPredicateType>
   getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                             ICmpInst::Predicate Pred);
 
@@ -1205,7 +1206,7 @@ class ScalarEvolution {
   /// If the result of the predicate LHS `Pred` RHS is loop invariant with
   /// respect to L, return a LoopInvariantPredicate with LHS and RHS being
   /// invariants, available at L's entry. Otherwise, return std::nullopt.
-  std::optional<LoopInvariantPredicate>
+  LLVM_ABI std::optional<LoopInvariantPredicate>
   getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
                             const Loop *L, const Instruction *CtxI = nullptr);
 
@@ -1214,14 +1215,14 @@ class ScalarEvolution {
   /// return a LoopInvariantPredicate with LHS and RHS being invariants,
   /// available at L's entry. Otherwise, return std::nullopt. The predicate
   /// should be the loop's exit condition.
-  std::optional<LoopInvariantPredicate>
+  LLVM_ABI std::optional<LoopInvariantPredicate>
   getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS, const Loop *L,
                                                 const Instruction *CtxI,
                                                 const SCEV *MaxIter);
 
-  std::optional<LoopInvariantPredicate>
+  LLVM_ABI std::optional<LoopInvariantPredicate>
   getLoopInvariantExitCondDuringFirstIterationsImpl(
       CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
       const Instruction *CtxI, const SCEV *MaxIter);
@@ -1230,69 +1231,69 @@ class ScalarEvolution {
   /// iff any changes were made. If the operands are provably equal or
   /// unequal, LHS and RHS are set to the same value and Pred is set to either
   /// ICMP_EQ or ICMP_NE.
-  bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
+  LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
                             const SCEV *&RHS, unsigned Depth = 0);
 
   /// Return the "disposition" of the given SCEV with respect to the given
   /// loop.
-  LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+  LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
 
   /// Return true if the value of the given SCEV is unchanging in the
   /// specified loop.
-  bool isLoopInvariant(const SCEV *S, const Loop *L);
+  LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L);
 
   /// Determine if the SCEV can be evaluated at loop's entry. It is true if it
   /// doesn't depend on a SCEVUnknown of an instruction which is dominated by
   /// the header of loop L.
-  bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);
+  LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);
 
   /// Return true if the given SCEV changes value in a known way in the
   /// specified loop.  This property being true implies that the value is
   /// variant in the loop AND that we can emit an expression to compute the
   /// value of the expression at any particular loop iteration.
-  bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+  LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
 
   /// Return the "disposition" of the given SCEV with respect to the given
   /// block.
-  BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+  LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
 
   /// Return true if elements that makes up the given SCEV dominate the
   /// specified basic block.
-  bool dominates(const SCEV *S, const BasicBlock *BB);
+  LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB);
 
   /// Return true if elements that makes up the given SCEV properly dominate
   /// the specified basic block.
-  bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+  LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB);
 
   /// Test whether the given SCEV has Op as a direct or indirect operand.
-  bool hasOperand(const SCEV *S, const SCEV *Op) const;
+  LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const;
 
   /// Return the size of an element read or written by Inst.
-  const SCEV *getElementSize(Instruction *Inst);
+  LLVM_ABI const SCEV *getElementSize(Instruction *Inst);
 
-  void print(raw_ostream &OS) const;
-  void verify() const;
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI void print(raw_ostream &OS) const;
+  LLVM_ABI void verify() const;
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
   /// Return the DataLayout associated with the module this SCEV instance is
   /// operating on.
   const DataLayout &getDataLayout() const { return DL; }
 
-  const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
-  const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
+  LLVM_ABI const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS);
 
-  const SCEVPredicate *
+  LLVM_ABI const SCEVPredicate *
   getWrapPredicate(const SCEVAddRecExpr *AR,
                    SCEVWrapPredicate::IncrementWrapFlags AddedFlags);
 
   /// Re-writes the SCEV according to the Predicates in \p A.
-  const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
+  LLVM_ABI const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                     const SCEVPredicate &A);
   /// Tries to convert the \p S expression to an AddRec expression,
   /// adding additional predicates to \p Preds as required.
-  const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
+  LLVM_ABI const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
       const SCEV *S, const Loop *L,
       SmallVectorImpl<const SCEVPredicate *> &Preds);
 
@@ -1303,13 +1304,13 @@ class ScalarEvolution {
   /// frugal here since we just bail out of actually constructing and
   /// canonicalizing an expression in the cases where the result isn't going
   /// to be a constant.
-  std::optional<APInt> computeConstantDifference(const SCEV *LHS,
+  LLVM_ABI std::optional<APInt> computeConstantDifference(const SCEV *LHS,
                                                  const SCEV *RHS);
 
   /// Update no-wrap flags of an AddRec. This may drop the cached info about
   /// this AddRec (such as range info) in case if new flags may potentially
   /// sharpen it.
-  void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
+  LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
 
   class LoopGuards {
     DenseMap<const SCEV *, const SCEV *> RewriteMap;
@@ -1341,15 +1342,15 @@ class ScalarEvolution {
   public:
     /// Collect rewrite map for loop guards for loop \p L, together with flags
     /// indicating if NUW and NSW can be preserved during rewriting.
-    static LoopGuards collect(const Loop *L, ScalarEvolution &SE);
+    LLVM_ABI static LoopGuards collect(const Loop *L, ScalarEvolution &SE);
 
     /// Try to apply the collected loop guards to \p Expr.
-    const SCEV *rewrite(const SCEV *Expr) const;
+    LLVM_ABI const SCEV *rewrite(const SCEV *Expr) const;
   };
 
   /// Try to apply information from loop guards for \p L to \p Expr.
-  const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
-  const SCEV *applyLoopGuards(const SCEV *Expr, const LoopGuards &Guards);
+  LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
+  LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr, const LoopGuards &Guards);
 
   /// Return true if the loop has no abnormal exits. That is, if the loop
   /// is not infinite, it must exit through an explicit edge in the CFG.
@@ -1361,18 +1362,18 @@ class ScalarEvolution {
 
   /// Return true if this loop is finite by assumption.  That is,
   /// to be infinite, it must also be undefined.
-  bool loopIsFiniteByAssumption(const Loop *L);
+  LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L);
 
   /// Return the set of Values that, if poison, will definitively result in S
   /// being poison as well. The returned set may be incomplete, i.e. there can
   /// be additional Values that also result in S being poison.
-  void getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
+  LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
                                  const SCEV *S);
 
   /// Check whether it is poison-safe to represent the expression S using the
   /// instruction I. If such a replacement is performed, the poison flags of
   /// instructions in DropPoisonGeneratingInsts must be dropped.
-  bool canReuseInstruction(
+  LLVM_ABI bool canReuseInstruction(
       const SCEV *S, Instruction *I,
       SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts);
 
@@ -1403,7 +1404,7 @@ class ScalarEvolution {
 private:
   /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
   /// Value is deleted.
-  class SCEVCallbackVH final : public CallbackVH {
+  class LLVM_ABI SCEVCallbackVH final : public CallbackVH {
     ScalarEvolution *SE;
 
     void deleted() override;
@@ -1550,7 +1551,7 @@ class ScalarEvolution {
     bool isComplete() const { return IsComplete; }
     const SCEV *getConstantMax() const { return ConstantMax; }
 
-    const ExitNotTakenInfo *getExitNotTaken(
+    LLVM_ABI const ExitNotTakenInfo *getExitNotTaken(
         const BasicBlock *ExitingBlock,
         SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
 
@@ -1562,7 +1563,7 @@ class ScalarEvolution {
     using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
 
     /// Initialize BackedgeTakenInfo from a list of exact exit counts.
-    BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
+    LLVM_ABI BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
                       const SCEV *ConstantMax, bool MaxOrZero);
 
     /// Test whether this BackedgeTakenInfo contains any computed information,
@@ -1593,7 +1594,7 @@ class ScalarEvolution {
     /// If we allowed SCEV predicates to be generated when populating this
     /// vector, this information can contain them and therefore a
     /// SCEVPredicate argument should be added to getExact.
-    const SCEV *getExact(
+    LLVM_ABI const SCEV *getExact(
         const Loop *L, ScalarEvolution *SE,
         SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
 
@@ -1612,7 +1613,7 @@ class ScalarEvolution {
     }
 
     /// Get the constant max backedge taken count for the loop.
-    const SCEV *getConstantMax(
+    LLVM_ABI const SCEV *getConstantMax(
         ScalarEvolution *SE,
         SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr) const;
 
@@ -1627,7 +1628,7 @@ class ScalarEvolution {
     }
 
     /// Get the symbolic max backedge taken count for the loop.
-    const SCEV *getSymbolicMax(
+    LLVM_ABI const SCEV *getSymbolicMax(
         const Loop *L, ScalarEvolution *SE,
         SmallVectorImpl<const SCEVPredicate *> *Predicates = nullptr);
 
@@ -1643,7 +1644,7 @@ class ScalarEvolution {
 
     /// Return true if the number of times this backedge is taken is either the
     /// value returned by getConstantMax or zero.
-    bool isConstantMaxOrZero(ScalarEvolution *SE) const;
+    LLVM_ABI bool isConstantMaxOrZero(ScalarEvolution *SE) const;
   };
 
   /// Cache the backedge-taken count of the loops for this function as they
@@ -1697,7 +1698,7 @@ class ScalarEvolution {
   DenseMap<const Loop *, LoopProperties> LoopPropertiesCache;
 
   /// Return a \c LoopProperties instance for \p L, creating one if necessary.
-  LoopProperties getLoopProperties(const Loop *L);
+  LLVM_ABI LoopProperties getLoopProperties(const Loop *L);
 
   bool loopHasNoSideEffects(const Loop *L) {
     return getLoopProperties(L).HasNoSideEffects;
@@ -1740,7 +1741,7 @@ class ScalarEvolution {
   /// Determine the range for a particular SCEV.
   /// NOTE: This returns a reference to an entry in a cache. It must be
   /// copied if its needed for longer.
-  const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
+  LLVM_ABI const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
                                    unsigned Depth = 0);
 
   /// Determine the range for a particular SCEV, but evaluates ranges for
@@ -1868,11 +1869,11 @@ class ScalarEvolution {
     ExitLimitCache(const Loop *L, bool ExitIfTrue, bool AllowPredicates)
         : L(L), ExitIfTrue(ExitIfTrue), AllowPredicates(AllowPredicates) {}
 
-    std::optional<ExitLimit> find(const Loop *L, Value *ExitCond,
+    LLVM_ABI std::optional<ExitLimit> find(const Loop *L, Value *ExitCond,
                                   bool ExitIfTrue, bool ControlsOnlyExit,
                                   bool AllowPredicates);
 
-    void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
+    LLVM_ABI void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
                 bool ControlsOnlyExit, bool AllowPredicates,
                 const ExitLimit &EL);
   };
@@ -2330,14 +2331,14 @@ class ScalarEvolutionAnalysis
 public:
   using Result = ScalarEvolution;
 
-  ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Verifier pass for the \c ScalarEvolutionAnalysis results.
 class ScalarEvolutionVerifierPass
     : public PassInfoMixin<ScalarEvolutionVerifierPass> {
 public:
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
   static bool isRequired() { return true; }
 };
 
@@ -2349,12 +2350,12 @@ class ScalarEvolutionPrinterPass
 public:
   explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
 
-  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 
   static bool isRequired() { return true; }
 };
 
-class ScalarEvolutionWrapperPass : public FunctionPass {
+class LLVM_ABI ScalarEvolutionWrapperPass : public FunctionPass {
   std::unique_ptr<ScalarEvolution> SE;
 
 public:
@@ -2387,55 +2388,55 @@ class ScalarEvolutionWrapperPass : public FunctionPass {
 ///   - lowers the number of expression rewrites.
 class PredicatedScalarEvolution {
 public:
-  PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
+  LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);
 
-  const SCEVPredicate &getPredicate() const;
+  LLVM_ABI const SCEVPredicate &getPredicate() const;
 
   /// Returns the SCEV expression of V, in the context of the current SCEV
   /// predicate.  The order of transformations applied on the expression of V
   /// returned by ScalarEvolution is guaranteed to be preserved, even when
   /// adding new predicates.
-  const SCEV *getSCEV(Value *V);
+  LLVM_ABI const SCEV *getSCEV(Value *V);
 
   /// Get the (predicated) backedge count for the analyzed loop.
-  const SCEV *getBackedgeTakenCount();
+  LLVM_ABI const SCEV *getBackedgeTakenCount();
 
   /// Get the (predicated) symbolic max backedge count for the analyzed loop.
-  const SCEV *getSymbolicMaxBackedgeTakenCount();
+  LLVM_ABI const SCEV *getSymbolicMaxBackedgeTakenCount();
 
   /// Returns the upper bound of the loop trip count as a normal unsigned
   /// value, or 0 if the trip count is unknown.
-  unsigned getSmallConstantMaxTripCount();
+  LLVM_ABI unsigned getSmallConstantMaxTripCount();
 
   /// Adds a new predicate.
-  void addPredicate(const SCEVPredicate &Pred);
+  LLVM_ABI void addPredicate(const SCEVPredicate &Pred);
 
   /// Attempts to produce an AddRecExpr for V by adding additional SCEV
   /// predicates. If we can't transform the expression into an AddRecExpr we
   /// return nullptr and not add additional SCEV predicates to the current
   /// context.
-  const SCEVAddRecExpr *getAsAddRec(Value *V);
+  LLVM_ABI const SCEVAddRecExpr *getAsAddRec(Value *V);
 
   /// Proves that V doesn't overflow by adding SCEV predicate.
-  void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+  LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
 
   /// Returns true if we've proved that V doesn't wrap by means of a SCEV
   /// predicate.
-  bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+  LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
 
   /// Returns the ScalarEvolution analysis used.
   ScalarEvolution *getSE() const { return &SE; }
 
   /// We need to explicitly define the copy constructor because of FlagsMap.
-  PredicatedScalarEvolution(const PredicatedScalarEvolution &);
+  LLVM_ABI PredicatedScalarEvolution(const PredicatedScalarEvolution &);
 
   /// Print the SCEV mappings done by the Predicated Scalar Evolution.
   /// The printed text is indented by \p Depth.
-  void print(raw_ostream &OS, unsigned Depth) const;
+  LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const;
 
   /// Check if \p AR1 and \p AR2 are equal, while taking into account
   /// Equal predicates in Preds.
-  bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
+  LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
                                 const SCEVAddRecExpr *AR2) const;
 
 private:
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
index 53959b6c69ca5..4b44539f4f33f 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
 
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
@@ -31,10 +32,10 @@ class SCEVAAResult : public AAResultBase {
   explicit SCEVAAResult(ScalarEvolution &SE) : SE(SE) {}
   SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
 
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI);
 
-  bool invalidate(Function &F, const PreservedAnalyses &PA,
+  LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                   FunctionAnalysisManager::Invalidator &Inv);
 
 private:
@@ -49,11 +50,11 @@ class SCEVAA : public AnalysisInfoMixin<SCEVAA> {
 public:
   typedef SCEVAAResult Result;
 
-  SCEVAAResult run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI SCEVAAResult run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the SCEVAAResult object.
-class SCEVAAWrapperPass : public FunctionPass {
+class LLVM_ABI SCEVAAWrapperPass : public FunctionPass {
   std::unique_ptr<SCEVAAResult> Result;
 
 public:
@@ -69,7 +70,7 @@ class SCEVAAWrapperPass : public FunctionPass {
 };
 
 /// Creates an instance of \c SCEVAAWrapperPass.
-FunctionPass *createSCEVAAWrapperPass();
+LLVM_ABI FunctionPass *createSCEVAAWrapperPass();
 
 }
 
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 872746b7df5ca..41795057df10b 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -105,7 +106,7 @@ class SCEVCastExpr : public SCEV {
   const SCEV *Op;
   Type *Ty;
 
-  SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
+  LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
                Type *ty);
 
 public:
@@ -140,7 +141,7 @@ class SCEVPtrToIntExpr : public SCEVCastExpr {
 /// This is the base class for unary integral cast operator classes.
 class SCEVIntegralCastExpr : public SCEVCastExpr {
 protected:
-  SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
+  LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                        const SCEV *op, Type *ty);
 
 public:
@@ -394,11 +395,11 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
 
   /// Return the value of this chain of recurrences at the specified
   /// iteration number.
-  const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
+  LLVM_ABI const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
 
   /// Return the value of this chain of recurrences at the specified iteration
   /// number. Takes an explicit list of operands to represent an AddRec.
-  static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
+  LLVM_ABI static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                          const SCEV *It, ScalarEvolution &SE);
 
   /// Return the number of iterations of this loop that produce
@@ -407,12 +408,12 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
   /// where the value is not in the condition, thus computing the
   /// exit count.  If the iteration count can't be computed, an
   /// instance of SCEVCouldNotCompute is returned.
-  const SCEV *getNumIterationsInRange(const ConstantRange &Range,
+  LLVM_ABI const SCEV *getNumIterationsInRange(const ConstantRange &Range,
                                       ScalarEvolution &SE) const;
 
   /// Return an expression representing the value of this expression
   /// one iteration of the loop ahead.
-  const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
+  LLVM_ABI const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const SCEV *S) {
@@ -574,7 +575,7 @@ class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
 /// This means that we are dealing with an entirely unknown SCEV
 /// value, and only represent it as its LLVM Value.  This is the
 /// "bottom" value for the analysis.
-class SCEVUnknown final : public SCEV, private CallbackVH {
+class LLVM_ABI SCEVUnknown final : public SCEV, private CallbackVH {
   friend class ScalarEvolution;
 
   /// The parent ScalarEvolution value. This is used to update the
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
index b34db8f5a03a3..9a455108712e3 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -35,6 +35,7 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/STLFunctionalExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
 
@@ -52,18 +53,18 @@ typedef function_ref<bool(const SCEVAddRecExpr *)> NormalizePredTy;
 /// Normalize \p S to be post-increment for all loops present in \p
 /// Loops. Returns nullptr if the result is not invertible and \p
 /// CheckInvertible is true.
-const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
+LLVM_ABI const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
                                    ScalarEvolution &SE,
                                    bool CheckInvertible = true);
 
 /// Normalize \p S for all add recurrence sub-expressions for which \p
 /// Pred returns true.
-const SCEV *normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,
+LLVM_ABI const SCEV *normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,
                                      ScalarEvolution &SE);
 
 /// Denormalize \p S to be post-increment for all loops present in \p
 /// Loops.
-const SCEV *denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
+LLVM_ABI const SCEV *denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
                                      ScalarEvolution &SE);
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/ScopedNoAliasAA.h b/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
index 96afe3ce6ecdf..8df4b6d76a488 100644
--- a/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
+++ b/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
 #define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
 
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
@@ -36,14 +37,14 @@ class ScopedNoAliasAAResult : public AAResultBase {
     return false;
   }
 
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI);
-  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                            AAQueryInfo &AAQI);
 
-  void collectScopedDomains(const MDNode *NoAlias,
+  LLVM_ABI void collectScopedDomains(const MDNode *NoAlias,
                             SmallPtrSetImpl<const MDNode *> &Domains) const;
 
 private:
@@ -59,11 +60,11 @@ class ScopedNoAliasAA : public AnalysisInfoMixin<ScopedNoAliasAA> {
 public:
   using Result = ScopedNoAliasAAResult;
 
-  ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the ScopedNoAliasAAResult object.
-class ScopedNoAliasAAWrapperPass : public ImmutablePass {
+class LLVM_ABI ScopedNoAliasAAWrapperPass : public ImmutablePass {
   std::unique_ptr<ScopedNoAliasAAResult> Result;
 
 public:
@@ -84,7 +85,7 @@ class ScopedNoAliasAAWrapperPass : public ImmutablePass {
 // createScopedNoAliasAAWrapperPass - This pass implements metadata-based
 // scoped noalias analysis.
 //
-ImmutablePass *createScopedNoAliasAAWrapperPass();
+LLVM_ABI ImmutablePass *createScopedNoAliasAAWrapperPass();
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/SimplifyQuery.h b/llvm/include/llvm/Analysis/SimplifyQuery.h
index e8f43c8c2e91f..62149d3a579d6 100644
--- a/llvm/include/llvm/Analysis/SimplifyQuery.h
+++ b/llvm/include/llvm/Analysis/SimplifyQuery.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_SIMPLIFYQUERY_H
 #define LLVM_ANALYSIS_SIMPLIFYQUERY_H
 
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/Support/Compiler.h"
 
@@ -117,7 +118,7 @@ struct SimplifyQuery {
 
   /// If CanUseUndef is true, returns whether \p V is undef.
   /// Otherwise always return false.
-  bool isUndefValue(Value *V) const;
+  LLVM_ABI bool isUndefValue(Value *V) const;
 
   SimplifyQuery getWithoutDomCondCache() const {
     SimplifyQuery Copy(*this);
diff --git a/llvm/include/llvm/Analysis/StaticDataProfileInfo.h b/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
index 9e2e5fbfc6761..4c4e62be469b9 100644
--- a/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
+++ b/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
@@ -1,6 +1,7 @@
 #ifndef LLVM_ANALYSIS_STATICDATAPROFILEINFO_H
 #define LLVM_ANALYSIS_STATICDATAPROFILEINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/ProfileSummaryInfo.h"
@@ -22,7 +23,7 @@ class StaticDataProfileInfo {
   DenseSet<const Constant *> ConstantWithoutCounts;
 
   /// If \p C has a count, return it. Otherwise, return std::nullopt.
-  std::optional<uint64_t> getConstantProfileCount(const Constant *C) const;
+  LLVM_ABI std::optional<uint64_t> getConstantProfileCount(const Constant *C) const;
 
 public:
   StaticDataProfileInfo() = default;
@@ -31,7 +32,7 @@ class StaticDataProfileInfo {
   /// C in a saturating way, and clamp the count to \p getInstrMaxCountValue if
   /// the result exceeds it. Otherwise, mark the constant as having no profile
   /// count.
-  void addConstantProfileCount(const Constant *C,
+  LLVM_ABI void addConstantProfileCount(const Constant *C,
                                std::optional<uint64_t> Count);
 
   /// Return a section prefix for the constant \p C based on its profile count.
@@ -42,13 +43,13 @@ class StaticDataProfileInfo {
   ///   - If it has a cold count, return "unlikely".
   ///   - Otherwise (e.g. it's used by lukewarm functions), return an empty
   ///     string.
-  StringRef getConstantSectionPrefix(const Constant *C,
+  LLVM_ABI StringRef getConstantSectionPrefix(const Constant *C,
                                      const ProfileSummaryInfo *PSI) const;
 };
 
 /// This wraps the StaticDataProfileInfo object as an immutable pass, for a
 /// backend pass to operate on.
-class StaticDataProfileInfoWrapperPass : public ImmutablePass {
+class LLVM_ABI StaticDataProfileInfoWrapperPass : public ImmutablePass {
 public:
   static char ID;
   StaticDataProfileInfoWrapperPass();
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index 4c78211b5c935..244b667aefba5 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -18,6 +18,7 @@
 #ifndef LLVM_ANALYSIS_TARGETFOLDER_H
 #define LLVM_ANALYSIS_TARGETFOLDER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/IR/Constants.h"
@@ -31,7 +32,7 @@ class DataLayout;
 class Type;
 
 /// TargetFolder - Create constants with target dependent folding.
-class TargetFolder final : public IRBuilderFolder {
+class LLVM_ABI TargetFolder final : public IRBuilderFolder {
   const DataLayout &DL;
 
   /// Fold - Fold the constant using target specific information.
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index 4c23eaad2ae28..5a26910a48674 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
 #define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/InstrTypes.h"
@@ -65,7 +66,7 @@ class VecDesc {
 
   /// Returns a vector function ABI variant string on the form:
   ///    _ZGV<isa><mask><vlen><vparams>_<scalarname>(<vectorname>)
-  std::string getVectorFunctionABIVariantString() const;
+  LLVM_ABI std::string getVectorFunctionABIVariantString() const;
 };
 
   enum LibFunc : unsigned {
@@ -87,7 +88,7 @@ class TargetLibraryInfoImpl {
 
   unsigned char AvailableArray[(NumLibFuncs+3)/4];
   DenseMap<unsigned, std::string> CustomNames;
-  static StringLiteral const StandardNames[NumLibFuncs];
+  LLVM_ABI static StringLiteral const StandardNames[NumLibFuncs];
   bool ShouldExtI32Param, ShouldExtI32Return, ShouldSignExtI32Param, ShouldSignExtI32Return;
   unsigned SizeOfInt;
 
@@ -112,7 +113,7 @@ class TargetLibraryInfoImpl {
 
   /// Return true if the function type FTy is valid for the library function
   /// F, regardless of whether the function is available.
-  bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
+  LLVM_ABI bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
                               const Module &M) const;
 
 public:
@@ -135,20 +136,20 @@ class TargetLibraryInfoImpl {
     AMDLIBM      // AMD Math Vector library.
   };
 
-  TargetLibraryInfoImpl();
-  explicit TargetLibraryInfoImpl(const Triple &T);
+  LLVM_ABI TargetLibraryInfoImpl();
+  LLVM_ABI explicit TargetLibraryInfoImpl(const Triple &T);
 
   // Provide value semantics.
-  TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
-  TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
-  TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
-  TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
+  LLVM_ABI TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
+  LLVM_ABI TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
+  LLVM_ABI TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
+  LLVM_ABI TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
 
   /// Searches for a particular function name.
   ///
   /// If it is one of the known library functions, return true and set F to the
   /// corresponding value.
-  bool getLibFunc(StringRef funcName, LibFunc &F) const;
+  LLVM_ABI bool getLibFunc(StringRef funcName, LibFunc &F) const;
 
   /// Searches for a particular function name, also checking that its type is
   /// valid for the library function matching that name.
@@ -157,11 +158,11 @@ class TargetLibraryInfoImpl {
   /// corresponding value.
   ///
   /// FDecl is assumed to have a parent Module when using this function.
-  bool getLibFunc(const Function &FDecl, LibFunc &F) const;
+  LLVM_ABI bool getLibFunc(const Function &FDecl, LibFunc &F) const;
 
   /// Searches for a function name using an Instruction \p Opcode.
   /// Currently, only the frem instruction is supported.
-  bool getLibFunc(unsigned int Opcode, Type *Ty, LibFunc &F) const;
+  LLVM_ABI bool getLibFunc(unsigned int Opcode, Type *Ty, LibFunc &F) const;
 
   /// Forces a function to be marked as unavailable.
   void setUnavailable(LibFunc F) {
@@ -188,15 +189,15 @@ class TargetLibraryInfoImpl {
   /// Disables all builtins.
   ///
   /// This can be used for options like -fno-builtin.
-  void disableAllFunctions();
+  LLVM_ABI void disableAllFunctions();
 
   /// Add a set of scalar -> vector mappings, queryable via
   /// getVectorizedFunction and getScalarizedFunction.
-  void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
+  LLVM_ABI void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
 
   /// Calls addVectorizableFunctions with a known preset of functions for the
   /// given vector library.
-  void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib,
+  LLVM_ABI void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib,
                                           const llvm::Triple &TargetTriple);
 
   /// Return true if the function F has a vector equivalent with vectorization
@@ -208,17 +209,17 @@ class TargetLibraryInfoImpl {
 
   /// Return true if the function F has a vector equivalent with any
   /// vectorization factor.
-  bool isFunctionVectorizable(StringRef F) const;
+  LLVM_ABI bool isFunctionVectorizable(StringRef F) const;
 
   /// Return the name of the equivalent of F, vectorized with factor VF. If no
   /// such mapping exists, return the empty string.
-  StringRef getVectorizedFunction(StringRef F, const ElementCount &VF,
+  LLVM_ABI StringRef getVectorizedFunction(StringRef F, const ElementCount &VF,
                                   bool Masked) const;
 
   /// Return a pointer to a VecDesc object holding all info for scalar to vector
   /// mappings in TLI for the equivalent of F, vectorized with factor VF.
   /// If no such mapping exists, return nullpointer.
-  const VecDesc *getVectorMappingInfo(StringRef F, const ElementCount &VF,
+  LLVM_ABI const VecDesc *getVectorMappingInfo(StringRef F, const ElementCount &VF,
                                       bool Masked) const;
 
   /// Set to true iff i32 parameters to library functions should have signext
@@ -249,10 +250,10 @@ class TargetLibraryInfoImpl {
 
   /// Returns the size of the wchar_t type in bytes or 0 if the size is unknown.
   /// This queries the 'wchar_size' metadata.
-  unsigned getWCharSize(const Module &M) const;
+  LLVM_ABI unsigned getWCharSize(const Module &M) const;
 
   /// Returns the size of the size_t type in bits.
-  unsigned getSizeTSize(const Module &M) const;
+  LLVM_ABI unsigned getSizeTSize(const Module &M) const;
 
   /// Get size of a C-level int or unsigned int, in bits.
   unsigned getIntSize() const {
@@ -266,13 +267,13 @@ class TargetLibraryInfoImpl {
 
   /// Returns the largest vectorization factor used in the list of
   /// vector functions.
-  void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
+  LLVM_ABI void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
                    ElementCount &Scalable) const;
 
   /// Returns true if call site / callee has cdecl-compatible calling
   /// conventions.
-  static bool isCallingConvCCompatible(CallBase *CI);
-  static bool isCallingConvCCompatible(Function *Callee);
+  LLVM_ABI static bool isCallingConvCCompatible(CallBase *CI);
+  LLVM_ABI static bool isCallingConvCCompatible(Function *Callee);
 };
 
 /// Provides information about what library functions are available for
@@ -630,7 +631,7 @@ class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
   TargetLibraryAnalysis(TargetLibraryInfoImpl BaselineInfoImpl)
       : BaselineInfoImpl(std::move(BaselineInfoImpl)) {}
 
-  TargetLibraryInfo run(const Function &F, FunctionAnalysisManager &);
+  LLVM_ABI TargetLibraryInfo run(const Function &F, FunctionAnalysisManager &);
 
 private:
   friend AnalysisInfoMixin<TargetLibraryAnalysis>;
@@ -639,7 +640,7 @@ class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
   std::optional<TargetLibraryInfoImpl> BaselineInfoImpl;
 };
 
-class TargetLibraryInfoWrapperPass : public ImmutablePass {
+class LLVM_ABI TargetLibraryInfoWrapperPass : public ImmutablePass {
   TargetLibraryAnalysis TLA;
   std::optional<TargetLibraryInfo> TLI;
 
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 1aed98e8f50db..f142935c158f4 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -21,6 +21,7 @@
 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/IVDescriptors.h"
@@ -96,7 +97,7 @@ struct MemIntrinsicInfo {
 /// Attributes of a target dependent hardware loop.
 struct HardwareLoopInfo {
   HardwareLoopInfo() = delete;
-  HardwareLoopInfo(Loop *L);
+  LLVM_ABI HardwareLoopInfo(Loop *L);
   Loop *L = nullptr;
   BasicBlock *ExitBlock = nullptr;
   BranchInst *ExitBranch = nullptr;
@@ -111,10 +112,10 @@ struct HardwareLoopInfo {
   bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                   // icmp ne zero on the loop counter value and
                                   // produces an i1 to guard the loop entry.
-  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
+  LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                                DominatorTree &DT, bool ForceNestedLoop = false,
                                bool ForceHardwareLoopPHI = false);
-  bool canAnalyze(LoopInfo &LI);
+  LLVM_ABI bool canAnalyze(LoopInfo &LI);
 };
 
 class IntrinsicCostAttributes {
@@ -130,20 +131,20 @@ class IntrinsicCostAttributes {
   TargetLibraryInfo const *LibInfo = nullptr;
 
 public:
-  IntrinsicCostAttributes(
+  LLVM_ABI IntrinsicCostAttributes(
       Intrinsic::ID Id, const CallBase &CI,
       InstructionCost ScalarCost = InstructionCost::getInvalid(),
       bool TypeBasedOnly = false, TargetLibraryInfo const *LibInfo = nullptr);
 
-  IntrinsicCostAttributes(
+  LLVM_ABI IntrinsicCostAttributes(
       Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
       FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
       InstructionCost ScalarCost = InstructionCost::getInvalid());
 
-  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
+  LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                           ArrayRef<const Value *> Args);
 
-  IntrinsicCostAttributes(
+  LLVM_ABI IntrinsicCostAttributes(
       Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
       ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
       const IntrinsicInst *I = nullptr,
@@ -219,7 +220,7 @@ class TargetTransformInfo {
   enum PartialReductionExtendKind { PR_None, PR_SignExtend, PR_ZeroExtend };
 
   /// Get the kind of extension that an instruction represents.
-  static PartialReductionExtendKind
+  LLVM_ABI static PartialReductionExtendKind
   getPartialReductionExtendKind(Instruction *I);
 
   /// Construct a TTI object using a type implementing the \c Concept
@@ -227,7 +228,7 @@ class TargetTransformInfo {
   ///
   /// This is used by targets to construct a TTI wrapping their target-specific
   /// implementation that encodes appropriate costs for their target.
-  explicit TargetTransformInfo(
+  LLVM_ABI explicit TargetTransformInfo(
       std::unique_ptr<const TargetTransformInfoImplBase> Impl);
 
   /// Construct a baseline TTI object using a minimal implementation of
@@ -235,15 +236,15 @@ class TargetTransformInfo {
   ///
   /// The TTI implementation will reflect the information in the DataLayout
   /// provided if non-null.
-  explicit TargetTransformInfo(const DataLayout &DL);
+  LLVM_ABI explicit TargetTransformInfo(const DataLayout &DL);
 
   // Provide move semantics.
-  TargetTransformInfo(TargetTransformInfo &&Arg);
-  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
+  LLVM_ABI TargetTransformInfo(TargetTransformInfo &&Arg);
+  LLVM_ABI TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
 
   // We need to define the destructor out-of-line to define our sub-classes
   // out-of-line.
-  ~TargetTransformInfo();
+  LLVM_ABI ~TargetTransformInfo();
 
   /// Handle the invalidation of this information.
   ///
@@ -308,7 +309,7 @@ class TargetTransformInfo {
   /// folded into the addressing mode of a load/store. If AccessType is null,
   /// then the resulting target type based off of PointeeType will be used as an
   /// approximation.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getGEPCost(Type *PointeeType, const Value *Ptr,
              ArrayRef<const Value *> Operands, Type *AccessType = nullptr,
              TargetCostKind CostKind = TCK_SizeAndLatency) const;
@@ -347,7 +348,7 @@ class TargetTransformInfo {
   /// chain of loads or stores within same block) operations set when lowered.
   /// \p AccessTy is the type of the loads/stores that will ultimately use the
   /// \p Ptrs.
-  InstructionCost getPointersChainCost(
+  LLVM_ABI InstructionCost getPointersChainCost(
       ArrayRef<const Value *> Ptrs, const Value *Base,
       const PointersChainInfo &Info, Type *AccessTy,
       TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
@@ -358,20 +359,20 @@ class TargetTransformInfo {
   ///
   /// TODO: This is a rather blunt instrument.  Perhaps altering the costs of
   /// individual classes of instructions would be better.
-  unsigned getInliningThresholdMultiplier() const;
+  LLVM_ABI unsigned getInliningThresholdMultiplier() const;
 
-  unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const;
-  unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const;
+  LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const;
+  LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const;
 
   /// \returns The bonus of inlining the last call to a static function.
-  int getInliningLastCallToStaticBonus() const;
+  LLVM_ABI int getInliningLastCallToStaticBonus() const;
 
   /// \returns A value to be added to the inlining threshold.
-  unsigned adjustInliningThreshold(const CallBase *CB) const;
+  LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const;
 
   /// \returns The cost of having an Alloca in the caller if not inlined, to be
   /// added to the threshold
-  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;
+  LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;
 
   /// \returns Vector bonus in percent.
   ///
@@ -383,20 +384,20 @@ class TargetTransformInfo {
   /// principled bonuses.
   /// FIXME: It would be nice to base the bonus values on something more
   /// scientific. A target may has no bonus on vector instructions.
-  int getInlinerVectorBonusPercent() const;
+  LLVM_ABI int getInlinerVectorBonusPercent() const;
 
   /// \return the expected cost of a memcpy, which could e.g. depend on the
   /// source/destination type and alignment and the number of bytes copied.
-  InstructionCost getMemcpyCost(const Instruction *I) const;
+  LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const;
 
   /// Returns the maximum memset / memcpy size in bytes that still makes it
   /// profitable to inline the call.
-  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;
+  LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;
 
   /// \return The estimated number of case clusters when lowering \p 'SI'.
   /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
   /// table.
-  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+  LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                             unsigned &JTSize,
                                             ProfileSummaryInfo *PSI,
                                             BlockFrequencyInfo *BFI) const;
@@ -414,7 +415,7 @@ class TargetTransformInfo {
   ///
   /// The returned cost is defined in terms of \c TargetCostConstants, see its
   /// comments for a detailed explanation of the cost values.
-  InstructionCost getInstructionCost(const User *U,
+  LLVM_ABI InstructionCost getInstructionCost(const User *U,
                                      ArrayRef<const Value *> Operands,
                                      TargetCostKind CostKind) const;
 
@@ -428,13 +429,13 @@ class TargetTransformInfo {
 
   /// If a branch or a select condition is skewed in one direction by more than
   /// this factor, it is very likely to be predicted correctly.
-  BranchProbability getPredictableBranchThreshold() const;
+  LLVM_ABI BranchProbability getPredictableBranchThreshold() const;
 
   /// Returns estimated penalty of a branch misprediction in latency. Indicates
   /// how aggressive the target wants for eliminating unpredictable branches. A
   /// zero return value means extra optimization applied to them should be
   /// minimal.
-  InstructionCost getBranchMispredictPenalty() const;
+  LLVM_ABI InstructionCost getBranchMispredictPenalty() const;
 
   /// Return true if branch divergence exists.
   ///
@@ -445,25 +446,25 @@ class TargetTransformInfo {
   /// If \p F is passed, provides a context function. If \p F is known to only
   /// execute in a single threaded environment, the target may choose to skip
   /// uniformity analysis and assume all values are uniform.
-  bool hasBranchDivergence(const Function *F = nullptr) const;
+  LLVM_ABI bool hasBranchDivergence(const Function *F = nullptr) const;
 
   /// Returns whether V is a source of divergence.
   ///
   /// This function provides the target-dependent information for
   /// the target-independent UniformityAnalysis.
-  bool isSourceOfDivergence(const Value *V) const;
+  LLVM_ABI bool isSourceOfDivergence(const Value *V) const;
 
   // Returns true for the target specific
   // set of operations which produce uniform result
   // even taking non-uniform arguments
-  bool isAlwaysUniform(const Value *V) const;
+  LLVM_ABI bool isAlwaysUniform(const Value *V) const;
 
   /// Query the target whether the specified address space cast from FromAS to
   /// ToAS is valid.
-  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
+  LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
 
   /// Return false if a \p AS0 address cannot possibly alias a \p AS1 address.
-  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;
+  LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;
 
   /// Returns the address space ID for a target's 'flat' address space. Note
   /// this is not necessarily the same as addrspace(0), which LLVM sometimes
@@ -481,26 +482,26 @@ class TargetTransformInfo {
   ///
   /// \returns ~0u if the target does not have such a flat address space to
   /// optimize away.
-  unsigned getFlatAddressSpace() const;
+  LLVM_ABI unsigned getFlatAddressSpace() const;
 
   /// Return any intrinsic address operand indexes which may be rewritten if
   /// they use a flat address space pointer.
   ///
   /// \returns true if the intrinsic was handled.
-  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
+  LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                   Intrinsic::ID IID) const;
 
-  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
+  LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
 
   /// Return true if globals in this address space can have initializers other
   /// than `undef`.
-  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;
+  LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;
 
-  unsigned getAssumedAddrSpace(const Value *V) const;
+  LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const;
 
-  bool isSingleThreaded() const;
+  LLVM_ABI bool isSingleThreaded() const;
 
-  std::pair<const Value *, unsigned>
+  LLVM_ABI std::pair<const Value *, unsigned>
   getPredicatedAddrSpace(const Value *V) const;
 
   /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
@@ -508,7 +509,7 @@ class TargetTransformInfo {
   /// operand index that collectFlatAddressOperands returned for the intrinsic.
   /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
   /// new value (which may be the original \p II with modified operands).
-  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
+  LLVM_ABI Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                           Value *NewV) const;
 
   /// Test whether calls to a function lower to actual program function
@@ -523,7 +524,7 @@ class TargetTransformInfo {
   /// and execution-speed costs. This would allow modelling the core of this
   /// query more accurately as a call is a single small instruction, but
   /// incurs significant execution cost.
-  bool isLoweredToCall(const Function *F) const;
+  LLVM_ABI bool isLoweredToCall(const Function *F) const;
 
   struct LSRCost {
     /// TODO: Some of these could be merged. Also, a lexical ordering
@@ -638,23 +639,23 @@ class TargetTransformInfo {
   /// Get target-customized preferences for the generic loop unrolling
   /// transformation. The caller will initialize UP with the current
   /// target-independent defaults.
-  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
+  LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                                UnrollingPreferences &UP,
                                OptimizationRemarkEmitter *ORE) const;
 
   /// Query the target whether it would be profitable to convert the given loop
   /// into a hardware loop.
-  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
+  LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                 HardwareLoopInfo &HWLoopInfo) const;
 
   // Query the target for which minimum vectorization factor epilogue
   // vectorization should be considered.
-  unsigned getEpilogueVectorizationMinVF() const;
+  LLVM_ABI unsigned getEpilogueVectorizationMinVF() const;
 
   /// Query the target whether it would be prefered to create a predicated
   /// vector loop, which can avoid the need to emit a scalar epilogue loop.
-  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;
+  LLVM_ABI bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;
 
   /// Query the target what the preferred style of tail folding is.
   /// \param IVUpdateMayOverflow Tells whether it is known if the IV update
@@ -662,7 +663,7 @@ class TargetTransformInfo {
   /// Targets can use this information to select a more optimal tail folding
   /// style. The value conservatively defaults to true, such that no assumptions
   /// are made on overflow.
-  TailFoldingStyle
+  LLVM_ABI TailFoldingStyle
   getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;
 
   // Parameters that control the loop peeling transformation
@@ -688,7 +689,7 @@ class TargetTransformInfo {
   /// Get target-customized preferences for the generic loop peeling
   /// transformation. The caller will initialize \p PP with the current
   /// target-independent defaults with information from \p L and \p SE.
-  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
+  LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                              PeelingPreferences &PP) const;
 
   /// Targets can implement their own combinations for target-specific
@@ -698,16 +699,16 @@ class TargetTransformInfo {
   /// \returns std::nullopt to not do anything target specific or a value that
   /// will be returned from the InstCombiner. It is possible to return null and
   /// stop further processing of the intrinsic by returning nullptr.
-  std::optional<Instruction *> instCombineIntrinsic(InstCombiner & IC,
+  LLVM_ABI std::optional<Instruction *> instCombineIntrinsic(InstCombiner & IC,
                                                     IntrinsicInst & II) const;
   /// Can be used to implement target-specific instruction combining.
   /// \see instCombineIntrinsic
-  std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
+  LLVM_ABI std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
       InstCombiner & IC, IntrinsicInst & II, APInt DemandedMask,
       KnownBits & Known, bool &KnownBitsComputed) const;
   /// Can be used to implement target-specific instruction combining.
   /// \see instCombineIntrinsic
-  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+  LLVM_ABI std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
       InstCombiner & IC, IntrinsicInst & II, APInt DemandedElts,
       APInt & UndefElts, APInt & UndefElts2, APInt & UndefElts3,
       std::function<void(Instruction *, unsigned, APInt, APInt &)>
@@ -730,19 +731,19 @@ class TargetTransformInfo {
   /// Return true if the specified immediate is legal add immediate, that
   /// is the target has add instructions which can add a register with the
   /// immediate without having to materialize the immediate into a register.
-  bool isLegalAddImmediate(int64_t Imm) const;
+  LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const;
 
   /// Return true if adding the specified scalable immediate is legal, that is
   /// the target has add instructions which can add a register with the
   /// immediate (multiplied by vscale) without having to materialize the
   /// immediate into a register.
-  bool isLegalAddScalableImmediate(int64_t Imm) const;
+  LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const;
 
   /// Return true if the specified immediate is legal icmp immediate,
   /// that is the target has icmp instructions which can compare a register
   /// against the immediate without having to materialize the immediate into a
   /// register.
-  bool isLegalICmpImmediate(int64_t Imm) const;
+  LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const;
 
   /// Return true if the addressing mode represented by AM is legal for
   /// this target, for a load/store of the specified type.
@@ -754,35 +755,35 @@ class TargetTransformInfo {
   /// a scalable offset.
   ///
   /// TODO: Handle pre/postinc as well.
-  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+  LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale,
                              unsigned AddrSpace = 0, Instruction *I = nullptr,
                              int64_t ScalableOffset = 0) const;
 
   /// Return true if LSR cost of C1 is lower than C2.
-  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
+  LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                      const TargetTransformInfo::LSRCost &C2) const;
 
   /// Return true if LSR major cost is number of registers. Targets which
   /// implement their own isLSRCostLess and unset number of registers as major
   /// cost should return false, otherwise return true.
-  bool isNumRegsMajorCostOfLSR() const;
+  LLVM_ABI bool isNumRegsMajorCostOfLSR() const;
 
   /// Return true if LSR should drop a found solution if it's calculated to be
   /// less profitable than the baseline.
-  bool shouldDropLSRSolutionIfLessProfitable() const;
+  LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const;
 
   /// \returns true if LSR should not optimize a chain that includes \p I.
-  bool isProfitableLSRChainElement(Instruction *I) const;
+  LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const;
 
   /// Return true if the target can fuse a compare and branch.
   /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
   /// calculation for the instructions in a loop.
-  bool canMacroFuseCmp() const;
+  LLVM_ABI bool canMacroFuseCmp() const;
 
   /// Return true if the target can save a compare for loop count, for example
   /// hardware loop saves a compare.
-  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
+  LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                   DominatorTree *DT, AssumptionCache *AC,
                   TargetLibraryInfo *LibInfo) const;
 
@@ -793,52 +794,52 @@ class TargetTransformInfo {
   };
 
   /// Return the preferred addressing mode LSR should make efforts to generate.
-  AddressingModeKind getPreferredAddressingMode(const Loop *L,
+  LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                 ScalarEvolution *SE) const;
 
   /// Return true if the target supports masked store.
-  bool isLegalMaskedStore(Type *DataType, Align Alignment,
+  LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment,
                           unsigned AddressSpace) const;
   /// Return true if the target supports masked load.
-  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
+  LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                          unsigned AddressSpace) const;
 
   /// Return true if the target supports nontemporal store.
-  bool isLegalNTStore(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports nontemporal load.
-  bool isLegalNTLoad(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const;
 
   /// \Returns true if the target supports broadcasting a load to a vector of
   /// type <NumElements x ElementTy>.
-  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
+  LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
 
   /// Return true if the target supports masked scatter.
-  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
   /// Return true if the target supports masked gather.
-  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
   /// Return true if the target forces scalarizing of llvm.masked.gather
   /// intrinsics.
-  bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
+  LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
   /// Return true if the target forces scalarizing of llvm.masked.scatter
   /// intrinsics.
-  bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;
+  LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;
 
   /// Return true if the target supports masked compress store.
-  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports masked expand load.
-  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;
 
   /// Return true if the target supports strided load.
-  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const;
 
   /// Return true is the target supports interleaved access for the given vector
   /// type \p VTy, interleave factor \p Factor, alignment \p Alignment and
   /// address space \p AddrSpace.
-  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
+  LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                     Align Alignment, unsigned AddrSpace) const;
 
   // Return true if the target supports masked vector histograms.
-  bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const;
+  LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const;
 
   /// Return true if this is an alternating opcode pattern that can be lowered
   /// to a single instruction on the target. In X86 this is for the addsub
@@ -847,28 +848,28 @@ class TargetTransformInfo {
   /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
   /// when \p Opcode0 is selected and `1` when Opcode1 is selected.
   /// \p VecTy is the vector type of the instruction to be generated.
-  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
+  LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                        const SmallBitVector &OpcodeMask) const;
 
   /// Return true if we should be enabling ordered reductions for the target.
-  bool enableOrderedReductions() const;
+  LLVM_ABI bool enableOrderedReductions() const;
 
   /// Return true if the target has a unified operation to calculate division
   /// and remainder. If so, the additional implicit multiplication and
   /// subtraction required to calculate a remainder from division are free. This
   /// can enable more aggressive transformations for division and remainder than
   /// would typically be allowed using throughput or size cost models.
-  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
+  LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const;
 
   /// Return true if the given instruction (assumed to be a memory access
   /// instruction) has a volatile variant. If that's the case then we can avoid
   /// addrspacecast to generic AS for volatile loads/stores. Default
   /// implementation returns false, which prevents address space inference for
   /// volatile loads/stores.
-  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;
+  LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;
 
   /// Return true if target doesn't mind addresses in vectors.
-  bool prefersVectorizedAddressing() const;
+  LLVM_ABI bool prefersVectorizedAddressing() const;
 
   /// Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
@@ -876,7 +877,7 @@ class TargetTransformInfo {
   /// If the AM is supported, the return value must be >= 0.
   /// If the AM is not supported, it returns a negative value.
   /// TODO: Handle pre/postinc as well.
-  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
+  LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                        StackOffset BaseOffset, bool HasBaseReg,
                                        int64_t Scale,
                                        unsigned AddrSpace = 0) const;
@@ -885,62 +886,62 @@ class TargetTransformInfo {
   /// Instruction* based TTI queries to isLegalAddressingMode(). This is
   /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
   /// immediate offset and no index register.
-  bool LSRWithInstrQueries() const;
+  LLVM_ABI bool LSRWithInstrQueries() const;
 
   /// Return true if it's free to truncate a value of type Ty1 to type
   /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
   /// by referencing its sub-register AX.
-  bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+  LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const;
 
   /// Return true if it is profitable to hoist instruction in the
   /// then/else to before if.
-  bool isProfitableToHoist(Instruction *I) const;
+  LLVM_ABI bool isProfitableToHoist(Instruction *I) const;
 
-  bool useAA() const;
+  LLVM_ABI bool useAA() const;
 
   /// Return true if this type is legal.
-  bool isTypeLegal(Type *Ty) const;
+  LLVM_ABI bool isTypeLegal(Type *Ty) const;
 
   /// Returns the estimated number of registers required to represent \p Ty.
-  unsigned getRegUsageForType(Type *Ty) const;
+  LLVM_ABI unsigned getRegUsageForType(Type *Ty) const;
 
   /// Return true if switches should be turned into lookup tables for the
   /// target.
-  bool shouldBuildLookupTables() const;
+  LLVM_ABI bool shouldBuildLookupTables() const;
 
   /// Return true if switches should be turned into lookup tables
   /// containing this constant value for the target.
-  bool shouldBuildLookupTablesForConstant(Constant *C) const;
+  LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const;
 
   /// Return true if lookup tables should be turned into relative lookup tables.
-  bool shouldBuildRelLookupTables() const;
+  LLVM_ABI bool shouldBuildRelLookupTables() const;
 
   /// Return true if the input function which is cold at all call sites,
   ///  should use coldcc calling convention.
-  bool useColdCCForColdCall(Function &F) const;
+  LLVM_ABI bool useColdCCForColdCall(Function &F) const;
 
-  bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;
+  LLVM_ABI bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;
 
   /// Identifies if the vector form of the intrinsic has a scalar operand.
-  bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
+  LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                           unsigned ScalarOpdIdx) const;
 
   /// Identifies if the vector form of the intrinsic is overloaded on the type
   /// of the operand at index \p OpdIdx, or on the return type if \p OpdIdx is
   /// -1.
-  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
+  LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                               int OpdIdx) const;
 
   /// Identifies if the vector form of the intrinsic that returns a struct is
   /// overloaded at the struct element index \p RetIdx.
-  bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
+  LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
                                                         int RetIdx) const;
 
   /// Estimate the overhead of scalarizing an instruction. Insert and Extract
   /// are set if the demanded result elements need to be inserted and/or
   /// extracted from vectors.  The involved values may be passed in VL if
   /// Insert is true.
-  InstructionCost getScalarizationOverhead(VectorType *Ty,
+  LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty,
                                            const APInt &DemandedElts,
                                            bool Insert, bool Extract,
                                            TTI::TargetCostKind CostKind,
@@ -950,7 +951,7 @@ class TargetTransformInfo {
   /// Estimate the overhead of scalarizing an instructions unique
   /// non-constant operands. The (potentially vector) types to use for each of
   /// argument are passes via Tys.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                    ArrayRef<Type *> Tys,
                                    TTI::TargetCostKind CostKind) const;
@@ -958,16 +959,16 @@ class TargetTransformInfo {
   /// If target has efficient vector element load/store instructions, it can
   /// return true here so that insertion/extraction costs are not added to
   /// the scalarization cost of a load/store.
-  bool supportsEfficientVectorElementLoadStore() const;
+  LLVM_ABI bool supportsEfficientVectorElementLoadStore() const;
 
   /// If the target supports tail calls.
-  bool supportsTailCalls() const;
+  LLVM_ABI bool supportsTailCalls() const;
 
   /// If target supports tail call on \p CB
-  bool supportsTailCallFor(const CallBase *CB) const;
+  LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const;
 
   /// Don't restrict interleaved unrolling to small loops.
-  bool enableAggressiveInterleaving(bool LoopHasReductions) const;
+  LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const;
 
   /// Returns options for expansion of memcmp. IsZeroCmp is
   // true if this is the expansion of memcmp(p1, p2, s) == 0.
@@ -1007,25 +1008,25 @@ class TargetTransformInfo {
     // merged into one block
     SmallVector<unsigned, 4> AllowedTailExpansions;
   };
-  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
+  LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                bool IsZeroCmp) const;
 
   /// Should the Select Optimization pass be enabled and ran.
-  bool enableSelectOptimize() const;
+  LLVM_ABI bool enableSelectOptimize() const;
 
   /// Should the Select Optimization pass treat the given instruction like a
   /// select, potentially converting it to a conditional branch. This can
   /// include select-like instructions like or(zext(c), x) that can be converted
   /// to selects.
-  bool shouldTreatInstructionLikeSelect(const Instruction *I) const;
+  LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const;
 
   /// Enable matching of interleaved access groups.
-  bool enableInterleavedAccessVectorization() const;
+  LLVM_ABI bool enableInterleavedAccessVectorization() const;
 
   /// Enable matching of interleaved access groups that contain predicated
   /// accesses or gaps and therefore vectorized using masked
   /// vector loads/stores.
-  bool enableMaskedInterleavedAccessVectorization() const;
+  LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const;
 
   /// Indicate that it is potentially unsafe to automatically vectorize
   /// floating-point operations because the semantics of vector and scalar
@@ -1034,19 +1035,19 @@ class TargetTransformInfo {
   /// platform, scalar floating-point math does.
   /// This applies to floating-point math operations and calls, not memory
   /// operations, shuffles, or casts.
-  bool isFPVectorizationPotentiallyUnsafe() const;
+  LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const;
 
   /// Determine if the target supports unaligned memory accesses.
-  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
+  LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                       unsigned AddressSpace = 0,
                                       Align Alignment = Align(1),
                                       unsigned *Fast = nullptr) const;
 
   /// Return hardware support for population count.
-  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
+  LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
 
   /// Return true if the hardware has a fast square-root instruction.
-  bool haveFastSqrt(Type *Ty) const;
+  LLVM_ABI bool haveFastSqrt(Type *Ty) const;
 
   /// Return true if the cost of the instruction is too high to speculatively
   /// execute and should be kept behind a branch.
@@ -1054,31 +1055,31 @@ class TargetTransformInfo {
   /// targets might report a low TCK_SizeAndLatency value that is incompatible
   /// with the fixed TCC_Expensive value.
   /// NOTE: This assumes the instruction passes isSafeToSpeculativelyExecute().
-  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;
+  LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;
 
   /// Return true if it is faster to check if a floating-point value is NaN
   /// (or not-NaN) versus a comparison against a constant FP zero value.
   /// Targets should override this if materializing a 0.0 for comparison is
   /// generally as cheap as checking for ordered/unordered.
-  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
+  LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
 
   /// Return the expected cost of supporting the floating point operation
   /// of the specified type.
-  InstructionCost getFPOpCost(Type *Ty) const;
+  LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const;
 
   /// Return the expected cost of materializing for the given integer
   /// immediate of the specified type.
-  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
+  LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                 TargetCostKind CostKind) const;
 
   /// Return the expected cost of materialization for the given integer
   /// immediate of the specified type for a given instruction. The cost can be
   /// zero if the immediate can be folded into the specified instruction.
-  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
+  LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TargetCostKind CostKind,
                                     Instruction *Inst = nullptr) const;
-  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
+  LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
                                       TargetCostKind CostKind) const;
 
@@ -1089,7 +1090,7 @@ class TargetTransformInfo {
   /// with another such as Thumb. This return value is used as a penalty when
   /// the total costs for a constant is calculated (the bigger the cost, the
   /// more beneficial constant hoisting is).
-  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
+  LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                         const APInt &Imm, Type *Ty) const;
 
   /// It can be advantageous to detach complex constants from their uses to make
@@ -1098,7 +1099,7 @@ class TargetTransformInfo {
   /// underlying operation. The motivating example is divides whereby hoisting
   /// constants prevents the code generator's ability to transform them into
   /// combinations of simpler operations.
-  bool preferToKeepConstantsAttached(const Instruction &Inst,
+  LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst,
                                      const Function &Fn) const;
 
   /// @}
@@ -1167,11 +1168,11 @@ class TargetTransformInfo {
   };
 
   /// \return the number of registers in the target-provided register class.
-  unsigned getNumberOfRegisters(unsigned ClassID) const;
+  LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const;
 
   /// \return true if the target supports load/store that enables fault
   /// suppression of memory operands when the source condition is false.
-  bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const;
+  LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const;
 
   /// \return the target-provided register class ID for the provided type,
   /// accounting for type promotion and other type-legalization techniques that
@@ -1184,28 +1185,28 @@ class TargetTransformInfo {
   /// don't necessarily map onto the register classes used by the backend.
   /// FIXME: It's not currently possible to determine how many registers
   /// are used by the provided type.
-  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
+  LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
 
   /// \return the target-provided register class name
-  const char *getRegisterClassName(unsigned ClassID) const;
+  LLVM_ABI const char *getRegisterClassName(unsigned ClassID) const;
 
   enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };
 
   /// \return The width of the largest scalar or vector register type.
-  TypeSize getRegisterBitWidth(RegisterKind K) const;
+  LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const;
 
   /// \return The width of the smallest vector register type.
-  unsigned getMinVectorRegisterBitWidth() const;
+  LLVM_ABI unsigned getMinVectorRegisterBitWidth() const;
 
   /// \return The maximum value of vscale if the target specifies an
   ///  architectural maximum vector length, and std::nullopt otherwise.
-  std::optional<unsigned> getMaxVScale() const;
+  LLVM_ABI std::optional<unsigned> getMaxVScale() const;
 
   /// \return the value of vscale to tune the cost model for.
-  std::optional<unsigned> getVScaleForTuning() const;
+  LLVM_ABI std::optional<unsigned> getVScaleForTuning() const;
 
   /// \return true if vscale is known to be a power of 2
-  bool isVScaleKnownToBeAPowerOfTwo() const;
+  LLVM_ABI bool isVScaleKnownToBeAPowerOfTwo() const;
 
   /// \return True if the vectorization factor should be chosen to
   /// make the vector of the smallest element type match the size of a
@@ -1214,18 +1215,18 @@ class TargetTransformInfo {
   /// If false, the vectorization factor will be chosen based on the
   /// size of the widest element type.
   /// \p K Register Kind for vectorization.
-  bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
+  LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
 
   /// \return The minimum vectorization factor for types of given element
   /// bit width, or 0 if there is no minimum VF. The returned value only
   /// applies when shouldMaximizeVectorBandwidth returns true.
   /// If IsScalable is true, the returned ElementCount must be a scalable VF.
-  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;
+  LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;
 
   /// \return The maximum vectorization factor for types of given element
   /// bit width and opcode, or 0 if there is no maximum VF.
   /// Currently only used by the SLP vectorizer.
-  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
+  LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
 
   /// \return The minimum vectorization factor for the store instruction. Given
   /// the initial estimation of the minimum vector factor and store value type,
@@ -1235,17 +1236,17 @@ class TargetTransformInfo {
   /// \param ScalarMemTy Scalar memory type of the store operation.
   /// \param ScalarValTy Scalar type of the stored value.
   /// Currently only used by the SLP vectorizer.
-  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
+  LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                              Type *ScalarValTy) const;
 
   /// \return True if it should be considered for address type promotion.
   /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
   /// profitable without finding other extensions fed by the same input.
-  bool shouldConsiderAddressTypePromotion(
+  LLVM_ABI bool shouldConsiderAddressTypePromotion(
       const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
 
   /// \return The size of a cache line in bytes.
-  unsigned getCacheLineSize() const;
+  LLVM_ABI unsigned getCacheLineSize() const;
 
   /// The possible cache levels
   enum class CacheLevel {
@@ -1258,18 +1259,18 @@ class TargetTransformInfo {
   };
 
   /// \return The size of the cache level in bytes, if available.
-  std::optional<unsigned> getCacheSize(CacheLevel Level) const;
+  LLVM_ABI std::optional<unsigned> getCacheSize(CacheLevel Level) const;
 
   /// \return The associativity of the cache level, if available.
-  std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
+  LLVM_ABI std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
 
   /// \return The minimum architectural page size for the target.
-  std::optional<unsigned> getMinPageSize() const;
+  LLVM_ABI std::optional<unsigned> getMinPageSize() const;
 
   /// \return How much before a load we should place the prefetch
   /// instruction.  This is currently measured in number of
   /// instructions.
-  unsigned getPrefetchDistance() const;
+  LLVM_ABI unsigned getPrefetchDistance() const;
 
   /// Some HW prefetchers can handle accesses up to a certain constant stride.
   /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
@@ -1288,20 +1289,20 @@ class TargetTransformInfo {
   /// \return This is the minimum stride in bytes where it makes sense to start
   ///         adding SW prefetches. The default is 1, i.e. prefetch with any
   ///         stride.
-  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
+  LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                 unsigned NumStridedMemAccesses,
                                 unsigned NumPrefetches, bool HasCall) const;
 
   /// \return The maximum number of iterations to prefetch ahead.  If
   /// the required number of iterations is more than this number, no
   /// prefetching is performed.
-  unsigned getMaxPrefetchIterationsAhead() const;
+  LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const;
 
   /// \return True if prefetching should also be done for writes.
-  bool enableWritePrefetching() const;
+  LLVM_ABI bool enableWritePrefetching() const;
 
   /// \return if target want to issue a prefetch in address space \p AS.
-  bool shouldPrefetchAddressSpace(unsigned AS) const;
+  LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const;
 
   /// \return The cost of a partial reduction, which is a reduction from a
   /// vector to another vector with fewer elements of larger size. They are
@@ -1310,7 +1311,7 @@ class TargetTransformInfo {
   /// two extends. An example of an operation that uses a partial reduction is a
   /// dot product, which reduces two vectors to another of 4 times fewer and 4
   /// times larger elements.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB,
                           Type *AccumType, ElementCount VF,
                           PartialReductionExtendKind OpAExtend,
@@ -1320,10 +1321,10 @@ class TargetTransformInfo {
   /// \return The maximum interleave factor that any transform should try to
   /// perform for this target. This number depends on the level of parallelism
   /// and the number of execution units in the CPU.
-  unsigned getMaxInterleaveFactor(ElementCount VF) const;
+  LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const;
 
   /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
-  static OperandValueInfo getOperandInfo(const Value *V);
+  LLVM_ABI static OperandValueInfo getOperandInfo(const Value *V);
 
   /// This is an approximation of reciprocal throughput of a math/logic op.
   /// A higher cost indicates less expected throughput.
@@ -1343,7 +1344,7 @@ class TargetTransformInfo {
   /// provide even more information.
   /// \p TLibInfo is used to search for platform specific vector library
   /// functions for instructions that might be converted to calls (e.g. frem).
-  InstructionCost getArithmeticInstrCost(
+  LLVM_ABI InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
       TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
@@ -1358,7 +1359,7 @@ class TargetTransformInfo {
   /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
   /// when \p Opcode0 is selected and `1` when Opcode1 is selected.
   /// \p VecTy is the vector type of the instruction to be generated.
-  InstructionCost getAltInstrCost(
+  LLVM_ABI InstructionCost getAltInstrCost(
       VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
       const SmallBitVector &OpcodeMask,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
@@ -1371,7 +1372,7 @@ class TargetTransformInfo {
   /// passed through \p Args, which helps improve the cost estimation in some
   /// cases, like in broadcast loads.
   /// NOTE: For subvector extractions Tp represents the source type.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = {},
                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                  int Index = 0, VectorType *SubTp = nullptr,
@@ -1412,12 +1413,12 @@ class TargetTransformInfo {
   /// determine the context from some instruction.
   /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
   /// or if it's another type of cast.
-  static CastContextHint getCastContextHint(const Instruction *I);
+  LLVM_ABI static CastContextHint getCastContextHint(const Instruction *I);
 
   /// \return The expected cost of cast instructions, such as bitcast, trunc,
   /// zext, etc. If there is an existing instruction that holds Opcode, it
   /// may be passed in the 'I' parameter.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                    TTI::CastContextHint CCH,
                    TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
@@ -1425,13 +1426,13 @@ class TargetTransformInfo {
 
   /// \return The expected cost of a sign- or zero-extended vector extract. Use
   /// Index = -1 to indicate that there is no information about the index value.
-  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+  LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                            VectorType *VecTy, unsigned Index,
                                            TTI::TargetCostKind CostKind) const;
 
   /// \return The expected cost of control-flow related instructions such as
   /// Phi, Ret, Br, Switch.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getCFInstrCost(unsigned Opcode,
                  TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
                  const Instruction *I = nullptr) const;
@@ -1443,7 +1444,7 @@ class TargetTransformInfo {
   /// types are passed, \p VecPred must be used for all lanes.  For a
   /// comparison, the two operands are the natural values.  For a select, the
   /// two operands are the *value* operands, not the condition operand.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                      CmpInst::Predicate VecPred,
                      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
@@ -1456,7 +1457,7 @@ class TargetTransformInfo {
   /// This is used when the instruction is not available; a typical use
   /// case is to provision the cost of vectorization/scalarization in
   /// vectorizer passes.
-  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
+  LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                      TTI::TargetCostKind CostKind,
                                      unsigned Index = -1,
                                      const Value *Op0 = nullptr,
@@ -1471,7 +1472,7 @@ class TargetTransformInfo {
   /// vector with 'Scalar' being the value being extracted,'User' being the user
   /// of the extract(nullptr if user is not known before vectorization) and
   /// 'Idx' being the extract lane.
-  InstructionCost getVectorInstrCost(
+  LLVM_ABI InstructionCost getVectorInstrCost(
       unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
       Value *Scalar,
       ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const;
@@ -1482,14 +1483,14 @@ class TargetTransformInfo {
   ///
   /// A typical suitable use case is cost estimation when vector instruction
   /// exists (e.g., from basic blocks during transformation).
-  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
+  LLVM_ABI InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                      TTI::TargetCostKind CostKind,
                                      unsigned Index = -1) const;
 
   /// \return The expected cost of aggregate inserts and extracts. This is
   /// used when the instruction is not available; a typical use case is to
   /// provision the cost of vectorization/scalarization in vectorizer passes.
-  InstructionCost getInsertExtractValueCost(unsigned Opcode,
+  LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode,
                                             TTI::TargetCostKind CostKind) const;
 
   /// \return The cost of replication shuffle of \p VF elements typed \p EltTy
@@ -1497,13 +1498,13 @@ class TargetTransformInfo {
   ///
   /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
   ///   <0,0,0,1,1,1,2,2,2,3,3,3>
-  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
+  LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                             int VF,
                                             const APInt &DemandedDstElts,
                                             TTI::TargetCostKind CostKind) const;
 
   /// \return The cost of Load and Store instructions.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                   unsigned AddressSpace,
                   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
@@ -1511,14 +1512,14 @@ class TargetTransformInfo {
                   const Instruction *I = nullptr) const;
 
   /// \return The cost of VP Load and Store instructions.
-  InstructionCost
+  LLVM_ABI InstructionCost
   getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                     unsigned AddressSpace,
                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                     const Instruction *I = nullptr) const;
 
   /// \return The cost of masked Load and Store instructions.
-  InstructionCost getMaskedMemoryOpCost(
+  LLVM_ABI InstructionCost getMaskedMemoryOpCost(
       unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
 
@@ -1531,7 +1532,7 @@ class TargetTransformInfo {
   /// \p Alignment - alignment of single element
   /// \p I - the optional original context instruction, if one exists, e.g. the
   ///        load/store to transform or the call to the gather/scatter intrinsic
-  InstructionCost getGatherScatterOpCost(
+  LLVM_ABI InstructionCost getGatherScatterOpCost(
       unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
       Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
       const Instruction *I = nullptr) const;
@@ -1544,7 +1545,7 @@ class TargetTransformInfo {
   /// \p Alignment - alignment of single element
   /// \p I - the optional original context instruction, if one exists, e.g. the
   ///        load/store to transform or the call to the gather/scatter intrinsic
-  InstructionCost getExpandCompressMemoryOpCost(
+  LLVM_ABI InstructionCost getExpandCompressMemoryOpCost(
       unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
       const Instruction *I = nullptr) const;
@@ -1558,7 +1559,7 @@ class TargetTransformInfo {
   /// \p Alignment - alignment of single element
   /// \p I - the optional original context instruction, if one exists, e.g. the
   ///        load/store to transform or the call to the gather/scatter intrinsic
-  InstructionCost getStridedMemoryOpCost(
+  LLVM_ABI InstructionCost getStridedMemoryOpCost(
       unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
       Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
       const Instruction *I = nullptr) const;
@@ -1573,7 +1574,7 @@ class TargetTransformInfo {
   /// \p AddressSpace is address space of the pointer.
   /// \p UseMaskForCond indicates if the memory access is predicated.
   /// \p UseMaskForGaps indicates if gaps should be masked.
-  InstructionCost getInterleavedMemoryOpCost(
+  LLVM_ABI InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
@@ -1609,11 +1610,11 @@ class TargetTransformInfo {
   ///   This is only the case for FP operations and when reassociation is not
   ///   allowed.
   ///
-  InstructionCost getArithmeticReductionCost(
+  LLVM_ABI InstructionCost getArithmeticReductionCost(
       unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
 
-  InstructionCost getMinMaxReductionCost(
+  LLVM_ABI InstructionCost getMinMaxReductionCost(
       Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF = FastMathFlags(),
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
 
@@ -1622,7 +1623,7 @@ class TargetTransformInfo {
   /// extensions. This is the cost of as:
   /// ResTy vecreduce.add(mul (A, B)).
   /// ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B)).
-  InstructionCost getMulAccReductionCost(
+  LLVM_ABI InstructionCost getMulAccReductionCost(
       bool IsUnsigned, Type *ResTy, VectorType *Ty,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
 
@@ -1630,7 +1631,7 @@ class TargetTransformInfo {
   /// getArithmeticReductionCost of a reduction with an extension.
   /// This is the cost of as:
   /// ResTy vecreduce.opcode(ext(Ty A)).
-  InstructionCost getExtendedReductionCost(
+  LLVM_ABI InstructionCost getExtendedReductionCost(
       unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
       std::optional<FastMathFlags> FMF,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
@@ -1638,17 +1639,17 @@ class TargetTransformInfo {
   /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
   /// Three cases are handled: 1. scalar instruction 2. vector instruction
   /// 3. scalar instruction which is to be vectorized.
-  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
+  LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                         TTI::TargetCostKind CostKind) const;
 
   /// \returns The cost of Call instructions.
-  InstructionCost getCallInstrCost(
+  LLVM_ABI InstructionCost getCallInstrCost(
       Function *F, Type *RetTy, ArrayRef<Type *> Tys,
       TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
 
   /// \returns The number of pieces into which the provided type must be
   /// split during legalization. Zero is returned when the answer is unknown.
-  unsigned getNumberOfParts(Type *Tp) const;
+  LLVM_ABI unsigned getNumberOfParts(Type *Tp) const;
 
   /// \returns The cost of the address computation. For most targets this can be
   /// merged into the instruction indexing mode. Some targets might want to
@@ -1657,7 +1658,7 @@ class TargetTransformInfo {
   /// The 'SE' parameter holds pointer for the scalar evolution object which
   /// is used in order to get the Ptr step value in case of constant stride.
   /// The 'Ptr' parameter holds SCEV of the access pointer.
-  InstructionCost getAddressComputationCost(Type *Ty,
+  LLVM_ABI InstructionCost getAddressComputationCost(Type *Ty,
                                             ScalarEvolution *SE = nullptr,
                                             const SCEV *Ptr = nullptr) const;
 
@@ -1666,27 +1667,27 @@ class TargetTransformInfo {
   ///
   /// Some types may require the use of register classes that do not have
   /// any callee-saved registers, so would require a spill and fill.
-  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
+  LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
 
   /// \returns True if the intrinsic is a supported memory intrinsic.  Info
   /// will contain additional information - whether the intrinsic may write
   /// or read to memory, volatility and the pointer.  Info is undefined
   /// if false is returned.
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
+  LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
 
   /// \returns The maximum element size, in bytes, for an element
   /// unordered-atomic memory intrinsic.
-  unsigned getAtomicMemIntrinsicMaxElementSize() const;
+  LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const;
 
   /// \returns A value which is the result of the given memory intrinsic.  New
   /// instructions may be created to extract the result from the given intrinsic
   /// memory operation.  Returns nullptr if the target cannot create a result
   /// from the given intrinsic.
-  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+  LLVM_ABI Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                            Type *ExpectedType) const;
 
   /// \returns The type to use in a loop expansion of a memcpy call.
-  Type *getMemcpyLoopLoweringType(
+  LLVM_ABI Type *getMemcpyLoopLoweringType(
       LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
       unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
       std::optional<uint32_t> AtomicElementSize = std::nullopt) const;
@@ -1697,7 +1698,7 @@ class TargetTransformInfo {
   /// Calculates the operand types to use when copying \p RemainingBytes of
   /// memory, where source and destination alignments are \p SrcAlign and
   /// \p DestAlign respectively.
-  void getMemcpyLoopResidualLoweringType(
+  LLVM_ABI void getMemcpyLoopResidualLoweringType(
       SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
       unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
       Align SrcAlign, Align DestAlign,
@@ -1705,7 +1706,7 @@ class TargetTransformInfo {
 
   /// \returns True if the two functions have compatible attributes for inlining
   /// purposes.
-  bool areInlineCompatible(const Function *Caller,
+  LLVM_ABI bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
 
   /// Returns a penalty for invoking call \p Call in \p F.
@@ -1714,14 +1715,14 @@ class TargetTransformInfo {
   /// penalty of calling H from F, e.g. after inlining G into F.
   /// \p DefaultCallPenalty is passed to give a default penalty that
   /// the target can amend or override.
-  unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
+  LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
                                 unsigned DefaultCallPenalty) const;
 
   /// \returns True if the caller and callee agree on how \p Types will be
   /// passed to or returned from the callee.
   /// to the callee.
   /// \param Types List of types to check.
-  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
+  LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                              const ArrayRef<Type *> &Types) const;
 
   /// The type of load/store indexing.
@@ -1734,60 +1735,60 @@ class TargetTransformInfo {
   };
 
   /// \returns True if the specified indexed load for the given type is legal.
-  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
+  LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
 
   /// \returns True if the specified indexed store for the given type is legal.
-  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
+  LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
 
   /// \returns The bitwidth of the largest vector type that should be used to
   /// load/store in the given address space.
-  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
+  LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
 
   /// \returns True if the load instruction is legal to vectorize.
-  bool isLegalToVectorizeLoad(LoadInst *LI) const;
+  LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const;
 
   /// \returns True if the store instruction is legal to vectorize.
-  bool isLegalToVectorizeStore(StoreInst *SI) const;
+  LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const;
 
   /// \returns True if it is legal to vectorize the given load chain.
-  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
+  LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const;
 
   /// \returns True if it is legal to vectorize the given store chain.
-  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
+  LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                     unsigned AddrSpace) const;
 
   /// \returns True if it is legal to vectorize the given reduction kind.
-  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
+  LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                    ElementCount VF) const;
 
   /// \returns True if the given type is supported for scalable vectors
-  bool isElementTypeLegalForScalableVector(Type *Ty) const;
+  LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const;
 
   /// \returns The new vector factor value if the target doesn't support \p
   /// SizeInBytes loads or has a better vector factor.
-  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+  LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;
 
   /// \returns The new vector factor value if the target doesn't support \p
   /// SizeInBytes stores or has a better vector factor.
-  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+  LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                 unsigned ChainSizeInBytes,
                                 VectorType *VecTy) const;
 
   /// \returns True if the targets prefers fixed width vectorization if the
   /// loop vectorizer's cost-model assigns an equal cost to the fixed and
   /// scalable version of the vectorized loop.
-  bool preferFixedOverScalableIfEqualCost() const;
+  LLVM_ABI bool preferFixedOverScalableIfEqualCost() const;
 
   /// \returns True if target prefers SLP vectorizer with alternate opcode
   /// vectorization, false - otherwise.
-  bool preferAlternateOpcodeVectorization() const;
+  LLVM_ABI bool preferAlternateOpcodeVectorization() const;
 
   /// \returns True if the target prefers reductions of \p Kind to be performed
   /// in the loop.
-  bool preferInLoopReduction(RecurKind Kind, Type *Ty) const;
+  LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const;
 
   /// \returns True if the target prefers reductions select kept in the loop
   /// when tail folding. i.e.
@@ -1800,36 +1801,36 @@ class TargetTransformInfo {
   /// As opposed to the normal scheme of p = phi (0, a) which allows the select
   /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
   /// by the target, this can lead to cleaner code generation.
-  bool preferPredicatedReductionSelect() const;
+  LLVM_ABI bool preferPredicatedReductionSelect() const;
 
   /// Return true if the loop vectorizer should consider vectorizing an
   /// otherwise scalar epilogue loop.
-  bool preferEpilogueVectorization() const;
+  LLVM_ABI bool preferEpilogueVectorization() const;
 
   /// \returns True if the target wants to expand the given reduction intrinsic
   /// into a shuffle sequence.
-  bool shouldExpandReduction(const IntrinsicInst *II) const;
+  LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const;
 
   enum struct ReductionShuffle { SplitHalf, Pairwise };
 
   /// \returns The shuffle sequence pattern used to expand the given reduction
   /// intrinsic.
-  ReductionShuffle
+  LLVM_ABI ReductionShuffle
   getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const;
 
   /// \returns the size cost of rematerializing a GlobalValue address relative
   /// to a stack reload.
-  unsigned getGISelRematGlobalCost() const;
+  LLVM_ABI unsigned getGISelRematGlobalCost() const;
 
   /// \returns the lower bound of a trip count to decide on vectorization
   /// while tail-folding.
-  unsigned getMinTripCountTailFoldingThreshold() const;
+  LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const;
 
   /// \returns True if the target supports scalable vectors.
-  bool supportsScalableVectors() const;
+  LLVM_ABI bool supportsScalableVectors() const;
 
   /// \return true when scalable vectorization is preferred.
-  bool enableScalableVectorization() const;
+  LLVM_ABI bool enableScalableVectorization() const;
 
   /// \name Vector Predication Information
   /// @{
@@ -1837,7 +1838,7 @@ class TargetTransformInfo {
   /// in hardware, for the given opcode and type/alignment. (see LLVM Language
   /// Reference - "Vector Predication Intrinsics").
   /// Use of %evl is discouraged when that is not the case.
-  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
+  LLVM_ABI bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                              Align Alignment) const;
 
   /// Return true if sinking I's operands to the same basic block as I is
@@ -1845,7 +1846,7 @@ class TargetTransformInfo {
   /// instruction during instruction selection. After calling the function
   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
   /// come first).
-  bool isProfitableToSinkOperands(Instruction *I,
+  LLVM_ABI bool isProfitableToSinkOperands(Instruction *I,
                                   SmallVectorImpl<Use *> &Ops) const;
 
   /// Return true if it's significantly cheaper to shift a vector by a uniform
@@ -1853,7 +1854,7 @@ class TargetTransformInfo {
   /// AVX2 for example, there is a "psllw" instruction for the former case, but
   /// no simple instruction for a general "a << b" operation on vectors.
   /// This should also apply to lowering for vector funnel shifts (rotates).
-  bool isVectorShiftByScalarCheap(Type *Ty) const;
+  LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const;
 
   struct VPLegalization {
     enum VPTransform {
@@ -1886,7 +1887,7 @@ class TargetTransformInfo {
 
   /// \returns How the target needs this vector-predicated operation to be
   /// transformed.
-  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
+  LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
   /// @}
 
   /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
@@ -1898,26 +1899,26 @@ class TargetTransformInfo {
   ///
   /// For non-Arm targets, this function isn't used. It defaults to returning
   /// false, but it shouldn't matter what it returns anyway.
-  bool hasArmWideBranch(bool Thumb) const;
+  LLVM_ABI bool hasArmWideBranch(bool Thumb) const;
 
   /// Returns a bitmask constructed from the target-features or fmv-features
   /// metadata of a function.
-  uint64_t getFeatureMask(const Function &F) const;
+  LLVM_ABI uint64_t getFeatureMask(const Function &F) const;
 
   /// Returns true if this is an instance of a function with multiple versions.
-  bool isMultiversionedFunction(const Function &F) const;
+  LLVM_ABI bool isMultiversionedFunction(const Function &F) const;
 
   /// \return The maximum number of function arguments the target supports.
-  unsigned getMaxNumArgs() const;
+  LLVM_ABI unsigned getMaxNumArgs() const;
 
   /// \return For an array of given Size, return alignment boundary to
   /// pad to. Default is no padding.
-  unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const;
+  LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const;
 
   /// @}
 
   /// Collect kernel launch bounds for \p F into \p LB.
-  void collectKernelLaunchBounds(
+  LLVM_ABI void collectKernelLaunchBounds(
       const Function &F,
       SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const;
 
@@ -1944,13 +1945,13 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
   ///
   /// This will use the module's datalayout to construct a baseline
   /// conservative TTI result.
-  TargetIRAnalysis();
+  LLVM_ABI TargetIRAnalysis();
 
   /// Construct an IR analysis pass around a target-provide callback.
   ///
   /// The callback will be called with a particular function for which the TTI
   /// is needed and must return a TTI object for that function.
-  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
+  LLVM_ABI TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
 
   // Value semantics. We spell out the constructors for MSVC.
   TargetIRAnalysis(const TargetIRAnalysis &Arg)
@@ -1966,7 +1967,7 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
     return *this;
   }
 
-  Result run(const Function &F, FunctionAnalysisManager &);
+  LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &);
 
 private:
   friend AnalysisInfoMixin<TargetIRAnalysis>;
@@ -1992,7 +1993,7 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
 ///
 /// This pass can be constructed from a TTI object which it stores internally
 /// and is queried by passes.
-class TargetTransformInfoWrapperPass : public ImmutablePass {
+class LLVM_ABI TargetTransformInfoWrapperPass : public ImmutablePass {
   TargetIRAnalysis TIRA;
   std::optional<TargetTransformInfo> TTI;
 
@@ -2016,7 +2017,7 @@ class TargetTransformInfoWrapperPass : public ImmutablePass {
 ///
 /// This analysis pass just holds the TTI instance and makes it available to
 /// clients.
-ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
+LLVM_ABI ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index 91b0ad2b22f81..f4e95843cebf1 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
 #define LLVM_ANALYSIS_TENSORSPEC_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Config/llvm-config.h"
 
 #include "llvm/ADT/StringMap.h"
@@ -95,10 +96,10 @@ class TensorSpec final {
       : TensorSpec(NewName, Other.Port, Other.Type, Other.ElementSize,
                    Other.Shape) {}
 
-  void toJSON(json::OStream &OS) const;
+  LLVM_ABI void toJSON(json::OStream &OS) const;
 
 private:
-  TensorSpec(const std::string &Name, int Port, TensorType Type,
+  LLVM_ABI TensorSpec(const std::string &Name, int Port, TensorType Type,
              size_t ElementSize, const std::vector<int64_t> &Shape);
 
   template <typename T> static TensorType getDataType();
@@ -112,7 +113,7 @@ class TensorSpec final {
 };
 
 /// For debugging.
-std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);
+LLVM_ABI std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);
 
 /// Construct a TensorSpec from a JSON dictionary of the form:
 /// { "name": <string>,
@@ -121,7 +122,7 @@ std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);
 ///   "shape": <array of ints> }
 /// For the "type" field, see the C++ primitive types used in
 /// TFUTILS_SUPPORTED_TYPES.
-std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+LLVM_ABI std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                                 const json::Value &Value);
 
 #define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
diff --git a/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
index e70f35174e4ca..8b570a7ed0073 100644
--- a/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -15,6 +15,7 @@
 #ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
 #define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
@@ -46,16 +47,16 @@ class TypeBasedAAResult : public AAResultBase {
     return false;
   }
 
-  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                     AAQueryInfo &AAQI, const Instruction *CtxI);
-  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                                bool IgnoreLocals);
 
-  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
-  MemoryEffects getMemoryEffects(const Function *F);
-  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+  LLVM_ABI MemoryEffects getMemoryEffects(const Function *F);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                            AAQueryInfo &AAQI);
-  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                            AAQueryInfo &AAQI);
 
 private:
@@ -75,11 +76,11 @@ class TypeBasedAA : public AnalysisInfoMixin<TypeBasedAA> {
 public:
   using Result = TypeBasedAAResult;
 
-  TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
+  LLVM_ABI TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
 };
 
 /// Legacy wrapper pass to provide the TypeBasedAAResult object.
-class TypeBasedAAWrapperPass : public ImmutablePass {
+class LLVM_ABI TypeBasedAAWrapperPass : public ImmutablePass {
   std::unique_ptr<TypeBasedAAResult> Result;
 
 public:
@@ -100,7 +101,7 @@ class TypeBasedAAWrapperPass : public ImmutablePass {
 // createTypeBasedAAWrapperPass - This pass implements metadata-based
 // type-based alias analysis.
 //
-ImmutablePass *createTypeBasedAAWrapperPass();
+LLVM_ABI ImmutablePass *createTypeBasedAAWrapperPass();
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h b/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
index 45ef4dbe2155e..25d6477613556 100644
--- a/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
+++ b/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
@@ -12,6 +12,7 @@
 #ifndef LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
 #define LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringRef.h"
@@ -66,13 +67,13 @@ class ImportedFunctionsInliningStatistics {
       const ImportedFunctionsInliningStatistics &) = delete;
 
   /// Set information like AllFunctions, ImportedFunctions, ModuleName.
-  void setModuleInfo(const Module &M);
+  LLVM_ABI void setModuleInfo(const Module &M);
   /// Record inline of @param Callee to @param Caller for statistics.
-  void recordInline(const Function &Caller, const Function &Callee);
+  LLVM_ABI void recordInline(const Function &Caller, const Function &Callee);
   /// Dump stats computed with InlinerStatistics class.
   /// If @param Verbose is true then separate statistics for every inlined
   /// function will be printed.
-  void dump(bool Verbose);
+  LLVM_ABI void dump(bool Verbose);
 
 private:
   /// Creates new Node in NodeMap and sets attributes, or returns existed one.
diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
index e1dbfd3e5f37c..b07057af62196 100644
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -26,7 +26,7 @@ class Value;
 /// pointer). Return the result as a signed integer of intptr size.
 /// When NoAssumptions is true, no assumptions about index computation not
 /// overflowing is made.
-Value *emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP,
+LLVM_ABI Value *emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP,
                      bool NoAssumptions = false);
 
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
index 8f46779a732d1..45903f60bac87 100644
--- a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
+++ b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
@@ -53,6 +53,7 @@
 #ifndef LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
 #define LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Config/llvm-config.h"
 
 #include "llvm/ADT/StringMap.h"
@@ -100,7 +101,7 @@ class Logger final {
   void writeTensor(const TensorSpec &Spec, const char *RawData) {
     OS->write(RawData, Spec.getTotalTensorBufferSize());
   }
-  void logRewardImpl(const char *RawData);
+  LLVM_ABI void logRewardImpl(const char *RawData);
 
 public:
   /// Construct a Logger. If IncludeReward is false, then logReward or
@@ -109,14 +110,14 @@ class Logger final {
   /// NOTE: the FeatureSpecs are expected to be in the same order (i.e. have
   /// corresponding indices) with any MLModelRunner implementations
   /// corresponding to the model being trained/logged.
-  Logger(std::unique_ptr<raw_ostream> OS,
+  LLVM_ABI Logger(std::unique_ptr<raw_ostream> OS,
          const std::vector<TensorSpec> &FeatureSpecs,
          const TensorSpec &RewardSpec, bool IncludeReward,
          std::optional<TensorSpec> AdviceSpec = std::nullopt);
 
-  void switchContext(StringRef Name);
-  void startObservation();
-  void endObservation();
+  LLVM_ABI void switchContext(StringRef Name);
+  LLVM_ABI void startObservation();
+  LLVM_ABI void endObservation();
   void flush() { OS->flush(); }
 
   const std::string &currentContext() const { return CurrentContext; }
diff --git a/llvm/include/llvm/Analysis/ValueLattice.h b/llvm/include/llvm/Analysis/ValueLattice.h
index 9357a15f7619f..a04714c829336 100644
--- a/llvm/include/llvm/Analysis/ValueLattice.h
+++ b/llvm/include/llvm/Analysis/ValueLattice.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_ANALYSIS_VALUELATTICE_H
 #define LLVM_ANALYSIS_VALUELATTICE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
 
@@ -467,7 +468,7 @@ class ValueLatticeElement {
   /// Compares this symbolic value with Other using Pred and returns either
   /// true, false or undef constants, or nullptr if the comparison cannot be
   /// evaluated.
-  Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
+  LLVM_ABI Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
                        const ValueLatticeElement &Other,
                        const DataLayout &DL) const;
 
@@ -486,7 +487,7 @@ class ValueLatticeElement {
   ///   so as not to confuse the rest of LVI.  Ideally, we'd always return Undefined,
   ///   but we do not make this guarantee.  TODO: This would be a useful
   ///   enhancement.
-  ValueLatticeElement intersect(const ValueLatticeElement &Other) const;
+  LLVM_ABI ValueLatticeElement intersect(const ValueLatticeElement &Other) const;
 
   unsigned getNumRangeExtensions() const { return NumRangeExtensions; }
   void setNumRangeExtensions(unsigned N) { NumRangeExtensions = N; }
@@ -495,6 +496,6 @@ class ValueLatticeElement {
 static_assert(sizeof(ValueLatticeElement) <= 40,
               "size of ValueLatticeElement changed unexpectedly");
 
-raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val);
 } // end namespace llvm
 #endif
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 61dbb07e7128e..d7f0a58c9defb 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
 #define LLVM_ANALYSIS_VALUETRACKING_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/SimplifyQuery.h"
 #include "llvm/Analysis/WithCache.h"
 #include "llvm/IR/Constants.h"
@@ -52,59 +53,59 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
 /// where V is a vector, the known zero and known one values are the
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
-void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
+LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);
 
 /// Returns the known bits rather than passing by reference.
-KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
+LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
 
 /// Returns the known bits rather than passing by reference.
-KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
+LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                            const DataLayout &DL, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
 
-KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
+LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                            unsigned Depth, const SimplifyQuery &Q);
 
 KnownBits computeKnownBits(const Value *V, unsigned Depth,
                            const SimplifyQuery &Q);
 
-void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
+LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                       const SimplifyQuery &Q);
 
 /// Compute known bits from the range metadata.
 /// \p KnownZero the set of bits that are known to be zero
 /// \p KnownOne the set of bits that are known to be one
-void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
+LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
 
 /// Merge bits known from context-dependent facts into Known.
-void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
+LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                  unsigned Depth, const SimplifyQuery &Q);
 
 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
-KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
+LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                        const KnownBits &KnownLHS,
                                        const KnownBits &KnownRHS,
                                        unsigned Depth, const SimplifyQuery &SQ);
 
 /// Adjust \p Known for the given select \p Arm to include information from the
 /// select \p Cond.
-void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm,
+LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm,
                                  bool Invert, unsigned Depth,
                                  const SimplifyQuery &Q);
 
 /// Return true if LHS and RHS have no common bits set.
-bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
+LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                          const WithCache<const Value *> &RHSCache,
                          const SimplifyQuery &SQ);
 
@@ -113,19 +114,19 @@ bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
 /// of two when defined. Supports values with integer or pointer type and
 /// vectors of integers. If 'OrZero' is set, then return true if the given
 /// value is either a power of two or zero.
-bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
+LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                             bool OrZero = false, unsigned Depth = 0,
                             AssumptionCache *AC = nullptr,
                             const Instruction *CxtI = nullptr,
                             const DominatorTree *DT = nullptr,
                             bool UseInstrInfo = true);
 
-bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
+LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                             const SimplifyQuery &Q);
 
-bool isOnlyUsedInZeroComparison(const Instruction *CxtI);
+LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI);
 
-bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
+LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
 
 /// Return true if the given value is known to be non-zero when defined. For
 /// vectors, return true if every element is known to be non-zero when
@@ -133,13 +134,13 @@ bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
 /// specified, perform context-sensitive analysis and return true if the
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
-bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
+LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
 
 /// Return true if the two given values are negation.
 /// Currently can recognize Value pair:
 /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
 /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
-bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
+LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
                      bool AllowPoison = true);
 
 /// Return true iff:
@@ -147,25 +148,25 @@ bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
 /// 2. X is true implies Y is false.
 /// 3. X is false implies Y is true.
 /// Otherwise, return false.
-bool isKnownInversion(const Value *X, const Value *Y);
+LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y);
 
 /// Returns true if the given value is known to be non-negative.
-bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
+LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                         unsigned Depth = 0);
 
 /// Returns true if the given value is known to be positive (i.e. non-negative
 /// and non-zero).
-bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
+LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                      unsigned Depth = 0);
 
 /// Returns true if the given value is known to be negative (i.e. non-positive
 /// and non-zero).
-bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
+LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                      unsigned Depth = 0);
 
 /// Return true if the given values are known to be non-equal when defined.
 /// Supports scalar integer types only.
-bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ,
+LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ,
                      unsigned Depth = 0);
 
 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
@@ -177,7 +178,7 @@ bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ,
 /// where V is a vector, the mask, known zero, and known one values are the
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
-bool MaskedValueIsZero(const Value *V, const APInt &Mask,
+LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                        const SimplifyQuery &SQ, unsigned Depth = 0);
 
 /// Return the number of times the sign bit of the register is replicated into
@@ -187,7 +188,7 @@ bool MaskedValueIsZero(const Value *V, const APInt &Mask,
 /// equal to each other, so we return 3. For vectors, return the number of
 /// sign bits for the vector element with the minimum number of known sign
 /// bits.
-unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
+LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
                             const Instruction *CxtI = nullptr,
                             const DominatorTree *DT = nullptr,
@@ -196,7 +197,7 @@ unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
 /// Similar to the APInt::getSignificantBits function.
-unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
+LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                    unsigned Depth = 0,
                                    AssumptionCache *AC = nullptr,
                                    const Instruction *CxtI = nullptr,
@@ -204,13 +205,13 @@ unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
 
 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
 /// intrinsics are treated as-if they were intrinsics.
-Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
+LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                       const TargetLibraryInfo *TLI);
 
 /// Given an exploded icmp instruction, return true if the comparison only
 /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
 /// the result of the comparison is true when the input value is signed.
-bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
+LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                     bool &TrueIfSigned);
 
 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
@@ -221,11 +222,11 @@ bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
 ///
 /// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
 /// element will always be LHS.
-std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
+LLVM_ABI std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                 const Function &F, Value *LHS,
                                                 Value *RHS,
                                                 bool LookThroughSrc = true);
-std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
+LLVM_ABI std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                 const Function &F, Value *LHS,
                                                 const APFloat *ConstRHS,
                                                 bool LookThroughSrc = true);
@@ -246,13 +247,13 @@ std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
 /// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
 /// element will always be LHS.
 ///
-std::tuple<Value *, FPClassTest, FPClassTest>
+LLVM_ABI std::tuple<Value *, FPClassTest, FPClassTest>
 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                  Value *RHS, bool LookThroughSrc = true);
-std::tuple<Value *, FPClassTest, FPClassTest>
+LLVM_ABI std::tuple<Value *, FPClassTest, FPClassTest>
 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                  FPClassTest RHS, bool LookThroughSrc = true);
-std::tuple<Value *, FPClassTest, FPClassTest>
+LLVM_ABI std::tuple<Value *, FPClassTest, FPClassTest>
 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                  const APFloat &RHS, bool LookThroughSrc = true);
 
@@ -266,14 +267,14 @@ fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
 /// point classes should be queried. Queries not specified in \p
 /// InterestedClasses should be reliable if they are determined during the
 /// query.
-KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                                  FPClassTest InterestedClasses, unsigned Depth,
                                  const SimplifyQuery &SQ);
 
-KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
                                  unsigned Depth, const SimplifyQuery &SQ);
 
-KnownFPClass computeKnownFPClass(const Value *V, const DataLayout &DL,
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const DataLayout &DL,
                                  FPClassTest InterestedClasses = fcAllFlags,
                                  unsigned Depth = 0,
                                  const TargetLibraryInfo *TLI = nullptr,
@@ -283,19 +284,19 @@ KnownFPClass computeKnownFPClass(const Value *V, const DataLayout &DL,
                                  bool UseInstrInfo = true);
 
 /// Wrapper to account for known fast math flags at the use instruction.
-KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                                  FastMathFlags FMF,
                                  FPClassTest InterestedClasses, unsigned Depth,
                                  const SimplifyQuery &SQ);
 
-KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                  FPClassTest InterestedClasses, unsigned Depth,
                                  const SimplifyQuery &SQ);
 
 /// Return true if we can prove that the specified FP value is never equal to
 /// -0.0. Users should use caution when considering PreserveSign
 /// denormal-fp-math.
-bool cannotBeNegativeZero(const Value *V, unsigned Depth,
+LLVM_ABI bool cannotBeNegativeZero(const Value *V, unsigned Depth,
                           const SimplifyQuery &SQ);
 
 /// Return true if we can prove that the specified FP value is either NaN or
@@ -306,28 +307,28 @@ bool cannotBeNegativeZero(const Value *V, unsigned Depth,
 ///       -0 --> true
 ///   x > +0 --> true
 ///   x < -0 --> false
-bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
+LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
                                  const SimplifyQuery &SQ);
 
 /// Return true if the floating-point scalar value is not an infinity or if
 /// the floating-point vector value has no infinities. Return false if a value
 /// could ever be infinity.
-bool isKnownNeverInfinity(const Value *V, unsigned Depth,
+LLVM_ABI bool isKnownNeverInfinity(const Value *V, unsigned Depth,
                           const SimplifyQuery &SQ);
 
 /// Return true if the floating-point value can never contain a NaN or infinity.
-bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
+LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
                           const SimplifyQuery &SQ);
 
 /// Return true if the floating-point scalar value is not a NaN or if the
 /// floating-point vector value has no NaN elements. Return false if a value
 /// could ever be NaN.
-bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ);
+LLVM_ABI bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ);
 
 /// Return false if we can prove that the specified FP value's sign bit is 0.
 /// Return true if we can prove that the specified FP value's sign bit is 1.
 /// Otherwise return std::nullopt.
-std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
+LLVM_ABI std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
                                           const SimplifyQuery &SQ);
 
 /// If the specified value can be set by repeating the same byte in memory,
@@ -336,7 +337,7 @@ std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
 /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
 /// i16 0x1234), return null. If the value is entirely undef and padding,
 /// return undef.
-Value *isBytewiseValue(Value *V, const DataLayout &DL);
+LLVM_ABI Value *isBytewiseValue(Value *V, const DataLayout &DL);
 
 /// Given an aggregate and an sequence of indices, see if the scalar value
 /// indexed is already around as a register, for example if it were inserted
@@ -344,7 +345,7 @@ Value *isBytewiseValue(Value *V, const DataLayout &DL);
 ///
 /// If InsertBefore is not empty, this function will duplicate (modified)
 /// insertvalues when a part of a nested struct is extracted.
-Value *FindInsertedValue(
+LLVM_ABI Value *FindInsertedValue(
     Value *V, ArrayRef<unsigned> idx_range,
     std::optional<BasicBlock::iterator> InsertBefore = std::nullopt);
 
@@ -373,7 +374,7 @@ GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
 
 /// Returns true if the GEP is based on a pointer to a string (array of
 // \p CharSize integers) and is indexing into this string.
-bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
+LLVM_ABI bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
 
 /// Represents offset+length into a ConstantDataArray.
 struct ConstantDataArraySlice {
@@ -403,7 +404,7 @@ struct ConstantDataArraySlice {
 /// Returns true if the value \p V is a pointer into a ConstantDataArray.
 /// If successful \p Slice will point to a ConstantDataArray info object
 /// with an appropriate offset.
-bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
+LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                               unsigned ElementSize, uint64_t Offset = 0);
 
 /// This function computes the length of a null-terminated C string pointed to
@@ -412,18 +413,18 @@ bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
 /// character by default. If TrimAtNul is set to false, then this returns any
 /// trailing null characters as well as any other characters that come after
 /// it.
-bool getConstantStringInfo(const Value *V, StringRef &Str,
+LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str,
                            bool TrimAtNul = true);
 
 /// If we can compute the length of the string pointed to by the specified
 /// pointer, return 'len+1'.  If we can't, return 0.
-uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
+LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
 
 /// This function returns call pointer argument that is considered the same by
 /// aliasing rules. You CAN'T use it to replace one value with another. If
 /// \p MustPreserveNullness is true, the call must preserve the nullness of
 /// the pointer.
-const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
+LLVM_ABI const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                   bool MustPreserveNullness);
 inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                    bool MustPreserveNullness) {
@@ -437,7 +438,7 @@ inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
 /// considered as capture. The arguments are not marked as returned neither,
 /// because it would make it useless. If \p MustPreserveNullness is true,
 /// the intrinsic must preserve the nullness of the pointer.
-bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
+LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
     const CallBase *Call, bool MustPreserveNullness);
 
 /// This method strips off any GEP address adjustments, pointer casts
@@ -445,7 +446,7 @@ bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
 /// original object being addressed. Note that the returned value has pointer
 /// type if the specified value does. If the \p MaxLookup value is non-zero, it
 /// limits the number of instructions to be stripped off.
-const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
+LLVM_ABI const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
 inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
   // Force const to avoid infinite recursion.
   const Value *VConst = V;
@@ -454,7 +455,7 @@ inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
 
 /// Like getUnderlyingObject(), but will try harder to find a single underlying
 /// object. In particular, this function also looks through selects and phis.
-const Value *getUnderlyingObjectAggressive(const Value *V);
+LLVM_ABI const Value *getUnderlyingObjectAggressive(const Value *V);
 
 /// This method is similar to getUnderlyingObject except that it can
 /// look through phi and select instructions and return multiple objects.
@@ -484,29 +485,29 @@ const Value *getUnderlyingObjectAggressive(const Value *V);
 /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
 /// should not assume that Curr and Prev share the same underlying object thus
 /// it shouldn't look through the phi above.
-void getUnderlyingObjects(const Value *V,
+LLVM_ABI void getUnderlyingObjects(const Value *V,
                           SmallVectorImpl<const Value *> &Objects,
                           const LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
 
 /// This is a wrapper around getUnderlyingObjects and adds support for basic
 /// ptrtoint+arithmetic+inttoptr sequences.
-bool getUnderlyingObjectsForCodeGen(const Value *V,
+LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V,
                                     SmallVectorImpl<Value *> &Objects);
 
 /// Returns unique alloca where the value comes from, or nullptr.
 /// If OffsetZero is true check that V points to the begining of the alloca.
-AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
+LLVM_ABI AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
 inline const AllocaInst *findAllocaForValue(const Value *V,
                                             bool OffsetZero = false) {
   return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
 }
 
 /// Return true if the only users of this pointer are lifetime markers.
-bool onlyUsedByLifetimeMarkers(const Value *V);
+LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V);
 
 /// Return true if the only users of this pointer are lifetime markers or
 /// droppable instructions.
-bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
+LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
 
 /// Return true if the instruction doesn't potentially cross vector lanes. This
 /// condition is weaker than checking that the instruction is lanewise: lanewise
@@ -514,7 +515,7 @@ bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
 /// include the case where there is a different operation on each lane, as long
 /// as the operation only uses data from that lane. An example of an operation
 /// that is not lanewise, but doesn't cross vector lanes is insertelement.
-bool isNotCrossLaneOperation(const Instruction *I);
+LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I);
 
 /// Return true if the instruction does not have any effects besides
 /// calculating the result and does not have undefined behavior.
@@ -547,7 +548,7 @@ bool isNotCrossLaneOperation(const Instruction *I);
 ///
 /// This method can return true for instructions that read memory;
 /// for such instructions, moving them may change the resulting value.
-bool isSafeToSpeculativelyExecute(const Instruction *I,
+LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I,
                                   const Instruction *CtxI = nullptr,
                                   AssumptionCache *AC = nullptr,
                                   const DominatorTree *DT = nullptr,
@@ -593,7 +594,7 @@ inline bool isSafeToSpeculativelyExecuteWithVariableReplaced(
 ///   function may have said that the instruction wouldn't be speculatable.
 ///   This behavior is a shortcoming in the current implementation and not
 ///   intentional.
-bool isSafeToSpeculativelyExecuteWithOpcode(
+LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(
     unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
     AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
     const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true,
@@ -607,11 +608,11 @@ bool isSafeToSpeculativelyExecuteWithOpcode(
 ///   dependent instructions.
 /// * Control dependence arises for example if the instruction may fault
 ///   if lifted above a throwing call or infinite loop.
-bool mayHaveNonDefUseDependency(const Instruction &I);
+LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I);
 
 /// Return true if it is an intrinsic that cannot be speculated but also
 /// cannot trap.
-bool isAssumeLikeIntrinsic(const Instruction *I);
+LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I);
 
 /// Return true if it is valid to use the assumptions provided by an
 /// assume intrinsic, I, at the point in the control-flow identified by the
@@ -620,7 +621,7 @@ bool isAssumeLikeIntrinsic(const Instruction *I);
 /// to optimize away its argument. If the caller can ensure that this won't
 /// happen, it can call with AllowEphemerals set to true to get more valid
 /// assumptions.
-bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
+LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                              const DominatorTree *DT = nullptr,
                              bool AllowEphemerals = false);
 
@@ -635,39 +636,39 @@ enum class OverflowResult {
   NeverOverflows,
 };
 
-OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
+LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                              const SimplifyQuery &SQ,
                                              bool IsNSW = false);
-OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
+LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                            const SimplifyQuery &SQ);
-OverflowResult
+LLVM_ABI OverflowResult
 computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                               const WithCache<const Value *> &RHS,
                               const SimplifyQuery &SQ);
-OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
+LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                            const WithCache<const Value *> &RHS,
                                            const SimplifyQuery &SQ);
 /// This version also leverages the sign bit of Add if known.
-OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
+LLVM_ABI OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                            const SimplifyQuery &SQ);
-OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
+LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                              const SimplifyQuery &SQ);
-OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
+LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                            const SimplifyQuery &SQ);
 
 /// Returns true if the arithmetic part of the \p WO 's result is
 /// used only along the paths control dependent on the computation
 /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
-bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
+LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                const DominatorTree &DT);
 
 /// Determine the possible constant range of vscale with the given bit width,
 /// based on the vscale_range function attribute.
-ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
+LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
 
 /// Determine the possible constant range of an integer or vector of integer
 /// value. This is intended as a cheap, non-recursive check.
-ConstantRange computeConstantRange(const Value *V, bool ForSigned,
+LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                    bool UseInstrInfo = true,
                                    AssumptionCache *AC = nullptr,
                                    const Instruction *CtxI = nullptr,
@@ -675,7 +676,7 @@ ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                    unsigned Depth = 0);
 
 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
-ConstantRange
+LLVM_ABI ConstantRange
 computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                        bool ForSigned, const SimplifyQuery &SQ);
 
@@ -692,31 +693,31 @@ computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
 /// Undefined behavior is assumed not to happen, so e.g. division is
 /// guaranteed to transfer execution to the following instruction even
 /// though division by zero might cause undefined behavior.
-bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
+LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
 
 /// Returns true if this block does not contain a potential implicit exit.
 /// This is equivelent to saying that all instructions within the basic block
 /// are guaranteed to transfer execution to their successor within the basic
 /// block. This has the same assumptions w.r.t. undefined behavior as the
 /// instruction variant of this function.
-bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
+LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
 
 /// Return true if every instruction in the range (Begin, End) is
 /// guaranteed to transfer execution to its static successor. \p ScanLimit
 /// bounds the search to avoid scanning huge blocks.
-bool isGuaranteedToTransferExecutionToSuccessor(
+LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
     BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
     unsigned ScanLimit = 32);
 
 /// Same as previous, but with range expressed via iterator_range.
-bool isGuaranteedToTransferExecutionToSuccessor(
+LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
     iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);
 
 /// Return true if this function can prove that the instruction I
 /// is executed for every iteration of the loop L.
 ///
 /// Note that this currently only considers the loop header.
-bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                             const Loop *L);
 
 /// Return true if \p PoisonOp's user yields poison or raises UB if its
@@ -728,12 +729,12 @@ bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
 ///
 /// To filter out operands that raise UB on poison, you can use
 /// getGuaranteedNonPoisonOp.
-bool propagatesPoison(const Use &PoisonOp);
+LLVM_ABI bool propagatesPoison(const Use &PoisonOp);
 
 /// Return true if the given instruction must trigger undefined behavior
 /// when I is executed with any operands which appear in KnownPoison holding
 /// a poison value at the point of execution.
-bool mustTriggerUB(const Instruction *I,
+LLVM_ABI bool mustTriggerUB(const Instruction *I,
                    const SmallPtrSetImpl<const Value *> &KnownPoison);
 
 /// Return true if this function can prove that if Inst is executed
@@ -742,8 +743,8 @@ bool mustTriggerUB(const Instruction *I,
 ///
 /// Note that this currently only considers the basic block that is
 /// the parent of Inst.
-bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
-bool programUndefinedIfPoison(const Instruction *Inst);
+LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
+LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst);
 
 /// canCreateUndefOrPoison returns true if Op can create undef or poison from
 /// non-undef & non-poison operands.
@@ -763,14 +764,14 @@ bool programUndefinedIfPoison(const Instruction *Inst);
 ///
 /// canCreatePoison returns true if Op can create poison from non-poison
 /// operands.
-bool canCreateUndefOrPoison(const Operator *Op,
+LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op,
                             bool ConsiderFlagsAndMetadata = true);
-bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
+LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
 
 /// Return true if V is poison given that ValAssumedPoison is already poison.
 /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
 /// impliesPoison returns true.
-bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
+LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
 
 /// Return true if this function can prove that V does not have undef bits
 /// and is never poison. If V is an aggregate value or vector, check whether
@@ -781,14 +782,14 @@ bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
 /// If CtxI and DT are specified this method performs flow-sensitive analysis
 /// and returns true if it is guaranteed to be never undef or poison
 /// immediately before the CtxI.
-bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
+LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                       AssumptionCache *AC = nullptr,
                                       const Instruction *CtxI = nullptr,
                                       const DominatorTree *DT = nullptr,
                                       unsigned Depth = 0);
 
 /// Returns true if V cannot be poison, but may be undef.
-bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
+LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                                const Instruction *CtxI = nullptr,
                                const DominatorTree *DT = nullptr,
                                unsigned Depth = 0);
@@ -803,7 +804,7 @@ inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
 }
 
 /// Returns true if V cannot be undef, but may be poison.
-bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
+LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);
@@ -815,7 +816,7 @@ bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
 /// be added at a location which is control equivalent with OnPathTo (such as
 /// immediately before it) without introducing UB which didn't previously
 /// exist.  Note that a false result conveys no information.
-bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
+LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                    Instruction *OnPathTo,
                                    DominatorTree *DT);
 
@@ -823,7 +824,7 @@ bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
 /// form with the strictness flipped predicate. Return the new predicate and
 /// corresponding constant RHS if possible. Otherwise return std::nullopt.
 /// E.g., (icmp sgt X, 0) -> (icmp sle X, 1).
-std::optional<std::pair<CmpPredicate, Constant *>>
+LLVM_ABI std::optional<std::pair<CmpPredicate, Constant *>>
 getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C);
 
 /// Specific patterns of select instructions we can match.
@@ -882,7 +883,7 @@ struct SelectPatternResult {
 ///
 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
 ///
-SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
+LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                        Instruction::CastOps *CastOp = nullptr,
                                        unsigned Depth = 0);
 
@@ -898,40 +899,40 @@ inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
 
 /// Determine the pattern that a select with the given compare as its
 /// predicate and given values as its true/false operands would match.
-SelectPatternResult matchDecomposedSelectPattern(
+LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(
     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
     Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
 
 /// Determine the pattern for predicate `X Pred Y ? X : Y`.
-SelectPatternResult
+LLVM_ABI SelectPatternResult
 getSelectPattern(CmpInst::Predicate Pred,
                  SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
                  bool Ordered = false);
 
 /// Return the canonical comparison predicate for the specified
 /// minimum/maximum flavor.
-CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);
+LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);
 
 /// Convert given `SPF` to equivalent min/max intrinsic.
 /// Caller must ensure `SPF` is an integer min or max pattern.
-Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF);
+LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF);
 
 /// Return the inverse minimum/maximum flavor of the specified flavor.
 /// For example, signed minimum is the inverse of signed maximum.
-SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
+LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
 
-Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);
+LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);
 
 /// Return the minimum or maximum constant value for the specified integer
 /// min/max flavor and type.
-APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);
+LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);
 
 /// Check if the values in \p VL are select instructions that can be converted
 /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
 /// conversion is possible, together with a bool indicating whether all select
 /// conditions are only used by the selects. Otherwise return
 /// Intrinsic::not_intrinsic.
-std::pair<Intrinsic::ID, bool>
+LLVM_ABI std::pair<Intrinsic::ID, bool>
 canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
 
 /// Attempt to match a simple first order recurrence cycle of the form:
@@ -958,11 +959,11 @@ canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
 ///
 /// NOTE: This is intentional simple.  If you want the ability to analyze
 /// non-trivial loop conditons, see ScalarEvolution instead.
-bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
+LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                            Value *&Step);
 
 /// Analogous to the above, but starting from the binary operator
-bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
+LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                            Value *&Step);
 
 /// Return true if RHS is known to be implied true by LHS.  Return false if
@@ -975,11 +976,11 @@ bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
 ///  T | T | F
 ///  F | T | T
 /// (A)
-std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
+LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL,
                                        bool LHSIsTrue = true,
                                        unsigned Depth = 0);
-std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
+LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
                                        const Value *RHSOp0, const Value *RHSOp1,
                                        const DataLayout &DL,
                                        bool LHSIsTrue = true,
@@ -987,10 +988,10 @@ std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
 
 /// Return the boolean condition value in the context of the given instruction
 /// if it is known based on dominating conditions.
-std::optional<bool> isImpliedByDomCondition(const Value *Cond,
+LLVM_ABI std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                             const Instruction *ContextI,
                                             const DataLayout &DL);
-std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
+LLVM_ABI std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *ContextI,
                                             const DataLayout &DL);
@@ -998,7 +999,7 @@ std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
 /// Call \p InsertAffected on all Values whose known bits / value may be
 /// affected by the condition \p Cond. Used by AssumptionCache and
 /// DomConditionCache.
-void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
+LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
                                    function_ref<void(Value *)> InsertAffected);
 
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 01137481c92ba..7d10bfc32ccec 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_ANALYSIS_VECTORUTILS_H
 #define LLVM_ANALYSIS_VECTORUTILS_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
@@ -134,7 +135,7 @@ typedef unsigned ID;
 /// isVectorIntrinsicWithScalarOpAtArg) for the vector form of the intrinsic.
 ///
 /// Note: isTriviallyVectorizable implies isTriviallyScalarizable.
-bool isTriviallyVectorizable(Intrinsic::ID ID);
+LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID);
 
 /// Identify if the intrinsic is trivially scalarizable.
 /// This method returns true following the same predicates of
@@ -144,48 +145,48 @@ bool isTriviallyVectorizable(Intrinsic::ID ID);
 /// intrinsic is redundant, but we want to implement scalarization of the
 /// vector. To prevent the requirement that an intrinsic also implements
 /// vectorization we provide this seperate function.
-bool isTriviallyScalarizable(Intrinsic::ID ID, const TargetTransformInfo *TTI);
+LLVM_ABI bool isTriviallyScalarizable(Intrinsic::ID ID, const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic has a scalar operand.
 /// \p TTI is used to consider target specific intrinsics, if no target specific
 /// intrinsics will be considered then it is appropriate to pass in nullptr.
-bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx,
+LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx,
                                         const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic is overloaded on the type of
 /// the operand at index \p OpdIdx, or on the return type if \p OpdIdx is -1.
 /// \p TTI is used to consider target specific intrinsics, if no target specific
 /// intrinsics will be considered then it is appropriate to pass in nullptr.
-bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx,
+LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx,
                                             const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic that returns a struct is
 /// overloaded at the struct element index \p RetIdx. /// \p TTI is used to
 /// consider target specific intrinsics, if no target specific intrinsics
 /// will be considered then it is appropriate to pass in nullptr.
-bool isVectorIntrinsicWithStructReturnOverloadAtField(
+LLVM_ABI bool isVectorIntrinsicWithStructReturnOverloadAtField(
     Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI);
 
 /// Returns intrinsic ID for call.
 /// For the input call instruction it finds mapping intrinsic and returns
 /// its intrinsic ID, in case it does not found it return not_intrinsic.
-Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
+LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
                                           const TargetLibraryInfo *TLI);
 
 /// Given a vector and an element number, see if the scalar value is
 /// already around as a register, for example if it were inserted then extracted
 /// from the vector.
-Value *findScalarElement(Value *V, unsigned EltNo);
+LLVM_ABI Value *findScalarElement(Value *V, unsigned EltNo);
 
 /// If all non-negative \p Mask elements are the same value, return that value.
 /// If all elements are negative (undefined) or \p Mask contains different
 /// non-negative values, return -1.
-int getSplatIndex(ArrayRef<int> Mask);
+LLVM_ABI int getSplatIndex(ArrayRef<int> Mask);
 
 /// Get splat value if the input is a splat vector or return nullptr.
 /// The value may be extracted from a splat constants vector or from
 /// a sequence of instructions that broadcast a single value into a vector.
-Value *getSplatValue(const Value *V);
+LLVM_ABI Value *getSplatValue(const Value *V);
 
 /// Return true if each element of the vector value \p V is poisoned or equal to
 /// every other non-poisoned element. If an index element is specified, either
@@ -193,13 +194,13 @@ Value *getSplatValue(const Value *V);
 /// poisoned and equal to every other non-poisoned element.
 /// This may be more powerful than the related getSplatValue() because it is
 /// not limited by finding a scalar source value to a splatted vector.
-bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);
+LLVM_ABI bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);
 
 /// Transform a shuffle mask's output demanded element mask into demanded
 /// element masks for the 2 operands, returns false if the mask isn't valid.
 /// Both \p DemandedLHS and \p DemandedRHS are initialised to [SrcWidth].
 /// \p AllowUndefElts permits "-1" indices to be treated as undef.
-bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
+LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                             const APInt &DemandedElts, APInt &DemandedLHS,
                             APInt &DemandedRHS, bool AllowUndefElts = false);
 
@@ -209,7 +210,7 @@ bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
 /// the vector, with all other elements being undefined.  An identity shuffle
 /// will be matched a slide by 0.  The output parameter provides the source
 /// (-1 means no source), and slide direction for each slide.
-bool isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
+LLVM_ABI bool isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
                        std::array<std::pair<int, int>, 2> &SrcInfo);
 
 /// Replace each shuffle mask index with the scaled sequential indices for an
@@ -223,7 +224,7 @@ bool isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
 /// This is the reverse process of widening shuffle mask elements, but it always
 /// succeeds because the indexes can always be multiplied (scaled up) to map to
 /// narrower vector elements.
-void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+LLVM_ABI void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                            SmallVectorImpl<int> &ScaledMask);
 
 /// Try to transform a shuffle mask by replacing elements with the scaled index
@@ -241,26 +242,26 @@ void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
 /// This is the reverse process of narrowing shuffle mask elements if it
 /// succeeds. This transform is not always possible because indexes may not
 /// divide evenly (scale down) to map to wider vector elements.
-bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                           SmallVectorImpl<int> &ScaledMask);
 
 /// A variant of the previous method which is specialized for Scale=2, and
 /// treats -1 as undef and allows widening when a wider element is partially
 /// undef in the narrow form of the mask.  This transformation discards
 /// information about which bytes in the original shuffle were undef.
-bool widenShuffleMaskElts(ArrayRef<int> M, SmallVectorImpl<int> &NewMask);
+LLVM_ABI bool widenShuffleMaskElts(ArrayRef<int> M, SmallVectorImpl<int> &NewMask);
 
 /// Attempt to narrow/widen the \p Mask shuffle mask to the \p NumDstElts target
 /// width. Internally this will call narrowShuffleMaskElts/widenShuffleMaskElts.
 /// This will assert unless NumDstElts is a multiple of Mask.size (or
 /// vice-versa). Returns false on failure, and ScaledMask will be in an
 /// undefined state.
-bool scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
+LLVM_ABI bool scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                           SmallVectorImpl<int> &ScaledMask);
 
 /// Repetitively apply `widenShuffleMaskElts()` for as long as it succeeds,
 /// to get the shuffle mask with widest possible elements.
-void getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
+LLVM_ABI void getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                   SmallVectorImpl<int> &ScaledMask);
 
 /// Splits and processes shuffle mask depending on the number of input and
@@ -275,7 +276,7 @@ void getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
 /// \param NumOfSrcRegs Number of source registers.
 /// \param NumOfDestRegs Number of destination registers.
 /// \param NumOfUsedRegs Number of actually used destination registers.
-void processShuffleMasks(
+LLVM_ABI void processShuffleMasks(
     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
@@ -294,7 +295,7 @@ void processShuffleMasks(
 /// \param DemandedElts   the demanded elements mask for the operation
 /// \param DemandedLHS    the demanded elements mask for the left operand
 /// \param DemandedRHS    the demanded elements mask for the right operand
-void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
+LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                          const APInt &DemandedElts,
                                          APInt &DemandedLHS,
                                          APInt &DemandedRHS);
@@ -333,7 +334,7 @@ void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
 ///
 /// If the optional TargetTransformInfo is provided, this function tries harder
 /// to do less work by only looking at illegal types.
-MapVector<Instruction*, uint64_t>
+LLVM_ABI MapVector<Instruction*, uint64_t>
 computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
                          DemandedBits &DB,
                          const TargetTransformInfo *TTI=nullptr);
@@ -342,7 +343,7 @@ computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
 ///
 /// If the list contains just one access group, it is returned directly. If the
 /// list is empty, returns nullptr.
-MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
+LLVM_ABI MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
 
 /// Compute the access-group list of access groups that @p Inst1 and @p Inst2
 /// are both in. If either instruction does not access memory at all, it is
@@ -350,14 +351,14 @@ MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
 ///
 /// If the list contains just one access group, it is returned directly. If the
 /// list is empty, returns nullptr.
-MDNode *intersectAccessGroups(const Instruction *Inst1,
+LLVM_ABI MDNode *intersectAccessGroups(const Instruction *Inst1,
                               const Instruction *Inst2);
 
 /// Add metadata from \p Inst to \p Metadata, if it can be preserved after
 /// vectorization. It can be preserved after vectorization if the kind is one of
 /// [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath, MD_nontemporal,
 /// MD_access_group, MD_mmra].
-void getMetadataToPropagate(
+LLVM_ABI void getMetadataToPropagate(
     Instruction *Inst,
     SmallVectorImpl<std::pair<unsigned, MDNode *>> &Metadata);
 
@@ -369,7 +370,7 @@ void getMetadataToPropagate(
 /// metadata for M equal to the intersection value.
 ///
 /// This function always sets a (possibly null) value for each K in Kinds.
-Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
+LLVM_ABI Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
 
 /// Create a mask that filters the members of an interleave group where there
 /// are gaps.
@@ -382,7 +383,7 @@ Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
 /// Note: The result is a mask of 0's and 1's, as opposed to the other
 /// create[*]Mask() utilities which create a shuffle mask (mask that
 /// consists of indices).
-Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
+LLVM_ABI Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                                const InterleaveGroup<Instruction> &Group);
 
 /// Create a mask with replicated elements.
@@ -397,7 +398,7 @@ Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
 ///
 ///   <0,0,0,1,1,1,2,2,2,3,3,3>
-llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFactor,
+LLVM_ABI llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFactor,
                                                 unsigned VF);
 
 /// Create an interleave shuffle mask.
@@ -411,7 +412,7 @@ llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFactor,
 /// For example, the mask for VF = 4 and NumVecs = 2 is:
 ///
 ///   <0, 4, 1, 5, 2, 6, 3, 7>.
-llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned NumVecs);
+LLVM_ABI llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned NumVecs);
 
 /// Create a stride shuffle mask.
 ///
@@ -425,7 +426,7 @@ llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned NumVecs);
 /// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
 ///
 ///   <0, 2, 4, 6>
-llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
+LLVM_ABI llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
                                             unsigned VF);
 
 /// Create a sequential shuffle mask.
@@ -439,13 +440,13 @@ llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
 /// For example, the mask for Start = 0, NumInsts = 4, and NumUndefs = 4 is:
 ///
 ///   <0, 1, 2, 3, undef, undef, undef, undef>
-llvm::SmallVector<int, 16>
+LLVM_ABI llvm::SmallVector<int, 16>
 createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs);
 
 /// Given a shuffle mask for a binary shuffle, create the equivalent shuffle
 /// mask assuming both operands are identical. This assumes that the unary
 /// shuffle will use elements from operand 0 (operand 1 will be unused).
-llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
+LLVM_ABI llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
                                            unsigned NumElts);
 
 /// Concatenate a list of vectors.
@@ -455,26 +456,26 @@ llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
 /// their element types should be the same. The number of elements in the
 /// vectors should also be the same; however, if the last vector has fewer
 /// elements, it will be padded with undefs.
-Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);
+LLVM_ABI Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);
 
 /// Given a mask vector of i1, Return true if all of the elements of this
 /// predicate mask are known to be false or undef.  That is, return true if all
 /// lanes can be assumed inactive.
-bool maskIsAllZeroOrUndef(Value *Mask);
+LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask);
 
 /// Given a mask vector of i1, Return true if all of the elements of this
 /// predicate mask are known to be true or undef.  That is, return true if all
 /// lanes can be assumed active.
-bool maskIsAllOneOrUndef(Value *Mask);
+LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask);
 
 /// Given a mask vector of i1, Return true if any of the elements of this
 /// predicate mask are known to be true or undef.  That is, return true if at
 /// least one lane can be assumed active.
-bool maskContainsAllOneOrUndef(Value *Mask);
+LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask);
 
 /// Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y)
 /// for each lane which may be active.
-APInt possiblyDemandedEltsInMask(Value *Mask);
+LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask);
 
 /// The group of interleaved loads/stores sharing the same stride and
 /// close to each other.
@@ -657,7 +658,7 @@ class InterleavedAccessInfo {
   /// groups. Substitute symbolic strides using \p Strides.
   /// Consider also predicated loads/stores in the analysis if
   /// \p EnableMaskedInterleavedGroup is true.
-  void analyzeInterleaving(bool EnableMaskedInterleavedGroup);
+  LLVM_ABI void analyzeInterleaving(bool EnableMaskedInterleavedGroup);
 
   /// Invalidate groups, e.g., in case all blocks in loop will be predicated
   /// contrary to original assumption. Although we currently prevent group
@@ -705,7 +706,7 @@ class InterleavedAccessInfo {
   /// Invalidate groups that require a scalar epilogue (due to gaps). This can
   /// happen when optimizing for size forbids a scalar epilogue, and the gap
   /// cannot be filtered by masking the load/store.
-  void invalidateGroupsRequiringScalarEpilogue();
+  LLVM_ABI void invalidateGroupsRequiringScalarEpilogue();
 
   /// Returns true if we have any interleave groups.
   bool hasGroups() const { return !InterleaveGroups.empty(); }
@@ -794,7 +795,7 @@ class InterleavedAccessInfo {
       const DenseMap<Value *, const SCEV *> &Strides);
 
   /// Returns true if \p Stride is allowed in an interleaved group.
-  static bool isStrided(int Stride);
+  LLVM_ABI static bool isStrided(int Stride);
 
   /// Returns true if \p BB is a predicated block.
   bool isPredicated(BasicBlock *BB) const {
diff --git a/llvm/include/llvm/Analysis/WithCache.h b/llvm/include/llvm/Analysis/WithCache.h
index 7bd680bf097af..fccd1a718ab3b 100644
--- a/llvm/include/llvm/Analysis/WithCache.h
+++ b/llvm/include/llvm/Analysis/WithCache.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_ANALYSIS_WITHCACHE_H
 #define LLVM_ANALYSIS_WITHCACHE_H
 
+#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/PointerIntPair.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/KnownBits.h"
@@ -21,7 +22,7 @@
 
 namespace llvm {
 struct SimplifyQuery;
-KnownBits computeKnownBits(const Value *V, unsigned Depth,
+LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
                            const SimplifyQuery &Q);
 
 template <typename Arg> class WithCache {

>From 3344d1ccc70199634af8fb91ca023ab4038451a0 Mon Sep 17 00:00:00 2001
From: Andrew Rogers <andrurogerz at gmail.com>
Date: Wed, 14 May 2025 11:21:17 -0700
Subject: [PATCH 2/3] [llvm] manual fix-ups to IDS codemod of Analysis library

---
 llvm/include/llvm/Analysis/AliasAnalysis.h          |  2 +-
 llvm/include/llvm/Analysis/AssumptionCache.h        |  2 +-
 llvm/include/llvm/Analysis/BlockFrequencyInfo.h     |  2 +-
 llvm/include/llvm/Analysis/BranchProbabilityInfo.h  |  2 +-
 llvm/include/llvm/Analysis/CGSCCPassManager.h       | 10 +++++-----
 llvm/include/llvm/Analysis/DependenceGraphBuilder.h |  3 ++-
 llvm/include/llvm/Analysis/DomTreeUpdater.h         | 13 +++++++------
 llvm/include/llvm/Analysis/GlobalsModRef.h          |  2 +-
 .../include/llvm/Analysis/LastRunTrackingAnalysis.h |  2 +-
 llvm/include/llvm/Analysis/LazyCallGraph.h          |  2 +-
 llvm/include/llvm/Analysis/LoopAnalysisManager.h    |  6 +++---
 llvm/include/llvm/Analysis/LoopInfo.h               |  6 +++---
 llvm/include/llvm/Analysis/LoopUnrollAnalyzer.h     | 13 +++++++------
 llvm/include/llvm/Analysis/MemorySSA.h              |  2 +-
 llvm/include/llvm/Analysis/PostDominators.h         |  2 +-
 llvm/include/llvm/Analysis/ScalarEvolution.h        | 10 +++++-----
 llvm/include/llvm/Analysis/TargetLibraryInfo.h      |  2 +-
 llvm/include/llvm/Analysis/TargetTransformInfo.h    |  2 +-
 llvm/include/llvm/Analysis/TensorSpec.h             |  2 +-
 llvm/include/llvm/Analysis/Utils/Local.h            |  2 ++
 llvm/include/llvm/Analysis/ValueTracking.h          |  2 +-
 llvm/lib/Analysis/CGSCCPassManager.cpp              | 11 ++++++-----
 llvm/lib/Analysis/DomTreeUpdater.cpp                |  9 +++++----
 llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp    |  3 ++-
 llvm/lib/Analysis/LoopAnalysisManager.cpp           |  9 +++++----
 llvm/lib/Analysis/LoopInfo.cpp                      |  5 +++--
 llvm/lib/Analysis/MemoryProfileInfo.cpp             | 11 ++++++-----
 llvm/lib/Analysis/ModuleSummaryAnalysis.cpp         |  3 ++-
 llvm/lib/Analysis/ProfileSummaryInfo.cpp            |  3 ++-
 llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp |  3 ++-
 .../Analysis/FunctionPropertiesAnalysisTest.cpp     |  7 ++++---
 .../Analysis/IRSimilarityIdentifierTest.cpp         |  5 +++--
 llvm/unittests/Analysis/MemoryProfileInfoTest.cpp   | 10 +++++-----
 llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp  |  3 ++-
 34 files changed, 94 insertions(+), 77 deletions(-)

diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 4f1fdd69af8c4..768c55d0f8ea3 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -956,7 +956,7 @@ class AAManager : public AnalysisInfoMixin<AAManager> {
 private:
   friend AnalysisInfoMixin<AAManager>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
                        AAResults &AAResults),
diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h
index e20711542647f..8d4d716c63da1 100644
--- a/llvm/include/llvm/Analysis/AssumptionCache.h
+++ b/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -174,7 +174,7 @@ class AssumptionCache {
 class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
   friend AnalysisInfoMixin<AssumptionAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   using Result = AssumptionCache;
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
index 1efeb7aa69849..5012a94ce1c22 100644
--- a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -115,7 +115,7 @@ class BlockFrequencyAnalysis
     : public AnalysisInfoMixin<BlockFrequencyAnalysis> {
   friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   /// Provide the result type for this analysis pass.
diff --git a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index d2a113ac184bd..3951388606563 100644
--- a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -426,7 +426,7 @@ class BranchProbabilityAnalysis
     : public AnalysisInfoMixin<BranchProbabilityAnalysis> {
   friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   /// Provide the result type for this analysis pass.
diff --git a/llvm/include/llvm/Analysis/CGSCCPassManager.h b/llvm/include/llvm/Analysis/CGSCCPassManager.h
index a403d2016016e..cd42840eaaff3 100644
--- a/llvm/include/llvm/Analysis/CGSCCPassManager.h
+++ b/llvm/include/llvm/Analysis/CGSCCPassManager.h
@@ -109,9 +109,9 @@ class Module;
 #define DEBUG_TYPE "cgscc"
 
 /// Extern template declaration for the analysis set for this IR unit.
-extern template class AllAnalysesOn<LazyCallGraph::SCC>;
+extern template class LLVM_TEMPLATE_ABI AllAnalysesOn<LazyCallGraph::SCC>;
 
-extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+extern template class LLVM_TEMPLATE_ABI AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
 
 /// The CGSCC analysis manager.
 ///
@@ -206,7 +206,7 @@ CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM);
 // template.
 extern template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
 
-extern template class OuterAnalysisManagerProxy<
+extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<
     ModuleAnalysisManager, LazyCallGraph::SCC, LazyCallGraph &>;
 
 /// A proxy from a \c ModuleAnalysisManager to an \c SCC.
@@ -402,10 +402,10 @@ class FunctionAnalysisManagerCGSCCProxy
 private:
   friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 };
 
-extern template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
 
 /// A proxy from a \c CGSCCAnalysisManager to a \c Function.
 using CGSCCAnalysisManagerFunctionProxy =
diff --git a/llvm/include/llvm/Analysis/DependenceGraphBuilder.h b/llvm/include/llvm/Analysis/DependenceGraphBuilder.h
index 98f1764e14b0e..8dd7224b0487f 100644
--- a/llvm/include/llvm/Analysis/DependenceGraphBuilder.h
+++ b/llvm/include/llvm/Analysis/DependenceGraphBuilder.h
@@ -17,6 +17,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/EquivalenceClasses.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -28,7 +29,7 @@ class Instruction;
 /// DDG-like graphs. The client code is expected to inherit from this class and
 /// define concrete implementation for each of the pure virtual functions used
 /// in the high-level algorithm.
-template <class GraphType> class AbstractDependenceGraphBuilder {
+template <class GraphType> class LLVM_ABI AbstractDependenceGraphBuilder {
 protected:
   using BasicBlockListType = SmallVectorImpl<BasicBlock *>;
 
diff --git a/llvm/include/llvm/Analysis/DomTreeUpdater.h b/llvm/include/llvm/Analysis/DomTreeUpdater.h
index f13db43a67977..206f66a8ef564 100644
--- a/llvm/include/llvm/Analysis/DomTreeUpdater.h
+++ b/llvm/include/llvm/Analysis/DomTreeUpdater.h
@@ -23,8 +23,12 @@
 
 namespace llvm {
 
+class DomTreeUpdater;
 class PostDominatorTree;
 
+extern template class LLVM_TEMPLATE_ABI GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
+                                            PostDominatorTree>;
+
 class DomTreeUpdater
     : public GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
                                    PostDominatorTree> {
@@ -114,17 +118,14 @@ class DomTreeUpdater
   bool forceFlushDeletedBB();
 };
 
-extern template class GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
-                                            PostDominatorTree>;
-
-extern template void
+extern template LLVM_TEMPLATE_ABI void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
                       PostDominatorTree>::recalculate(Function &F);
 
-extern template void
+extern template LLVM_TEMPLATE_ABI void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>::
     applyUpdatesImpl</*IsForward=*/true>();
-extern template void
+extern template LLVM_TEMPLATE_ABI void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>::
     applyUpdatesImpl</*IsForward=*/false>();
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/GlobalsModRef.h b/llvm/include/llvm/Analysis/GlobalsModRef.h
index 519da4cfe0272..b3d7defd8d05c 100644
--- a/llvm/include/llvm/Analysis/GlobalsModRef.h
+++ b/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -128,7 +128,7 @@ class GlobalsAAResult : public AAResultBase {
 /// Analysis pass providing a never-invalidated alias analysis result.
 class GlobalsAA : public AnalysisInfoMixin<GlobalsAA> {
   friend AnalysisInfoMixin<GlobalsAA>;
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   typedef GlobalsAAResult Result;
diff --git a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
index bcda85108f107..bb4b260382317 100644
--- a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
+++ b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
@@ -91,7 +91,7 @@ class LastRunTrackingInfo {
 class LastRunTrackingAnalysis final
     : public AnalysisInfoMixin<LastRunTrackingAnalysis> {
   friend AnalysisInfoMixin<LastRunTrackingAnalysis>;
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   using Result = LastRunTrackingInfo;
diff --git a/llvm/include/llvm/Analysis/LazyCallGraph.h b/llvm/include/llvm/Analysis/LazyCallGraph.h
index 5bd853af0a182..fa29d42064f91 100644
--- a/llvm/include/llvm/Analysis/LazyCallGraph.h
+++ b/llvm/include/llvm/Analysis/LazyCallGraph.h
@@ -1260,7 +1260,7 @@ template <> struct GraphTraits<LazyCallGraph *> {
 class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
   friend AnalysisInfoMixin<LazyCallGraphAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   /// Inform generic clients of the result type.
diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index e6cdb9e0727e4..3d2a5a9d5a7c4 100644
--- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -65,9 +65,9 @@ struct LoopStandardAnalysisResults {
 };
 
 /// Extern template declaration for the analysis set for this IR unit.
-extern template class AllAnalysesOn<Loop>;
+extern template class LLVM_TEMPLATE_ABI AllAnalysesOn<Loop>;
 
-extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+extern template class LLVM_TEMPLATE_ABI AnalysisManager<Loop, LoopStandardAnalysisResults &>;
 /// The loop analysis manager.
 ///
 /// See the documentation for the AnalysisManager template for detail
@@ -151,7 +151,7 @@ LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
 // template.
 extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
 
-extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
                                                 LoopStandardAnalysisResults &>;
 /// A proxy from a \c FunctionAnalysisManager to a \c Loop.
 typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
diff --git a/llvm/include/llvm/Analysis/LoopInfo.h b/llvm/include/llvm/Analysis/LoopInfo.h
index 4ca07c8a2746e..4df4d9595c273 100644
--- a/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/llvm/include/llvm/Analysis/LoopInfo.h
@@ -33,7 +33,7 @@ class ScalarEvolution;
 class raw_ostream;
 
 // Implementation in Support/GenericLoopInfoImpl.h
-extern template class LoopBase<BasicBlock, Loop>;
+extern template class LLVM_TEMPLATE_ABI LoopBase<BasicBlock, Loop>;
 
 /// Represents a single loop in the control flow graph.  Note that not all SCCs
 /// in the CFG are necessarily loops.
@@ -403,7 +403,7 @@ class LLVM_ABI Loop : public LoopBase<BasicBlock, Loop> {
 };
 
 // Implementation in Support/GenericLoopInfoImpl.h
-extern template class LoopInfoBase<BasicBlock, Loop>;
+extern template class LLVM_TEMPLATE_ABI LoopInfoBase<BasicBlock, Loop>;
 
 class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
   typedef LoopInfoBase<BasicBlock, Loop> BaseT;
@@ -566,7 +566,7 @@ template <> struct GraphTraits<Loop *> {
 /// Analysis pass that exposes the \c LoopInfo for a function.
 class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
   friend AnalysisInfoMixin<LoopAnalysis>;
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   typedef LoopInfo Result;
diff --git a/llvm/include/llvm/Analysis/LoopUnrollAnalyzer.h b/llvm/include/llvm/Analysis/LoopUnrollAnalyzer.h
index 12b906ec9dd58..86f3db236ac2c 100644
--- a/llvm/include/llvm/Analysis/LoopUnrollAnalyzer.h
+++ b/llvm/include/llvm/Analysis/LoopUnrollAnalyzer.h
@@ -19,6 +19,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/Compiler.h"
 
 // This class is used to get an estimate of the optimization effects that we
 // could get from complete loop unrolling. It comes from the fact that some
@@ -83,12 +84,12 @@ class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> {
 
   bool simplifyInstWithSCEV(Instruction *I);
 
-  bool visitInstruction(Instruction &I);
-  bool visitBinaryOperator(BinaryOperator &I);
-  bool visitLoad(LoadInst &I);
-  bool visitCastInst(CastInst &I);
-  bool visitCmpInst(CmpInst &I);
-  bool visitPHINode(PHINode &PN);
+  LLVM_ABI bool visitInstruction(Instruction &I);
+  LLVM_ABI bool visitBinaryOperator(BinaryOperator &I);
+  LLVM_ABI bool visitLoad(LoadInst &I);
+  LLVM_ABI bool visitCastInst(CastInst &I);
+  LLVM_ABI bool visitCmpInst(CmpInst &I);
+  LLVM_ABI bool visitPHINode(PHINode &PN);
 };
 }
 #endif
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index c73e119351e99..889e4254266ff 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -929,7 +929,7 @@ class MemorySSAUtil {
 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
   friend AnalysisInfoMixin<MemorySSAAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   // Wrap MemorySSA result to ensure address stability of internal MemorySSA
diff --git a/llvm/include/llvm/Analysis/PostDominators.h b/llvm/include/llvm/Analysis/PostDominators.h
index e354a63ccb62f..b7439b93abc62 100644
--- a/llvm/include/llvm/Analysis/PostDominators.h
+++ b/llvm/include/llvm/Analysis/PostDominators.h
@@ -49,7 +49,7 @@ class PostDominatorTreeAnalysis
     : public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
   friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   /// Provide the result type for this analysis pass.
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 54806dac125e8..b207ef0d840ae 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1980,7 +1980,7 @@ class ScalarEvolution {
   /// whenever the given FoundCondValue value evaluates to true in given
   /// Context. If Context is nullptr, then the found predicate is true
   /// everywhere. LHS and FoundLHS may have different type width.
-  bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
                      const Value *FoundCondValue, bool Inverse,
                      const Instruction *Context = nullptr);
 
@@ -1988,7 +1988,7 @@ class ScalarEvolution {
   /// whenever the given FoundCondValue value evaluates to true in given
   /// Context. If Context is nullptr, then the found predicate is true
   /// everywhere. LHS and FoundLHS must have same type width.
-  bool isImpliedCondBalancedTypes(CmpPredicate Pred, const SCEV *LHS,
+  LLVM_ABI bool isImpliedCondBalancedTypes(CmpPredicate Pred, const SCEV *LHS,
                                   const SCEV *RHS, CmpPredicate FoundPred,
                                   const SCEV *FoundLHS, const SCEV *FoundRHS,
                                   const Instruction *CtxI);
@@ -1997,7 +1997,7 @@ class ScalarEvolution {
   /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
   /// true in given Context. If Context is nullptr, then the found predicate is
   /// true everywhere.
-  bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
+  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
                      CmpPredicate FoundPred, const SCEV *FoundLHS,
                      const SCEV *FoundRHS,
                      const Instruction *Context = nullptr);
@@ -2278,7 +2278,7 @@ class ScalarEvolution {
 
   /// Try to match the pattern generated by getURemExpr(A, B). If successful,
   /// Assign A and B to LHS and RHS, respectively.
-  bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
+  LLVM_ABI bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
 
   /// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
   /// `UniqueSCEVs`.  Return if found, else nullptr.
@@ -2326,7 +2326,7 @@ class ScalarEvolutionAnalysis
     : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
   friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;
 
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
 public:
   using Result = ScalarEvolution;
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index 5a26910a48674..064b96de85851 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -635,7 +635,7 @@ class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
 
 private:
   friend AnalysisInfoMixin<TargetLibraryAnalysis>;
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   std::optional<TargetLibraryInfoImpl> BaselineInfoImpl;
 };
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index f142935c158f4..6d1852b88f465 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1971,7 +1971,7 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
 
 private:
   friend AnalysisInfoMixin<TargetIRAnalysis>;
-  static AnalysisKey Key;
+  LLVM_ABI static AnalysisKey Key;
 
   /// The callback used to produce a result.
   ///
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index f4e95843cebf1..d39b196363e67 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -126,7 +126,7 @@ LLVM_ABI std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                                 const json::Value &Value);
 
 #define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
-  template <> TensorType TensorSpec::getDataType<T>();
+  template <> LLVM_ABI TensorType TensorSpec::getDataType<T>();
 SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)
 
 #undef TFUTILS_GETDATATYPE_DEF
diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
index b07057af62196..4cd805fcbe5fb 100644
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -14,6 +14,8 @@
 #ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
 #define LLVM_ANALYSIS_UTILS_LOCAL_H
 
+#include "llvm/Support/Compiler.h"
+
 namespace llvm {
 
 class DataLayout;
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index d7f0a58c9defb..aa1e012af5ef5 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -77,7 +77,7 @@ LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                            unsigned Depth, const SimplifyQuery &Q);
 
-KnownBits computeKnownBits(const Value *V, unsigned Depth,
+LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
                            const SimplifyQuery &Q);
 
 LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp
index 30b6c1613d3ad..d70a60615d714 100644
--- a/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -24,6 +24,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
@@ -47,14 +48,14 @@ static cl::opt<bool> AbortOnMaxDevirtIterationsReached(
 AnalysisKey ShouldNotRunFunctionPassesAnalysis::Key;
 
 // Explicit instantiations for the core proxy templates.
-template class AllAnalysesOn<LazyCallGraph::SCC>;
-template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+template class LLVM_EXPORT_TEMPLATE AllAnalysesOn<LazyCallGraph::SCC>;
+template class LLVM_EXPORT_TEMPLATE AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
 template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                            LazyCallGraph &, CGSCCUpdateResult &>;
-template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
-template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
+template class LLVM_EXPORT_TEMPLATE InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
+template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                          LazyCallGraph::SCC, LazyCallGraph &>;
-template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
 
 /// Explicitly specialize the pass manager run method to handle call graph
 /// updates.
diff --git a/llvm/lib/Analysis/DomTreeUpdater.cpp b/llvm/lib/Analysis/DomTreeUpdater.cpp
index 588944428616c..7bf83afb3dad1 100644
--- a/llvm/lib/Analysis/DomTreeUpdater.cpp
+++ b/llvm/lib/Analysis/DomTreeUpdater.cpp
@@ -16,22 +16,23 @@
 #include "llvm/Analysis/PostDominators.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/GenericDomTree.h"
 #include <functional>
 
 namespace llvm {
 
-template class GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
+template class LLVM_EXPORT_TEMPLATE GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
                                      PostDominatorTree>;
 
-template void
+template LLVM_EXPORT_TEMPLATE void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
                       PostDominatorTree>::recalculate(Function &F);
 
-template void
+template LLVM_EXPORT_TEMPLATE void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>::
     applyUpdatesImpl</*IsForward=*/true>();
-template void
+template LLVM_EXPORT_TEMPLATE void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>::
     applyUpdatesImpl</*IsForward=*/false>();
 
diff --git a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
index f5960073e93b6..9d044c8a35910 100644
--- a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
+++ b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
@@ -21,12 +21,13 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include <deque>
 
 using namespace llvm;
 
 namespace llvm {
-cl::opt<bool> EnableDetailedFunctionProperties(
+LLVM_ABI cl::opt<bool> EnableDetailedFunctionProperties(
     "enable-detailed-function-properties", cl::Hidden, cl::init(false),
     cl::desc("Whether or not to compute detailed function properties."));
 
diff --git a/llvm/lib/Analysis/LoopAnalysisManager.cpp b/llvm/lib/Analysis/LoopAnalysisManager.cpp
index 74c318ee5b975..06980b91c39ef 100644
--- a/llvm/lib/Analysis/LoopAnalysisManager.cpp
+++ b/llvm/lib/Analysis/LoopAnalysisManager.cpp
@@ -13,6 +13,7 @@
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManagerImpl.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 
 using namespace llvm;
@@ -20,10 +21,10 @@ using namespace llvm;
 namespace llvm {
 // Explicit template instantiations and specialization definitions for core
 // template typedefs.
-template class AllAnalysesOn<Loop>;
-template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
-template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
-template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+template class LLVM_EXPORT_TEMPLATE AllAnalysesOn<Loop>;
+template class LLVM_EXPORT_TEMPLATE AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+template class LLVM_EXPORT_TEMPLATE InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
+template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
                                          LoopStandardAnalysisResults &>;
 
 bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
diff --git a/llvm/lib/Analysis/LoopInfo.cpp b/llvm/lib/Analysis/LoopInfo.cpp
index 3ef9fb282afdb..901cfe03ecd33 100644
--- a/llvm/lib/Analysis/LoopInfo.cpp
+++ b/llvm/lib/Analysis/LoopInfo.cpp
@@ -36,13 +36,14 @@
 #include "llvm/IR/PrintPasses.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/GenericLoopInfoImpl.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
 // Explicitly instantiate methods in LoopInfoImpl.h for IR-level Loops.
-template class llvm::LoopBase<BasicBlock, Loop>;
-template class llvm::LoopInfoBase<BasicBlock, Loop>;
+template class LLVM_EXPORT_TEMPLATE llvm::LoopBase<BasicBlock, Loop>;
+template class LLVM_EXPORT_TEMPLATE llvm::LoopInfoBase<BasicBlock, Loop>;
 
 // Always verify loopinfo if expensive checking is enabled.
 #ifdef EXPENSIVE_CHECKS
diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp
index f9145854784f2..773d0b2f53e09 100644
--- a/llvm/lib/Analysis/MemoryProfileInfo.cpp
+++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp
@@ -13,6 +13,7 @@
 #include "llvm/Analysis/MemoryProfileInfo.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Format.h"
 
 using namespace llvm;
@@ -22,7 +23,7 @@ using namespace llvm::memprof;
 
 // Upper bound on lifetime access density (accesses per byte per lifetime sec)
 // for marking an allocation cold.
-cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
+LLVM_ABI cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
     "memprof-lifetime-access-density-cold-threshold", cl::init(0.05),
     cl::Hidden,
     cl::desc("The threshold the lifetime access density (accesses per byte per "
@@ -30,20 +31,20 @@ cl::opt<float> MemProfLifetimeAccessDensityColdThreshold(
 
 // Lower bound on lifetime to mark an allocation cold (in addition to accesses
 // per byte per sec above). This is to avoid pessimizing short lived objects.
-cl::opt<unsigned> MemProfAveLifetimeColdThreshold(
+LLVM_ABI cl::opt<unsigned> MemProfAveLifetimeColdThreshold(
     "memprof-ave-lifetime-cold-threshold", cl::init(200), cl::Hidden,
     cl::desc("The average lifetime (s) for an allocation to be considered "
              "cold"));
 
 // Lower bound on average lifetime accesses density (total life time access
 // density / alloc count) for marking an allocation hot.
-cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold(
+LLVM_ABI cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold(
     "memprof-min-ave-lifetime-access-density-hot-threshold", cl::init(1000),
     cl::Hidden,
     cl::desc("The minimum TotalLifetimeAccessDensity / AllocCount for an "
              "allocation to be considered hot"));
 
-cl::opt<bool>
+LLVM_ABI cl::opt<bool>
     MemProfUseHotHints("memprof-use-hot-hints", cl::init(false), cl::Hidden,
                        cl::desc("Enable use of hot hints (only supported for "
                                 "unambigously hot allocations)"));
@@ -55,7 +56,7 @@ cl::opt<bool> MemProfReportHintedSizes(
 // This is useful if we have enabled reporting of hinted sizes, and want to get
 // information from the indexing step for all contexts (especially for testing),
 // or have specified a value less than 100% for -memprof-cloning-cold-threshold.
-cl::opt<bool> MemProfKeepAllNotColdContexts(
+LLVM_ABI cl::opt<bool> MemProfKeepAllNotColdContexts(
     "memprof-keep-all-not-cold-contexts", cl::init(false), cl::Hidden,
     cl::desc("Keep all non-cold contexts (increases cloning overheads)"));
 
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index d7e12dc80d7c2..59fa1a4b03c37 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -51,6 +51,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/FileSystem.h"
 #include <cassert>
 #include <cstdint>
@@ -85,7 +86,7 @@ static cl::opt<bool> EnableMemProfIndirectCallSupport(
     cl::desc(
         "Enable MemProf support for summarizing and cloning indirect calls"));
 
-extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
+LLVM_ABI extern cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
 
 extern cl::opt<unsigned> MaxNumVTableAnnotations;
 
diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index e098c7a448ab0..e8d4e37a4eb7e 100644
--- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -20,6 +20,7 @@
 #include "llvm/InitializePasses.h"
 #include "llvm/ProfileData/ProfileCommon.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 using namespace llvm;
 
@@ -27,7 +28,7 @@ static cl::opt<bool> PartialProfile(
     "partial-profile", cl::Hidden, cl::init(false),
     cl::desc("Specify the current profile is used as a partial profile."));
 
-cl::opt<bool> ScalePartialSampleProfileWorkingSetSize(
+LLVM_ABI cl::opt<bool> ScalePartialSampleProfileWorkingSetSize(
     "scale-partial-sample-profile-working-set-size", cl::Hidden, cl::init(true),
     cl::desc(
         "If true, scale the working set size of the partial sample profile "
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
index 43c8b36b16073..5fd2ecc4f29b6 100644
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -13,6 +13,7 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Regex.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
@@ -22,7 +23,7 @@
 using namespace llvm;
 
 namespace llvm {
-extern cl::opt<bool> ShouldPreserveAllAttributes;
+LLVM_ABI extern cl::opt<bool> ShouldPreserveAllAttributes;
 } // namespace llvm
 
 static void RunTest(
diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
index 574ad7c8430e3..0720d935b0362 100644
--- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp
@@ -17,6 +17,7 @@
 #include "llvm/IR/PassManager.h"
 #include "llvm/Passes/PassBuilder.h"
 #include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "gtest/gtest.h"
@@ -25,9 +26,9 @@
 using namespace llvm;
 
 namespace llvm {
-extern cl::opt<bool> EnableDetailedFunctionProperties;
-extern cl::opt<bool> BigBasicBlockInstructionThreshold;
-extern cl::opt<bool> MediumBasicBlockInstrutionThreshold;
+LLVM_ABI extern cl::opt<bool> EnableDetailedFunctionProperties;
+LLVM_ABI extern cl::opt<bool> BigBasicBlockInstructionThreshold;
+LLVM_ABI extern cl::opt<bool> MediumBasicBlockInstrutionThreshold;
 } // namespace llvm
 
 namespace {
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index b7ef23f4ea102..9edb21f71d0af 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -17,14 +17,15 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 using namespace IRSimilarity;
 
-extern llvm::cl::opt<bool> UseNewDbgInfoFormat;
-extern cl::opt<bool> UseNewDbgInfoFormat;
+LLVM_ABI extern llvm::cl::opt<bool> UseNewDbgInfoFormat;
+LLVM_ABI extern cl::opt<bool> UseNewDbgInfoFormat;
 
 static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
                                               StringRef ModuleStr) {
diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
index 170dca0ebcc53..4c5f57ca91b08 100644
--- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
+++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
@@ -23,11 +23,11 @@
 using namespace llvm;
 using namespace llvm::memprof;
 
-extern cl::opt<float> MemProfLifetimeAccessDensityColdThreshold;
-extern cl::opt<unsigned> MemProfAveLifetimeColdThreshold;
-extern cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold;
-extern cl::opt<bool> MemProfUseHotHints;
-extern cl::opt<bool> MemProfKeepAllNotColdContexts;
+LLVM_ABI extern cl::opt<float> MemProfLifetimeAccessDensityColdThreshold;
+LLVM_ABI extern cl::opt<unsigned> MemProfAveLifetimeColdThreshold;
+LLVM_ABI extern cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold;
+LLVM_ABI extern cl::opt<bool> MemProfUseHotHints;
+LLVM_ABI extern cl::opt<bool> MemProfKeepAllNotColdContexts;
 
 namespace {
 
diff --git a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
index 519389d8e0b19..45dc50ec0839b 100644
--- a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
+++ b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
@@ -18,13 +18,14 @@
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/FormatVariadic.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/raw_ostream.h"
 #include "gtest/gtest.h"
 
-extern llvm::cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
+LLVM_ABI extern llvm::cl::opt<bool> ScalePartialSampleProfileWorkingSetSize;
 
 namespace llvm {
 namespace {

>From 7cadbcaa204d1026e19942561050849938aa3101 Mon Sep 17 00:00:00 2001
From: Andrew Rogers <andrurogerz at gmail.com>
Date: Wed, 14 May 2025 11:22:17 -0700
Subject: [PATCH 3/3] [llvm] clang-format changes to Analysis library

---
 llvm/include/llvm/Analysis/AliasAnalysis.h    | 101 +++--
 .../llvm/Analysis/AliasAnalysisEvaluator.h    |   2 +-
 llvm/include/llvm/Analysis/AliasSetTracker.h  |  17 +-
 .../llvm/Analysis/AssumeBundleQueries.h       |  29 +-
 llvm/include/llvm/Analysis/AssumptionCache.h  |   2 +-
 .../llvm/Analysis/BasicAliasAnalysis.h        |  26 +-
 .../llvm/Analysis/BlockFrequencyInfo.h        |  25 +-
 .../llvm/Analysis/BranchProbabilityInfo.h     |  34 +-
 llvm/include/llvm/Analysis/CFG.h              |  14 +-
 llvm/include/llvm/Analysis/CFGPrinter.h       |   7 +-
 llvm/include/llvm/Analysis/CGSCCPassManager.h |  25 +-
 llvm/include/llvm/Analysis/CallGraph.h        |   6 +-
 llvm/include/llvm/Analysis/CallGraphSCCPass.h |   2 +-
 llvm/include/llvm/Analysis/CallPrinter.h      |   2 +-
 llvm/include/llvm/Analysis/CaptureTracking.h  |  25 +-
 llvm/include/llvm/Analysis/CodeMetrics.h      |  19 +-
 llvm/include/llvm/Analysis/ConstantFolding.h  | 100 +++--
 llvm/include/llvm/Analysis/ConstraintSystem.h |   2 +-
 llvm/include/llvm/Analysis/CtxProfAnalysis.h  |  14 +-
 llvm/include/llvm/Analysis/DDG.h              |  11 +-
 llvm/include/llvm/Analysis/DXILResource.h     |  20 +-
 llvm/include/llvm/Analysis/DemandedBits.h     |  14 +-
 .../llvm/Analysis/DependenceAnalysis.h        |  19 +-
 llvm/include/llvm/Analysis/DomPrinter.h       |   2 +-
 llvm/include/llvm/Analysis/DomTreeUpdater.h   |   6 +-
 .../llvm/Analysis/EphemeralValuesCache.h      |   2 +-
 .../Analysis/FunctionPropertiesAnalysis.h     |  10 +-
 llvm/include/llvm/Analysis/GlobalsModRef.h    |  14 +-
 llvm/include/llvm/Analysis/HeatUtils.h        |   4 +-
 .../llvm/Analysis/IRSimilarityIdentifier.h    |  63 +--
 llvm/include/llvm/Analysis/IVDescriptors.h    |  41 +-
 llvm/include/llvm/Analysis/InlineAdvisor.h    |  34 +-
 llvm/include/llvm/Analysis/InlineCost.h       |   9 +-
 .../llvm/Analysis/InlineModelFeatureMaps.h    |   2 +-
 llvm/include/llvm/Analysis/InlineOrder.h      |   2 +-
 .../llvm/Analysis/InstSimplifyFolder.h        |   2 +-
 .../Analysis/InstructionPrecedenceTracking.h  |   8 +-
 .../llvm/Analysis/InstructionSimplify.h       | 104 ++---
 .../llvm/Analysis/InteractiveModelRunner.h    |   2 +-
 .../llvm/Analysis/LastRunTrackingAnalysis.h   |   5 +-
 llvm/include/llvm/Analysis/LazyCallGraph.h    |  25 +-
 llvm/include/llvm/Analysis/Loads.h            |  80 ++--
 .../llvm/Analysis/LoopAccessAnalysis.h        |  57 +--
 .../llvm/Analysis/LoopAnalysisManager.h       |  11 +-
 llvm/include/llvm/Analysis/LoopInfo.h         |  26 +-
 llvm/include/llvm/Analysis/LoopNestAnalysis.h |   8 +-
 llvm/include/llvm/Analysis/LoopPass.h         |   2 +-
 llvm/include/llvm/Analysis/MemoryBuiltins.h   |  47 ++-
 llvm/include/llvm/Analysis/MemoryLocation.h   |  14 +-
 .../include/llvm/Analysis/MemoryProfileInfo.h |  29 +-
 llvm/include/llvm/Analysis/MemorySSA.h        |  37 +-
 llvm/include/llvm/Analysis/MemorySSAUpdater.h |  50 +--
 .../llvm/Analysis/ModuleSummaryAnalysis.h     |   2 +-
 llvm/include/llvm/Analysis/MustExecute.h      |  14 +-
 .../llvm/Analysis/NoInferenceModelRunner.h    |   4 +-
 .../llvm/Analysis/OptimizationRemarkEmitter.h |   4 +-
 llvm/include/llvm/Analysis/PHITransAddr.h     |  11 +-
 llvm/include/llvm/Analysis/PhiValues.h        |   4 +-
 llvm/include/llvm/Analysis/PostDominators.h   |   6 +-
 .../llvm/Analysis/ProfileSummaryInfo.h        |  17 +-
 llvm/include/llvm/Analysis/RegionPass.h       |   2 +-
 llvm/include/llvm/Analysis/ScalarEvolution.h  | 238 ++++++-----
 .../Analysis/ScalarEvolutionAliasAnalysis.h   |  10 +-
 .../Analysis/ScalarEvolutionExpressions.h     |  18 +-
 .../Analysis/ScalarEvolutionNormalization.h   |  19 +-
 llvm/include/llvm/Analysis/ScopedNoAliasAA.h  |  23 +-
 llvm/include/llvm/Analysis/SimplifyQuery.h    |   2 +-
 .../llvm/Analysis/StaticDataProfileInfo.h     |  11 +-
 llvm/include/llvm/Analysis/TargetFolder.h     |   3 +-
 .../include/llvm/Analysis/TargetLibraryInfo.h |  17 +-
 .../llvm/Analysis/TargetTransformInfo.h       | 349 +++++++--------
 llvm/include/llvm/Analysis/TensorSpec.h       |  11 +-
 .../llvm/Analysis/TypeBasedAliasAnalysis.h    |  23 +-
 .../ImportedFunctionsInliningStatistics.h     |   2 +-
 llvm/include/llvm/Analysis/Utils/Local.h      |   4 +-
 .../llvm/Analysis/Utils/TrainingLogger.h      |   8 +-
 llvm/include/llvm/Analysis/ValueLattice.h     |  12 +-
 llvm/include/llvm/Analysis/ValueTracking.h    | 399 +++++++++---------
 llvm/include/llvm/Analysis/VectorUtils.h      |  73 ++--
 llvm/include/llvm/Analysis/WithCache.h        |   4 +-
 llvm/lib/Analysis/CGSCCPassManager.cpp        |  13 +-
 llvm/lib/Analysis/DomTreeUpdater.cpp          |   4 +-
 llvm/lib/Analysis/LoopAnalysisManager.cpp     |  10 +-
 .../Analysis/MemoryProfileInfoTest.cpp        |   3 +-
 84 files changed, 1383 insertions(+), 1149 deletions(-)

diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 768c55d0f8ea3..d8d88639b85a1 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -37,13 +37,13 @@
 #ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
 #define LLVM_ANALYSIS_ALIASANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/ModRef.h"
 #include <cstdint>
 #include <functional>
@@ -340,7 +340,7 @@ class AAResults {
   /// The aggregation is invalidated if any of the underlying analyses is
   /// invalidated.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
   //===--------------------------------------------------------------------===//
   /// \name Alias Queries
@@ -350,7 +350,8 @@ class AAResults {
   /// Returns an AliasResult indicating whether the two pointers are aliased to
   /// each other. This is the interface that must be implemented by specific
   /// alias analysis implementations.
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB);
 
   /// A convenience wrapper around the primary \c alias interface.
   AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
@@ -419,7 +420,7 @@ class AAResults {
   /// If IgnoreLocals is true, then this method returns NoModRef for memory
   /// that points to a local alloca.
   LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
-                               bool IgnoreLocals = false);
+                                        bool IgnoreLocals = false);
 
   /// A convenience wrapper around the primary \c getModRefInfoMask
   /// interface.
@@ -524,7 +525,8 @@ class AAResults {
 
   /// Return information about whether two instructions may refer to the same
   /// memory locations.
-  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2);
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1,
+                                    const Instruction *I2);
 
   /// Return information about whether a particular call site modifies
   /// or reads the specified memory location \p MemLoc before instruction \p I
@@ -549,7 +551,8 @@ class AAResults {
 
   /// Check if it is possible for execution of the specified basic block to
   /// modify the location Loc.
-  LLVM_ABI bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
+  LLVM_ABI bool canBasicBlockModify(const BasicBlock &BB,
+                                    const MemoryLocation &Loc);
 
   /// A convenience wrapper synthesizing a memory location.
   bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
@@ -562,9 +565,10 @@ class AAResults {
   ///
   /// The instructions to consider are all of the instructions in the range of
   /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
-  LLVM_ABI bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
-                                 const MemoryLocation &Loc,
-                                 const ModRefInfo Mode);
+  LLVM_ABI bool canInstructionRangeModRef(const Instruction &I1,
+                                          const Instruction &I2,
+                                          const MemoryLocation &Loc,
+                                          const ModRefInfo Mode);
 
   /// A convenience wrapper synthesizing a memory location.
   bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
@@ -575,42 +579,54 @@ class AAResults {
 
   // CtxI can be nullptr, in which case the query is whether or not the aliasing
   // relationship holds through the entire function.
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI = nullptr);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI = nullptr);
 
-  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
-                               bool IgnoreLocals = false);
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
+                                        AAQueryInfo &AAQI,
+                                        bool IgnoreLocals = false);
   LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2,
-                           AAQueryInfo &AAQIP);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
+                                    AAQueryInfo &AAQIP);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1,
+                                    const CallBase *Call2, AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const VAArgInst *V,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const LoadInst *L,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const StoreInst *S,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const FenceInst *S,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
   LLVM_ABI ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
-                           const MemoryLocation &Loc, AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const AtomicRMWInst *RMW,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CatchPadInst *I,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CatchReturnInst *I,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
   LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I,
-                           const std::optional<MemoryLocation> &OptLoc,
-                           AAQueryInfo &AAQIP);
-  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1, const Instruction *I2,
-                           AAQueryInfo &AAQI);
+                                    const std::optional<MemoryLocation> &OptLoc,
+                                    AAQueryInfo &AAQIP);
+  LLVM_ABI ModRefInfo getModRefInfo(const Instruction *I1,
+                                    const Instruction *I2, AAQueryInfo &AAQI);
   LLVM_ABI ModRefInfo callCapturesBefore(const Instruction *I,
-                                const MemoryLocation &MemLoc, DominatorTree *DT,
-                                AAQueryInfo &AAQIP);
-  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+                                         const MemoryLocation &MemLoc,
+                                         DominatorTree *DT, AAQueryInfo &AAQIP);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call,
+                                          AAQueryInfo &AAQI);
 
 private:
   class Concept;
@@ -905,7 +921,7 @@ LLVM_ABI bool isEscapeSource(const Value *V);
 /// to true, then the memory is only not visible if the object has not been
 /// captured prior to the unwind. Otherwise it is not visible even if captured.
 LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object,
-                          bool &RequiresNoCaptureBeforeUnwind);
+                                   bool &RequiresNoCaptureBeforeUnwind);
 
 /// Return true if the Object is writable, in the sense that any location based
 /// on this pointer that can be loaded can also be stored to without trapping.
@@ -918,7 +934,8 @@ LLVM_ABI bool isNotVisibleOnUnwind(const Value *Object,
 /// using the dereferenceable(N) attribute. It does not necessarily hold for
 /// parts that are only known to be dereferenceable due to the presence of
 /// loads.
-LLVM_ABI bool isWritableObject(const Value *Object, bool &ExplicitlyDereferenceableOnly);
+LLVM_ABI bool isWritableObject(const Value *Object,
+                               bool &ExplicitlyDereferenceableOnly);
 
 /// A manager for alias analyses.
 ///
diff --git a/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h b/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
index 4303d663faff8..d8b02bd42e7d8 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysisEvaluator.h
@@ -24,8 +24,8 @@
 #ifndef LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
 #define LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class AAResults;
diff --git a/llvm/include/llvm/Analysis/AliasSetTracker.h b/llvm/include/llvm/Analysis/AliasSetTracker.h
index 548c059ada887..7d461b230478b 100644
--- a/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -18,7 +18,6 @@
 #ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
 #define LLVM_ANALYSIS_ALIASSETTRACKER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/ilist.h"
@@ -26,6 +25,7 @@
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <vector>
 
@@ -114,7 +114,8 @@ class AliasSet : public ilist_node<AliasSet> {
   bool isForwardingAliasSet() const { return Forward; }
 
   /// Merge the specified alias set into this alias set.
-  LLVM_ABI void mergeSetIn(AliasSet &AS, AliasSetTracker &AST, BatchAAResults &BatchAA);
+  LLVM_ABI void mergeSetIn(AliasSet &AS, AliasSetTracker &AST,
+                           BatchAAResults &BatchAA);
 
   // Alias Set iteration - Allow access to all of the memory locations which are
   // part of this alias set.
@@ -148,10 +149,10 @@ class AliasSet : public ilist_node<AliasSet> {
   /// If the specified memory location "may" (or must) alias one of the members
   /// in the set return the appropriate AliasResult. Otherwise return NoAlias.
   LLVM_ABI AliasResult aliasesMemoryLocation(const MemoryLocation &MemLoc,
-                                    BatchAAResults &AA) const;
+                                             BatchAAResults &AA) const;
 
   LLVM_ABI ModRefInfo aliasesUnknownInst(const Instruction *Inst,
-                                BatchAAResults &AA) const;
+                                         BatchAAResults &AA) const;
 };
 
 inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
@@ -190,9 +191,11 @@ class AliasSetTracker {
   LLVM_ABI void add(VAArgInst *VAAI);
   LLVM_ABI void add(AnyMemSetInst *MSI);
   LLVM_ABI void add(AnyMemTransferInst *MTI);
-  LLVM_ABI void add(Instruction *I);       // Dispatch to one of the other add methods...
-  LLVM_ABI void add(BasicBlock &BB);       // Add all instructions in basic block
-  LLVM_ABI void add(const AliasSetTracker &AST); // Add alias relations from another AST
+  LLVM_ABI void
+  add(Instruction *I); // Dispatch to one of the other add methods...
+  LLVM_ABI void add(BasicBlock &BB); // Add all instructions in basic block
+  LLVM_ABI void
+  add(const AliasSetTracker &AST); // Add alias relations from another AST
   LLVM_ABI void addUnknown(Instruction *I);
 
   LLVM_ABI void clear();
diff --git a/llvm/include/llvm/Analysis/AssumeBundleQueries.h b/llvm/include/llvm/Analysis/AssumeBundleQueries.h
index ddffa4ce8dcc7..0d39339c4f7d7 100644
--- a/llvm/include/llvm/Analysis/AssumeBundleQueries.h
+++ b/llvm/include/llvm/Analysis/AssumeBundleQueries.h
@@ -14,9 +14,9 @@
 #ifndef LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H
 #define LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class AssumptionCache;
@@ -39,8 +39,9 @@ enum AssumeBundleArg {
 ///
 /// Return true iff the queried attribute was found.
 /// If ArgVal is set. the argument will be stored to ArgVal.
-LLVM_ABI bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn, StringRef AttrName,
-                          uint64_t *ArgVal = nullptr);
+LLVM_ABI bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn,
+                                   StringRef AttrName,
+                                   uint64_t *ArgVal = nullptr);
 inline bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn,
                                  Attribute::AttrKind Kind,
                                  uint64_t *ArgVal = nullptr) {
@@ -87,7 +88,8 @@ using RetainedKnowledgeMap =
 /// many queries are going to be made on the same llvm.assume.
 /// String attributes are not inserted in the map.
 /// If the IR changes the map will be outdated.
-LLVM_ABI void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result);
+LLVM_ABI void fillMapFromAssume(AssumeInst &Assume,
+                                RetainedKnowledgeMap &Result);
 
 /// Represent one information held inside an operand bundle of an llvm.assume.
 /// AttrKind is the property that holds.
@@ -122,7 +124,7 @@ struct RetainedKnowledge {
 /// Retreive the information help by Assume on the operand at index Idx.
 /// Assume should be an llvm.assume and Idx should be in the operand bundle.
 LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume,
-                                                  unsigned Idx);
+                                                           unsigned Idx);
 
 /// Retreive the information help by the Use U of an llvm.assume. the use should
 /// be in the operand bundle.
@@ -146,8 +148,8 @@ LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume);
 
 /// Return a valid Knowledge associated to the Use U if its Attribute kind is
 /// in AttrKinds.
-LLVM_ABI RetainedKnowledge getKnowledgeFromUse(const Use *U,
-                                      ArrayRef<Attribute::AttrKind> AttrKinds);
+LLVM_ABI RetainedKnowledge
+getKnowledgeFromUse(const Use *U, ArrayRef<Attribute::AttrKind> AttrKinds);
 
 /// Return a valid Knowledge associated to the Value V if its Attribute kind is
 /// in AttrKinds and it matches the Filter.
@@ -161,16 +163,15 @@ LLVM_ABI RetainedKnowledge getKnowledgeForValue(
 /// Return a valid Knowledge associated to the Value V if its Attribute kind is
 /// in AttrKinds and the knowledge is suitable to be used in the context of
 /// CtxI.
-LLVM_ABI RetainedKnowledge
-getKnowledgeValidInContext(const Value *V,
-                           ArrayRef<Attribute::AttrKind> AttrKinds,
-                           AssumptionCache &AC, const Instruction *CtxI,
-                           const DominatorTree *DT = nullptr);
+LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(
+    const Value *V, ArrayRef<Attribute::AttrKind> AttrKinds,
+    AssumptionCache &AC, const Instruction *CtxI,
+    const DominatorTree *DT = nullptr);
 
 /// This extracts the Knowledge from an element of an operand bundle.
 /// This is mostly for use in the assume builder.
-LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume,
-                                         const CallBase::BundleOpInfo &BOI);
+LLVM_ABI RetainedKnowledge
+getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI);
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h
index 8d4d716c63da1..1b026ef76a45e 100644
--- a/llvm/include/llvm/Analysis/AssumptionCache.h
+++ b/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -15,7 +15,6 @@
 #ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
 #define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
@@ -23,6 +22,7 @@
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/BasicAliasAnalysis.h b/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
index 10ec9260a32b8..49712e6b5b29f 100644
--- a/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
 #define LLVM_ANALYSIS_BASICALIASANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 #include <utility>
 
@@ -67,16 +67,18 @@ class BasicAAResult : public AAResultBase {
 
   /// Handle invalidation events in the new pass manager.
   LLVM_ABI bool invalidate(Function &Fn, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI);
 
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
 
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
-                           AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1,
+                                    const CallBase *Call2, AAQueryInfo &AAQI);
 
   /// Returns a bitmask that should be unconditionally applied to the ModRef
   /// info of a memory location. This allows us to eliminate Mod and/or Ref
@@ -85,14 +87,16 @@ class BasicAAResult : public AAResultBase {
   ///
   /// If IgnoreLocals is true, then this method returns NoModRef for memory
   /// that points to a local alloca.
-  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
-                               bool IgnoreLocals = false);
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
+                                        AAQueryInfo &AAQI,
+                                        bool IgnoreLocals = false);
 
   /// Get the location associated with a pointer argument of a callsite.
   LLVM_ABI ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
 
   /// Returns the behavior when calling the given call site.
-  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call,
+                                          AAQueryInfo &AAQI);
 
   /// Returns the behavior when calling the given function. For use when the
   /// call site is not known.
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
index 5012a94ce1c22..860ceb4e6cce6 100644
--- a/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -13,10 +13,10 @@
 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Printable.h"
 #include <cstdint>
 #include <memory>
@@ -42,8 +42,9 @@ class BlockFrequencyInfo {
 
 public:
   LLVM_ABI BlockFrequencyInfo();
-  LLVM_ABI BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
-                     const LoopInfo &LI);
+  LLVM_ABI BlockFrequencyInfo(const Function &F,
+                              const BranchProbabilityInfo &BPI,
+                              const LoopInfo &LI);
   BlockFrequencyInfo(const BlockFrequencyInfo &) = delete;
   BlockFrequencyInfo &operator=(const BlockFrequencyInfo &) = delete;
   LLVM_ABI BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
@@ -52,7 +53,7 @@ class BlockFrequencyInfo {
 
   /// Handle invalidation explicitly.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &);
+                           FunctionAnalysisManager::Invalidator &);
 
   LLVM_ABI const Function *getFunction() const;
   LLVM_ABI const BranchProbabilityInfo *getBPI() const;
@@ -74,7 +75,8 @@ class BlockFrequencyInfo {
   /// Returns the estimated profile count of \p Freq.
   /// This uses the frequency \p Freq and multiplies it by
   /// the enclosing function's count (if available) and returns the value.
-  LLVM_ABI std::optional<uint64_t> getProfileCountFromFreq(BlockFrequency Freq) const;
+  LLVM_ABI std::optional<uint64_t>
+  getProfileCountFromFreq(BlockFrequency Freq) const;
 
   /// Returns true if \p BB is an irreducible loop header
   /// block. Otherwise false.
@@ -86,12 +88,13 @@ class BlockFrequencyInfo {
   /// Set the frequency of \p ReferenceBB to \p Freq and scale the frequencies
   /// of the blocks in \p BlocksToScale such that their frequencies relative
   /// to \p ReferenceBB remain unchanged.
-  LLVM_ABI void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq,
-                            SmallPtrSetImpl<BasicBlock *> &BlocksToScale);
+  LLVM_ABI void
+  setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq,
+                       SmallPtrSetImpl<BasicBlock *> &BlocksToScale);
 
   /// calculate - compute block frequency info for the given function.
   LLVM_ABI void calculate(const Function &F, const BranchProbabilityInfo &BPI,
-                 const LoopInfo &LI);
+                          const LoopInfo &LI);
 
   LLVM_ABI BlockFrequency getEntryFreq() const;
   LLVM_ABI void releaseMemory();
@@ -104,11 +107,13 @@ class BlockFrequencyInfo {
 /// Print the block frequency @p Freq relative to the current functions entry
 /// frequency. Returns a Printable object that can be piped via `<<` to a
 /// `raw_ostream`.
-LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, BlockFrequency Freq);
+LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI,
+                                  BlockFrequency Freq);
 
 /// Convenience function equivalent to calling
 /// `printBlockFreq(BFI, BFI.getBlocakFreq(&BB))`.
-LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI, const BasicBlock &BB);
+LLVM_ABI Printable printBlockFreq(const BlockFrequencyInfo &BFI,
+                                  const BasicBlock &BB);
 
 /// Analysis pass which computes \c BlockFrequencyInfo.
 class BlockFrequencyAnalysis
diff --git a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index 3951388606563..83a92c5eb39a3 100644
--- a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -13,7 +13,6 @@
 #ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
 #define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/ADT/DenseSet.h"
@@ -23,6 +22,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <cstdint>
 #include <memory>
@@ -143,7 +143,7 @@ class BranchProbabilityInfo {
   }
 
   LLVM_ABI bool invalidate(Function &, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &);
+                           FunctionAnalysisManager::Invalidator &);
 
   LLVM_ABI void releaseMemory();
 
@@ -155,17 +155,17 @@ class BranchProbabilityInfo {
   /// (0%) and one (100%) of this edge executing, relative to other edges
   /// leaving the 'Src' block. The returned probability is never zero, and can
   /// only be one if the source block has only one successor.
-  LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
-                                       unsigned IndexInSuccessors) const;
+  LLVM_ABI BranchProbability
+  getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const;
 
   /// Get the probability of going from Src to Dst.
   ///
   /// It returns the sum of all probabilities for edges from Src to Dst.
   LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
-                                       const BasicBlock *Dst) const;
+                                                const BasicBlock *Dst) const;
 
   LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src,
-                                       const_succ_iterator Dst) const;
+                                                const_succ_iterator Dst) const;
 
   /// Test if an edge is hot relative to other out-edges of the Src.
   ///
@@ -178,8 +178,9 @@ class BranchProbabilityInfo {
   /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
   /// then prints that probability to the provided stream. That stream is then
   /// returned.
-  LLVM_ABI raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
-                                    const BasicBlock *Dst) const;
+  LLVM_ABI raw_ostream &printEdgeProbability(raw_ostream &OS,
+                                             const BasicBlock *Src,
+                                             const BasicBlock *Dst) const;
 
 public:
   /// Set the raw probabilities for all edges from the given block.
@@ -187,8 +188,9 @@ class BranchProbabilityInfo {
   /// This allows a pass to explicitly set edge probabilities for a block. It
   /// can be used when updating the CFG to update the branch probability
   /// information.
-  LLVM_ABI void setEdgeProbability(const BasicBlock *Src,
-                          const SmallVectorImpl<BranchProbability> &Probs);
+  LLVM_ABI void
+  setEdgeProbability(const BasicBlock *Src,
+                     const SmallVectorImpl<BranchProbability> &Probs);
 
   /// Copy outgoing edge probabilities from \p Src to \p Dst.
   ///
@@ -205,8 +207,8 @@ class BranchProbabilityInfo {
   }
 
   LLVM_ABI void calculate(const Function &F, const LoopInfo &LI,
-                 const TargetLibraryInfo *TLI, DominatorTree *DT,
-                 PostDominatorTree *PDT);
+                          const TargetLibraryInfo *TLI, DominatorTree *DT,
+                          PostDominatorTree *PDT);
 
   /// Forget analysis results for the given basic block.
   LLVM_ABI void eraseBlock(const BasicBlock *BB);
@@ -257,13 +259,13 @@ class BranchProbabilityInfo {
     /// Fills in \p Enters vector with all such blocks that don't belong to
     /// SCC with \p SccNum ID but there is an edge to a block belonging to the
     /// SCC.
-    LLVM_ABI void getSccEnterBlocks(int SccNum,
-                           SmallVectorImpl<BasicBlock *> &Enters) const;
+    LLVM_ABI void
+    getSccEnterBlocks(int SccNum, SmallVectorImpl<BasicBlock *> &Enters) const;
     /// Fills in \p Exits vector with all such blocks that don't belong to
     /// SCC with \p SccNum ID but there is an edge from a block belonging to the
     /// SCC.
     LLVM_ABI void getSccExitBlocks(int SccNum,
-                          SmallVectorImpl<BasicBlock *> &Exits) const;
+                                   SmallVectorImpl<BasicBlock *> &Exits) const;
 
   private:
     /// Returns \p BB's type according to classification given by SccBlockType
@@ -299,7 +301,7 @@ class BranchProbabilityInfo {
   class LoopBlock {
   public:
     LLVM_ABI explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
-                       const SccInfo &SccI);
+                                const SccInfo &SccI);
 
     const BasicBlock *getBlock() const { return BB; }
     BasicBlock *getBlock() { return const_cast<BasicBlock *>(BB); }
diff --git a/llvm/include/llvm/Analysis/CFG.h b/llvm/include/llvm/Analysis/CFG.h
index 24407d530e9d4..f1e8cb5225caa 100644
--- a/llvm/include/llvm/Analysis/CFG.h
+++ b/llvm/include/llvm/Analysis/CFG.h
@@ -14,9 +14,9 @@
 #ifndef LLVM_ANALYSIS_CFG_H
 #define LLVM_ANALYSIS_CFG_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/GraphTraits.h"
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
 #include <utility>
 
 namespace llvm {
@@ -35,22 +35,22 @@ template <typename T> class SmallVectorImpl;
 /// The output is added to Result, as pairs of <from,to> edge info.
 LLVM_ABI void FindFunctionBackedges(
     const Function &F,
-    SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
-        Result);
+    SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *>> &Result);
 
 /// Search for the specified successor of basic block BB and return its position
 /// in the terminator instruction's list of successors.  It is an error to call
 /// this with a block that is not a successor.
-LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
+LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB,
+                                     const BasicBlock *Succ);
 
 /// Return true if the specified edge is a critical edge. Critical edges are
 /// edges from a block with multiple successors to a block with multiple
 /// predecessors.
 ///
 LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum,
-                    bool AllowIdenticalEdges = false);
+                             bool AllowIdenticalEdges = false);
 LLVM_ABI bool isCriticalEdge(const Instruction *TI, const BasicBlock *Succ,
-                    bool AllowIdenticalEdges = false);
+                             bool AllowIdenticalEdges = false);
 
 /// Determine whether instruction 'To' is reachable from 'From', without passing
 /// through any blocks in ExclusionSet, returning true if uncertain.
@@ -193,7 +193,7 @@ bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI) {
 //    be ignored)
 //  - must not be split for PGO instrumentation, for example.
 LLVM_ABI bool isPresplitCoroSuspendExitEdge(const BasicBlock &Src,
-                                   const BasicBlock &Dest);
+                                            const BasicBlock &Dest);
 
 /// Return true if there is at least a path through which F can return, false if
 /// there is no such path.
diff --git a/llvm/include/llvm/Analysis/CFGPrinter.h b/llvm/include/llvm/Analysis/CFGPrinter.h
index b65dee7e93423..ec26da87eb916 100644
--- a/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -18,7 +18,6 @@
 #ifndef LLVM_ANALYSIS_CFGPRINTER_H
 #define LLVM_ANALYSIS_CFGPRINTER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/HeatUtils.h"
@@ -28,6 +27,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ProfDataUtils.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/DOTGraphTraits.h"
 #include "llvm/Support/FormatVariadic.h"
 
@@ -75,7 +75,7 @@ class DOTFuncInfo {
   LLVM_ABI ~DOTFuncInfo();
 
   LLVM_ABI DOTFuncInfo(const Function *F, const BlockFrequencyInfo *BFI,
-              const BranchProbabilityInfo *BPI, uint64_t MaxFreq);
+                       const BranchProbabilityInfo *BPI, uint64_t MaxFreq);
 
   const BlockFrequencyInfo *getBFI() const { return BFI; }
 
@@ -326,7 +326,8 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
                         " fontname=\"Courier\"";
     return Attrs;
   }
-  LLVM_ABI bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
+  LLVM_ABI bool isNodeHidden(const BasicBlock *Node,
+                             const DOTFuncInfo *CFGInfo);
   LLVM_ABI void computeDeoptOrUnreachablePaths(const Function *F);
 };
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/CGSCCPassManager.h b/llvm/include/llvm/Analysis/CGSCCPassManager.h
index cd42840eaaff3..87cfd9e6064e9 100644
--- a/llvm/include/llvm/Analysis/CGSCCPassManager.h
+++ b/llvm/include/llvm/Analysis/CGSCCPassManager.h
@@ -88,11 +88,11 @@
 #ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
 #define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <utility>
@@ -111,7 +111,8 @@ class Module;
 /// Extern template declaration for the analysis set for this IR unit.
 extern template class LLVM_TEMPLATE_ABI AllAnalysesOn<LazyCallGraph::SCC>;
 
-extern template class LLVM_TEMPLATE_ABI AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+extern template class LLVM_TEMPLATE_ABI
+    AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
 
 /// The CGSCC analysis manager.
 ///
@@ -189,7 +190,7 @@ template <> class CGSCCAnalysisManagerModuleProxy::Result {
   /// analyses in the \c CGSCCAnalysisManager are potentially invalidated based
   /// on the set of preserved analyses.
   LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
-                  ModuleAnalysisManager::Invalidator &Inv);
+                           ModuleAnalysisManager::Invalidator &Inv);
 
 private:
   CGSCCAnalysisManager *InnerAM;
@@ -390,14 +391,15 @@ class FunctionAnalysisManagerCGSCCProxy
     }
 
     LLVM_ABI bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
-                    CGSCCAnalysisManager::Invalidator &Inv);
+                             CGSCCAnalysisManager::Invalidator &Inv);
 
   private:
     FunctionAnalysisManager *FAM;
   };
 
   /// Computes the \c FunctionAnalysisManager and stores it in the result proxy.
-  LLVM_ABI Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);
+  LLVM_ABI Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                      LazyCallGraph &);
 
 private:
   friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;
@@ -405,7 +407,8 @@ class FunctionAnalysisManagerCGSCCProxy
   LLVM_ABI static AnalysisKey Key;
 };
 
-extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+extern template class LLVM_TEMPLATE_ABI
+    OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
 
 /// A proxy from a \c CGSCCAnalysisManager to a \c Function.
 using CGSCCAnalysisManagerFunctionProxy =
@@ -466,8 +469,9 @@ class CGSCCToFunctionPassAdaptor
   }
 
   /// Runs the function pass across every function in the module.
-  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
-                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C,
+                                 CGSCCAnalysisManager &AM, LazyCallGraph &CG,
+                                 CGSCCUpdateResult &UR);
 
   void printPipeline(raw_ostream &OS,
                      function_ref<StringRef(StringRef)> MapClassName2PassName) {
@@ -552,8 +556,9 @@ class DevirtSCCRepeatedPass : public PassInfoMixin<DevirtSCCRepeatedPass> {
 
   /// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
   /// whenever an indirect call is refined.
-  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
-                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC,
+                                 CGSCCAnalysisManager &AM, LazyCallGraph &CG,
+                                 CGSCCUpdateResult &UR);
 
   void printPipeline(raw_ostream &OS,
                      function_ref<StringRef(StringRef)> MapClassName2PassName) {
diff --git a/llvm/include/llvm/Analysis/CallGraph.h b/llvm/include/llvm/Analysis/CallGraph.h
index 06e0e40cab280..916f75118b343 100644
--- a/llvm/include/llvm/Analysis/CallGraph.h
+++ b/llvm/include/llvm/Analysis/CallGraph.h
@@ -45,11 +45,11 @@
 #ifndef LLVM_ANALYSIS_CALLGRAPH_H
 #define LLVM_ANALYSIS_CALLGRAPH_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <map>
 #include <memory>
@@ -101,7 +101,7 @@ class CallGraph {
   Module &getModule() const { return M; }
 
   LLVM_ABI bool invalidate(Module &, const PreservedAnalyses &PA,
-                  ModuleAnalysisManager::Invalidator &);
+                           ModuleAnalysisManager::Invalidator &);
 
   inline iterator begin() { return FunctionMap.begin(); }
   inline iterator end() { return FunctionMap.end(); }
@@ -257,7 +257,7 @@ class CallGraphNode {
   ///
   /// Note that this method takes linear time, so it should be used sparingly.
   LLVM_ABI void replaceCallEdge(CallBase &Call, CallBase &NewCall,
-                       CallGraphNode *NewNode);
+                                CallGraphNode *NewNode);
 
 private:
   friend class CallGraph;
diff --git a/llvm/include/llvm/Analysis/CallGraphSCCPass.h b/llvm/include/llvm/Analysis/CallGraphSCCPass.h
index cd663d5bea5ed..cf6fd1946668b 100644
--- a/llvm/include/llvm/Analysis/CallGraphSCCPass.h
+++ b/llvm/include/llvm/Analysis/CallGraphSCCPass.h
@@ -20,9 +20,9 @@
 #ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
 #define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <vector>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/CallPrinter.h b/llvm/include/llvm/Analysis/CallPrinter.h
index 91fa7d0410dad..8f28d8af4671f 100644
--- a/llvm/include/llvm/Analysis/CallPrinter.h
+++ b/llvm/include/llvm/Analysis/CallPrinter.h
@@ -14,8 +14,8 @@
 #ifndef LLVM_ANALYSIS_CALLPRINTER_H
 #define LLVM_ANALYSIS_CALLPRINTER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
diff --git a/llvm/include/llvm/Analysis/CaptureTracking.h b/llvm/include/llvm/Analysis/CaptureTracking.h
index 44fe9232b20f2..ed160ca3596e8 100644
--- a/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -13,8 +13,8 @@
 #ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
 #define LLVM_ANALYSIS_CAPTURETRACKING_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/ModRef.h"
 
 namespace llvm {
@@ -46,7 +46,7 @@ namespace llvm {
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
   LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
-                            unsigned MaxUsesToExplore = 0);
+                                     unsigned MaxUsesToExplore = 0);
 
   /// Return which components of the pointer may be captured. Only consider
   /// components that are part of \p Mask. Once \p StopFn on the accumulated
@@ -75,10 +75,11 @@ namespace llvm {
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
   LLVM_ABI bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
-                                  const Instruction *I, const DominatorTree *DT,
-                                  bool IncludeI = false,
-                                  unsigned MaxUsesToExplore = 0,
-                                  const LoopInfo *LI = nullptr);
+                                           const Instruction *I,
+                                           const DominatorTree *DT,
+                                           bool IncludeI = false,
+                                           unsigned MaxUsesToExplore = 0,
+                                           const LoopInfo *LI = nullptr);
 
   /// Return which components of the pointer may be captured on the path to
   /// \p I. Only consider components that are part of \p Mask. Once \p StopFn
@@ -105,9 +106,10 @@ namespace llvm {
   //
   // Only consider components that are part of \p Mask.
   LLVM_ABI Instruction *FindEarliestCapture(const Value *V, Function &F,
-                                   bool ReturnCaptures, const DominatorTree &DT,
-                                   CaptureComponents Mask,
-                                   unsigned MaxUsesToExplore = 0);
+                                            bool ReturnCaptures,
+                                            const DominatorTree &DT,
+                                            CaptureComponents Mask,
+                                            unsigned MaxUsesToExplore = 0);
 
   /// Capture information for a specific Use.
   struct UseCaptureInfo {
@@ -179,7 +181,8 @@ namespace llvm {
   ///
   /// \p Base is the starting value of the capture analysis, which is
   /// relevant for address_is_null captures.
-  LLVM_ABI UseCaptureInfo DetermineUseCaptureKind(const Use &U, const Value *Base);
+  LLVM_ABI UseCaptureInfo DetermineUseCaptureKind(const Use &U,
+                                                  const Value *Base);
 
   /// PointerMayBeCaptured - Visit the value and the values derived from it and
   /// find values which appear to be capturing the pointer value. This feeds
@@ -191,7 +194,7 @@ namespace llvm {
   /// chain, without considering captures of values it may be based on, or
   /// implicit captures such as for external globals.
   LLVM_ABI void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
-                            unsigned MaxUsesToExplore = 0);
+                                     unsigned MaxUsesToExplore = 0);
 
   /// Returns true if the pointer is to a function-local object that never
   /// escapes from the function.
diff --git a/llvm/include/llvm/Analysis/CodeMetrics.h b/llvm/include/llvm/Analysis/CodeMetrics.h
index 22818e56334eb..cf8523f540fe5 100644
--- a/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -14,8 +14,8 @@
 #ifndef LLVM_ANALYSIS_CODEMETRICS_H
 #define LLVM_ANALYSIS_CODEMETRICS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/InstructionCost.h"
 
 namespace llvm {
@@ -78,19 +78,22 @@ struct CodeMetrics {
   unsigned NumRets = 0;
 
   /// Add information about a block to the current state.
-  LLVM_ABI void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
-                         const SmallPtrSetImpl<const Value *> &EphValues,
-                         bool PrepareForLTO = false, const Loop *L = nullptr);
+  LLVM_ABI void
+  analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
+                    const SmallPtrSetImpl<const Value *> &EphValues,
+                    bool PrepareForLTO = false, const Loop *L = nullptr);
 
   /// Collect a loop's ephemeral values (those used only by an assume
   /// or similar intrinsics in the loop).
-  LLVM_ABI static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
-                                     SmallPtrSetImpl<const Value *> &EphValues);
+  LLVM_ABI static void
+  collectEphemeralValues(const Loop *L, AssumptionCache *AC,
+                         SmallPtrSetImpl<const Value *> &EphValues);
 
   /// Collect a functions's ephemeral values (those used only by an
   /// assume or similar intrinsics in the function).
-  LLVM_ABI static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
-                                     SmallPtrSetImpl<const Value *> &EphValues);
+  LLVM_ABI static void
+  collectEphemeralValues(const Function *L, AssumptionCache *AC,
+                         SmallPtrSetImpl<const Value *> &EphValues);
 };
 
 }
diff --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
index ed71a4ee4bdfd..dcbac8a301025 100644
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -45,23 +45,25 @@ class Type;
 /// the constant. Because of constantexprs, this function is recursive.
 /// If the global is part of a dso_local_equivalent constant, return it through
 /// `Equiv` if it is provided.
-LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
-                                const DataLayout &DL,
-                                DSOLocalEquivalent **DSOEquiv = nullptr);
+LLVM_ABI bool
+IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
+                           const DataLayout &DL,
+                           DSOLocalEquivalent **DSOEquiv = nullptr);
 
 /// ConstantFoldInstruction - Try to constant fold the specified instruction.
 /// If successful, the constant result is returned, if not, null is returned.
 /// Note that this fails if not all of the operands are constant.  Otherwise,
 /// this function can only fail when attempting to fold instructions like loads
 /// and stores, which have no constant expression form.
-LLVM_ABI Constant *ConstantFoldInstruction(const Instruction *I, const DataLayout &DL,
-                                  const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI Constant *
+ConstantFoldInstruction(const Instruction *I, const DataLayout &DL,
+                        const TargetLibraryInfo *TLI = nullptr);
 
 /// ConstantFoldConstant - Fold the constant using the specified DataLayout.
 /// This function always returns a non-null constant: Either the folding result,
 /// or the original constant if further folding is not possible.
 LLVM_ABI Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
-                               const TargetLibraryInfo *TLI = nullptr);
+                                        const TargetLibraryInfo *TLI = nullptr);
 
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
 /// specified operands.  If successful, the constant result is returned, if not,
@@ -75,11 +77,9 @@ LLVM_ABI Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
 /// all uses of the original operation are replaced by the constant-folded
 /// result. The \p AllowNonDeterministic parameter controls whether this is
 /// allowed.
-LLVM_ABI Constant *ConstantFoldInstOperands(const Instruction *I,
-                                   ArrayRef<Constant *> Ops,
-                                   const DataLayout &DL,
-                                   const TargetLibraryInfo *TLI = nullptr,
-                                   bool AllowNonDeterministic = true);
+LLVM_ABI Constant *ConstantFoldInstOperands(
+    const Instruction *I, ArrayRef<Constant *> Ops, const DataLayout &DL,
+    const TargetLibraryInfo *TLI = nullptr, bool AllowNonDeterministic = true);
 
 /// Attempt to constant fold a compare instruction (icmp/fcmp) with the
 /// specified operands. Returns null or a constant expression of the specified
@@ -92,20 +92,21 @@ LLVM_ABI Constant *ConstantFoldCompareInstOperands(
 /// Attempt to constant fold a unary operation with the specified operand.
 /// Returns null on failure.
 LLVM_ABI Constant *ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
-                                     const DataLayout &DL);
+                                              const DataLayout &DL);
 
 /// Attempt to constant fold a binary operation with the specified operands.
 /// Returns null or a constant expression of the specified operands on failure.
 LLVM_ABI Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
-                                       Constant *RHS, const DataLayout &DL);
+                                                Constant *RHS,
+                                                const DataLayout &DL);
 
 /// Attempt to constant fold a floating point binary operation with the
 /// specified operands, applying the denormal handling mod to the operands.
 /// Returns null or a constant expression of the specified operands on failure.
-LLVM_ABI Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
-                                     Constant *RHS, const DataLayout &DL,
-                                     const Instruction *I,
-                                     bool AllowNonDeterministic = true);
+LLVM_ABI Constant *
+ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS,
+                           const DataLayout &DL, const Instruction *I,
+                           bool AllowNonDeterministic = true);
 
 /// Attempt to flush float point constant according to denormal mode set in the
 /// instruction's parent function attributes. If so, return a zero with the
@@ -116,81 +117,86 @@ LLVM_ABI Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
 /// If the calling function's "denormal-fp-math" input mode is "dynamic" for the
 /// floating-point type, returns nullptr for denormal inputs.
 LLVM_ABI Constant *FlushFPConstant(Constant *Operand, const Instruction *I,
-                          bool IsOutput);
+                                   bool IsOutput);
 
 /// Attempt to constant fold a select instruction with the specified
 /// operands. The constant result is returned if successful; if not, null is
 /// returned.
 LLVM_ABI Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
-                                        Constant *V2);
+                                                 Constant *V2);
 
 /// Attempt to constant fold a cast with the specified operand.  If it
 /// fails, it returns a constant expression of the specified operand.
-LLVM_ABI Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
-                                  const DataLayout &DL);
+LLVM_ABI Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C,
+                                           Type *DestTy, const DataLayout &DL);
 
 /// Constant fold a zext, sext or trunc, depending on IsSigned and whether the
 /// DestTy is wider or narrower than C. Returns nullptr on failure.
-LLVM_ABI Constant *ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned,
-                                  const DataLayout &DL);
+LLVM_ABI Constant *ConstantFoldIntegerCast(Constant *C, Type *DestTy,
+                                           bool IsSigned, const DataLayout &DL);
 
 /// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
 /// instruction with the specified operands and indices.  The constant result is
 /// returned if successful; if not, null is returned.
-LLVM_ABI Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
-                                             ArrayRef<unsigned> Idxs);
+LLVM_ABI Constant *ConstantFoldInsertValueInstruction(Constant *Agg,
+                                                      Constant *Val,
+                                                      ArrayRef<unsigned> Idxs);
 
 /// Attempt to constant fold an extractvalue instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
 LLVM_ABI Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
-                                              ArrayRef<unsigned> Idxs);
+                                                       ArrayRef<unsigned> Idxs);
 
 /// Attempt to constant fold an insertelement instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
 LLVM_ABI Constant *ConstantFoldInsertElementInstruction(Constant *Val,
-                                               Constant *Elt,
-                                               Constant *Idx);
+                                                        Constant *Elt,
+                                                        Constant *Idx);
 
 /// Attempt to constant fold an extractelement instruction with the
 /// specified operands and indices.  The constant result is returned if
 /// successful; if not, null is returned.
-LLVM_ABI Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
+LLVM_ABI Constant *ConstantFoldExtractElementInstruction(Constant *Val,
+                                                         Constant *Idx);
 
 /// Attempt to constant fold a shufflevector instruction with the
 /// specified operands and mask.  See class ShuffleVectorInst for a description
 /// of the mask representation. The constant result is returned if successful;
 /// if not, null is returned.
-LLVM_ABI Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
-                                               ArrayRef<int> Mask);
+LLVM_ABI Constant *ConstantFoldShuffleVectorInstruction(Constant *V1,
+                                                        Constant *V2,
+                                                        ArrayRef<int> Mask);
 
 /// Extract value of C at the given Offset reinterpreted as Ty. If bits past
 /// the end of C are accessed, they are assumed to be poison.
-LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
-                                    const DataLayout &DL);
+LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
+                                             const APInt &Offset,
+                                             const DataLayout &DL);
 
 /// Extract value of C reinterpreted as Ty. Same as previous API with zero
 /// offset.
 LLVM_ABI Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
-                                    const DataLayout &DL);
+                                             const DataLayout &DL);
 
 /// Return the value that a load from C with offset Offset would produce if it
 /// is constant and determinable. If this is not determinable, return null.
-LLVM_ABI Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset,
-                                       const DataLayout &DL);
+LLVM_ABI Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
+                                                APInt Offset,
+                                                const DataLayout &DL);
 
 /// Return the value that a load from C would produce if it is constant and
 /// determinable. If this is not determinable, return null.
 LLVM_ABI Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
-                                       const DataLayout &DL);
+                                                const DataLayout &DL);
 
 /// If C is a uniform value where all bits are the same (either all zero, all
 /// ones, all undef or all poison), return the corresponding uniform value in
 /// the new type. If the value is not uniform or the result cannot be
 /// represented, return null.
 LLVM_ABI Constant *ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
-                                           const DataLayout &DL);
+                                                    const DataLayout &DL);
 
 /// canConstantFoldCallTo - Return true if its even possible to fold a call to
 /// the specified function.
@@ -199,25 +205,27 @@ LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F);
 /// ConstantFoldCall - Attempt to constant fold a call to the specified function
 /// with the specified arguments, returning null if unsuccessful.
 LLVM_ABI Constant *ConstantFoldCall(const CallBase *Call, Function *F,
-                           ArrayRef<Constant *> Operands,
-                           const TargetLibraryInfo *TLI = nullptr,
-                           bool AllowNonDeterministic = true);
+                                    ArrayRef<Constant *> Operands,
+                                    const TargetLibraryInfo *TLI = nullptr,
+                                    bool AllowNonDeterministic = true);
 
 LLVM_ABI Constant *ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
-                                      Constant *RHS, Type *Ty,
-                                      Instruction *FMFSource);
+                                               Constant *RHS, Type *Ty,
+                                               Instruction *FMFSource);
 
 /// ConstantFoldLoadThroughBitcast - try to cast constant to destination type
 /// returning null if unsuccessful. Can cast pointer to pointer or pointer to
 /// integer and vice versa if their sizes are equal.
 LLVM_ABI Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
-                                         const DataLayout &DL);
+                                                  const DataLayout &DL);
 
 /// Check whether the given call has no side-effects.
 /// Specifically checks for math routimes which sometimes set errno.
-LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isMathLibCallNoop(const CallBase *Call,
+                                const TargetLibraryInfo *TLI);
 
-LLVM_ABI Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
+LLVM_ABI Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV,
+                                           uint64_t Offset);
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/ConstraintSystem.h b/llvm/include/llvm/Analysis/ConstraintSystem.h
index ddd378b60ec5d..307ad50e81fec 100644
--- a/llvm/include/llvm/Analysis/ConstraintSystem.h
+++ b/llvm/include/llvm/Analysis/ConstraintSystem.h
@@ -9,10 +9,10 @@
 #ifndef LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
 #define LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/MathExtras.h"
 
 #include <string>
diff --git a/llvm/include/llvm/Analysis/CtxProfAnalysis.h b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
index b65c0bd182e58..5c9823b0f6cc1 100644
--- a/llvm/include/llvm/Analysis/CtxProfAnalysis.h
+++ b/llvm/include/llvm/Analysis/CtxProfAnalysis.h
@@ -9,13 +9,13 @@
 #ifndef LLVM_ANALYSIS_CTXPROFANALYSIS_H
 #define LLVM_ANALYSIS_CTXPROFANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/IR/GlobalValue.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/ProfileData/PGOCtxProfReader.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 
 namespace llvm {
@@ -122,7 +122,8 @@ class CtxProfAnalysis : public AnalysisInfoMixin<CtxProfAnalysis> {
 
 public:
   LLVM_ABI static AnalysisKey Key;
-  LLVM_ABI explicit CtxProfAnalysis(std::optional<StringRef> Profile = std::nullopt);
+  LLVM_ABI explicit CtxProfAnalysis(
+      std::optional<StringRef> Profile = std::nullopt);
 
   using Result = PGOContextualProfile;
 
@@ -136,7 +137,8 @@ class CtxProfAnalysis : public AnalysisInfoMixin<CtxProfAnalysis> {
   LLVM_ABI static InstrProfIncrementInst *getBBInstrumentation(BasicBlock &BB);
 
   /// Get the step instrumentation associated with a `select`
-  LLVM_ABI static InstrProfIncrementInstStep *getSelectInstrumentation(SelectInst &SI);
+  LLVM_ABI static InstrProfIncrementInstStep *
+  getSelectInstrumentation(SelectInst &SI);
 
   // FIXME: refactor to an advisor model, and separate
   LLVM_ABI static void collectIndirectCallPromotionList(
@@ -174,15 +176,15 @@ class ProfileAnnotator {
   // false if the select doesn't have instrumentation or if the count of the
   // parent BB is 0.
   LLVM_ABI bool getSelectInstrProfile(SelectInst &SI, uint64_t &TrueCount,
-                             uint64_t &FalseCount) const;
+                                      uint64_t &FalseCount) const;
   // Clears Profile and populates it with the edge weights, in the same order as
   // they need to appear in the MD_prof metadata. Also computes the max of those
   // weights an returns it in MaxCount. Returs false if:
   //   - the BB has less than 2 successors
   //   - the counts are 0
   LLVM_ABI bool getOutgoingBranchWeights(BasicBlock &BB,
-                                SmallVectorImpl<uint64_t> &Profile,
-                                uint64_t &MaxCount) const;
+                                         SmallVectorImpl<uint64_t> &Profile,
+                                         uint64_t &MaxCount) const;
   LLVM_ABI ~ProfileAnnotator();
 };
 
diff --git a/llvm/include/llvm/Analysis/DDG.h b/llvm/include/llvm/Analysis/DDG.h
index d0ec687103adc..443f84862a98c 100644
--- a/llvm/include/llvm/Analysis/DDG.h
+++ b/llvm/include/llvm/Analysis/DDG.h
@@ -13,12 +13,12 @@
 #ifndef LLVM_ANALYSIS_DDG_H
 #define LLVM_ANALYSIS_DDG_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DirectedGraph.h"
 #include "llvm/Analysis/DependenceAnalysis.h"
 #include "llvm/Analysis/DependenceGraphBuilder.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class Function;
@@ -344,7 +344,8 @@ class LLVM_ABI DataDependenceGraph : public DDGBase, public DDGInfo {
 ///
 /// For information about time complexity of the build algorithm see the
 /// comments near the declaration of AbstractDependenceGraphBuilder.
-class LLVM_ABI DDGBuilder : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
+class LLVM_ABI DDGBuilder
+    : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
 public:
   DDGBuilder(DataDependenceGraph &G, DependenceInfo &D,
              const BasicBlockListType &BBs)
@@ -415,7 +416,8 @@ LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const DataDependenceGraph &G);
 class DDGAnalysis : public AnalysisInfoMixin<DDGAnalysis> {
 public:
   using Result = std::unique_ptr<DataDependenceGraph>;
-  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM,
+                      LoopStandardAnalysisResults &AR);
 
 private:
   friend AnalysisInfoMixin<DDGAnalysis>;
@@ -427,7 +429,8 @@ class DDGAnalysisPrinterPass : public PassInfoMixin<DDGAnalysisPrinterPass> {
 public:
   explicit DDGAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
   LLVM_ABI PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+                                 LoopStandardAnalysisResults &AR,
+                                 LPMUpdater &U);
   static bool isRequired() { return true; }
 
 private:
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index 00ae1837020f3..03ea0b6362421 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -9,7 +9,6 @@
 #ifndef LLVM_ANALYSIS_DXILRESOURCE_H
 #define LLVM_ANALYSIS_DXILRESOURCE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
@@ -18,6 +17,7 @@
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Alignment.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/DXILABI.h"
 #include <climits>
 #include <cstdint>
@@ -290,8 +290,9 @@ class ResourceTypeInfo {
   dxil::ResourceKind Kind;
 
 public:
-  LLVM_ABI ResourceTypeInfo(TargetExtType *HandleTy, const dxil::ResourceClass RC,
-                   const dxil::ResourceKind Kind);
+  LLVM_ABI ResourceTypeInfo(TargetExtType *HandleTy,
+                            const dxil::ResourceClass RC,
+                            const dxil::ResourceKind Kind);
   ResourceTypeInfo(TargetExtType *HandleTy)
       : ResourceTypeInfo(HandleTy, {}, dxil::ResourceKind::Invalid) {}
 
@@ -382,7 +383,8 @@ class ResourceInfo {
   StringRef getName() const { return Symbol ? Symbol->getName() : ""; }
 
   bool hasSymbol() const { return Symbol; }
-  LLVM_ABI GlobalVariable *createSymbol(Module &M, StructType *Ty, StringRef Name = "");
+  LLVM_ABI GlobalVariable *createSymbol(Module &M, StructType *Ty,
+                                        StringRef Name = "");
   LLVM_ABI MDTuple *getAsMetadata(Module &M, dxil::ResourceTypeInfo &RTI) const;
 
   LLVM_ABI std::pair<uint32_t, uint32_t>
@@ -398,7 +400,7 @@ class ResourceInfo {
   }
 
   LLVM_ABI void print(raw_ostream &OS, dxil::ResourceTypeInfo &RTI,
-             const DataLayout &DL) const;
+                      const DataLayout &DL) const;
 };
 
 } // namespace dxil
@@ -410,7 +412,7 @@ class DXILResourceTypeMap {
 
 public:
   LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
-                  ModuleAnalysisManager::Invalidator &Inv);
+                           ModuleAnalysisManager::Invalidator &Inv);
 
   dxil::ResourceTypeInfo &operator[](TargetExtType *Ty) {
     auto It = Infos.find(Ty);
@@ -557,7 +559,7 @@ class DXILResourceMap {
   bool hasInvalidCounterDirection() const { return HasInvalidDirection; }
 
   LLVM_ABI void print(raw_ostream &OS, DXILResourceTypeMap &DRTM,
-             const DataLayout &DL) const;
+                      const DataLayout &DL) const;
 
   friend class DXILResourceAnalysis;
   friend class DXILResourceWrapperPass;
@@ -701,8 +703,8 @@ class DXILResourceBindingInfo {
   }
 
   // Size == -1 means unbounded array
-  LLVM_ABI std::optional<uint32_t> findAvailableBinding(dxil::ResourceClass RC,
-                                               uint32_t Space, int32_t Size);
+  LLVM_ABI std::optional<uint32_t>
+  findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size);
 
   friend class DXILResourceBindingAnalysis;
   friend class DXILResourceBindingWrapperPass;
diff --git a/llvm/include/llvm/Analysis/DemandedBits.h b/llvm/include/llvm/Analysis/DemandedBits.h
index b7886e3a2a212..249b2d014cab9 100644
--- a/llvm/include/llvm/Analysis/DemandedBits.h
+++ b/llvm/include/llvm/Analysis/DemandedBits.h
@@ -21,11 +21,11 @@
 #ifndef LLVM_ANALYSIS_DEMANDEDBITS_H
 #define LLVM_ANALYSIS_DEMANDEDBITS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -68,16 +68,16 @@ class DemandedBits {
   /// Compute alive bits of one addition operand from alive output and known
   /// operand bits
   LLVM_ABI static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
-                                           const APInt &AOut,
-                                           const KnownBits &LHS,
-                                           const KnownBits &RHS);
+                                                    const APInt &AOut,
+                                                    const KnownBits &LHS,
+                                                    const KnownBits &RHS);
 
   /// Compute alive bits of one subtraction operand from alive output and known
   /// operand bits
   LLVM_ABI static APInt determineLiveOperandBitsSub(unsigned OperandNo,
-                                           const APInt &AOut,
-                                           const KnownBits &LHS,
-                                           const KnownBits &RHS);
+                                                    const APInt &AOut,
+                                                    const KnownBits &LHS,
+                                                    const KnownBits &RHS);
 
 private:
   void performAnalysis();
diff --git a/llvm/include/llvm/Analysis/DependenceAnalysis.h b/llvm/include/llvm/Analysis/DependenceAnalysis.h
index 8950f704c8851..b42e4bba185fb 100644
--- a/llvm/include/llvm/Analysis/DependenceAnalysis.h
+++ b/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -39,12 +39,12 @@
 #ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
 #define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallBitVector.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
   class AAResults;
@@ -305,7 +305,7 @@ namespace llvm {
 
     /// Handle transitive invalidation when the cached analysis results go away.
     LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                    FunctionAnalysisManager::Invalidator &Inv);
+                             FunctionAnalysisManager::Invalidator &Inv);
 
     /// depends - Tests for a dependence between the Src and Dst instructions.
     /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
@@ -314,8 +314,9 @@ namespace llvm {
     /// solved at compilation time. By default UnderRuntimeAssumptions is false
     /// for a safe approximation of the dependence relation that does not
     /// require runtime checks.
-    LLVM_ABI std::unique_ptr<Dependence> depends(Instruction *Src, Instruction *Dst,
-                                        bool UnderRuntimeAssumptions = false);
+    LLVM_ABI std::unique_ptr<Dependence>
+    depends(Instruction *Src, Instruction *Dst,
+            bool UnderRuntimeAssumptions = false);
 
     /// getSplitIteration - Give a dependence that's splittable at some
     /// particular level, return the iteration that should be used to split
@@ -357,7 +358,8 @@ namespace llvm {
     ///
     /// breaks the dependence and allows us to vectorize/parallelize
     /// both loops.
-    LLVM_ABI const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
+    LLVM_ABI const SCEV *getSplitIteration(const Dependence &Dep,
+                                           unsigned Level);
 
     Function *getFunction() const { return F; }
 
@@ -469,11 +471,12 @@ namespace llvm {
       LLVM_ABI const Loop *getAssociatedLoop() const;
 
       /// setPoint - Change a constraint to Point.
-      LLVM_ABI void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
+      LLVM_ABI void setPoint(const SCEV *X, const SCEV *Y,
+                             const Loop *CurrentLoop);
 
       /// setLine - Change a constraint to Line.
-      LLVM_ABI void setLine(const SCEV *A, const SCEV *B,
-                   const SCEV *C, const Loop *CurrentLoop);
+      LLVM_ABI void setLine(const SCEV *A, const SCEV *B, const SCEV *C,
+                            const Loop *CurrentLoop);
 
       /// setDistance - Change a constraint to Distance.
       LLVM_ABI void setDistance(const SCEV *D, const Loop *CurrentLoop);
diff --git a/llvm/include/llvm/Analysis/DomPrinter.h b/llvm/include/llvm/Analysis/DomPrinter.h
index e01317ff57b75..9c99300087fce 100644
--- a/llvm/include/llvm/Analysis/DomPrinter.h
+++ b/llvm/include/llvm/Analysis/DomPrinter.h
@@ -14,10 +14,10 @@
 #ifndef LLVM_ANALYSIS_DOMPRINTER_H
 #define LLVM_ANALYSIS_DOMPRINTER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/DOTGraphTraitsPass.h"
 #include "llvm/Analysis/PostDominators.h"
 #include "llvm/IR/Dominators.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
diff --git a/llvm/include/llvm/Analysis/DomTreeUpdater.h b/llvm/include/llvm/Analysis/DomTreeUpdater.h
index 206f66a8ef564..db4fcd6c1bb70 100644
--- a/llvm/include/llvm/Analysis/DomTreeUpdater.h
+++ b/llvm/include/llvm/Analysis/DomTreeUpdater.h
@@ -26,8 +26,8 @@ namespace llvm {
 class DomTreeUpdater;
 class PostDominatorTree;
 
-extern template class LLVM_TEMPLATE_ABI GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
-                                            PostDominatorTree>;
+extern template class LLVM_TEMPLATE_ABI
+    GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>;
 
 class DomTreeUpdater
     : public GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
@@ -81,7 +81,7 @@ class DomTreeUpdater
   /// modified while awaiting deletion. Multiple callbacks can be queued for one
   /// DelBB under Lazy UpdateStrategy.
   LLVM_ABI void callbackDeleteBB(BasicBlock *DelBB,
-                        std::function<void(BasicBlock *)> Callback);
+                                 std::function<void(BasicBlock *)> Callback);
 
   ///@}
 
diff --git a/llvm/include/llvm/Analysis/EphemeralValuesCache.h b/llvm/include/llvm/Analysis/EphemeralValuesCache.h
index 4d136490535ed..f99d157c8e2c9 100644
--- a/llvm/include/llvm/Analysis/EphemeralValuesCache.h
+++ b/llvm/include/llvm/Analysis/EphemeralValuesCache.h
@@ -14,9 +14,9 @@
 #ifndef LLVM_ANALYSIS_EPHEMERALVALUESCACHE_H
 #define LLVM_ANALYSIS_EPHEMERALVALUESCACHE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
diff --git a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
index 2b5631265780f..babb6d9d6cf0c 100644
--- a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
+++ b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
@@ -14,10 +14,10 @@
 #ifndef LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
 #define LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class BasicBlock;
@@ -148,7 +148,8 @@ class FunctionPropertiesAnalysis
 
   using Result = const FunctionPropertiesInfo;
 
-  LLVM_ABI FunctionPropertiesInfo run(Function &F, FunctionAnalysisManager &FAM);
+  LLVM_ABI FunctionPropertiesInfo run(Function &F,
+                                      FunctionAnalysisManager &FAM);
 };
 
 /// Printer pass for the FunctionPropertiesAnalysis results.
@@ -185,8 +186,9 @@ class FunctionPropertiesUpdater {
   BasicBlock &CallSiteBB;
   Function &Caller;
 
-  LLVM_ABI static bool isUpdateValid(Function &F, const FunctionPropertiesInfo &FPI,
-                            FunctionAnalysisManager &FAM);
+  LLVM_ABI static bool isUpdateValid(Function &F,
+                                     const FunctionPropertiesInfo &FPI,
+                                     FunctionAnalysisManager &FAM);
 
   DominatorTree &getUpdatedDominatorTree(FunctionAnalysisManager &FAM) const;
 
diff --git a/llvm/include/llvm/Analysis/GlobalsModRef.h b/llvm/include/llvm/Analysis/GlobalsModRef.h
index b3d7defd8d05c..10275ac0d784c 100644
--- a/llvm/include/llvm/Analysis/GlobalsModRef.h
+++ b/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_GLOBALSMODREF_H
 #define LLVM_ANALYSIS_GLOBALSMODREF_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <list>
 
 namespace llvm {
@@ -84,7 +84,7 @@ class GlobalsAAResult : public AAResultBase {
   LLVM_ABI ~GlobalsAAResult();
 
   LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA,
-                  ModuleAnalysisManager::Invalidator &);
+                           ModuleAnalysisManager::Invalidator &);
 
   LLVM_ABI static GlobalsAAResult
   analyzeModule(Module &M,
@@ -94,12 +94,14 @@ class GlobalsAAResult : public AAResultBase {
   //------------------------------------------------
   // Implement the AliasAnalysis API
   //
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI);
 
   using AAResultBase::getModRefInfo;
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
 
   using AAResultBase::getMemoryEffects;
   /// getMemoryEffects - Return the behavior of the specified function if
diff --git a/llvm/include/llvm/Analysis/HeatUtils.h b/llvm/include/llvm/Analysis/HeatUtils.h
index 38266fbf8c44b..179862c3df0ed 100644
--- a/llvm/include/llvm/Analysis/HeatUtils.h
+++ b/llvm/include/llvm/Analysis/HeatUtils.h
@@ -23,8 +23,8 @@ class BlockFrequencyInfo;
 class Function;
 
 // Returns number of calls of calledFunction by callerFunction.
-LLVM_ABI uint64_t
-getNumOfCalls(Function &callerFunction, Function &calledFunction);
+LLVM_ABI uint64_t getNumOfCalls(Function &callerFunction,
+                                Function &calledFunction);
 
 // Returns the maximum frequency of a BB in a function.
 LLVM_ABI uint64_t getMaxFreq(const Function &F, const BlockFrequencyInfo *BFI);
diff --git a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
index 19b29afb4f6a6..80f22cbc3618e 100644
--- a/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
+++ b/llvm/include/llvm/Analysis/IRSimilarityIdentifier.h
@@ -49,12 +49,12 @@
 #ifndef LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
 #define LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 
 namespace llvm {
@@ -169,7 +169,8 @@ struct IRInstructionData
   /// operands. This extra information allows for similarity matching to make
   /// assertions that allow for more flexibility when checking for whether an
   /// Instruction performs the same operation.
-  LLVM_ABI IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData(Instruction &I, bool Legality,
+                             IRInstructionDataList &IDL);
   LLVM_ABI IRInstructionData(IRInstructionDataList &IDL);
 
   /// Fills data stuctures for IRInstructionData when it is constructed from a
@@ -188,7 +189,7 @@ struct IRInstructionData
   /// in a greater than form. Otherwise, the predicate is unchanged.
   ///
   /// \param CI - The comparison operation to find a consistent preidcate for.
-  /// \return the consistent comparison predicate. 
+  /// \return the consistent comparison predicate.
   LLVM_ABI static CmpInst::Predicate predicateForConsistency(CmpInst *CI);
 
   /// For an IRInstructionData containing a branch, finds the
@@ -428,8 +429,9 @@ struct IRInstructionMapper {
   /// \param IDL - The InstructionDataList that the IRInstructionData is
   /// inserted into.
   /// \returns An allocated IRInstructionData struct.
-  LLVM_ABI IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
-                                               IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData *
+  allocateIRInstructionData(Instruction &I, bool Legality,
+                            IRInstructionDataList &IDL);
 
   /// Get an empty allocated IRInstructionData struct using the
   /// InstDataAllocator.
@@ -437,7 +439,8 @@ struct IRInstructionMapper {
   /// \param IDL - The InstructionDataList that the IRInstructionData is
   /// inserted into.
   /// \returns An allocated IRInstructionData struct.
-  LLVM_ABI IRInstructionData *allocateIRInstructionData(IRInstructionDataList &IDL);
+  LLVM_ABI IRInstructionData *
+  allocateIRInstructionData(IRInstructionDataList &IDL);
 
   /// Get an allocated IRInstructionDataList object using the IDLAllocator.
   ///
@@ -471,9 +474,10 @@ struct IRInstructionMapper {
   /// \param [in] BB - The BasicBlock to be mapped to integers.
   /// \param [in,out] InstrList - Vector of IRInstructionData to append to.
   /// \param [in,out] IntegerMapping - Vector of unsigned integers to append to.
-  LLVM_ABI void convertToUnsignedVec(BasicBlock &BB,
-                            std::vector<IRInstructionData *> &InstrList,
-                            std::vector<unsigned> &IntegerMapping);
+  LLVM_ABI void
+  convertToUnsignedVec(BasicBlock &BB,
+                       std::vector<IRInstructionData *> &InstrList,
+                       std::vector<unsigned> &IntegerMapping);
 
   /// Maps an Instruction to a legal integer.
   ///
@@ -482,9 +486,10 @@ struct IRInstructionMapper {
   /// append to.
   /// \param [in,out] InstrListForBB - Vector of InstructionData to append to.
   /// \returns The integer \p It was mapped to.
-  LLVM_ABI unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
-                              std::vector<unsigned> &IntegerMappingForBB,
-                              std::vector<IRInstructionData *> &InstrListForBB);
+  LLVM_ABI unsigned
+  mapToLegalUnsigned(BasicBlock::iterator &It,
+                     std::vector<unsigned> &IntegerMappingForBB,
+                     std::vector<IRInstructionData *> &InstrListForBB);
 
   /// Maps an Instruction to an illegal integer.
   ///
@@ -684,22 +689,22 @@ class IRSimilarityCandidate {
   /// \param FirstInstIt - The starting IRInstructionData of the region.
   /// \param LastInstIt - The ending IRInstructionData of the region.
   LLVM_ABI IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
-                        IRInstructionData *FirstInstIt,
-                        IRInstructionData *LastInstIt);
+                                 IRInstructionData *FirstInstIt,
+                                 IRInstructionData *LastInstIt);
 
   /// \param A - The first IRInstructionCandidate to compare.
   /// \param B - The second IRInstructionCandidate to compare.
   /// \returns True when every IRInstructionData in \p A is similar to every
   /// IRInstructionData in \p B.
   LLVM_ABI static bool isSimilar(const IRSimilarityCandidate &A,
-                        const IRSimilarityCandidate &B);
+                                 const IRSimilarityCandidate &B);
 
   /// \param [in] A - The first IRInstructionCandidate to compare.
   /// \param [in] B - The second IRInstructionCandidate to compare.
   /// \returns True when every IRInstructionData in \p A is structurally similar
   /// to \p B.
   LLVM_ABI static bool compareStructure(const IRSimilarityCandidate &A,
-                               const IRSimilarityCandidate &B);
+                                        const IRSimilarityCandidate &B);
 
   /// \param [in] A - The first IRInstructionCandidate to compare.
   /// \param [in] B - The second IRInstructionCandidate to compare.
@@ -752,7 +757,7 @@ class IRSimilarityCandidate {
   /// operand mappings to compare.
   /// \returns true if the IRSimilarityCandidates operands are compatible.
   LLVM_ABI static bool compareNonCommutativeOperandMapping(OperandMapping A,
-                                                  OperandMapping B);
+                                                           OperandMapping B);
 
   /// Compare the operands in \p A and \p B and check that the current mapping
   /// of global value numbers from \p A to \p B and \p B to \A is consistent
@@ -764,7 +769,7 @@ class IRSimilarityCandidate {
   /// operand mappings to compare.
   /// \returns true if the IRSimilarityCandidates operands are compatible.
   LLVM_ABI static bool compareCommutativeOperandMapping(OperandMapping A,
-                                               OperandMapping B);
+                                                        OperandMapping B);
 
   /// Compare the GVN of the assignment value in corresponding instructions in
   /// IRSimilarityCandidates \p A and \p B and check that there exists a mapping
@@ -820,7 +825,7 @@ class IRSimilarityCandidate {
   /// and incoming block.
   /// \returns true if the relative locations match.
   LLVM_ABI static bool checkRelativeLocations(RelativeLocMapping A,
-                                     RelativeLocMapping B);
+                                              RelativeLocMapping B);
 
   /// Create a mapping from the value numbering to a different separate set of
   /// numbers. This will serve as a guide for relating one candidate to another.
@@ -829,7 +834,8 @@ class IRSimilarityCandidate {
   ///
   /// \param [in, out] CurrCand - The IRSimilarityCandidate to create a
   /// canonical numbering for.
-  LLVM_ABI static void createCanonicalMappingFor(IRSimilarityCandidate &CurrCand);
+  LLVM_ABI static void
+  createCanonicalMappingFor(IRSimilarityCandidate &CurrCand);
 
   /// Create a mapping for the value numbering of the calling
   /// IRSimilarityCandidate, to a different separate set of numbers, based on
@@ -848,7 +854,7 @@ class IRSimilarityCandidate {
       IRSimilarityCandidate &SourceCand,
       DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
       DenseMap<unsigned, DenseSet<unsigned>> &FromSourceMapping);
-  
+
   /// Create a mapping for the value numbering of the calling
   /// IRSimilarityCandidate, to a different separate set of numbers, based on
   /// the canonical ordering in \p SourceCand. These are defined based on the
@@ -868,11 +874,10 @@ class IRSimilarityCandidate {
   /// \param FromSourceMapping - The mapping of value numbers from \p SoureCand
   /// to this candidate.
   LLVM_ABI void createCanonicalRelationFrom(
-      IRSimilarityCandidate &SourceCand,
-      DenseMap<unsigned, unsigned> &OneToOne,
+      IRSimilarityCandidate &SourceCand, DenseMap<unsigned, unsigned> &OneToOne,
       DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
       DenseMap<unsigned, DenseSet<unsigned>> &FromSourceMapping);
-  
+
   /// Create a mapping for the value numbering of the calling
   /// IRSimilarityCandidate, to a different separate set of numbers, based on
   /// the canonical ordering in \p SourceCand. These are defined based on the
@@ -887,10 +892,10 @@ class IRSimilarityCandidate {
   /// \p SourceCand.
   /// \param TargetCandLarge -  The IRSimilarityCandidate fully containing
   /// this Candidate.
-  LLVM_ABI void createCanonicalRelationFrom(
-      IRSimilarityCandidate &SourceCand,
-      IRSimilarityCandidate &SourceCandLarge,
-      IRSimilarityCandidate &TargetCandLarge);
+  LLVM_ABI void
+  createCanonicalRelationFrom(IRSimilarityCandidate &SourceCand,
+                              IRSimilarityCandidate &SourceCandLarge,
+                              IRSimilarityCandidate &TargetCandLarge);
 
   /// \param [in,out] BBSet - The set to track the basic blocks.
   void getBasicBlocks(DenseSet<BasicBlock *> &BBSet) const {
@@ -920,7 +925,7 @@ class IRSimilarityCandidate {
   /// \returns true if the IRSimilarityCandidates do not have overlapping
   /// instructions.
   LLVM_ABI static bool overlap(const IRSimilarityCandidate &A,
-                      const IRSimilarityCandidate &B);
+                               const IRSimilarityCandidate &B);
 
   /// \returns the number of instructions in this Candidate.
   unsigned getLength() const { return Len; }
diff --git a/llvm/include/llvm/Analysis/IVDescriptors.h b/llvm/include/llvm/Analysis/IVDescriptors.h
index fe4003aa054a5..199b35ed6c310 100644
--- a/llvm/include/llvm/Analysis/IVDescriptors.h
+++ b/llvm/include/llvm/Analysis/IVDescriptors.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_IVDESCRIPTORS_H
 #define LLVM_ANALYSIS_IVDESCRIPTORS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -132,17 +132,18 @@ class RecurrenceDescriptor {
   /// advances the instruction pointer 'I' from the compare instruction to the
   /// select instruction and stores this pointer in 'PatternLastInst' member of
   /// the returned struct.
-  LLVM_ABI static InstDesc isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I,
-                                    RecurKind Kind, InstDesc &Prev,
-                                    FastMathFlags FuncFMF, ScalarEvolution *SE);
+  LLVM_ABI static InstDesc
+  isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I, RecurKind Kind,
+                    InstDesc &Prev, FastMathFlags FuncFMF, ScalarEvolution *SE);
 
   /// Returns true if instruction I has multiple uses in Insts
   LLVM_ABI static bool hasMultipleUsesOf(Instruction *I,
-                                SmallPtrSetImpl<Instruction *> &Insts,
-                                unsigned MaxNumUses);
+                                         SmallPtrSetImpl<Instruction *> &Insts,
+                                         unsigned MaxNumUses);
 
   /// Returns true if all uses of the instruction I is within the Set.
-  LLVM_ABI static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+  LLVM_ABI static bool areAllUsesIn(Instruction *I,
+                                    SmallPtrSetImpl<Instruction *> &Set);
 
   /// Returns a struct describing if the instruction is a llvm.(s/u)(min/max),
   /// llvm.minnum/maxnum or a Select(ICmp(X, Y), X, Y) pair of instructions
@@ -150,7 +151,7 @@ class RecurrenceDescriptor {
   /// Kind. \p Prev specifies the description of an already processed select
   /// instruction, so its corresponding cmp can be matched to it.
   LLVM_ABI static InstDesc isMinMaxPattern(Instruction *I, RecurKind Kind,
-                                  const InstDesc &Prev);
+                                           const InstDesc &Prev);
 
   /// Returns a struct describing whether the instruction is either a
   ///   Select(ICmp(A, B), X, Y), or
@@ -158,8 +159,8 @@ class RecurrenceDescriptor {
   /// where one of (X, Y) is a loop invariant integer and the other is a PHI
   /// value. \p Prev specifies the description of an already processed select
   /// instruction, so its corresponding cmp can be matched to it.
-  LLVM_ABI static InstDesc isAnyOfPattern(Loop *Loop, PHINode *OrigPhi, Instruction *I,
-                                 InstDesc &Prev);
+  LLVM_ABI static InstDesc isAnyOfPattern(Loop *Loop, PHINode *OrigPhi,
+                                          Instruction *I, InstDesc &Prev);
 
   /// Returns a struct describing whether the instruction is either a
   ///   Select(ICmp(A, B), X, Y), or
@@ -169,11 +170,13 @@ class RecurrenceDescriptor {
   // TODO: Support non-monotonic variable. FindLast does not need be restricted
   // to increasing loop induction variables.
   LLVM_ABI static InstDesc isFindLastIVPattern(Loop *TheLoop, PHINode *OrigPhi,
-                                      Instruction *I, ScalarEvolution &SE);
+                                               Instruction *I,
+                                               ScalarEvolution &SE);
 
   /// Returns a struct describing if the instruction is a
   /// Select(FCmp(X, Y), (Z = X op PHINode), PHINode) instruction pattern.
-  LLVM_ABI static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
+  LLVM_ABI static InstDesc isConditionalRdxPattern(RecurKind Kind,
+                                                   Instruction *I);
 
   /// Returns the opcode corresponding to the RecurrenceKind.
   LLVM_ABI static unsigned getOpcode(RecurKind Kind);
@@ -208,7 +211,7 @@ class RecurrenceDescriptor {
   /// uses of the recurrence can be re-ordered if necessary and users need to
   /// check and perform the re-ordering.
   LLVM_ABI static bool isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
-                                     DominatorTree *DT);
+                                              DominatorTree *DT);
 
   RecurKind getRecurrenceKind() const { return Kind; }
 
@@ -294,7 +297,7 @@ class RecurrenceDescriptor {
   /// Attempts to find a chain of operations from Phi to LoopExitInst that can
   /// be treated as a set of reductions instructions for in-loop reductions.
   LLVM_ABI SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
-                                                    Loop *L) const;
+                                                             Loop *L) const;
 
   /// Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
   static bool isFMulAddIntrinsic(Instruction *I) {
@@ -372,8 +375,9 @@ class InductionDescriptor {
   /// Returns true if \p Phi is a floating point induction in the loop \p L.
   /// If \p Phi is an induction, the induction descriptor \p D will contain
   /// the data describing this induction.
-  LLVM_ABI static bool isFPInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
-                               InductionDescriptor &D);
+  LLVM_ABI static bool isFPInductionPHI(PHINode *Phi, const Loop *L,
+                                        ScalarEvolution *SE,
+                                        InductionDescriptor &D);
 
   /// Returns true if \p Phi is a loop \p L induction, in the context associated
   /// with the run-time predicate of PSE. If \p Assume is true, this can add
@@ -382,8 +386,9 @@ class InductionDescriptor {
   /// If \p Phi is an induction, \p D will contain the data describing this
   /// induction.
   LLVM_ABI static bool isInductionPHI(PHINode *Phi, const Loop *L,
-                             PredicatedScalarEvolution &PSE,
-                             InductionDescriptor &D, bool Assume = false);
+                                      PredicatedScalarEvolution &PSE,
+                                      InductionDescriptor &D,
+                                      bool Assume = false);
 
   /// Returns floating-point induction operator that does not allow
   /// reassociation (transforming the induction requires an override of normal
diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
index d635e341d26d2..9d15136e81d10 100644
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -9,11 +9,11 @@
 #ifndef LLVM_ANALYSIS_INLINEADVISOR_H
 #define LLVM_ANALYSIS_INLINEADVISOR_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/CGSCCPassManager.h"
 #include "llvm/Analysis/InlineCost.h"
 #include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 
 namespace llvm {
@@ -75,7 +75,8 @@ class InlineAdvisor;
 class InlineAdvice {
 public:
   LLVM_ABI InlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
-               OptimizationRemarkEmitter &ORE, bool IsInliningRecommended);
+                        OptimizationRemarkEmitter &ORE,
+                        bool IsInliningRecommended);
 
   InlineAdvice(InlineAdvice &&) = delete;
   InlineAdvice(const InlineAdvice &) = delete;
@@ -319,8 +320,8 @@ class InlineAdvisorAnalysis : public AnalysisInfoMixin<InlineAdvisorAnalysis> {
       return !PAC.preservedWhenStateless();
     }
     LLVM_ABI bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
-                   const ReplayInlinerSettings &ReplaySettings,
-                   InlineContext IC);
+                            const ReplayInlinerSettings &ReplaySettings,
+                            InlineContext IC);
     InlineAdvisor *getAdvisor() const { return Advisor.get(); }
 
   private:
@@ -342,8 +343,9 @@ class InlineAdvisorAnalysisPrinterPass
 
   LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
 
-  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
-                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
+  LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &InitialC,
+                                 CGSCCAnalysisManager &AM, LazyCallGraph &CG,
+                                 CGSCCUpdateResult &UR);
   static bool isRequired() { return true; }
 };
 
@@ -368,18 +370,18 @@ shouldInline(CallBase &CB, TargetTransformInfo &CalleeTTI,
              OptimizationRemarkEmitter &ORE, bool EnableDeferral = true);
 
 /// Emit ORE message.
-LLVM_ABI void emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
-                     const BasicBlock *Block, const Function &Callee,
-                     const Function &Caller, bool IsMandatory,
-                     function_ref<void(OptimizationRemark &)> ExtraContext = {},
-                     const char *PassName = nullptr);
+LLVM_ABI void
+emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
+                const BasicBlock *Block, const Function &Callee,
+                const Function &Caller, bool IsMandatory,
+                function_ref<void(OptimizationRemark &)> ExtraContext = {},
+                const char *PassName = nullptr);
 
 /// Emit ORE message based in cost (default heuristic).
-LLVM_ABI void emitInlinedIntoBasedOnCost(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
-                                const BasicBlock *Block, const Function &Callee,
-                                const Function &Caller, const InlineCost &IC,
-                                bool ForProfileContext = false,
-                                const char *PassName = nullptr);
+LLVM_ABI void emitInlinedIntoBasedOnCost(
+    OptimizationRemarkEmitter &ORE, DebugLoc DLoc, const BasicBlock *Block,
+    const Function &Callee, const Function &Caller, const InlineCost &IC,
+    bool ForProfileContext = false, const char *PassName = nullptr);
 
 /// Add location info to ORE message.
 LLVM_ABI void addLocationToRemarks(OptimizationRemark &Remark, DebugLoc DLoc);
diff --git a/llvm/include/llvm/Analysis/InlineCost.h b/llvm/include/llvm/Analysis/InlineCost.h
index 1dc00db103cf1..93b0a8d3cef04 100644
--- a/llvm/include/llvm/Analysis/InlineCost.h
+++ b/llvm/include/llvm/Analysis/InlineCost.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_INLINECOST_H
 #define LLVM_ANALYSIS_INLINECOST_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/STLFunctionalExtras.h"
 #include "llvm/Analysis/InlineModelFeatureMaps.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <climits>
 #include <optional>
@@ -240,7 +240,8 @@ struct InlineParams {
   std::optional<bool> AllowRecursiveCall = false;
 };
 
-LLVM_ABI std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);
+LLVM_ABI std::optional<int> getStringFnAttrAsInt(CallBase &CB,
+                                                 StringRef AttrKind);
 
 /// Generate the parameters to tune the inline cost analysis based only on the
 /// commandline options.
@@ -261,8 +262,8 @@ LLVM_ABI InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
 
 /// Return the cost associated with a callsite, including parameter passing
 /// and the call/return instruction.
-LLVM_ABI int getCallsiteCost(const TargetTransformInfo &TTI, const CallBase &Call,
-                    const DataLayout &DL);
+LLVM_ABI int getCallsiteCost(const TargetTransformInfo &TTI,
+                             const CallBase &Call, const DataLayout &DL);
 
 /// Get an InlineCost object representing the cost of inlining this
 /// callsite.
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
index 5e36946fd8db1..961d5091bf9f3 100644
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,8 +10,8 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/TensorSpec.h"
+#include "llvm/Support/Compiler.h"
 
 #include <array>
 #include <vector>
diff --git a/llvm/include/llvm/Analysis/InlineOrder.h b/llvm/include/llvm/Analysis/InlineOrder.h
index 7f2bfe8b32ac2..bc96d546fda7a 100644
--- a/llvm/include/llvm/Analysis/InlineOrder.h
+++ b/llvm/include/llvm/Analysis/InlineOrder.h
@@ -9,8 +9,8 @@
 #ifndef LLVM_ANALYSIS_INLINEORDER_H
 #define LLVM_ANALYSIS_INLINEORDER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/InlineCost.h"
+#include "llvm/Support/Compiler.h"
 #include <utility>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index 5df1eb47fa632..d60c5167a06a2 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -19,13 +19,13 @@
 #ifndef LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
 #define LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/TargetFolder.h"
 #include "llvm/IR/CmpPredicate.h"
 #include "llvm/IR/IRBuilderFolder.h"
 #include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class Constant;
diff --git a/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h b/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
index bafffaaa73dc5..16ac8d1f185f2 100644
--- a/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
+++ b/llvm/include/llvm/Analysis/InstructionPrecedenceTracking.h
@@ -20,8 +20,8 @@
 #ifndef LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
 #define LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -72,7 +72,8 @@ class InstructionPrecedenceTracking {
   /// Notifies this tracking that we are going to insert a new instruction \p
   /// Inst to the basic block \p BB. It makes all necessary updates to internal
   /// caches to keep them consistent.
-  LLVM_ABI void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);
+  LLVM_ABI void insertInstructionTo(const Instruction *Inst,
+                                    const BasicBlock *BB);
 
   /// Notifies this tracking that we are going to remove the instruction \p Inst
   /// It makes all necessary updates to internal caches to keep them consistent.
@@ -94,7 +95,8 @@ class InstructionPrecedenceTracking {
 /// is reached, then we need to make sure that there is no implicit control flow
 /// instruction (ICFI) preceding it. For example, this check is required if we
 /// perform PRE moving non-speculable instruction to other place.
-class LLVM_ABI ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
+class LLVM_ABI ImplicitControlFlowTracking
+    : public InstructionPrecedenceTracking {
 public:
   /// Returns the topmost instruction with implicit control flow from the given
   /// basic block. Returns nullptr if there is no such instructions in the block.
diff --git a/llvm/include/llvm/Analysis/InstructionSimplify.h b/llvm/include/llvm/Analysis/InstructionSimplify.h
index 2cae0bb2a75f0..4d3e9aad6686c 100644
--- a/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -31,9 +31,9 @@
 #ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 #define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/SimplifyQuery.h"
 #include "llvm/IR/FPEnv.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -60,33 +60,35 @@ class Value;
 
 /// Given operands for an Add, fold the result or return null.
 LLVM_ABI Value *simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
-                       const SimplifyQuery &Q);
+                                const SimplifyQuery &Q);
 
 /// Given operands for a Sub, fold the result or return null.
 LLVM_ABI Value *simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
-                       const SimplifyQuery &Q);
+                                const SimplifyQuery &Q);
 
 /// Given operands for a Mul, fold the result or return null.
 LLVM_ABI Value *simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
-                       const SimplifyQuery &Q);
+                                const SimplifyQuery &Q);
 
 /// Given operands for an SDiv, fold the result or return null.
 LLVM_ABI Value *simplifySDivInst(Value *LHS, Value *RHS, bool IsExact,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for a UDiv, fold the result or return null.
 LLVM_ABI Value *simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for an SRem, fold the result or return null.
-LLVM_ABI Value *simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifySRemInst(Value *LHS, Value *RHS,
+                                 const SimplifyQuery &Q);
 
 /// Given operands for a URem, fold the result or return null.
-LLVM_ABI Value *simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyURemInst(Value *LHS, Value *RHS,
+                                 const SimplifyQuery &Q);
 
 /// Given operand for an FNeg, fold the result or return null.
-LLVM_ABI Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);
-
+LLVM_ABI Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
+                                 const SimplifyQuery &Q);
 
 /// Given operands for an FAdd, fold the result or return null.
 LLVM_ABI Value *
@@ -113,10 +115,11 @@ simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
 /// null. In contrast to simplifyFMulInst, this function will not perform
 /// simplifications whose unrounded results differ when rounded to the argument
 /// type.
-LLVM_ABI Value *simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
-                       const SimplifyQuery &Q,
-                       fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
-                       RoundingMode Rounding = RoundingMode::NearestTiesToEven);
+LLVM_ABI Value *
+simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
+                const SimplifyQuery &Q,
+                fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                RoundingMode Rounding = RoundingMode::NearestTiesToEven);
 
 /// Given operands for an FDiv, fold the result or return null.
 LLVM_ABI Value *
@@ -134,15 +137,15 @@ simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
 
 /// Given operands for a Shl, fold the result or return null.
 LLVM_ABI Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
-                       const SimplifyQuery &Q);
+                                const SimplifyQuery &Q);
 
 /// Given operands for a LShr, fold the result or return null.
 LLVM_ABI Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for a AShr, fold the result or return nulll.
 LLVM_ABI Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for an And, fold the result or return null.
 LLVM_ABI Value *simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
@@ -155,76 +158,81 @@ LLVM_ABI Value *simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
 
 /// Given operands for an ICmpInst, fold the result or return null.
 LLVM_ABI Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for an FCmpInst, fold the result or return null.
 LLVM_ABI Value *simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
-                        FastMathFlags FMF, const SimplifyQuery &Q);
+                                 FastMathFlags FMF, const SimplifyQuery &Q);
 
 /// Given operands for a SelectInst, fold the result or return null.
 LLVM_ABI Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
-                          const SimplifyQuery &Q);
+                                   const SimplifyQuery &Q);
 
 /// Given operands for a GetElementPtrInst, fold the result or return null.
-LLVM_ABI Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
-                       GEPNoWrapFlags NW, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
+                                ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
+                                const SimplifyQuery &Q);
 
 /// Given operands for an InsertValueInst, fold the result or return null.
-LLVM_ABI Value *simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
-                               const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyInsertValueInst(Value *Agg, Value *Val,
+                                        ArrayRef<unsigned> Idxs,
+                                        const SimplifyQuery &Q);
 
 /// Given operands for an InsertElement, fold the result or return null.
 LLVM_ABI Value *simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
-                                 const SimplifyQuery &Q);
+                                          const SimplifyQuery &Q);
 
 /// Given operands for an ExtractValueInst, fold the result or return null.
 LLVM_ABI Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
-                                const SimplifyQuery &Q);
+                                         const SimplifyQuery &Q);
 
 /// Given operands for an ExtractElementInst, fold the result or return null.
 LLVM_ABI Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
-                                  const SimplifyQuery &Q);
+                                           const SimplifyQuery &Q);
 
 /// Given operands for a CastInst, fold the result or return null.
 LLVM_ABI Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
-                        const SimplifyQuery &Q);
+                                 const SimplifyQuery &Q);
 
 /// Given operands for a BinaryIntrinsic, fold the result or return null.
-LLVM_ABI Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0,
-                               Value *Op1, const SimplifyQuery &Q,
-                               const CallBase *Call);
+LLVM_ABI Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
+                                        Value *Op0, Value *Op1,
+                                        const SimplifyQuery &Q,
+                                        const CallBase *Call);
 
 /// Given operands for a ShuffleVectorInst, fold the result or return null.
 /// See class ShuffleVectorInst for a description of the mask representation.
-LLVM_ABI Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
-                                 Type *RetTy, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
+                                          ArrayRef<int> Mask, Type *RetTy,
+                                          const SimplifyQuery &Q);
 
 //=== Helper functions for higher up the class hierarchy.
 
 /// Given operands for a CmpInst, fold the result or return null.
 LLVM_ABI Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
-                       const SimplifyQuery &Q);
+                                const SimplifyQuery &Q);
 
 /// Given operand for a UnaryOperator, fold the result or return null.
-LLVM_ABI Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyUnOp(unsigned Opcode, Value *Op,
+                             const SimplifyQuery &Q);
 
 /// Given operand for a UnaryOperator, fold the result or return null.
 /// Try to use FastMathFlags when folding the result.
 LLVM_ABI Value *simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
-                    const SimplifyQuery &Q);
+                             const SimplifyQuery &Q);
 
 /// Given operands for a BinaryOperator, fold the result or return null.
 LLVM_ABI Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
-                     const SimplifyQuery &Q);
+                              const SimplifyQuery &Q);
 
 /// Given operands for a BinaryOperator, fold the result or return null.
 /// Try to use FastMathFlags when folding the result.
-LLVM_ABI Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
-                     const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+                              FastMathFlags FMF, const SimplifyQuery &Q);
 
 /// Given a callsite, callee, and arguments, fold the result or return null.
-LLVM_ABI Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
-                    const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyCall(CallBase *Call, Value *Callee,
+                             ArrayRef<Value *> Args, const SimplifyQuery &Q);
 
 /// Given a constrained FP intrinsic call, tries to compute its simplified
 /// version. Returns a simplified result or null.
@@ -233,7 +241,8 @@ LLVM_ABI Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Ar
 /// simplification succeeds that the intrinsic is side effect free. As a result,
 /// successful simplification can be used to delete the intrinsic not just
 /// replace its result.
-LLVM_ABI Value *simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyConstrainedFPCall(CallBase *Call,
+                                          const SimplifyQuery &Q);
 
 /// Given an operand for a Freeze, see if we can fold the result.
 /// If not, this returns null.
@@ -241,7 +250,8 @@ LLVM_ABI Value *simplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
 
 /// Given a load instruction and its pointer operand, fold the result or return
 /// null.
-LLVM_ABI Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp,
+                                 const SimplifyQuery &Q);
 
 /// See if we can compute a simplified version of this instruction. If not,
 /// return null.
@@ -249,9 +259,9 @@ LLVM_ABI Value *simplifyInstruction(Instruction *I, const SimplifyQuery &Q);
 
 /// Like \p simplifyInstruction but the operands of \p I are replaced with
 /// \p NewOps. Returns a simplified value, or null if none was found.
-LLVM_ABI Value *
-simplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
-                                const SimplifyQuery &Q);
+LLVM_ABI Value *simplifyInstructionWithOperands(Instruction *I,
+                                                ArrayRef<Value *> NewOps,
+                                                const SimplifyQuery &Q);
 
 /// See if V simplifies when its operand Op is replaced with RepOp. If not,
 /// return null.
@@ -289,7 +299,7 @@ template <class T, class... TArgs>
 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
                                          Function &);
 LLVM_ABI const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
-                                         const DataLayout &);
+                                                  const DataLayout &);
 } // end namespace llvm
 
 #endif
diff --git a/llvm/include/llvm/Analysis/InteractiveModelRunner.h b/llvm/include/llvm/Analysis/InteractiveModelRunner.h
index e431f653612eb..66473ae64ad23 100644
--- a/llvm/include/llvm/Analysis/InteractiveModelRunner.h
+++ b/llvm/include/llvm/Analysis/InteractiveModelRunner.h
@@ -10,10 +10,10 @@
 #ifndef LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
 #define LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Analysis/Utils/TrainingLogger.h"
+#include "llvm/Support/Compiler.h"
 #include <system_error>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
index bb4b260382317..31fb1856e0f28 100644
--- a/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
+++ b/llvm/include/llvm/Analysis/LastRunTrackingAnalysis.h
@@ -30,9 +30,9 @@
 #ifndef LLVM_ANALYSIS_LASTRUNTRACKINGANALYSIS_H
 #define LLVM_ANALYSIS_LASTRUNTRACKINGANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 #include <functional>
 
 namespace llvm {
@@ -82,7 +82,8 @@ class LastRunTrackingInfo {
 
 private:
   LLVM_ABI bool shouldSkipImpl(PassID ID, OptionPtr Ptr) const;
-  LLVM_ABI void updateImpl(PassID ID, bool Changed, CompatibilityCheckFn CheckFn);
+  LLVM_ABI void updateImpl(PassID ID, bool Changed,
+                           CompatibilityCheckFn CheckFn);
 
   DenseMap<PassID, CompatibilityCheckFn> TrackedPasses;
 };
diff --git a/llvm/include/llvm/Analysis/LazyCallGraph.h b/llvm/include/llvm/Analysis/LazyCallGraph.h
index fa29d42064f91..80ee72b72836d 100644
--- a/llvm/include/llvm/Analysis/LazyCallGraph.h
+++ b/llvm/include/llvm/Analysis/LazyCallGraph.h
@@ -34,7 +34,6 @@
 #ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
 #define LLVM_ANALYSIS_LAZYCALLGRAPH_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/Any.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
@@ -47,6 +46,7 @@
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <iterator>
@@ -715,7 +715,7 @@ class LazyCallGraph {
     /// Note that if SourceN and TargetN are in separate SCCs, the simpler
     /// routine `switchTrivialInternalEdgeToRef` should be used instead.
     LLVM_ABI iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
-                                                     Node &TargetN);
+                                                              Node &TargetN);
 
     /// Make an existing outgoing ref edge into a call edge.
     ///
@@ -749,7 +749,8 @@ class LazyCallGraph {
     /// There must be an existing path from the \p SourceN to the \p TargetN.
     /// This operation is inexpensive and does not change the set of SCCs and
     /// RefSCCs in the graph.
-    LLVM_ABI void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);
+    LLVM_ABI void insertOutgoingEdge(Node &SourceN, Node &TargetN,
+                                     Edge::Kind EK);
 
     /// Insert an edge whose source is in a descendant RefSCC and target is in
     /// this RefSCC.
@@ -777,7 +778,7 @@ class LazyCallGraph {
     /// caller and callee are very nearby in the graph. See comments in the
     /// implementation for details, but that use case might impact users.
     LLVM_ABI SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
-                                                   Node &TargetN);
+                                                            Node &TargetN);
 
     /// Remove an edge whose source is in this RefSCC and target is *not*.
     ///
@@ -936,7 +937,7 @@ class LazyCallGraph {
   /// No function definitions are scanned until their nodes in the graph are
   /// requested during traversal.
   LLVM_ABI LazyCallGraph(Module &M,
-                function_ref<TargetLibraryInfo &(Function &)> GetTLI);
+                         function_ref<TargetLibraryInfo &(Function &)> GetTLI);
 
   LLVM_ABI LazyCallGraph(LazyCallGraph &&G);
   LLVM_ABI LazyCallGraph &operator=(LazyCallGraph &&RHS);
@@ -947,7 +948,7 @@ class LazyCallGraph {
 #endif
 
   LLVM_ABI bool invalidate(Module &, const PreservedAnalyses &PA,
-                  ModuleAnalysisManager::Invalidator &);
+                           ModuleAnalysisManager::Invalidator &);
 
   EdgeSequence::iterator begin() { return EntryEdges.begin(); }
   EdgeSequence::iterator end() { return EntryEdges.end(); }
@@ -1078,7 +1079,8 @@ class LazyCallGraph {
   /// The new function may also reference the original function.
   /// It may end up in a parent SCC in the case that the original function's
   /// edge to the new function is a ref edge, and the edge back is a call edge.
-  LLVM_ABI void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
+  LLVM_ABI void addSplitFunction(Function &OriginalFunction,
+                                 Function &NewFunction);
 
   /// Add new ref-recursive functions split/outlined from an existing function.
   ///
@@ -1088,8 +1090,9 @@ class LazyCallGraph {
   ///
   /// The original function must reference (not call) all new functions.
   /// All new functions must reference (not call) each other.
-  LLVM_ABI void addSplitRefRecursiveFunctions(Function &OriginalFunction,
-                                     ArrayRef<Function *> NewFunctions);
+  LLVM_ABI void
+  addSplitRefRecursiveFunctions(Function &OriginalFunction,
+                                ArrayRef<Function *> NewFunctions);
 
   ///@}
 
@@ -1108,8 +1111,8 @@ class LazyCallGraph {
   ///
   /// For each defined function, calls \p Callback with that function.
   LLVM_ABI static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
-                              SmallPtrSetImpl<Constant *> &Visited,
-                              function_ref<void(Function &)> Callback);
+                                       SmallPtrSetImpl<Constant *> &Visited,
+                                       function_ref<void(Function &)> Callback);
 
   ///@}
 
diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index c44cc9810881d..84564563de8e3 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -13,9 +13,9 @@
 #ifndef LLVM_ANALYSIS_LOADS_H
 #define LLVM_ANALYSIS_LOADS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -35,33 +35,30 @@ class TargetLibraryInfo;
 /// Return true if this is always a dereferenceable pointer. If the context
 /// instruction is specified perform context-sensitive analysis and return true
 /// if the pointer is dereferenceable at the specified instruction.
-LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
-                              const Instruction *CtxI = nullptr,
-                              AssumptionCache *AC = nullptr,
-                              const DominatorTree *DT = nullptr,
-                              const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty,
+                                       const DataLayout &DL,
+                                       const Instruction *CtxI = nullptr,
+                                       AssumptionCache *AC = nullptr,
+                                       const DominatorTree *DT = nullptr,
+                                       const TargetLibraryInfo *TLI = nullptr);
 
 /// Returns true if V is always a dereferenceable pointer with alignment
 /// greater or equal than requested. If the context instruction is specified
 /// performs context-sensitive analysis and returns true if the pointer is
 /// dereferenceable at the specified instruction.
-LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
-                                        Align Alignment, const DataLayout &DL,
-                                        const Instruction *CtxI = nullptr,
-                                        AssumptionCache *AC = nullptr,
-                                        const DominatorTree *DT = nullptr,
-                                        const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI bool isDereferenceableAndAlignedPointer(
+    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
+    const Instruction *CtxI = nullptr, AssumptionCache *AC = nullptr,
+    const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr);
 
 /// Returns true if V is always dereferenceable for Size byte with alignment
 /// greater or equal than requested. If the context instruction is specified
 /// performs context-sensitive analysis and returns true if the pointer is
 /// dereferenceable at the specified instruction.
-LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
-                                        const APInt &Size, const DataLayout &DL,
-                                        const Instruction *CtxI = nullptr,
-                                        AssumptionCache *AC = nullptr,
-                                        const DominatorTree *DT = nullptr,
-                                        const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI bool isDereferenceableAndAlignedPointer(
+    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
+    const Instruction *CtxI = nullptr, AssumptionCache *AC = nullptr,
+    const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr);
 
 /// Return true if we know that executing a load from this value cannot trap.
 ///
@@ -71,11 +68,10 @@ LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment
 /// If it is not obviously safe to load from the specified pointer, we do a
 /// quick local scan of the basic block containing ScanFrom, to determine if
 /// the address is already accessed.
-LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
-                                 const DataLayout &DL, Instruction *ScanFrom,
-                                 AssumptionCache *AC = nullptr,
-                                 const DominatorTree *DT = nullptr,
-                                 const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI bool isSafeToLoadUnconditionally(
+    Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
+    Instruction *ScanFrom, AssumptionCache *AC = nullptr,
+    const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr);
 
 /// Return true if we can prove that the given load (which is assumed to be
 /// within the specified loop) would access only dereferenceable memory, and
@@ -103,11 +99,10 @@ LLVM_ABI bool isDereferenceableReadOnlyLoop(
 /// If it is not obviously safe to load from the specified pointer, we do a
 /// quick local scan of the basic block containing ScanFrom, to determine if
 /// the address is already accessed.
-LLVM_ABI bool isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
-                                 const DataLayout &DL, Instruction *ScanFrom,
-                                 AssumptionCache *AC = nullptr,
-                                 const DominatorTree *DT = nullptr,
-                                 const TargetLibraryInfo *TLI = nullptr);
+LLVM_ABI bool isSafeToLoadUnconditionally(
+    Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
+    Instruction *ScanFrom, AssumptionCache *AC = nullptr,
+    const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr);
 
 /// Return true if speculation of the given load must be suppressed to avoid
 /// ordering or interfering with an active sanitizer.  If not suppressed,
@@ -144,20 +139,18 @@ LLVM_ABI extern cl::opt<unsigned> DefMaxInstsToScan;
 /// location in memory, as opposed to the value operand of a store.
 ///
 /// \returns The found value, or nullptr if no value is found.
-LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
-                                BasicBlock::iterator &ScanFrom,
-                                unsigned MaxInstsToScan = DefMaxInstsToScan,
-                                BatchAAResults *AA = nullptr,
-                                bool *IsLoadCSE = nullptr,
-                                unsigned *NumScanedInst = nullptr);
+LLVM_ABI Value *FindAvailableLoadedValue(
+    LoadInst *Load, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom,
+    unsigned MaxInstsToScan = DefMaxInstsToScan, BatchAAResults *AA = nullptr,
+    bool *IsLoadCSE = nullptr, unsigned *NumScanedInst = nullptr);
 
 /// This overload provides a more efficient implementation of
 /// FindAvailableLoadedValue() for the case where we are not interested in
 /// finding the closest clobbering instruction if no available load is found.
 /// This overload cannot be used to scan across multiple blocks.
-LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
-                                bool *IsLoadCSE,
-                                unsigned MaxInstsToScan = DefMaxInstsToScan);
+LLVM_ABI Value *
+FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA, bool *IsLoadCSE,
+                         unsigned MaxInstsToScan = DefMaxInstsToScan);
 
 /// Scan backwards to see if we have the value of the given pointer available
 /// locally within a small number of instructions.
@@ -182,11 +175,10 @@ LLVM_ABI Value *FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
 /// location in memory, as opposed to the value operand of a store.
 ///
 /// \returns The found value, or nullptr if no value is found.
-LLVM_ABI Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy,
-                                 bool AtLeastAtomic, BasicBlock *ScanBB,
-                                 BasicBlock::iterator &ScanFrom,
-                                 unsigned MaxInstsToScan, BatchAAResults *AA,
-                                 bool *IsLoadCSE, unsigned *NumScanedInst);
+LLVM_ABI Value *findAvailablePtrLoadStore(
+    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
+    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
+    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst);
 
 /// Returns true if a pointer value \p From can be replaced with another pointer
 /// value \To if they are deemed equal through some means (e.g. information from
@@ -196,9 +188,9 @@ LLVM_ABI Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *Acces
 /// Additionally it also allows replacement of pointers when both pointers have
 /// the same underlying object.
 LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To,
-                               const DataLayout &DL);
+                                        const DataLayout &DL);
 LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To,
-                                    const DataLayout &DL);
+                                             const DataLayout &DL);
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 78515e3701461..2ed770ebeecc9 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -14,10 +14,10 @@
 #ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
 #define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/EquivalenceClasses.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 #include <variant>
 
@@ -163,7 +163,8 @@ class MemoryDepChecker {
     Instruction *getDestination(const MemoryDepChecker &DepChecker) const;
 
     /// Dependence types that don't prevent vectorization.
-    LLVM_ABI static VectorizationSafetyStatus isSafeForVectorization(DepType Type);
+    LLVM_ABI static VectorizationSafetyStatus
+    isSafeForVectorization(DepType Type);
 
     /// Lexically forward dependence.
     LLVM_ABI bool isForward() const;
@@ -176,7 +177,7 @@ class MemoryDepChecker {
     /// Print the dependence.  \p Instr is used to map the instruction
     /// indices to instructions.
     LLVM_ABI void print(raw_ostream &OS, unsigned Depth,
-               const SmallVectorImpl<Instruction *> &Instrs) const;
+                        const SmallVectorImpl<Instruction *> &Instrs) const;
   };
 
   MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L,
@@ -197,7 +198,7 @@ class MemoryDepChecker {
   ///
   /// Only checks sets with elements in \p CheckDeps.
   LLVM_ABI bool areDepsSafe(const DepCandidates &AccessSets,
-                   const MemAccessInfoList &CheckDeps);
+                            const MemAccessInfoList &CheckDeps);
 
   /// No memory dependence was encountered that would inhibit
   /// vectorization.
@@ -266,8 +267,8 @@ class MemoryDepChecker {
   }
 
   /// Find the set of instructions that read or write via \p Ptr.
-  LLVM_ABI SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
-                                                         bool isWrite) const;
+  LLVM_ABI SmallVector<Instruction *, 4>
+  getInstructionsForAccess(Value *Ptr, bool isWrite) const;
 
   /// Return the program order indices for the access location (Ptr, IsWrite).
   /// Returns an empty ArrayRef if there are no accesses for the location.
@@ -434,16 +435,17 @@ struct RuntimeCheckingPtrGroup {
   /// Create a new pointer checking group containing a single
   /// pointer, with index \p Index in RtCheck.
   LLVM_ABI RuntimeCheckingPtrGroup(unsigned Index,
-                          const RuntimePointerChecking &RtCheck);
+                                   const RuntimePointerChecking &RtCheck);
 
   /// Tries to add the pointer recorded in RtCheck at index
   /// \p Index to this pointer checking group. We can only add a pointer
   /// to a checking group if we will still be able to get
   /// the upper and lower bounds of the check. Returns true in case
   /// of success, false otherwise.
-  LLVM_ABI bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck);
+  LLVM_ABI bool addPointer(unsigned Index,
+                           const RuntimePointerChecking &RtCheck);
   LLVM_ABI bool addPointer(unsigned Index, const SCEV *Start, const SCEV *End,
-                  unsigned AS, bool NeedsFreeze, ScalarEvolution &SE);
+                           unsigned AS, bool NeedsFreeze, ScalarEvolution &SE);
 
   /// The SCEV expression which represents the upper bound of all the
   /// pointers in this group.
@@ -530,9 +532,10 @@ class RuntimePointerChecking {
   /// according to the assumptions that we've made during the analysis.
   /// The method might also version the pointer stride according to \p Strides,
   /// and add new predicates to \p PSE.
-  LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy,
-              bool WritePtr, unsigned DepSetId, unsigned ASId,
-              PredicatedScalarEvolution &PSE, bool NeedsFreeze);
+  LLVM_ABI void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
+                       Type *AccessTy, bool WritePtr, unsigned DepSetId,
+                       unsigned ASId, PredicatedScalarEvolution &PSE,
+                       bool NeedsFreeze);
 
   /// No run-time memory checking is necessary.
   bool empty() const { return Pointers.empty(); }
@@ -540,7 +543,7 @@ class RuntimePointerChecking {
   /// Generate the checks and store it.  This also performs the grouping
   /// of pointers to reduce the number of memchecks necessary.
   LLVM_ABI void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
-                      bool UseDependencies);
+                               bool UseDependencies);
 
   /// Returns the checks that generateChecks created. They can be used to ensure
   /// no read/write accesses overlap across all loop iterations.
@@ -562,7 +565,7 @@ class RuntimePointerChecking {
   /// Decide if we need to add a check between two groups of pointers,
   /// according to needsChecking.
   LLVM_ABI bool needsChecking(const RuntimeCheckingPtrGroup &M,
-                     const RuntimeCheckingPtrGroup &N) const;
+                              const RuntimeCheckingPtrGroup &N) const;
 
   /// Returns the number of run-time checks required according to
   /// needsChecking.
@@ -573,8 +576,8 @@ class RuntimePointerChecking {
 
   /// Print \p Checks.
   LLVM_ABI void printChecks(raw_ostream &OS,
-                   const SmallVectorImpl<RuntimePointerCheck> &Checks,
-                   unsigned Depth = 0) const;
+                            const SmallVectorImpl<RuntimePointerCheck> &Checks,
+                            unsigned Depth = 0) const;
 
   /// This flag indicates if we need to add the runtime check.
   bool Need = false;
@@ -661,9 +664,10 @@ class RuntimePointerChecking {
 /// PSE must be emitted in order for the results of this analysis to be valid.
 class LoopAccessInfo {
 public:
-  LLVM_ABI LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI,
-                 const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT,
-                 LoopInfo *LI);
+  LLVM_ABI LoopAccessInfo(Loop *L, ScalarEvolution *SE,
+                          const TargetTransformInfo *TTI,
+                          const TargetLibraryInfo *TLI, AAResults *AA,
+                          DominatorTree *DT, LoopInfo *LI);
 
   /// Return true we can analyze the memory accesses in the loop and there are
   /// no memory dependence cycles. Note that for dependences between loads &
@@ -691,7 +695,7 @@ class LoopAccessInfo {
   /// Return true if the block BB needs to be predicated in order for the loop
   /// to be vectorized.
   LLVM_ABI static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
-                                    DominatorTree *DT);
+                                             DominatorTree *DT);
 
   /// Returns true if value \p V is loop invariant.
   LLVM_ABI bool isInvariant(Value *V) const;
@@ -846,7 +850,8 @@ replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
 LLVM_ABI std::optional<int64_t>
 getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
              const Loop *Lp,
-             const DenseMap<Value *, const SCEV *> &StridesMap = DenseMap<Value *, const SCEV *>(),
+             const DenseMap<Value *, const SCEV *> &StridesMap =
+                 DenseMap<Value *, const SCEV *>(),
              bool Assume = false, bool ShouldCheckWrap = true);
 
 /// Returns the distance between the pointers \p PtrA and \p PtrB iff they are
@@ -869,14 +874,14 @@ getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB,
 /// sorted indices in \p SortedIndices as a[i+0], a[i+1], a[i+4], a[i+7] and
 /// saves the mask for actual memory accesses in program order in
 /// \p SortedIndices as <1,2,0,3>
-LLVM_ABI bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, const DataLayout &DL,
-                     ScalarEvolution &SE,
-                     SmallVectorImpl<unsigned> &SortedIndices);
+LLVM_ABI bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
+                              const DataLayout &DL, ScalarEvolution &SE,
+                              SmallVectorImpl<unsigned> &SortedIndices);
 
 /// Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
 LLVM_ABI bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
-                         ScalarEvolution &SE, bool CheckType = true);
+                                  ScalarEvolution &SE, bool CheckType = true);
 
 /// Calculate Start and End points of memory access.
 /// Let's assume A is the first access and B is a memory access on N-th loop
@@ -920,7 +925,7 @@ class LoopAccessInfoManager {
   LLVM_ABI void clear();
 
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 };
 
 /// This analysis provides dependence information for the memory
diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
index 3d2a5a9d5a7c4..a825ada05df11 100644
--- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h
@@ -29,8 +29,8 @@
 #ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
 #define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -67,7 +67,8 @@ struct LoopStandardAnalysisResults {
 /// Extern template declaration for the analysis set for this IR unit.
 extern template class LLVM_TEMPLATE_ABI AllAnalysesOn<Loop>;
 
-extern template class LLVM_TEMPLATE_ABI AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+extern template class LLVM_TEMPLATE_ABI
+    AnalysisManager<Loop, LoopStandardAnalysisResults &>;
 /// The loop analysis manager.
 ///
 /// See the documentation for the AnalysisManager template for detail
@@ -133,7 +134,7 @@ template <> class LoopAnalysisManagerFunctionProxy::Result {
   /// clear all of the cached analysis results that are keyed on the \c
   /// LoopInfo for this function.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
 private:
   LoopAnalysisManager *InnerAM;
@@ -151,8 +152,8 @@ LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
 // template.
 extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
 
-extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
-                                                LoopStandardAnalysisResults &>;
+extern template class LLVM_TEMPLATE_ABI OuterAnalysisManagerProxy<
+    FunctionAnalysisManager, Loop, LoopStandardAnalysisResults &>;
 /// A proxy from a \c FunctionAnalysisManager to a \c Loop.
 typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
                                   LoopStandardAnalysisResults &>
diff --git a/llvm/include/llvm/Analysis/LoopInfo.h b/llvm/include/llvm/Analysis/LoopInfo.h
index 4df4d9595c273..072ddad546bf3 100644
--- a/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/llvm/include/llvm/Analysis/LoopInfo.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_LOOPINFO_H
 #define LLVM_ANALYSIS_LOOPINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/GraphTraits.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/GenericLoopInfo.h"
 #include <optional>
 #include <utility>
@@ -415,7 +415,8 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
 
 public:
   LoopInfo() = default;
-  LLVM_ABI explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);
+  LLVM_ABI explicit LoopInfo(
+      const DominatorTreeBase<BasicBlock, false> &DomTree);
 
   LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
   LoopInfo &operator=(LoopInfo &&RHS) {
@@ -425,7 +426,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
 
   /// Handle invalidation explicitly.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &);
+                           FunctionAnalysisManager::Invalidator &);
 
   // Most of the public interface is provided via LoopInfoBase.
 
@@ -533,8 +534,9 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
   // to be inserted at the beginning of the block.  Note that V is assumed to
   // dominate ExitBB, and ExitBB must be the exit block of some loop.  The
   // IR is assumed to be in LCSSA form before the planned insertion.
-  LLVM_ABI bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
-                                         const BasicBlock *ExitBB) const;
+  LLVM_ABI bool
+  wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
+                                    const BasicBlock *ExitBB) const;
 };
 
 /// Enable verification of loop info.
@@ -615,7 +617,8 @@ class LLVM_ABI LoopInfoWrapperPass : public FunctionPass {
 };
 
 /// Function to print a loop's contents as LLVM's text IR assembly.
-LLVM_ABI void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
+LLVM_ABI void printLoop(Loop &L, raw_ostream &OS,
+                        const std::string &Banner = "");
 
 /// Find and return the loop attribute node for the attribute @p Name in
 /// @p LoopID. Return nullptr if there is no such attribute.
@@ -629,26 +632,27 @@ LLVM_ABI MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);
 LLVM_ABI MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);
 
 LLVM_ABI std::optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
-                                                 StringRef Name);
+                                                          StringRef Name);
 
 /// Returns true if Name is applied to TheLoop and enabled.
 LLVM_ABI bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
 
 /// Find named metadata for a loop with an integer value.
 LLVM_ABI std::optional<int> getOptionalIntLoopAttribute(const Loop *TheLoop,
-                                               StringRef Name);
+                                                        StringRef Name);
 
 /// Find named metadata for a loop with an integer value. Return \p Default if
 /// not set.
-LLVM_ABI int getIntLoopAttribute(const Loop *TheLoop, StringRef Name, int Default = 0);
+LLVM_ABI int getIntLoopAttribute(const Loop *TheLoop, StringRef Name,
+                                 int Default = 0);
 
 /// Find string metadata for loop
 ///
 /// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
 /// operand or null otherwise.  If the string metadata is not found return
 /// Optional's not-a-value.
-LLVM_ABI std::optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
-                                                           StringRef Name);
+LLVM_ABI std::optional<const MDOperand *>
+findStringMetadataForLoop(const Loop *TheLoop, StringRef Name);
 
 /// Find the convergence heart of the loop.
 LLVM_ABI CallBase *getLoopConvergenceHeart(const Loop *TheLoop);
diff --git a/llvm/include/llvm/Analysis/LoopNestAnalysis.h b/llvm/include/llvm/Analysis/LoopNestAnalysis.h
index be14aadd626ef..6018b34c0244f 100644
--- a/llvm/include/llvm/Analysis/LoopNestAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopNestAnalysis.h
@@ -14,10 +14,10 @@
 #ifndef LLVM_ANALYSIS_LOOPNESTANALYSIS_H
 #define LLVM_ANALYSIS_LOOPNESTANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -206,7 +206,8 @@ class LoopNestAnalysis : public AnalysisInfoMixin<LoopNestAnalysis> {
 
 public:
   using Result = LoopNest;
-  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+  LLVM_ABI Result run(Loop &L, LoopAnalysisManager &AM,
+                      LoopStandardAnalysisResults &AR);
 };
 
 /// Printer pass for the \c LoopNest results.
@@ -217,7 +218,8 @@ class LoopNestPrinterPass : public PassInfoMixin<LoopNestPrinterPass> {
   explicit LoopNestPrinterPass(raw_ostream &OS) : OS(OS) {}
 
   LLVM_ABI PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
+                                 LoopStandardAnalysisResults &AR,
+                                 LPMUpdater &U);
 
   static bool isRequired() { return true; }
 };
diff --git a/llvm/include/llvm/Analysis/LoopPass.h b/llvm/include/llvm/Analysis/LoopPass.h
index 20fd3b10bd442..e0301fecff084 100644
--- a/llvm/include/llvm/Analysis/LoopPass.h
+++ b/llvm/include/llvm/Analysis/LoopPass.h
@@ -14,9 +14,9 @@
 #ifndef LLVM_ANALYSIS_LOOPPASS_H
 #define LLVM_ANALYSIS_LOOPPASS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/LegacyPassManagers.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <deque>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 1199ae650338c..0f0605e4f01b2 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -14,7 +14,6 @@
 #ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
 #define LLVM_ANALYSIS_MEMORYBUILTINS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -22,6 +21,7 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Compiler.h"
 #include <cstdint>
 #include <optional>
 #include <utility>
@@ -54,8 +54,9 @@ class Value;
 /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
 /// like).
 LLVM_ABI bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI);
-LLVM_ABI bool isAllocationFn(const Value *V,
-                    function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
+LLVM_ABI bool
+isAllocationFn(const Value *V,
+               function_ref<const TargetLibraryInfo &(Function &)> GetTLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory via new.
@@ -63,7 +64,8 @@ LLVM_ABI bool isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory similar to malloc or calloc.
-LLVM_ABI bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);
+LLVM_ABI bool isMallocOrCallocLikeFn(const Value *V,
+                                     const TargetLibraryInfo *TLI);
 
 /// Tests if a value is a call or invoke to a library function that
 /// allocates memory (either malloc, calloc, or strdup like).
@@ -84,7 +86,8 @@ LLVM_ABI Value *getReallocatedOperand(const CallBase *CB);
 LLVM_ABI bool isLibFreeFunction(const Function *F, const LibFunc TLIFn);
 
 /// If this if a call to a free function, return the freed operand.
-LLVM_ABI Value *getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI);
+LLVM_ABI Value *getFreedOperand(const CallBase *CB,
+                                const TargetLibraryInfo *TLI);
 
 //===----------------------------------------------------------------------===//
 //  Properties of allocation functions
@@ -105,7 +108,8 @@ LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI);
 /// built-in knowledge based on fuction names/signatures or allocalign
 /// attributes. Note: the Value returned may not indicate a valid alignment, per
 /// the definition of the allocalign attribute.
-LLVM_ABI Value *getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI);
+LLVM_ABI Value *getAllocAlignment(const CallBase *V,
+                                  const TargetLibraryInfo *TLI);
 
 /// Return the size of the requested allocation. With a trivial mapper, this is
 /// similar to calling getObjectSize(..., Exact), but without looking through
@@ -122,14 +126,14 @@ LLVM_ABI std::optional<APInt> getAllocSize(
 /// fixed value, return said value in the requested type.  Otherwise, return
 /// nullptr.
 LLVM_ABI Constant *getInitialValueOfAllocation(const Value *V,
-                                      const TargetLibraryInfo *TLI,
-                                      Type *Ty);
+                                               const TargetLibraryInfo *TLI,
+                                               Type *Ty);
 
 /// If a function is part of an allocation family (e.g.
 /// malloc/realloc/calloc/free), return the identifier for its family
 /// of functions.
-LLVM_ABI std::optional<StringRef> getAllocationFamily(const Value *I,
-                                             const TargetLibraryInfo *TLI);
+LLVM_ABI std::optional<StringRef>
+getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI);
 
 //===----------------------------------------------------------------------===//
 //  Utility functions to compute size of objects.
@@ -173,15 +177,18 @@ struct ObjectSizeOpts {
 /// WARNING: The object size returned is the allocation size.  This does not
 /// imply dereferenceability at site of use since the object may be freeed in
 /// between.
-LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
-                   const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
+LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size,
+                            const DataLayout &DL, const TargetLibraryInfo *TLI,
+                            ObjectSizeOpts Opts = {});
 
 /// Try to turn a call to \@llvm.objectsize into an integer value of the given
 /// Type. Returns null on failure. If MustSucceed is true, this function will
 /// not return null, and may return conservative values governed by the second
 /// argument of the call to objectsize.
-LLVM_ABI Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
-                           const TargetLibraryInfo *TLI, bool MustSucceed);
+LLVM_ABI Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize,
+                                    const DataLayout &DL,
+                                    const TargetLibraryInfo *TLI,
+                                    bool MustSucceed);
 LLVM_ABI Value *lowerObjectSizeCall(
     IntrinsicInst *ObjectSize, const DataLayout &DL,
     const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
@@ -265,8 +272,10 @@ class ObjectSizeOffsetVisitor
   static OffsetSpan unknown() { return OffsetSpan(); }
 
 public:
-  LLVM_ABI ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
-                          LLVMContext &Context, ObjectSizeOpts Options = {});
+  LLVM_ABI ObjectSizeOffsetVisitor(const DataLayout &DL,
+                                   const TargetLibraryInfo *TLI,
+                                   LLVMContext &Context,
+                                   ObjectSizeOpts Options = {});
 
   LLVM_ABI SizeOffsetAPInt compute(Value *V);
 
@@ -346,8 +355,10 @@ class ObjectSizeOffsetEvaluator
   SizeOffsetValue compute_(Value *V);
 
 public:
-  LLVM_ABI ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
-                            LLVMContext &Context, ObjectSizeOpts EvalOpts = {});
+  LLVM_ABI ObjectSizeOffsetEvaluator(const DataLayout &DL,
+                                     const TargetLibraryInfo *TLI,
+                                     LLVMContext &Context,
+                                     ObjectSizeOpts EvalOpts = {});
 
   static SizeOffsetValue unknown() { return SizeOffsetValue(); }
 
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 1c6b158d01bc1..360d945939c39 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -15,9 +15,9 @@
 #ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
 #define LLVM_ANALYSIS_MEMORYLOCATION_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/IR/Metadata.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/TypeSize.h"
 
 #include <optional>
@@ -248,7 +248,8 @@ class MemoryLocation {
   static MemoryLocation get(const Instruction *Inst) {
     return *MemoryLocation::getOrNone(Inst);
   }
-  LLVM_ABI static std::optional<MemoryLocation> getOrNone(const Instruction *Inst);
+  LLVM_ABI static std::optional<MemoryLocation>
+  getOrNone(const Instruction *Inst);
 
   /// Return a location representing the source of a memory transfer.
   LLVM_ABI static MemoryLocation getForSource(const MemTransferInst *MTI);
@@ -258,12 +259,13 @@ class MemoryLocation {
   /// transfer.
   LLVM_ABI static MemoryLocation getForDest(const MemIntrinsic *MI);
   LLVM_ABI static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
-  LLVM_ABI static std::optional<MemoryLocation> getForDest(const CallBase *CI,
-                                                  const TargetLibraryInfo &TLI);
+  LLVM_ABI static std::optional<MemoryLocation>
+  getForDest(const CallBase *CI, const TargetLibraryInfo &TLI);
 
   /// Return a location representing a particular argument of a call.
-  LLVM_ABI static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
-                                       const TargetLibraryInfo *TLI);
+  LLVM_ABI static MemoryLocation getForArgument(const CallBase *Call,
+                                                unsigned ArgIdx,
+                                                const TargetLibraryInfo *TLI);
   static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                        const TargetLibraryInfo &TLI) {
     return getForArgument(Call, ArgIdx, &TLI);
diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
index 93bcc47454bb3..dd7bde318bb1a 100644
--- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h
+++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h
@@ -13,10 +13,10 @@
 #ifndef LLVM_ANALYSIS_MEMORYPROFILEINFO_H
 #define LLVM_ANALYSIS_MEMORYPROFILEINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Metadata.h"
 #include "llvm/ProfileData/MemProfCommon.h"
+#include "llvm/Support/Compiler.h"
 #include <map>
 
 namespace llvm {
@@ -24,16 +24,19 @@ namespace memprof {
 
 /// Return the allocation type for a given set of memory profile values.
 LLVM_ABI AllocationType getAllocType(uint64_t TotalLifetimeAccessDensity,
-                            uint64_t AllocCount, uint64_t TotalLifetime);
+                                     uint64_t AllocCount,
+                                     uint64_t TotalLifetime);
 
 /// Build callstack metadata from the provided list of call stack ids. Returns
 /// the resulting metadata node.
-LLVM_ABI MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack, LLVMContext &Ctx);
+LLVM_ABI MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
+                                        LLVMContext &Ctx);
 
 /// Build metadata from the provided list of full stack id and profiled size, to
 /// use when reporting of hinted sizes is enabled.
-LLVM_ABI MDNode *buildContextSizeMetadata(ArrayRef<ContextTotalSize> ContextSizeInfo,
-                                 LLVMContext &Ctx);
+LLVM_ABI MDNode *
+buildContextSizeMetadata(ArrayRef<ContextTotalSize> ContextSizeInfo,
+                         LLVMContext &Ctx);
 
 /// Returns the stack node from an MIB metadata node.
 LLVM_ABI MDNode *getMIBStackNode(const MDNode *MIB);
@@ -119,8 +122,9 @@ class CallStackTrie {
   /// matching via a debug location hash), expected to be in order from the
   /// allocation call down to the bottom of the call stack (i.e. callee to
   /// caller order).
-  LLVM_ABI void addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds,
-                    std::vector<ContextTotalSize> ContextSizeInfo = {});
+  LLVM_ABI void
+  addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds,
+               std::vector<ContextTotalSize> ContextSizeInfo = {});
 
   /// Add the call stack context along with its allocation type from the MIB
   /// metadata to the Trie.
@@ -139,7 +143,7 @@ class CallStackTrie {
   /// If hinted by reporting is enabled, a message is emitted with the given
   /// descriptor used to identify the category of single allocation type.
   LLVM_ABI void addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
-                                   StringRef Descriptor);
+                                            StringRef Descriptor);
 };
 
 /// Helper class to iterate through stack ids in both metadata (memprof MIB and
@@ -217,11 +221,14 @@ CallStack<NodeT, IteratorT>::beginAfterSharedPrefix(const CallStack &Other) {
 
 /// Specializations for iterating through IR metadata stack contexts.
 template <>
-LLVM_ABI CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
+LLVM_ABI
+CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
     const MDNode *N, bool End);
 template <>
-LLVM_ABI uint64_t CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
-template <> LLVM_ABI uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const;
+LLVM_ABI uint64_t
+CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
+template <>
+LLVM_ABI uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const;
 
 } // end namespace memprof
 } // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
index 889e4254266ff..cbb942f022244 100644
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -85,7 +85,6 @@
 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
 #define LLVM_ANALYSIS_MEMORYSSA_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -99,6 +98,7 @@
 #include "llvm/IR/Type.h"
 #include "llvm/IR/User.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <algorithm>
 #include <cassert>
 #include <cstddef>
@@ -771,7 +771,8 @@ class MemorySSA {
 
   /// Given two memory accesses in the same basic block, determine
   /// whether MemoryAccess \p A dominates MemoryAccess \p B.
-  LLVM_ABI bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
+  LLVM_ABI bool locallyDominates(const MemoryAccess *A,
+                                 const MemoryAccess *B) const;
 
   /// Given two memory accesses in potentially different blocks,
   /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
@@ -784,7 +785,8 @@ class MemorySSA {
   enum class VerificationLevel { Fast, Full };
   /// Verify that MemorySSA is self consistent (IE definitions dominate
   /// all uses, uses appear in the right places).  This is used by unit tests.
-  LLVM_ABI void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
+  LLVM_ABI void
+      verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;
 
   /// Used in various insertion functions to specify whether we are talking
   /// about the beginning or end of a block.
@@ -825,8 +827,10 @@ class MemorySSA {
   // machinsations.  They do not always leave the IR in a correct state, and
   // relies on the updater to fixup what it breaks, so it is not public.
 
-  LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
-  LLVM_ABI void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);
+  LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB,
+                       AccessList::iterator Where);
+  LLVM_ABI void moveTo(MemoryAccess *What, BasicBlock *BB,
+                       InsertionPlace Point);
 
   // Rename the dominator tree branch rooted at BB.
   void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
@@ -837,12 +841,13 @@ class MemorySSA {
   LLVM_ABI void removeFromLookups(MemoryAccess *);
   LLVM_ABI void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
   LLVM_ABI void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
-                               InsertionPlace);
+                                        InsertionPlace);
   LLVM_ABI void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
-                             AccessList::iterator);
-  LLVM_ABI MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
-                                      const MemoryUseOrDef *Template = nullptr,
-                                      bool CreationMustSucceed = true);
+                                      AccessList::iterator);
+  LLVM_ABI MemoryUseOrDef *
+  createDefinedAccess(Instruction *, MemoryAccess *,
+                      const MemoryUseOrDef *Template = nullptr,
+                      bool CreationMustSucceed = true);
 
 private:
   class ClobberWalkerBase;
@@ -869,8 +874,9 @@ class MemorySSA {
   MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
   void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
   LLVM_ABI void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
-                  SmallPtrSetImpl<BasicBlock *> &Visited,
-                  bool SkipVisited = false, bool RenameAllUses = false);
+                           SmallPtrSetImpl<BasicBlock *> &Visited,
+                           bool SkipVisited = false,
+                           bool RenameAllUses = false);
   AccessList *getOrCreateAccessList(const BasicBlock *);
   DefsList *getOrCreateDefsList(const BasicBlock *);
   void renumberBlock(const BasicBlock *) const;
@@ -920,8 +926,9 @@ class MemorySSAUtil {
   friend class MemorySSAWalker;
 
   // This function should not be used by new passes.
-  LLVM_ABI static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
-                                  AliasAnalysis &AA);
+  LLVM_ABI static bool defClobbersUseOrDef(MemoryDef *MD,
+                                           const MemoryUseOrDef *MU,
+                                           AliasAnalysis &AA);
 };
 
 /// An analysis that produces \c MemorySSA for a function.
@@ -943,7 +950,7 @@ class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
     std::unique_ptr<MemorySSA> MSSA;
 
     LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                    FunctionAnalysisManager::Invalidator &Inv);
+                             FunctionAnalysisManager::Invalidator &Inv);
   };
 
   LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
diff --git a/llvm/include/llvm/Analysis/MemorySSAUpdater.h b/llvm/include/llvm/Analysis/MemorySSAUpdater.h
index 31f61b0d45e95..96bf99922d848 100644
--- a/llvm/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/llvm/include/llvm/Analysis/MemorySSAUpdater.h
@@ -31,7 +31,6 @@
 #ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
 #define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -39,6 +38,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/IR/ValueMap.h"
 #include "llvm/Support/CFGDiff.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -92,28 +92,29 @@ class MemorySSAUpdater {
   /// following a CFG change that replaced multiple edges (switch) with a direct
   /// branch.
   LLVM_ABI void removeDuplicatePhiEdgesBetween(const BasicBlock *From,
-                                      const BasicBlock *To);
+                                               const BasicBlock *To);
   /// Update MemorySSA when inserting a unique backedge block for a loop.
-  LLVM_ABI void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
-                                                  BasicBlock *LoopPreheader,
-                                                  BasicBlock *BackedgeBlock);
+  LLVM_ABI void
+  updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
+                                             BasicBlock *LoopPreheader,
+                                             BasicBlock *BackedgeBlock);
   /// Update MemorySSA after a loop was cloned, given the blocks in RPO order,
   /// the exit blocks and a 1:1 mapping of all blocks and instructions
   /// cloned. This involves duplicating all defs and uses in the cloned blocks
   /// Updating phi nodes in exit block successors is done separately.
   LLVM_ABI void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
-                           ArrayRef<BasicBlock *> ExitBlocks,
-                           const ValueToValueMapTy &VM,
-                           bool IgnoreIncomingWithNoClones = false);
+                                    ArrayRef<BasicBlock *> ExitBlocks,
+                                    const ValueToValueMapTy &VM,
+                                    bool IgnoreIncomingWithNoClones = false);
   // Block BB was fully or partially cloned into its predecessor P1. Map
   // contains the 1:1 mapping of instructions cloned and VM[BB]=P1.
   LLVM_ABI void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1,
-                                    const ValueToValueMapTy &VM);
+                                             const ValueToValueMapTy &VM);
   /// Update phi nodes in exit block successors following cloning. Exit blocks
   /// that were not cloned don't have additional predecessors added.
   LLVM_ABI void updateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
-                                     const ValueToValueMapTy &VMap,
-                                     DominatorTree &DT);
+                                              const ValueToValueMapTy &VMap,
+                                              DominatorTree &DT);
   LLVM_ABI void updateExitBlocksForClonedLoop(
       ArrayRef<BasicBlock *> ExitBlocks,
       ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT);
@@ -122,14 +123,15 @@ class MemorySSAUpdater {
   /// DT is assumed to be already up to date. If UpdateDTFirst is true, first
   /// update the DT with the same updates.
   LLVM_ABI void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
-                    bool UpdateDTFirst = false);
+                             bool UpdateDTFirst = false);
   /// Apply CFG insert updates, analogous with the DT edge updates.
-  LLVM_ABI void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
+  LLVM_ABI void applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
+                                   DominatorTree &DT);
 
   LLVM_ABI void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
   LLVM_ABI void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
   LLVM_ABI void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
-                   MemorySSA::InsertionPlace Where);
+                            MemorySSA::InsertionPlace Where);
   /// `From` block was spliced into `From` and `To`. There is a CFG edge from
   /// `From` to `To`. Move all accesses from `From` to `To` starting at
   /// instruction `Start`. `To` is newly created BB, so empty of
@@ -144,7 +146,7 @@ class MemorySSAUpdater {
   /// |      |        |  To  |
   /// |------|        |------|
   LLVM_ABI void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
-                                Instruction *Start);
+                                         Instruction *Start);
   /// `From` block was merged into `To`. There is a CFG edge from `To` to
   /// `From`.`To` still branches to `From`, but all instructions were moved and
   /// `From` is now an empty block; `From` is about to be deleted. Move all
@@ -160,7 +162,7 @@ class MemorySSAUpdater {
   /// | From |        |      |
   /// |------|        |------|
   LLVM_ABI void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
-                               Instruction *Start);
+                                        Instruction *Start);
   /// A new empty BasicBlock (New) now branches directly to Old. Some of
   /// Old's predecessors (Preds) are now branching to New instead of Old.
   /// If New is the only predecessor, move Old's Phi, if present, to New.
@@ -189,23 +191,23 @@ class MemorySSAUpdater {
   ///
   /// Note: If a MemoryAccess already exists for I, this function will make it
   /// inaccessible and it *must* have removeMemoryAccess called on it.
-  LLVM_ABI MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
-                                       const BasicBlock *BB,
-                                       MemorySSA::InsertionPlace Point,
-                                       bool CreationMustSucceed = true);
+  LLVM_ABI MemoryAccess *
+  createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
+                         const BasicBlock *BB, MemorySSA::InsertionPlace Point,
+                         bool CreationMustSucceed = true);
 
   /// Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
   ///
   /// See createMemoryAccessInBB() for usage details.
   LLVM_ABI MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
-                                           MemoryAccess *Definition,
-                                           MemoryUseOrDef *InsertPt);
+                                                    MemoryAccess *Definition,
+                                                    MemoryUseOrDef *InsertPt);
   /// Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
   ///
   /// See createMemoryAccessInBB() for usage details.
   LLVM_ABI MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
-                                          MemoryAccess *Definition,
-                                          MemoryAccess *InsertPt);
+                                                   MemoryAccess *Definition,
+                                                   MemoryAccess *InsertPt);
 
   /// Remove a MemoryAccess from MemorySSA, including updating all
   /// definitions and uses.
diff --git a/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h b/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
index 62fb4a7544973..500f168de65a7 100644
--- a/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
+++ b/llvm/include/llvm/Analysis/ModuleSummaryAnalysis.h
@@ -13,10 +13,10 @@
 #ifndef LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
 #define LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/ModuleSummaryIndex.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <functional>
 #include <optional>
 
diff --git a/llvm/include/llvm/Analysis/MustExecute.h b/llvm/include/llvm/Analysis/MustExecute.h
index 0211864099f53..838f9e645c6c8 100644
--- a/llvm/include/llvm/Analysis/MustExecute.h
+++ b/llvm/include/llvm/Analysis/MustExecute.h
@@ -23,12 +23,12 @@
 #ifndef LLVM_ANALYSIS_MUSTEXECUTE_H
 #define LLVM_ANALYSIS_MUSTEXECUTE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
 #include "llvm/IR/EHPersonalities.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -82,8 +82,9 @@ class LoopSafetyInfo {
 
   /// Return true if we must reach the block \p BB under assumption that the
   /// loop \p CurLoop is entered.
-  LLVM_ABI bool allLoopPathsLeadToBlock(const Loop *CurLoop, const BasicBlock *BB,
-                               const DominatorTree *DT) const;
+  LLVM_ABI bool allLoopPathsLeadToBlock(const Loop *CurLoop,
+                                        const BasicBlock *BB,
+                                        const DominatorTree *DT) const;
 
   /// Computes safety information for a loop checks loop body & header for
   /// the possibility of may throw exception, it takes LoopSafetyInfo and loop
@@ -107,7 +108,7 @@ class LoopSafetyInfo {
 /// Simple and conservative implementation of LoopSafetyInfo that can give
 /// false-positive answers to its queries in order to avoid complicated
 /// analysis.
-class LLVM_ABI SimpleLoopSafetyInfo: public LoopSafetyInfo {
+class LLVM_ABI SimpleLoopSafetyInfo : public LoopSafetyInfo {
   bool MayThrow = false;       // The current loop contains an instruction which
                                // may throw.
   bool HeaderMayThrow = false; // Same as previous, but specific to loop header
@@ -129,7 +130,7 @@ class LLVM_ABI SimpleLoopSafetyInfo: public LoopSafetyInfo {
 /// that should be invalidated by calling the methods insertInstructionTo and
 /// removeInstruction whenever we modify a basic block's contents by adding or
 /// removing instructions.
-class LLVM_ABI ICFLoopSafetyInfo: public LoopSafetyInfo {
+class LLVM_ABI ICFLoopSafetyInfo : public LoopSafetyInfo {
   bool MayThrow = false;       // The current loop contains an instruction which
                                // may throw.
   // Contains information about implicit control flow in this loop's blocks.
@@ -169,7 +170,8 @@ class LLVM_ABI ICFLoopSafetyInfo: public LoopSafetyInfo {
   void removeInstruction(const Instruction *Inst);
 };
 
-LLVM_ABI bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI);
+LLVM_ABI bool mayContainIrreducibleControl(const Function &F,
+                                           const LoopInfo *LI);
 
 struct MustBeExecutedContextExplorer;
 
diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
index fdfde7d385df0..35de868a7b562 100644
--- a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -10,8 +10,8 @@
 #ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 #define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Support/Compiler.h"
 namespace llvm {
 class TensorSpec;
 
@@ -21,7 +21,7 @@ class TensorSpec;
 class NoInferenceModelRunner : public MLModelRunner {
 public:
   LLVM_ABI NoInferenceModelRunner(LLVMContext &Ctx,
-                         const std::vector<TensorSpec> &Inputs);
+                                  const std::vector<TensorSpec> &Inputs);
 
   static bool classof(const MLModelRunner *R) {
     return R->getKind() == MLModelRunner::Kind::NoOp;
diff --git a/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h b/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
index a34404ede7268..3a194dc1ce538 100644
--- a/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
+++ b/llvm/include/llvm/Analysis/OptimizationRemarkEmitter.h
@@ -14,12 +14,12 @@
 #ifndef LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
 #define LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/BlockFrequencyInfo.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <optional>
 
 namespace llvm {
@@ -59,7 +59,7 @@ class OptimizationRemarkEmitter {
 
   /// Handle invalidation events in the new pass manager.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
   /// Return true iff at least *some* remarks are enabled.
   bool enabled() const {
diff --git a/llvm/include/llvm/Analysis/PHITransAddr.h b/llvm/include/llvm/Analysis/PHITransAddr.h
index 2c46887b1346a..a70bec23f94ba 100644
--- a/llvm/include/llvm/Analysis/PHITransAddr.h
+++ b/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -13,9 +13,9 @@
 #ifndef LLVM_ANALYSIS_PHITRANSADDR_H
 #define LLVM_ANALYSIS_PHITRANSADDR_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class AssumptionCache;
@@ -77,7 +77,7 @@ class PHITransAddr {
   /// CurBB to Pred, updating our state to reflect any needed changes.  If
   /// 'MustDominate' is true, the translated value must dominate PredBB.
   LLVM_ABI Value *translateValue(BasicBlock *CurBB, BasicBlock *PredBB,
-                        const DominatorTree *DT, bool MustDominate);
+                                 const DominatorTree *DT, bool MustDominate);
 
   /// translateWithInsertion - PHI translate this value into the specified
   /// predecessor block, inserting a computation of the value if it is
@@ -86,9 +86,10 @@ class PHITransAddr {
   /// All newly created instructions are added to the NewInsts list.  This
   /// returns null on failure.
   ///
-  LLVM_ABI Value *translateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
-                                const DominatorTree &DT,
-                                SmallVectorImpl<Instruction *> &NewInsts);
+  LLVM_ABI Value *
+  translateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
+                         const DominatorTree &DT,
+                         SmallVectorImpl<Instruction *> &NewInsts);
 
   LLVM_ABI void dump() const;
 
diff --git a/llvm/include/llvm/Analysis/PhiValues.h b/llvm/include/llvm/Analysis/PhiValues.h
index c690da072d31e..b47c205007c0c 100644
--- a/llvm/include/llvm/Analysis/PhiValues.h
+++ b/llvm/include/llvm/Analysis/PhiValues.h
@@ -19,13 +19,13 @@
 #ifndef LLVM_ANALYSIS_PHIVALUES_H
 #define LLVM_ANALYSIS_PHIVALUES_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -67,7 +67,7 @@ class PhiValues {
 
   /// Handle invalidation events in the new pass manager.
   LLVM_ABI bool invalidate(Function &, const PreservedAnalyses &,
-                  FunctionAnalysisManager::Invalidator &);
+                           FunctionAnalysisManager::Invalidator &);
 
 private:
   using ConstValueSet = SmallSetVector<const Value *, 4>;
diff --git a/llvm/include/llvm/Analysis/PostDominators.h b/llvm/include/llvm/Analysis/PostDominators.h
index b7439b93abc62..79dc1409919e1 100644
--- a/llvm/include/llvm/Analysis/PostDominators.h
+++ b/llvm/include/llvm/Analysis/PostDominators.h
@@ -13,11 +13,11 @@
 #ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
 #define LLVM_ANALYSIS_POSTDOMINATORS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -34,7 +34,7 @@ class PostDominatorTree : public PostDomTreeBase<BasicBlock> {
   explicit PostDominatorTree(Function &F) { recalculate(F); }
   /// Handle invalidation explicitly.
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &);
+                           FunctionAnalysisManager::Invalidator &);
 
   // Ensure base-class overloads are visible.
   using Base::dominates;
@@ -96,7 +96,7 @@ struct LLVM_ABI PostDominatorTreeWrapperPass : public FunctionPass {
   void print(raw_ostream &OS, const Module*) const override;
 };
 
-LLVM_ABI FunctionPass* createPostDomTree();
+LLVM_ABI FunctionPass *createPostDomTree();
 
 template <> struct GraphTraits<PostDominatorTree*>
   : public GraphTraits<DomTreeNode*> {
diff --git a/llvm/include/llvm/Analysis/ProfileSummaryInfo.h b/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
index 04cc53642fd47..91dcc52c9456d 100644
--- a/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -14,7 +14,6 @@
 #ifndef LLVM_ANALYSIS_PROFILESUMMARYINFO_H
 #define LLVM_ANALYSIS_PROFILESUMMARYINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
@@ -22,6 +21,7 @@
 #include "llvm/IR/ProfileSummary.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 #include <optional>
 
@@ -101,9 +101,9 @@ class ProfileSummaryInfo {
   }
 
   /// Returns the profile count for \p CallInst.
-  LLVM_ABI std::optional<uint64_t> getProfileCount(const CallBase &CallInst,
-                                          BlockFrequencyInfo *BFI,
-                                          bool AllowSynthetic = false) const;
+  LLVM_ABI std::optional<uint64_t>
+  getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI,
+                  bool AllowSynthetic = false) const;
   /// Returns true if module \c M has partial-profile sample profile.
   LLVM_ABI bool hasPartialSampleProfile() const;
   /// Returns true if the working set size of the code is considered huge.
@@ -192,7 +192,8 @@ class ProfileSummaryInfo {
   /// cold percentile cutoff value.
   /// PercentileCutoff is encoded as a 6 digit decimal fixed point number, where
   /// the first two digits are the whole part. E.g. 995000 for 99.5 percentile.
-  LLVM_ABI bool isColdCountNthPercentile(int PercentileCutoff, uint64_t C) const;
+  LLVM_ABI bool isColdCountNthPercentile(int PercentileCutoff,
+                                         uint64_t C) const;
 
   /// Returns true if BasicBlock \p BB is considered hot.
   template <typename BBType, typename BFIT>
@@ -245,9 +246,11 @@ class ProfileSummaryInfo {
                                                       BlockFreq, BFI);
   }
   /// Returns true if the call site \p CB is considered hot.
-  LLVM_ABI bool isHotCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
+  LLVM_ABI bool isHotCallSite(const CallBase &CB,
+                              BlockFrequencyInfo *BFI) const;
   /// Returns true if call site \p CB is considered cold.
-  LLVM_ABI bool isColdCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
+  LLVM_ABI bool isColdCallSite(const CallBase &CB,
+                               BlockFrequencyInfo *BFI) const;
   /// Returns HotCountThreshold if set. Recompute HotCountThreshold
   /// if not set.
   LLVM_ABI uint64_t getOrCompHotCountThreshold() const;
diff --git a/llvm/include/llvm/Analysis/RegionPass.h b/llvm/include/llvm/Analysis/RegionPass.h
index e4e0431426b0a..40809108b558f 100644
--- a/llvm/include/llvm/Analysis/RegionPass.h
+++ b/llvm/include/llvm/Analysis/RegionPass.h
@@ -15,9 +15,9 @@
 #ifndef LLVM_ANALYSIS_REGIONPASS_H
 #define LLVM_ANALYSIS_REGIONPASS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/LegacyPassManagers.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <deque>
 
 namespace llvm {
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index b207ef0d840ae..167845ce646b9 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -20,7 +20,6 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
 #define LLVM_ANALYSIS_SCALAREVOLUTION_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
@@ -36,6 +35,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/IR/ValueMap.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <cstdint>
 #include <memory>
@@ -482,8 +482,9 @@ class ScalarEvolution {
     return TestFlags == maskFlags(Flags, TestFlags);
   };
 
-  LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
-                  DominatorTree &DT, LoopInfo &LI);
+  LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
+                           AssumptionCache &AC, DominatorTree &DT,
+                           LoopInfo &LI);
   LLVM_ABI ScalarEvolution(ScalarEvolution &&Arg);
   LLVM_ABI ~ScalarEvolution();
 
@@ -531,8 +532,8 @@ class ScalarEvolution {
   /// a signed/unsigned overflow (\p Signed)? If \p CtxI is specified, the
   /// no-overflow fact should be true in the context of this instruction.
   LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
-                       const SCEV *LHS, const SCEV *RHS,
-                       const Instruction *CtxI = nullptr);
+                                const SCEV *LHS, const SCEV *RHS,
+                                const Instruction *CtxI = nullptr);
 
   /// Parse NSW/NUW flags from add/sub/mul IR binary operation \p Op into
   /// SCEV no-wrap flags, and deduce flag[s] that aren't known yet.
@@ -562,22 +563,26 @@ class ScalarEvolution {
   LLVM_ABI const SCEV *getConstant(ConstantInt *V);
   LLVM_ABI const SCEV *getConstant(const APInt &Val);
   LLVM_ABI const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
-  LLVM_ABI const SCEV *getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getLosslessPtrToIntExpr(const SCEV *Op,
+                                               unsigned Depth = 0);
   LLVM_ABI const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
-  LLVM_ABI const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty,
+                                       unsigned Depth = 0);
   LLVM_ABI const SCEV *getVScale(Type *Ty);
   LLVM_ABI const SCEV *getElementCount(Type *Ty, ElementCount EC);
-  LLVM_ABI const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+  LLVM_ABI const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty,
+                                         unsigned Depth = 0);
   LLVM_ABI const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
-                                    unsigned Depth = 0);
-  LLVM_ABI const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+                                             unsigned Depth = 0);
+  LLVM_ABI const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty,
+                                         unsigned Depth = 0);
   LLVM_ABI const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
-                                    unsigned Depth = 0);
+                                             unsigned Depth = 0);
   LLVM_ABI const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
   LLVM_ABI const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
   LLVM_ABI const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
-                         unsigned Depth = 0);
+                                  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                                  unsigned Depth = 0);
   const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
@@ -591,8 +596,8 @@ class ScalarEvolution {
     return getAddExpr(Ops, Flags, Depth);
   }
   LLVM_ABI const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
-                         unsigned Depth = 0);
+                                  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                                  unsigned Depth = 0);
   const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0) {
@@ -608,10 +613,10 @@ class ScalarEvolution {
   LLVM_ABI const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
   LLVM_ABI const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
   LLVM_ABI const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
-  LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
-                            SCEV::NoWrapFlags Flags);
+  LLVM_ABI const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
+                                     const Loop *L, SCEV::NoWrapFlags Flags);
   LLVM_ABI const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
-                            const Loop *L, SCEV::NoWrapFlags Flags);
+                                     const Loop *L, SCEV::NoWrapFlags Flags);
   const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
                             const Loop *L, SCEV::NoWrapFlags Flags) {
     SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
@@ -622,7 +627,8 @@ class ScalarEvolution {
   /// Predicates. If successful return these <AddRecExpr, Predicates>;
   /// The function is intended to be called from PSCEV (the caller will decide
   /// whether to actually add the predicates and carry out the rewrites).
-  LLVM_ABI std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
+  LLVM_ABI std::optional<
+      std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
   createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
 
   /// Returns an expression for a GEP
@@ -630,13 +636,14 @@ class ScalarEvolution {
   /// \p GEP The GEP. The indices contained in the GEP itself are ignored,
   /// instead we use IndexExprs.
   /// \p IndexExprs The expressions for the indices.
-  LLVM_ABI const SCEV *getGEPExpr(GEPOperator *GEP,
-                         const SmallVectorImpl<const SCEV *> &IndexExprs);
+  LLVM_ABI const SCEV *
+  getGEPExpr(GEPOperator *GEP, const SmallVectorImpl<const SCEV *> &IndexExprs);
   LLVM_ABI const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
   LLVM_ABI const SCEV *getMinMaxExpr(SCEVTypes Kind,
-                            SmallVectorImpl<const SCEV *> &Operands);
-  LLVM_ABI const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
-                                      SmallVectorImpl<const SCEV *> &Operands);
+                                     SmallVectorImpl<const SCEV *> &Operands);
+  LLVM_ABI const SCEV *
+  getSequentialMinMaxExpr(SCEVTypes Kind,
+                          SmallVectorImpl<const SCEV *> &Operands);
   LLVM_ABI const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
   LLVM_ABI const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
   LLVM_ABI const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
@@ -644,9 +651,9 @@ class ScalarEvolution {
   LLVM_ABI const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
   LLVM_ABI const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
   LLVM_ABI const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
-                          bool Sequential = false);
+                                   bool Sequential = false);
   LLVM_ABI const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
-                          bool Sequential = false);
+                                   bool Sequential = false);
   LLVM_ABI const SCEV *getUnknown(Value *V);
   LLVM_ABI const SCEV *getCouldNotCompute();
 
@@ -677,11 +684,12 @@ class ScalarEvolution {
   LLVM_ABI const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
 
   /// Return an expression for offsetof on the given field with type IntTy
-  LLVM_ABI const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+  LLVM_ABI const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy,
+                                       unsigned FieldNo);
 
   /// Return the SCEV object corresponding to -V.
-  LLVM_ABI const SCEV *getNegativeSCEV(const SCEV *V,
-                              SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+  LLVM_ABI const SCEV *
+  getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
 
   /// Return the SCEV object corresponding to ~V.
   LLVM_ABI const SCEV *getNotSCEV(const SCEV *V);
@@ -694,8 +702,8 @@ class ScalarEvolution {
   /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
   /// types that support it.
   LLVM_ABI const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
-                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
-                           unsigned Depth = 0);
+                                    SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                                    unsigned Depth = 0);
 
   /// Compute ceil(N / D). N and D are treated as unsigned values.
   ///
@@ -710,12 +718,12 @@ class ScalarEvolution {
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is zero extended.
   LLVM_ABI const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
-                                      unsigned Depth = 0);
+                                               unsigned Depth = 0);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is sign extended.
   LLVM_ABI const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
-                                      unsigned Depth = 0);
+                                               unsigned Depth = 0);
 
   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type.  If the type must be extended, it is zero extended.  The
@@ -738,17 +746,20 @@ class ScalarEvolution {
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umax operation with them.
-  LLVM_ABI const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS,
+                                                  const SCEV *RHS);
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umin operation with them.
-  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
-                                         bool Sequential = false);
+  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS,
+                                                  const SCEV *RHS,
+                                                  bool Sequential = false);
 
   /// Promote the operands to the wider of the types using zero-extension, and
   /// then perform a umin operation with them. N-ary function.
-  LLVM_ABI const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
-                                         bool Sequential = false);
+  LLVM_ABI const SCEV *
+  getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
+                             bool Sequential = false);
 
   /// Transitively follow the chain of pointer-type operands until reaching a
   /// SCEV that does not have a single pointer operand. This returns a
@@ -778,17 +789,19 @@ class ScalarEvolution {
   /// and RHS.  This is used to help avoid max expressions in loop trip
   /// counts, and to eliminate casts.
   LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred,
-                                const SCEV *LHS, const SCEV *RHS);
+                                         const SCEV *LHS, const SCEV *RHS);
 
   /// Test whether entry to the basic block is protected by a conditional
   /// between LHS and RHS.
-  LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred,
-                                      const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
+                                               CmpPredicate Pred,
+                                               const SCEV *LHS,
+                                               const SCEV *RHS);
 
   /// Test whether the backedge of the loop is protected by a conditional
   /// between LHS and RHS.  This is used to eliminate casts.
   LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred,
-                                   const SCEV *LHS, const SCEV *RHS);
+                                            const SCEV *LHS, const SCEV *RHS);
 
   /// A version of getTripCountFromExitCount below which always picks an
   /// evaluation type which can not result in overflow.
@@ -801,8 +814,8 @@ class ScalarEvolution {
   /// expression can overflow if ExitCount = UINT_MAX.  If EvalTy is not wide
   /// enough to hold the result without overflow, result unsigned wraps with
   /// 2s-complement semantics.  ex: EC = 255 (i8), TC = 0 (i8)
-  LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount, Type *EvalTy,
-                                        const Loop *L);
+  LLVM_ABI const SCEV *getTripCountFromExitCount(const SCEV *ExitCount,
+                                                 Type *EvalTy, const Loop *L);
 
   /// Returns the exact trip count of the loop if we can compute it, and
   /// the result is a small constant.  '0' is used to represent an unknown
@@ -820,7 +833,7 @@ class ScalarEvolution {
   /// the number times that the loop header executes if the loop exits
   /// prematurely via another branch.
   LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L,
-                                     const BasicBlock *ExitingBlock);
+                                              const BasicBlock *ExitingBlock);
 
   /// Returns the upper bound of the loop trip count as a normal unsigned
   /// value.
@@ -837,7 +850,7 @@ class ScalarEvolution {
   /// return 1 if the trip count is very large (>= 2^32).
   /// Note that the argument is an exit count for loop L, NOT a trip count.
   LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L,
-                                        const SCEV *ExitCount);
+                                                 const SCEV *ExitCount);
 
   /// Returns the largest constant divisor of the trip count of the
   /// loop.  Will return 1 if no trip count could be computed, or if a
@@ -850,8 +863,8 @@ class ScalarEvolution {
   /// count could very well be zero as well!). As explained in the comments
   /// for getSmallConstantTripCount, this assumes that control exits the loop
   /// via ExitingBlock.
-  LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L,
-                                        const BasicBlock *ExitingBlock);
+  LLVM_ABI unsigned
+  getSmallConstantTripMultiple(const Loop *L, const BasicBlock *ExitingBlock);
 
   /// The terms "backedge taken count" and "exit count" are used
   /// interchangeably to refer to the number of times the backedge of a loop
@@ -872,8 +885,9 @@ class ScalarEvolution {
   /// getBackedgeTakenCount.  The loop is guaranteed to exit (via *some* exit)
   /// before the backedge is executed (ExitCount + 1) times.  Note that there
   /// is no guarantee about *which* exit is taken on the exiting iteration.
-  LLVM_ABI const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
-                           ExitCountKind Kind = Exact);
+  LLVM_ABI const SCEV *getExitCount(const Loop *L,
+                                    const BasicBlock *ExitingBlock,
+                                    ExitCountKind Kind = Exact);
 
   /// Same as above except this uses the predicated backedge taken info and
   /// may require predicates.
@@ -892,7 +906,8 @@ class ScalarEvolution {
   /// Note that it is not valid to call this method on a loop without a
   /// loop-invariant backedge-taken count (see
   /// hasLoopInvariantBackedgeTakenCount).
-  LLVM_ABI const SCEV *getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);
+  LLVM_ABI const SCEV *getBackedgeTakenCount(const Loop *L,
+                                             ExitCountKind Kind = Exact);
 
   /// Similar to getBackedgeTakenCount, except it will add a set of
   /// SCEV predicates to Predicates that are required to be true in order for
@@ -1043,14 +1058,15 @@ class ScalarEvolution {
   /// Test if the given expression is known to be a power of 2.  OrNegative
   /// allows matching negative power of 2s, and OrZero allows matching 0.
   LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero = false,
-                              bool OrNegative = false);
+                                       bool OrNegative = false);
 
   /// Check that \p S is a multiple of \p M. When \p S is an AddRecExpr, \p S is
   /// a multiple of \p M if \p S starts with a multiple of \p M and at every
   /// iteration step \p S only adds multiples of \p M. \p Assumptions records
   /// the runtime predicates under which \p S is a multiple of \p M.
-  LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M,
-                         SmallVectorImpl<const SCEVPredicate *> &Assumptions);
+  LLVM_ABI bool
+  isKnownMultipleOf(const SCEV *S, uint64_t M,
+                    SmallVectorImpl<const SCEVPredicate *> &Assumptions);
 
   /// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
   /// \p S by substitution of all AddRec sub-expression related to loop \p L
@@ -1068,8 +1084,8 @@ class ScalarEvolution {
   /// 0 (initial value) for the first element and to {1, +, 1}<L1> (post
   /// increment value) for the second one. In both cases AddRec expression
   /// related to L2 remains the same.
-  LLVM_ABI std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
-                                                                const SCEV *S);
+  LLVM_ABI std::pair<const SCEV *, const SCEV *>
+  SplitIntoInitAndPostInc(const Loop *L, const SCEV *S);
 
   /// We'd like to check the predicate on every iteration of the most dominated
   /// loop between loops used in LHS and RHS.
@@ -1089,34 +1105,38 @@ class ScalarEvolution {
   ///    so we can assert on that.
   /// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
   ///                   isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
-  LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS,
+                                    const SCEV *RHS);
 
   /// Test if the given expression is known to satisfy the condition described
   /// by Pred, LHS, and RHS.
-  LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS,
+                                 const SCEV *RHS);
 
   /// Check whether the condition described by Pred, LHS, and RHS is true or
   /// false. If we know it, return the evaluation of this condition. If neither
   /// is proved, return std::nullopt.
-  LLVM_ABI std::optional<bool> evaluatePredicate(CmpPredicate Pred, const SCEV *LHS,
-                                        const SCEV *RHS);
+  LLVM_ABI std::optional<bool>
+  evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS);
 
   /// Test if the given expression is known to satisfy the condition described
   /// by Pred, LHS, and RHS in the given Context.
-  LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
-                          const Instruction *CtxI);
+  LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS,
+                                   const SCEV *RHS, const Instruction *CtxI);
 
   /// Check whether the condition described by Pred, LHS, and RHS is true or
   /// false in the given \p Context. If we know it, return the evaluation of
   /// this condition. If neither is proved, return std::nullopt.
-  LLVM_ABI std::optional<bool> evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS,
-                                          const SCEV *RHS,
-                                          const Instruction *CtxI);
+  LLVM_ABI std::optional<bool> evaluatePredicateAt(CmpPredicate Pred,
+                                                   const SCEV *LHS,
+                                                   const SCEV *RHS,
+                                                   const Instruction *CtxI);
 
   /// Test if the condition described by Pred, LHS, RHS is known to be true on
   /// every iteration of the loop of the recurrency LHS.
-  LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS,
-                               const SCEV *RHS);
+  LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred,
+                                        const SCEVAddRecExpr *LHS,
+                                        const SCEV *RHS);
 
   /// Information about the number of loop iterations for which a loop exit's
   /// branch condition evaluates to the not-taken path.  This is a temporary
@@ -1141,13 +1161,14 @@ class ScalarEvolution {
     /// as arguments and asserts enforce that internally.
     /*implicit*/ LLVM_ABI ExitLimit(const SCEV *E);
 
-    LLVM_ABI ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
+    LLVM_ABI
+    ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
               const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
               ArrayRef<ArrayRef<const SCEVPredicate *>> PredLists = {});
 
     LLVM_ABI ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
-              const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
-              ArrayRef<const SCEVPredicate *> PredList);
+                       const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
+                       ArrayRef<const SCEVPredicate *> PredList);
 
     /// Test whether this ExitLimit contains any computed information, or
     /// whether it's all SCEVCouldNotCompute values.
@@ -1173,8 +1194,9 @@ class ScalarEvolution {
   /// If \p AllowPredicates is set, this call will try to use a minimal set of
   /// SCEV predicates in order to return an exact answer.
   LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
-                                     bool ExitIfTrue, bool ControlsOnlyExit,
-                                     bool AllowPredicates = false);
+                                              bool ExitIfTrue,
+                                              bool ControlsOnlyExit,
+                                              bool AllowPredicates = false);
 
   /// A predicate is said to be monotonically increasing if may go from being
   /// false to being true as the loop iterates, but never the other way
@@ -1232,7 +1254,7 @@ class ScalarEvolution {
   /// unequal, LHS and RHS are set to the same value and Pred is set to either
   /// ICMP_EQ or ICMP_NE.
   LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS,
-                            const SCEV *&RHS, unsigned Depth = 0);
+                                     const SCEV *&RHS, unsigned Depth = 0);
 
   /// Return the "disposition" of the given SCEV with respect to the given
   /// loop.
@@ -1255,7 +1277,8 @@ class ScalarEvolution {
 
   /// Return the "disposition" of the given SCEV with respect to the given
   /// block.
-  LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+  LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S,
+                                                const BasicBlock *BB);
 
   /// Return true if elements that makes up the given SCEV dominate the
   /// specified basic block.
@@ -1274,15 +1297,17 @@ class ScalarEvolution {
   LLVM_ABI void print(raw_ostream &OS) const;
   LLVM_ABI void verify() const;
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
   /// Return the DataLayout associated with the module this SCEV instance is
   /// operating on.
   const DataLayout &getDataLayout() const { return DL; }
 
-  LLVM_ABI const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
+  LLVM_ABI const SCEVPredicate *getEqualPredicate(const SCEV *LHS,
+                                                  const SCEV *RHS);
   LLVM_ABI const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
-                                           const SCEV *LHS, const SCEV *RHS);
+                                                    const SCEV *LHS,
+                                                    const SCEV *RHS);
 
   LLVM_ABI const SCEVPredicate *
   getWrapPredicate(const SCEVAddRecExpr *AR,
@@ -1290,7 +1315,7 @@ class ScalarEvolution {
 
   /// Re-writes the SCEV according to the Predicates in \p A.
   LLVM_ABI const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
-                                    const SCEVPredicate &A);
+                                             const SCEVPredicate &A);
   /// Tries to convert the \p S expression to an AddRec expression,
   /// adding additional predicates to \p Preds as required.
   LLVM_ABI const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
@@ -1305,7 +1330,7 @@ class ScalarEvolution {
   /// canonicalizing an expression in the cases where the result isn't going
   /// to be a constant.
   LLVM_ABI std::optional<APInt> computeConstantDifference(const SCEV *LHS,
-                                                 const SCEV *RHS);
+                                                          const SCEV *RHS);
 
   /// Update no-wrap flags of an AddRec. This may drop the cached info about
   /// this AddRec (such as range info) in case if new flags may potentially
@@ -1350,7 +1375,8 @@ class ScalarEvolution {
 
   /// Try to apply information from loop guards for \p L to \p Expr.
   LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
-  LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr, const LoopGuards &Guards);
+  LLVM_ABI const SCEV *applyLoopGuards(const SCEV *Expr,
+                                       const LoopGuards &Guards);
 
   /// Return true if the loop has no abnormal exits. That is, if the loop
   /// is not infinite, it must exit through an explicit edge in the CFG.
@@ -1367,8 +1393,9 @@ class ScalarEvolution {
   /// Return the set of Values that, if poison, will definitively result in S
   /// being poison as well. The returned set may be incomplete, i.e. there can
   /// be additional Values that also result in S being poison.
-  LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
-                                 const SCEV *S);
+  LLVM_ABI void
+  getPoisonGeneratingValues(SmallPtrSetImpl<const Value *> &Result,
+                            const SCEV *S);
 
   /// Check whether it is poison-safe to represent the expression S using the
   /// instruction I. If such a replacement is performed, the poison flags of
@@ -1563,8 +1590,9 @@ class ScalarEvolution {
     using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
 
     /// Initialize BackedgeTakenInfo from a list of exact exit counts.
-    LLVM_ABI BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
-                      const SCEV *ConstantMax, bool MaxOrZero);
+    LLVM_ABI BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts,
+                               bool IsComplete, const SCEV *ConstantMax,
+                               bool MaxOrZero);
 
     /// Test whether this BackedgeTakenInfo contains any computed information,
     /// or whether it's all SCEVCouldNotCompute values.
@@ -1742,7 +1770,7 @@ class ScalarEvolution {
   /// NOTE: This returns a reference to an entry in a cache. It must be
   /// copied if its needed for longer.
   LLVM_ABI const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
-                                   unsigned Depth = 0);
+                                            unsigned Depth = 0);
 
   /// Determine the range for a particular SCEV, but evaluates ranges for
   /// operands iteratively first.
@@ -1870,12 +1898,13 @@ class ScalarEvolution {
         : L(L), ExitIfTrue(ExitIfTrue), AllowPredicates(AllowPredicates) {}
 
     LLVM_ABI std::optional<ExitLimit> find(const Loop *L, Value *ExitCond,
-                                  bool ExitIfTrue, bool ControlsOnlyExit,
-                                  bool AllowPredicates);
+                                           bool ExitIfTrue,
+                                           bool ControlsOnlyExit,
+                                           bool AllowPredicates);
 
     LLVM_ABI void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
-                bool ControlsOnlyExit, bool AllowPredicates,
-                const ExitLimit &EL);
+                         bool ControlsOnlyExit, bool AllowPredicates,
+                         const ExitLimit &EL);
   };
 
   using ExitLimitCacheTy = ExitLimitCache;
@@ -1980,27 +2009,30 @@ class ScalarEvolution {
   /// whenever the given FoundCondValue value evaluates to true in given
   /// Context. If Context is nullptr, then the found predicate is true
   /// everywhere. LHS and FoundLHS may have different type width.
-  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
-                     const Value *FoundCondValue, bool Inverse,
-                     const Instruction *Context = nullptr);
+  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
+                              const SCEV *RHS, const Value *FoundCondValue,
+                              bool Inverse,
+                              const Instruction *Context = nullptr);
 
   /// Test whether the condition described by Pred, LHS, and RHS is true
   /// whenever the given FoundCondValue value evaluates to true in given
   /// Context. If Context is nullptr, then the found predicate is true
   /// everywhere. LHS and FoundLHS must have same type width.
   LLVM_ABI bool isImpliedCondBalancedTypes(CmpPredicate Pred, const SCEV *LHS,
-                                  const SCEV *RHS, CmpPredicate FoundPred,
-                                  const SCEV *FoundLHS, const SCEV *FoundRHS,
-                                  const Instruction *CtxI);
+                                           const SCEV *RHS,
+                                           CmpPredicate FoundPred,
+                                           const SCEV *FoundLHS,
+                                           const SCEV *FoundRHS,
+                                           const Instruction *CtxI);
 
   /// Test whether the condition described by Pred, LHS, and RHS is true
   /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
   /// true in given Context. If Context is nullptr, then the found predicate is
   /// true everywhere.
-  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS,
-                     CmpPredicate FoundPred, const SCEV *FoundLHS,
-                     const SCEV *FoundRHS,
-                     const Instruction *Context = nullptr);
+  LLVM_ABI bool isImpliedCond(CmpPredicate Pred, const SCEV *LHS,
+                              const SCEV *RHS, CmpPredicate FoundPred,
+                              const SCEV *FoundLHS, const SCEV *FoundRHS,
+                              const Instruction *Context = nullptr);
 
   /// Test whether the condition described by Pred, LHS, and RHS is true
   /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
@@ -2418,11 +2450,13 @@ class PredicatedScalarEvolution {
   LLVM_ABI const SCEVAddRecExpr *getAsAddRec(Value *V);
 
   /// Proves that V doesn't overflow by adding SCEV predicate.
-  LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+  LLVM_ABI void setNoOverflow(Value *V,
+                              SCEVWrapPredicate::IncrementWrapFlags Flags);
 
   /// Returns true if we've proved that V doesn't wrap by means of a SCEV
   /// predicate.
-  LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);
+  LLVM_ABI bool hasNoOverflow(Value *V,
+                              SCEVWrapPredicate::IncrementWrapFlags Flags);
 
   /// Returns the ScalarEvolution analysis used.
   ScalarEvolution *getSE() const { return &SE; }
@@ -2437,7 +2471,7 @@ class PredicatedScalarEvolution {
   /// Check if \p AR1 and \p AR2 are equal, while taking into account
   /// Equal predicates in Preds.
   LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
-                                const SCEVAddRecExpr *AR2) const;
+                                         const SCEVAddRecExpr *AR2) const;
 
 private:
   /// Increments the version number of the predicate.  This needs to be called
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
index 4b44539f4f33f..8f1844505119c 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -13,9 +13,9 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -32,11 +32,12 @@ class SCEVAAResult : public AAResultBase {
   explicit SCEVAAResult(ScalarEvolution &SE) : SE(SE) {}
   SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
 
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI);
 
   LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
-                  FunctionAnalysisManager::Invalidator &Inv);
+                           FunctionAnalysisManager::Invalidator &Inv);
 
 private:
   Value *GetBaseValue(const SCEV *S);
@@ -71,7 +72,6 @@ class LLVM_ABI SCEVAAWrapperPass : public FunctionPass {
 
 /// Creates an instance of \c SCEVAAWrapperPass.
 LLVM_ABI FunctionPass *createSCEVAAWrapperPass();
-
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 41795057df10b..13b9e1b812942 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -13,7 +13,6 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
@@ -21,6 +20,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cassert>
 #include <cstddef>
@@ -106,8 +106,8 @@ class SCEVCastExpr : public SCEV {
   const SCEV *Op;
   Type *Ty;
 
-  LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
-               Type *ty);
+  LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
+                        const SCEV *op, Type *ty);
 
 public:
   const SCEV *getOperand() const { return Op; }
@@ -142,7 +142,7 @@ class SCEVPtrToIntExpr : public SCEVCastExpr {
 class SCEVIntegralCastExpr : public SCEVCastExpr {
 protected:
   LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
-                       const SCEV *op, Type *ty);
+                                const SCEV *op, Type *ty);
 
 public:
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -395,12 +395,14 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
 
   /// Return the value of this chain of recurrences at the specified
   /// iteration number.
-  LLVM_ABI const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
+  LLVM_ABI const SCEV *evaluateAtIteration(const SCEV *It,
+                                           ScalarEvolution &SE) const;
 
   /// Return the value of this chain of recurrences at the specified iteration
   /// number. Takes an explicit list of operands to represent an AddRec.
-  LLVM_ABI static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
-                                         const SCEV *It, ScalarEvolution &SE);
+  LLVM_ABI static const SCEV *
+  evaluateAtIteration(ArrayRef<const SCEV *> Operands, const SCEV *It,
+                      ScalarEvolution &SE);
 
   /// Return the number of iterations of this loop that produce
   /// values in the specified constant range.  Another way of
@@ -409,7 +411,7 @@ class SCEVAddRecExpr : public SCEVNAryExpr {
   /// exit count.  If the iteration count can't be computed, an
   /// instance of SCEVCouldNotCompute is returned.
   LLVM_ABI const SCEV *getNumIterationsInRange(const ConstantRange &Range,
-                                      ScalarEvolution &SE) const;
+                                               ScalarEvolution &SE) const;
 
   /// Return an expression representing the value of this expression
   /// one iteration of the loop ahead.
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
index 9a455108712e3..7719e3354dcc5 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -35,9 +35,9 @@
 #ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
 #define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/STLFunctionalExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -53,19 +53,22 @@ typedef function_ref<bool(const SCEVAddRecExpr *)> NormalizePredTy;
 /// Normalize \p S to be post-increment for all loops present in \p
 /// Loops. Returns nullptr if the result is not invertible and \p
 /// CheckInvertible is true.
-LLVM_ABI const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
-                                   ScalarEvolution &SE,
-                                   bool CheckInvertible = true);
+LLVM_ABI const SCEV *normalizeForPostIncUse(const SCEV *S,
+                                            const PostIncLoopSet &Loops,
+                                            ScalarEvolution &SE,
+                                            bool CheckInvertible = true);
 
 /// Normalize \p S for all add recurrence sub-expressions for which \p
 /// Pred returns true.
-LLVM_ABI const SCEV *normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,
-                                     ScalarEvolution &SE);
+LLVM_ABI const SCEV *normalizeForPostIncUseIf(const SCEV *S,
+                                              NormalizePredTy Pred,
+                                              ScalarEvolution &SE);
 
 /// Denormalize \p S to be post-increment for all loops present in \p
 /// Loops.
-LLVM_ABI const SCEV *denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
-                                     ScalarEvolution &SE);
+LLVM_ABI const SCEV *denormalizeForPostIncUse(const SCEV *S,
+                                              const PostIncLoopSet &Loops,
+                                              ScalarEvolution &SE);
 } // namespace llvm
 
 #endif
diff --git a/llvm/include/llvm/Analysis/ScopedNoAliasAA.h b/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
index 8df4b6d76a488..49620cf5ee078 100644
--- a/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
+++ b/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -14,10 +14,10 @@
 #ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
 #define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 
 namespace llvm {
@@ -37,15 +37,18 @@ class ScopedNoAliasAAResult : public AAResultBase {
     return false;
   }
 
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
-                           AAQueryInfo &AAQI);
-
-  LLVM_ABI void collectScopedDomains(const MDNode *NoAlias,
-                            SmallPtrSetImpl<const MDNode *> &Domains) const;
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1,
+                                    const CallBase *Call2, AAQueryInfo &AAQI);
+
+  LLVM_ABI void
+  collectScopedDomains(const MDNode *NoAlias,
+                       SmallPtrSetImpl<const MDNode *> &Domains) const;
 
 private:
   bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
diff --git a/llvm/include/llvm/Analysis/SimplifyQuery.h b/llvm/include/llvm/Analysis/SimplifyQuery.h
index 62149d3a579d6..d1d34f22a2fc5 100644
--- a/llvm/include/llvm/Analysis/SimplifyQuery.h
+++ b/llvm/include/llvm/Analysis/SimplifyQuery.h
@@ -9,9 +9,9 @@
 #ifndef LLVM_ANALYSIS_SIMPLIFYQUERY_H
 #define LLVM_ANALYSIS_SIMPLIFYQUERY_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
diff --git a/llvm/include/llvm/Analysis/StaticDataProfileInfo.h b/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
index 4c4e62be469b9..fa21eba1377df 100644
--- a/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
+++ b/llvm/include/llvm/Analysis/StaticDataProfileInfo.h
@@ -1,12 +1,12 @@
 #ifndef LLVM_ANALYSIS_STATICDATAPROFILEINFO_H
 #define LLVM_ANALYSIS_STATICDATAPROFILEINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -23,7 +23,8 @@ class StaticDataProfileInfo {
   DenseSet<const Constant *> ConstantWithoutCounts;
 
   /// If \p C has a count, return it. Otherwise, return std::nullopt.
-  LLVM_ABI std::optional<uint64_t> getConstantProfileCount(const Constant *C) const;
+  LLVM_ABI std::optional<uint64_t>
+  getConstantProfileCount(const Constant *C) const;
 
 public:
   StaticDataProfileInfo() = default;
@@ -33,7 +34,7 @@ class StaticDataProfileInfo {
   /// the result exceeds it. Otherwise, mark the constant as having no profile
   /// count.
   LLVM_ABI void addConstantProfileCount(const Constant *C,
-                               std::optional<uint64_t> Count);
+                                        std::optional<uint64_t> Count);
 
   /// Return a section prefix for the constant \p C based on its profile count.
   /// - If a constant doesn't have a counter, return an empty string.
@@ -43,8 +44,8 @@ class StaticDataProfileInfo {
   ///   - If it has a cold count, return "unlikely".
   ///   - Otherwise (e.g. it's used by lukewarm functions), return an empty
   ///     string.
-  LLVM_ABI StringRef getConstantSectionPrefix(const Constant *C,
-                                     const ProfileSummaryInfo *PSI) const;
+  LLVM_ABI StringRef getConstantSectionPrefix(
+      const Constant *C, const ProfileSummaryInfo *PSI) const;
 };
 
 /// This wraps the StaticDataProfileInfo object as an immutable pass, for a
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index 244b667aefba5..54433b1345ba6 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -18,12 +18,12 @@
 #ifndef LLVM_ANALYSIS_TARGETFOLDER_H
 #define LLVM_ANALYSIS_TARGETFOLDER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/IRBuilderFolder.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 
@@ -215,7 +215,6 @@ class LLVM_ABI TargetFolder final : public IRBuilderFolder {
     return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
   }
 };
-
 }
 
 #endif
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index 064b96de85851..0596ff86b473e 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -9,13 +9,13 @@
 #ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
 #define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/TargetParser/Triple.h"
 #include <bitset>
 #include <optional>
@@ -114,7 +114,7 @@ class TargetLibraryInfoImpl {
   /// Return true if the function type FTy is valid for the library function
   /// F, regardless of whether the function is available.
   LLVM_ABI bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
-                              const Module &M) const;
+                                       const Module &M) const;
 
 public:
   /// List of known vector-functions libraries.
@@ -197,8 +197,9 @@ class TargetLibraryInfoImpl {
 
   /// Calls addVectorizableFunctions with a known preset of functions for the
   /// given vector library.
-  LLVM_ABI void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib,
-                                          const llvm::Triple &TargetTriple);
+  LLVM_ABI void
+  addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib,
+                                     const llvm::Triple &TargetTriple);
 
   /// Return true if the function F has a vector equivalent with vectorization
   /// factor VF.
@@ -214,13 +215,13 @@ class TargetLibraryInfoImpl {
   /// Return the name of the equivalent of F, vectorized with factor VF. If no
   /// such mapping exists, return the empty string.
   LLVM_ABI StringRef getVectorizedFunction(StringRef F, const ElementCount &VF,
-                                  bool Masked) const;
+                                           bool Masked) const;
 
   /// Return a pointer to a VecDesc object holding all info for scalar to vector
   /// mappings in TLI for the equivalent of F, vectorized with factor VF.
   /// If no such mapping exists, return nullpointer.
-  LLVM_ABI const VecDesc *getVectorMappingInfo(StringRef F, const ElementCount &VF,
-                                      bool Masked) const;
+  LLVM_ABI const VecDesc *
+  getVectorMappingInfo(StringRef F, const ElementCount &VF, bool Masked) const;
 
   /// Set to true iff i32 parameters to library functions should have signext
   /// or zeroext attributes if they correspond to C-level int or unsigned int,
@@ -268,7 +269,7 @@ class TargetLibraryInfoImpl {
   /// Returns the largest vectorization factor used in the list of
   /// vector functions.
   LLVM_ABI void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
-                   ElementCount &Scalable) const;
+                            ElementCount &Scalable) const;
 
   /// Returns true if call site / callee has cdecl-compatible calling
   /// conventions.
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 6d1852b88f465..8f4ce80ada5ed 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -21,7 +21,6 @@
 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/APInt.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Analysis/IVDescriptors.h"
@@ -31,6 +30,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/InstructionCost.h"
 #include <functional>
 #include <optional>
@@ -113,8 +113,9 @@ struct HardwareLoopInfo {
                                   // icmp ne zero on the loop counter value and
                                   // produces an i1 to guard the loop entry.
   LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
-                               DominatorTree &DT, bool ForceNestedLoop = false,
-                               bool ForceHardwareLoopPHI = false);
+                                        DominatorTree &DT,
+                                        bool ForceNestedLoop = false,
+                                        bool ForceHardwareLoopPHI = false);
   LLVM_ABI bool canAnalyze(LoopInfo &LI);
 };
 
@@ -142,7 +143,7 @@ class IntrinsicCostAttributes {
       InstructionCost ScalarCost = InstructionCost::getInvalid());
 
   LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
-                          ArrayRef<const Value *> Args);
+                                   ArrayRef<const Value *> Args);
 
   LLVM_ABI IntrinsicCostAttributes(
       Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
@@ -372,7 +373,8 @@ class TargetTransformInfo {
 
   /// \returns The cost of having an Alloca in the caller if not inlined, to be
   /// added to the threshold
-  LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;
+  LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB,
+                                        const AllocaInst *AI) const;
 
   /// \returns Vector bonus in percent.
   ///
@@ -397,10 +399,10 @@ class TargetTransformInfo {
   /// \return The estimated number of case clusters when lowering \p 'SI'.
   /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
   /// table.
-  LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
-                                            unsigned &JTSize,
-                                            ProfileSummaryInfo *PSI,
-                                            BlockFrequencyInfo *BFI) const;
+  LLVM_ABI unsigned
+  getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
+                                   ProfileSummaryInfo *PSI,
+                                   BlockFrequencyInfo *BFI) const;
 
   /// Estimate the cost of a given IR user when lowered.
   ///
@@ -416,8 +418,8 @@ class TargetTransformInfo {
   /// The returned cost is defined in terms of \c TargetCostConstants, see its
   /// comments for a detailed explanation of the cost values.
   LLVM_ABI InstructionCost getInstructionCost(const User *U,
-                                     ArrayRef<const Value *> Operands,
-                                     TargetCostKind CostKind) const;
+                                              ArrayRef<const Value *> Operands,
+                                              TargetCostKind CostKind) const;
 
   /// This is a helper function which calls the three-argument
   /// getInstructionCost with \p Operands which are the current operands U has.
@@ -489,13 +491,14 @@ class TargetTransformInfo {
   ///
   /// \returns true if the intrinsic was handled.
   LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
-                                  Intrinsic::ID IID) const;
+                                           Intrinsic::ID IID) const;
 
   LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
 
   /// Return true if globals in this address space can have initializers other
   /// than `undef`.
-  LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;
+  LLVM_ABI bool
+  canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;
 
   LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const;
 
@@ -509,8 +512,9 @@ class TargetTransformInfo {
   /// operand index that collectFlatAddressOperands returned for the intrinsic.
   /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
   /// new value (which may be the original \p II with modified operands).
-  LLVM_ABI Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
-                                          Value *NewV) const;
+  LLVM_ABI Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
+                                                   Value *OldV,
+                                                   Value *NewV) const;
 
   /// Test whether calls to a function lower to actual program function
   /// calls.
@@ -640,14 +644,15 @@ class TargetTransformInfo {
   /// transformation. The caller will initialize UP with the current
   /// target-independent defaults.
   LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &,
-                               UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE) const;
+                                        UnrollingPreferences &UP,
+                                        OptimizationRemarkEmitter *ORE) const;
 
   /// Query the target whether it would be profitable to convert the given loop
   /// into a hardware loop.
   LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
-                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
-                                HardwareLoopInfo &HWLoopInfo) const;
+                                         AssumptionCache &AC,
+                                         TargetLibraryInfo *LibInfo,
+                                         HardwareLoopInfo &HWLoopInfo) const;
 
   // Query the target for which minimum vectorization factor epilogue
   // vectorization should be considered.
@@ -690,7 +695,7 @@ class TargetTransformInfo {
   /// transformation. The caller will initialize \p PP with the current
   /// target-independent defaults with information from \p L and \p SE.
   LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             PeelingPreferences &PP) const;
+                                      PeelingPreferences &PP) const;
 
   /// Targets can implement their own combinations for target-specific
   /// intrinsics. This function will be called from the InstCombine pass every
@@ -699,18 +704,19 @@ class TargetTransformInfo {
   /// \returns std::nullopt to not do anything target specific or a value that
   /// will be returned from the InstCombiner. It is possible to return null and
   /// stop further processing of the intrinsic by returning nullptr.
-  LLVM_ABI std::optional<Instruction *> instCombineIntrinsic(InstCombiner & IC,
-                                                    IntrinsicInst & II) const;
+  LLVM_ABI std::optional<Instruction *>
+  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const;
   /// Can be used to implement target-specific instruction combining.
   /// \see instCombineIntrinsic
-  LLVM_ABI std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
-      InstCombiner & IC, IntrinsicInst & II, APInt DemandedMask,
-      KnownBits & Known, bool &KnownBitsComputed) const;
+  LLVM_ABI std::optional<Value *>
+  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
+                                   APInt DemandedMask, KnownBits &Known,
+                                   bool &KnownBitsComputed) const;
   /// Can be used to implement target-specific instruction combining.
   /// \see instCombineIntrinsic
   LLVM_ABI std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
-      InstCombiner & IC, IntrinsicInst & II, APInt DemandedElts,
-      APInt & UndefElts, APInt & UndefElts2, APInt & UndefElts3,
+      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+      APInt &UndefElts2, APInt &UndefElts3,
       std::function<void(Instruction *, unsigned, APInt, APInt &)>
           SimplifyAndSetOp) const;
   /// @}
@@ -755,14 +761,15 @@ class TargetTransformInfo {
   /// a scalable offset.
   ///
   /// TODO: Handle pre/postinc as well.
-  LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
-                             bool HasBaseReg, int64_t Scale,
-                             unsigned AddrSpace = 0, Instruction *I = nullptr,
-                             int64_t ScalableOffset = 0) const;
+  LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
+                                      int64_t BaseOffset, bool HasBaseReg,
+                                      int64_t Scale, unsigned AddrSpace = 0,
+                                      Instruction *I = nullptr,
+                                      int64_t ScalableOffset = 0) const;
 
   /// Return true if LSR cost of C1 is lower than C2.
   LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2) const;
+                              const TargetTransformInfo::LSRCost &C2) const;
 
   /// Return true if LSR major cost is number of registers. Targets which
   /// implement their own isLSRCostLess and unset number of registers as major
@@ -783,9 +790,9 @@ class TargetTransformInfo {
 
   /// Return true if the target can save a compare for loop count, for example
   /// hardware loop saves a compare.
-  LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
-                  DominatorTree *DT, AssumptionCache *AC,
-                  TargetLibraryInfo *LibInfo) const;
+  LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
+                           LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
+                           TargetLibraryInfo *LibInfo) const;
 
   enum AddressingModeKind {
     AMK_PreIndexed,
@@ -794,15 +801,15 @@ class TargetTransformInfo {
   };
 
   /// Return the preferred addressing mode LSR should make efforts to generate.
-  LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L,
-                                                ScalarEvolution *SE) const;
+  LLVM_ABI AddressingModeKind
+  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
 
   /// Return true if the target supports masked store.
   LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned AddressSpace) const;
+                                   unsigned AddressSpace) const;
   /// Return true if the target supports masked load.
   LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned AddressSpace) const;
+                                  unsigned AddressSpace) const;
 
   /// Return true if the target supports nontemporal store.
   LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
@@ -811,7 +818,8 @@ class TargetTransformInfo {
 
   /// \Returns true if the target supports broadcasting a load to a vector of
   /// type <NumElements x ElementTy>.
-  LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
+  LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy,
+                                     ElementCount NumElements) const;
 
   /// Return true if the target supports masked scatter.
   LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
@@ -819,13 +827,16 @@ class TargetTransformInfo {
   LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
   /// Return true if the target forces scalarizing of llvm.masked.gather
   /// intrinsics.
-  LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
+  LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type,
+                                           Align Alignment) const;
   /// Return true if the target forces scalarizing of llvm.masked.scatter
   /// intrinsics.
-  LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;
+  LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type,
+                                            Align Alignment) const;
 
   /// Return true if the target supports masked compress store.
-  LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
+  LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType,
+                                           Align Alignment) const;
   /// Return true if the target supports masked expand load.
   LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;
 
@@ -836,10 +847,12 @@ class TargetTransformInfo {
   /// type \p VTy, interleave factor \p Factor, alignment \p Alignment and
   /// address space \p AddrSpace.
   LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
-                                    Align Alignment, unsigned AddrSpace) const;
+                                             Align Alignment,
+                                             unsigned AddrSpace) const;
 
   // Return true if the target supports masked vector histograms.
-  LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const;
+  LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType,
+                                             Type *DataType) const;
 
   /// Return true if this is an alternating opcode pattern that can be lowered
   /// to a single instruction on the target. In X86 this is for the addsub
@@ -848,8 +861,9 @@ class TargetTransformInfo {
   /// selected by \p OpcodeMask. The mask contains one bit per lane and is a `0`
   /// when \p Opcode0 is selected and `1` when Opcode1 is selected.
   /// \p VecTy is the vector type of the instruction to be generated.
-  LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
-                       const SmallBitVector &OpcodeMask) const;
+  LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
+                                unsigned Opcode1,
+                                const SmallBitVector &OpcodeMask) const;
 
   /// Return true if we should be enabling ordered reductions for the target.
   LLVM_ABI bool enableOrderedReductions() const;
@@ -878,9 +892,9 @@ class TargetTransformInfo {
   /// If the AM is not supported, it returns a negative value.
   /// TODO: Handle pre/postinc as well.
   LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
-                                       StackOffset BaseOffset, bool HasBaseReg,
-                                       int64_t Scale,
-                                       unsigned AddrSpace = 0) const;
+                                                StackOffset BaseOffset,
+                                                bool HasBaseReg, int64_t Scale,
+                                                unsigned AddrSpace = 0) const;
 
   /// Return true if the loop strength reduce pass should make
   /// Instruction* based TTI queries to isLegalAddressingMode(). This is
@@ -924,37 +938,35 @@ class TargetTransformInfo {
 
   /// Identifies if the vector form of the intrinsic has a scalar operand.
   LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
-                                          unsigned ScalarOpdIdx) const;
+                                                   unsigned ScalarOpdIdx) const;
 
   /// Identifies if the vector form of the intrinsic is overloaded on the type
   /// of the operand at index \p OpdIdx, or on the return type if \p OpdIdx is
   /// -1.
   LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
-                                              int OpdIdx) const;
+                                                       int OpdIdx) const;
 
   /// Identifies if the vector form of the intrinsic that returns a struct is
   /// overloaded at the struct element index \p RetIdx.
-  LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
-                                                        int RetIdx) const;
+  LLVM_ABI bool
+  isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
+                                                   int RetIdx) const;
 
   /// Estimate the overhead of scalarizing an instruction. Insert and Extract
   /// are set if the demanded result elements need to be inserted and/or
   /// extracted from vectors.  The involved values may be passed in VL if
   /// Insert is true.
-  LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty,
-                                           const APInt &DemandedElts,
-                                           bool Insert, bool Extract,
-                                           TTI::TargetCostKind CostKind,
-                                           bool ForPoisonSrc = true,
-                                           ArrayRef<Value *> VL = {}) const;
+  LLVM_ABI InstructionCost getScalarizationOverhead(
+      VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
+      TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
+      ArrayRef<Value *> VL = {}) const;
 
   /// Estimate the overhead of scalarizing an instructions unique
   /// non-constant operands. The (potentially vector) types to use for each of
   /// argument are passes via Tys.
-  LLVM_ABI InstructionCost
-  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
-                                   ArrayRef<Type *> Tys,
-                                   TTI::TargetCostKind CostKind) const;
+  LLVM_ABI InstructionCost getOperandsScalarizationOverhead(
+      ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
+      TTI::TargetCostKind CostKind) const;
 
   /// If target has efficient vector element load/store instructions, it can
   /// return true here so that insertion/extraction costs are not added to
@@ -1009,7 +1021,7 @@ class TargetTransformInfo {
     SmallVector<unsigned, 4> AllowedTailExpansions;
   };
   LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
-                                               bool IsZeroCmp) const;
+                                                        bool IsZeroCmp) const;
 
   /// Should the Select Optimization pass be enabled and ran.
   LLVM_ABI bool enableSelectOptimize() const;
@@ -1038,10 +1050,11 @@ class TargetTransformInfo {
   LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const;
 
   /// Determine if the target supports unaligned memory accesses.
-  LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
-                                      unsigned AddressSpace = 0,
-                                      Align Alignment = Align(1),
-                                      unsigned *Fast = nullptr) const;
+  LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
+                                               unsigned BitWidth,
+                                               unsigned AddressSpace = 0,
+                                               Align Alignment = Align(1),
+                                               unsigned *Fast = nullptr) const;
 
   /// Return hardware support for population count.
   LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
@@ -1070,18 +1083,18 @@ class TargetTransformInfo {
   /// Return the expected cost of materializing for the given integer
   /// immediate of the specified type.
   LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
-                                TargetCostKind CostKind) const;
+                                         TargetCostKind CostKind) const;
 
   /// Return the expected cost of materialization for the given integer
   /// immediate of the specified type for a given instruction. The cost can be
   /// zero if the immediate can be folded into the specified instruction.
   LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
-                                    const APInt &Imm, Type *Ty,
-                                    TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr) const;
+                                             const APInt &Imm, Type *Ty,
+                                             TargetCostKind CostKind,
+                                             Instruction *Inst = nullptr) const;
   LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
-                                      const APInt &Imm, Type *Ty,
-                                      TargetCostKind CostKind) const;
+                                               const APInt &Imm, Type *Ty,
+                                               TargetCostKind CostKind) const;
 
   /// Return the expected cost for the given integer when optimising
   /// for size. This is different than the other integer immediate cost
@@ -1091,7 +1104,8 @@ class TargetTransformInfo {
   /// the total costs for a constant is calculated (the bigger the cost, the
   /// more beneficial constant hoisting is).
   LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
-                                        const APInt &Imm, Type *Ty) const;
+                                                 const APInt &Imm,
+                                                 Type *Ty) const;
 
   /// It can be advantageous to detach complex constants from their uses to make
   /// their generation cheaper. This hook allows targets to report when such
@@ -1100,7 +1114,7 @@ class TargetTransformInfo {
   /// constants prevents the code generator's ability to transform them into
   /// combinations of simpler operations.
   LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst,
-                                     const Function &Fn) const;
+                                              const Function &Fn) const;
 
   /// @}
 
@@ -1185,7 +1199,8 @@ class TargetTransformInfo {
   /// don't necessarily map onto the register classes used by the backend.
   /// FIXME: It's not currently possible to determine how many registers
   /// are used by the provided type.
-  LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
+  LLVM_ABI unsigned getRegisterClassForType(bool Vector,
+                                            Type *Ty = nullptr) const;
 
   /// \return the target-provided register class name
   LLVM_ABI const char *getRegisterClassName(unsigned ClassID) const;
@@ -1215,7 +1230,8 @@ class TargetTransformInfo {
   /// If false, the vectorization factor will be chosen based on the
   /// size of the widest element type.
   /// \p K Register Kind for vectorization.
-  LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
+  LLVM_ABI bool
+  shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
 
   /// \return The minimum vectorization factor for types of given element
   /// bit width, or 0 if there is no minimum VF. The returned value only
@@ -1237,7 +1253,7 @@ class TargetTransformInfo {
   /// \param ScalarValTy Scalar type of the stored value.
   /// Currently only used by the SLP vectorizer.
   LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
-                             Type *ScalarValTy) const;
+                                      Type *ScalarValTy) const;
 
   /// \return True if it should be considered for address type promotion.
   /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
@@ -1262,7 +1278,8 @@ class TargetTransformInfo {
   LLVM_ABI std::optional<unsigned> getCacheSize(CacheLevel Level) const;
 
   /// \return The associativity of the cache level, if available.
-  LLVM_ABI std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
+  LLVM_ABI std::optional<unsigned>
+  getCacheAssociativity(CacheLevel Level) const;
 
   /// \return The minimum architectural page size for the target.
   LLVM_ABI std::optional<unsigned> getMinPageSize() const;
@@ -1290,8 +1307,9 @@ class TargetTransformInfo {
   ///         adding SW prefetches. The default is 1, i.e. prefetch with any
   ///         stride.
   LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses,
-                                unsigned NumStridedMemAccesses,
-                                unsigned NumPrefetches, bool HasCall) const;
+                                         unsigned NumStridedMemAccesses,
+                                         unsigned NumPrefetches,
+                                         bool HasCall) const;
 
   /// \return The maximum number of iterations to prefetch ahead.  If
   /// the required number of iterations is more than this number, no
@@ -1311,12 +1329,11 @@ class TargetTransformInfo {
   /// two extends. An example of an operation that uses a partial reduction is a
   /// dot product, which reduces two vectors to another of 4 times fewer and 4
   /// times larger elements.
-  LLVM_ABI InstructionCost
-  getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB,
-                          Type *AccumType, ElementCount VF,
-                          PartialReductionExtendKind OpAExtend,
-                          PartialReductionExtendKind OpBExtend,
-                          std::optional<unsigned> BinOp = std::nullopt) const;
+  LLVM_ABI InstructionCost getPartialReductionCost(
+      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
+      ElementCount VF, PartialReductionExtendKind OpAExtend,
+      PartialReductionExtendKind OpBExtend,
+      std::optional<unsigned> BinOp = std::nullopt) const;
 
   /// \return The maximum interleave factor that any transform should try to
   /// perform for this target. This number depends on the level of parallelism
@@ -1372,12 +1389,11 @@ class TargetTransformInfo {
   /// passed through \p Args, which helps improve the cost estimation in some
   /// cases, like in broadcast loads.
   /// NOTE: For subvector extractions Tp represents the source type.
-  LLVM_ABI InstructionCost
-  getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = {},
-                 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-                 int Index = 0, VectorType *SubTp = nullptr,
-                 ArrayRef<const Value *> Args = {},
-                 const Instruction *CxtI = nullptr) const;
+  LLVM_ABI InstructionCost getShuffleCost(
+      ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = {},
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, int Index = 0,
+      VectorType *SubTp = nullptr, ArrayRef<const Value *> Args = {},
+      const Instruction *CxtI = nullptr) const;
 
   /// Represents a hint about the context in which a cast is used.
   ///
@@ -1418,24 +1434,22 @@ class TargetTransformInfo {
   /// \return The expected cost of cast instructions, such as bitcast, trunc,
   /// zext, etc. If there is an existing instruction that holds Opcode, it
   /// may be passed in the 'I' parameter.
-  LLVM_ABI InstructionCost
-  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
-                   TTI::CastContextHint CCH,
-                   TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
-                   const Instruction *I = nullptr) const;
+  LLVM_ABI InstructionCost getCastInstrCost(
+      unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH,
+      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
+      const Instruction *I = nullptr) const;
 
   /// \return The expected cost of a sign- or zero-extended vector extract. Use
   /// Index = -1 to indicate that there is no information about the index value.
-  LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
-                                           VectorType *VecTy, unsigned Index,
-                                           TTI::TargetCostKind CostKind) const;
+  LLVM_ABI InstructionCost
+  getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
+                           unsigned Index, TTI::TargetCostKind CostKind) const;
 
   /// \return The expected cost of control-flow related instructions such as
   /// Phi, Ret, Br, Switch.
-  LLVM_ABI InstructionCost
-  getCFInstrCost(unsigned Opcode,
-                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
-                 const Instruction *I = nullptr) const;
+  LLVM_ABI InstructionCost getCFInstrCost(
+      unsigned Opcode, TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
+      const Instruction *I = nullptr) const;
 
   /// \returns The expected cost of compare and select instructions. If there
   /// is an existing instruction that holds Opcode, it may be passed in the
@@ -1444,13 +1458,12 @@ class TargetTransformInfo {
   /// types are passed, \p VecPred must be used for all lanes.  For a
   /// comparison, the two operands are the natural values.  For a select, the
   /// two operands are the *value* operands, not the condition operand.
-  LLVM_ABI InstructionCost
-  getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
-                     CmpInst::Predicate VecPred,
-                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-                     OperandValueInfo Op1Info = {OK_AnyValue, OP_None},
-                     OperandValueInfo Op2Info = {OK_AnyValue, OP_None},
-                     const Instruction *I = nullptr) const;
+  LLVM_ABI InstructionCost getCmpSelInstrCost(
+      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+      OperandValueInfo Op1Info = {OK_AnyValue, OP_None},
+      OperandValueInfo Op2Info = {OK_AnyValue, OP_None},
+      const Instruction *I = nullptr) const;
 
   /// \return The expected cost of vector Insert and Extract.
   /// Use -1 to indicate that there is no information on the index value.
@@ -1458,10 +1471,10 @@ class TargetTransformInfo {
   /// case is to provision the cost of vectorization/scalarization in
   /// vectorizer passes.
   LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
-                                     TTI::TargetCostKind CostKind,
-                                     unsigned Index = -1,
-                                     const Value *Op0 = nullptr,
-                                     const Value *Op1 = nullptr) const;
+                                              TTI::TargetCostKind CostKind,
+                                              unsigned Index = -1,
+                                              const Value *Op0 = nullptr,
+                                              const Value *Op1 = nullptr) const;
 
   /// \return The expected cost of vector Insert and Extract.
   /// Use -1 to indicate that there is no information on the index value.
@@ -1484,39 +1497,36 @@ class TargetTransformInfo {
   /// A typical suitable use case is cost estimation when vector instruction
   /// exists (e.g., from basic blocks during transformation).
   LLVM_ABI InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
-                                     TTI::TargetCostKind CostKind,
-                                     unsigned Index = -1) const;
+                                              TTI::TargetCostKind CostKind,
+                                              unsigned Index = -1) const;
 
   /// \return The expected cost of aggregate inserts and extracts. This is
   /// used when the instruction is not available; a typical use case is to
   /// provision the cost of vectorization/scalarization in vectorizer passes.
-  LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode,
-                                            TTI::TargetCostKind CostKind) const;
+  LLVM_ABI InstructionCost getInsertExtractValueCost(
+      unsigned Opcode, TTI::TargetCostKind CostKind) const;
 
   /// \return The cost of replication shuffle of \p VF elements typed \p EltTy
   /// \p ReplicationFactor times.
   ///
   /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
   ///   <0,0,0,1,1,1,2,2,2,3,3,3>
-  LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
-                                            int VF,
-                                            const APInt &DemandedDstElts,
-                                            TTI::TargetCostKind CostKind) const;
+  LLVM_ABI InstructionCost getReplicationShuffleCost(
+      Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
+      TTI::TargetCostKind CostKind) const;
 
   /// \return The cost of Load and Store instructions.
-  LLVM_ABI InstructionCost
-  getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
-                  unsigned AddressSpace,
-                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-                  OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
-                  const Instruction *I = nullptr) const;
+  LLVM_ABI InstructionCost getMemoryOpCost(
+      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+      OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
+      const Instruction *I = nullptr) const;
 
   /// \return The cost of VP Load and Store instructions.
-  LLVM_ABI InstructionCost
-  getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
-                    unsigned AddressSpace,
-                    TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
-                    const Instruction *I = nullptr) const;
+  LLVM_ABI InstructionCost getVPMemoryOpCost(
+      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+      const Instruction *I = nullptr) const;
 
   /// \return The cost of masked Load and Store instructions.
   LLVM_ABI InstructionCost getMaskedMemoryOpCost(
@@ -1639,8 +1649,8 @@ class TargetTransformInfo {
   /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
   /// Three cases are handled: 1. scalar instruction 2. vector instruction
   /// 3. scalar instruction which is to be vectorized.
-  LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
-                                        TTI::TargetCostKind CostKind) const;
+  LLVM_ABI InstructionCost getIntrinsicInstrCost(
+      const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const;
 
   /// \returns The cost of Call instructions.
   LLVM_ABI InstructionCost getCallInstrCost(
@@ -1658,22 +1668,23 @@ class TargetTransformInfo {
   /// The 'SE' parameter holds pointer for the scalar evolution object which
   /// is used in order to get the Ptr step value in case of constant stride.
   /// The 'Ptr' parameter holds SCEV of the access pointer.
-  LLVM_ABI InstructionCost getAddressComputationCost(Type *Ty,
-                                            ScalarEvolution *SE = nullptr,
-                                            const SCEV *Ptr = nullptr) const;
+  LLVM_ABI InstructionCost getAddressComputationCost(
+      Type *Ty, ScalarEvolution *SE = nullptr, const SCEV *Ptr = nullptr) const;
 
   /// \returns The cost, if any, of keeping values of the given types alive
   /// over a callsite.
   ///
   /// Some types may require the use of register classes that do not have
   /// any callee-saved registers, so would require a spill and fill.
-  LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
+  LLVM_ABI InstructionCost
+  getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
 
   /// \returns True if the intrinsic is a supported memory intrinsic.  Info
   /// will contain additional information - whether the intrinsic may write
   /// or read to memory, volatility and the pointer.  Info is undefined
   /// if false is returned.
-  LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
+  LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+                                   MemIntrinsicInfo &Info) const;
 
   /// \returns The maximum element size, in bytes, for an element
   /// unordered-atomic memory intrinsic.
@@ -1684,7 +1695,7 @@ class TargetTransformInfo {
   /// memory operation.  Returns nullptr if the target cannot create a result
   /// from the given intrinsic.
   LLVM_ABI Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType) const;
+                                                    Type *ExpectedType) const;
 
   /// \returns The type to use in a loop expansion of a memcpy call.
   LLVM_ABI Type *getMemcpyLoopLoweringType(
@@ -1707,7 +1718,7 @@ class TargetTransformInfo {
   /// \returns True if the two functions have compatible attributes for inlining
   /// purposes.
   LLVM_ABI bool areInlineCompatible(const Function *Caller,
-                           const Function *Callee) const;
+                                    const Function *Callee) const;
 
   /// Returns a penalty for invoking call \p Call in \p F.
   /// For example, if a function F calls a function G, which in turn calls
@@ -1715,15 +1726,17 @@ class TargetTransformInfo {
   /// penalty of calling H from F, e.g. after inlining G into F.
   /// \p DefaultCallPenalty is passed to give a default penalty that
   /// the target can amend or override.
-  LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
-                                unsigned DefaultCallPenalty) const;
+  LLVM_ABI unsigned getInlineCallPenalty(const Function *F,
+                                         const CallBase &Call,
+                                         unsigned DefaultCallPenalty) const;
 
   /// \returns True if the caller and callee agree on how \p Types will be
   /// passed to or returned from the callee.
   /// to the callee.
   /// \param Types List of types to check.
-  LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee,
-                             const ArrayRef<Type *> &Types) const;
+  LLVM_ABI bool areTypesABICompatible(const Function *Caller,
+                                      const Function *Callee,
+                                      const ArrayRef<Type *> &Types) const;
 
   /// The type of load/store indexing.
   enum MemIndexedMode {
@@ -1751,16 +1764,18 @@ class TargetTransformInfo {
   LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const;
 
   /// \returns True if it is legal to vectorize the given load chain.
-  LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
-                                   unsigned AddrSpace) const;
+  LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
+                                            Align Alignment,
+                                            unsigned AddrSpace) const;
 
   /// \returns True if it is legal to vectorize the given store chain.
-  LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
-                                    unsigned AddrSpace) const;
+  LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
+                                             Align Alignment,
+                                             unsigned AddrSpace) const;
 
   /// \returns True if it is legal to vectorize the given reduction kind.
   LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
-                                   ElementCount VF) const;
+                                            ElementCount VF) const;
 
   /// \returns True if the given type is supported for scalable vectors
   LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const;
@@ -1768,14 +1783,14 @@ class TargetTransformInfo {
   /// \returns The new vector factor value if the target doesn't support \p
   /// SizeInBytes loads or has a better vector factor.
   LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
-                               unsigned ChainSizeInBytes,
-                               VectorType *VecTy) const;
+                                        unsigned ChainSizeInBytes,
+                                        VectorType *VecTy) const;
 
   /// \returns The new vector factor value if the target doesn't support \p
   /// SizeInBytes stores or has a better vector factor.
   LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
-                                unsigned ChainSizeInBytes,
-                                VectorType *VecTy) const;
+                                         unsigned ChainSizeInBytes,
+                                         VectorType *VecTy) const;
 
   /// \returns True if the targets prefers fixed width vectorization if the
   /// loop vectorizer's cost-model assigns an equal cost to the fixed and
@@ -1839,7 +1854,7 @@ class TargetTransformInfo {
   /// Reference - "Vector Predication Intrinsics").
   /// Use of %evl is discouraged when that is not the case.
   LLVM_ABI bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
-                             Align Alignment) const;
+                                      Align Alignment) const;
 
   /// Return true if sinking I's operands to the same basic block as I is
   /// profitable, e.g. because the operands can be folded into a target
@@ -1847,7 +1862,7 @@ class TargetTransformInfo {
   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
   /// come first).
   LLVM_ABI bool isProfitableToSinkOperands(Instruction *I,
-                                  SmallVectorImpl<Use *> &Ops) const;
+                                           SmallVectorImpl<Use *> &Ops) const;
 
   /// Return true if it's significantly cheaper to shift a vector by a uniform
   /// scalar than by an amount which will vary across each lane. On x86 before
@@ -1887,7 +1902,8 @@ class TargetTransformInfo {
 
   /// \returns How the target needs this vector-predicated operation to be
   /// transformed.
-  LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
+  LLVM_ABI VPLegalization
+  getVPLegalizationStrategy(const VPIntrinsic &PI) const;
   /// @}
 
   /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
@@ -1913,7 +1929,8 @@ class TargetTransformInfo {
 
   /// \return For an array of given Size, return alignment boundary to
   /// pad to. Default is no padding.
-  LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const;
+  LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size,
+                                                Type *ArrayType) const;
 
   /// @}
 
@@ -1951,7 +1968,8 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
   ///
   /// The callback will be called with a particular function for which the TTI
   /// is needed and must return a TTI object for that function.
-  LLVM_ABI TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
+  LLVM_ABI
+  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
 
   // Value semantics. We spell out the constructors for MSVC.
   TargetIRAnalysis(const TargetIRAnalysis &Arg)
@@ -2017,7 +2035,8 @@ class LLVM_ABI TargetTransformInfoWrapperPass : public ImmutablePass {
 ///
 /// This analysis pass just holds the TTI instance and makes it available to
 /// clients.
-LLVM_ABI ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
+LLVM_ABI ImmutablePass *
+createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index d39b196363e67..d432ce8a203c4 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -9,8 +9,8 @@
 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
 #define LLVM_ANALYSIS_TENSORSPEC_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
 
 #include "llvm/ADT/StringMap.h"
 #include "llvm/IR/LLVMContext.h"
@@ -100,7 +100,7 @@ class TensorSpec final {
 
 private:
   LLVM_ABI TensorSpec(const std::string &Name, int Port, TensorType Type,
-             size_t ElementSize, const std::vector<int64_t> &Shape);
+                      size_t ElementSize, const std::vector<int64_t> &Shape);
 
   template <typename T> static TensorType getDataType();
 
@@ -113,7 +113,8 @@ class TensorSpec final {
 };
 
 /// For debugging.
-LLVM_ABI std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);
+LLVM_ABI std::string tensorValueToString(const char *Buffer,
+                                         const TensorSpec &Spec);
 
 /// Construct a TensorSpec from a JSON dictionary of the form:
 /// { "name": <string>,
@@ -122,8 +123,8 @@ LLVM_ABI std::string tensorValueToString(const char *Buffer, const TensorSpec &S
 ///   "shape": <array of ints> }
 /// For the "type" field, see the C++ primitive types used in
 /// TFUTILS_SUPPORTED_TYPES.
-LLVM_ABI std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
-                                                const json::Value &Value);
+LLVM_ABI std::optional<TensorSpec>
+getTensorSpecFromJSON(LLVMContext &Ctx, const json::Value &Value);
 
 #define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
   template <> LLVM_ABI TensorType TensorSpec::getDataType<T>();
diff --git a/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
index 8b570a7ed0073..ddd80f9f58a57 100644
--- a/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -15,10 +15,10 @@
 #ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
 #define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 
 namespace llvm {
@@ -47,17 +47,20 @@ class TypeBasedAAResult : public AAResultBase {
     return false;
   }
 
-  LLVM_ABI AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
-                    AAQueryInfo &AAQI, const Instruction *CtxI);
-  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
-                               bool IgnoreLocals);
+  LLVM_ABI AliasResult alias(const MemoryLocation &LocA,
+                             const MemoryLocation &LocB, AAQueryInfo &AAQI,
+                             const Instruction *CtxI);
+  LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
+                                        AAQueryInfo &AAQI, bool IgnoreLocals);
 
-  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
+  LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call,
+                                          AAQueryInfo &AAQI);
   LLVM_ABI MemoryEffects getMemoryEffects(const Function *F);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
-                           AAQueryInfo &AAQI);
-  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
-                           AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call,
+                                    const MemoryLocation &Loc,
+                                    AAQueryInfo &AAQI);
+  LLVM_ABI ModRefInfo getModRefInfo(const CallBase *Call1,
+                                    const CallBase *Call2, AAQueryInfo &AAQI);
 
 private:
   bool Aliases(const MDNode *A, const MDNode *B) const;
diff --git a/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h b/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
index 25d6477613556..1cb29470e2ff7 100644
--- a/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
+++ b/llvm/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
@@ -12,10 +12,10 @@
 #ifndef LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
 #define LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
 #include <memory>
 #include <vector>
 
diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
index 4cd805fcbe5fb..d43f2ae344625 100644
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -28,8 +28,8 @@ class Value;
 /// pointer). Return the result as a signed integer of intptr size.
 /// When NoAssumptions is true, no assumptions about index computation not
 /// overflowing is made.
-LLVM_ABI Value *emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP,
-                     bool NoAssumptions = false);
+LLVM_ABI Value *emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL,
+                              User *GEP, bool NoAssumptions = false);
 
 } // namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
index 45903f60bac87..f8c653e3e3c76 100644
--- a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
+++ b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
@@ -53,8 +53,8 @@
 #ifndef LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
 #define LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
 
 #include "llvm/ADT/StringMap.h"
 #include "llvm/Analysis/TensorSpec.h"
@@ -111,9 +111,9 @@ class Logger final {
   /// corresponding indices) with any MLModelRunner implementations
   /// corresponding to the model being trained/logged.
   LLVM_ABI Logger(std::unique_ptr<raw_ostream> OS,
-         const std::vector<TensorSpec> &FeatureSpecs,
-         const TensorSpec &RewardSpec, bool IncludeReward,
-         std::optional<TensorSpec> AdviceSpec = std::nullopt);
+                  const std::vector<TensorSpec> &FeatureSpecs,
+                  const TensorSpec &RewardSpec, bool IncludeReward,
+                  std::optional<TensorSpec> AdviceSpec = std::nullopt);
 
   LLVM_ABI void switchContext(StringRef Name);
   LLVM_ABI void startObservation();
diff --git a/llvm/include/llvm/Analysis/ValueLattice.h b/llvm/include/llvm/Analysis/ValueLattice.h
index a04714c829336..262ff58f07dfd 100644
--- a/llvm/include/llvm/Analysis/ValueLattice.h
+++ b/llvm/include/llvm/Analysis/ValueLattice.h
@@ -9,9 +9,9 @@
 #ifndef LLVM_ANALYSIS_VALUELATTICE_H
 #define LLVM_ANALYSIS_VALUELATTICE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/Support/Compiler.h"
 
 //===----------------------------------------------------------------------===//
 //                               ValueLatticeElement
@@ -469,8 +469,8 @@ class ValueLatticeElement {
   /// true, false or undef constants, or nullptr if the comparison cannot be
   /// evaluated.
   LLVM_ABI Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
-                       const ValueLatticeElement &Other,
-                       const DataLayout &DL) const;
+                                const ValueLatticeElement &Other,
+                                const DataLayout &DL) const;
 
   /// Combine two sets of facts about the same value into a single set of
   /// facts.  Note that this method is not suitable for merging facts along
@@ -487,7 +487,8 @@ class ValueLatticeElement {
   ///   as not confuse the rest of LVI.  Ideally, we'd always return Undefined,
   ///   but we do not make this guarantee.  TODO: This would be a useful
   ///   enhancement.
-  LLVM_ABI ValueLatticeElement intersect(const ValueLatticeElement &Other) const;
+  LLVM_ABI ValueLatticeElement
+  intersect(const ValueLatticeElement &Other) const;
 
   unsigned getNumRangeExtensions() const { return NumRangeExtensions; }
   void setNumRangeExtensions(unsigned N) { NumRangeExtensions = N; }
@@ -496,6 +497,7 @@ class ValueLatticeElement {
 static_assert(sizeof(ValueLatticeElement) <= 40,
               "size of ValueLatticeElement changed unexpectedly");
 
-LLVM_ABI raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val);
+LLVM_ABI raw_ostream &operator<<(raw_ostream &OS,
+                                 const ValueLatticeElement &Val);
 } // end namespace llvm
 #endif
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index aa1e012af5ef5..2629dde01d317 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -14,15 +14,15 @@
 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
 #define LLVM_ANALYSIS_VALUETRACKING_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/Analysis/SimplifyQuery.h"
 #include "llvm/Analysis/WithCache.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/FMF.h"
-#include "llvm/IR/Instructions.h"
 #include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Compiler.h"
 #include <cassert>
 #include <cstdint>
 
@@ -53,61 +53,67 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
 /// where V is a vector, the known zero and known one values are the
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
-LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
-                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
-                      const Instruction *CxtI = nullptr,
-                      const DominatorTree *DT = nullptr,
-                      bool UseInstrInfo = true);
+LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known,
+                               const DataLayout &DL, unsigned Depth = 0,
+                               AssumptionCache *AC = nullptr,
+                               const Instruction *CxtI = nullptr,
+                               const DominatorTree *DT = nullptr,
+                               bool UseInstrInfo = true);
 
 /// Returns the known bits rather than passing by reference.
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
-                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
-                           const Instruction *CxtI = nullptr,
-                           const DominatorTree *DT = nullptr,
-                           bool UseInstrInfo = true);
+                                    unsigned Depth = 0,
+                                    AssumptionCache *AC = nullptr,
+                                    const Instruction *CxtI = nullptr,
+                                    const DominatorTree *DT = nullptr,
+                                    bool UseInstrInfo = true);
 
 /// Returns the known bits rather than passing by reference.
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                           const DataLayout &DL, unsigned Depth = 0,
-                           AssumptionCache *AC = nullptr,
-                           const Instruction *CxtI = nullptr,
-                           const DominatorTree *DT = nullptr,
-                           bool UseInstrInfo = true);
+                                    const DataLayout &DL, unsigned Depth = 0,
+                                    AssumptionCache *AC = nullptr,
+                                    const Instruction *CxtI = nullptr,
+                                    const DominatorTree *DT = nullptr,
+                                    bool UseInstrInfo = true);
 
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                           unsigned Depth, const SimplifyQuery &Q);
+                                    unsigned Depth, const SimplifyQuery &Q);
 
 LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                           const SimplifyQuery &Q);
+                                    const SimplifyQuery &Q);
 
 LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                      const SimplifyQuery &Q);
+                               const SimplifyQuery &Q);
 
 /// Compute known bits from the range metadata.
 /// \p KnownZero the set of bits that are known to be zero
 /// \p KnownOne the set of bits that are known to be one
-LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
+LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
+                                                KnownBits &Known);
 
 /// Merge bits known from context-dependent facts into Known.
 LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
-                                 unsigned Depth, const SimplifyQuery &Q);
+                                          unsigned Depth,
+                                          const SimplifyQuery &Q);
 
 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
 LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
-                                       const KnownBits &KnownLHS,
-                                       const KnownBits &KnownRHS,
-                                       unsigned Depth, const SimplifyQuery &SQ);
+                                                const KnownBits &KnownLHS,
+                                                const KnownBits &KnownRHS,
+                                                unsigned Depth,
+                                                const SimplifyQuery &SQ);
 
 /// Adjust \p Known for the given select \p Arm to include information from the
 /// select \p Cond.
-LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm,
-                                 bool Invert, unsigned Depth,
-                                 const SimplifyQuery &Q);
+LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
+                                          Value *Arm, bool Invert,
+                                          unsigned Depth,
+                                          const SimplifyQuery &Q);
 
 /// Return true if LHS and RHS have no common bits set.
 LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
-                         const WithCache<const Value *> &RHSCache,
-                         const SimplifyQuery &SQ);
+                                  const WithCache<const Value *> &RHSCache,
+                                  const SimplifyQuery &SQ);
 
 /// Return true if the given value is known to have exactly one bit set when
 /// defined. For vectors return true if every element is known to be a power
@@ -115,14 +121,14 @@ LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
 /// vectors of integers. If 'OrZero' is set, then return true if the given
 /// value is either a power of two or zero.
 LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
-                            bool OrZero = false, unsigned Depth = 0,
-                            AssumptionCache *AC = nullptr,
-                            const Instruction *CxtI = nullptr,
-                            const DominatorTree *DT = nullptr,
-                            bool UseInstrInfo = true);
+                                     bool OrZero = false, unsigned Depth = 0,
+                                     AssumptionCache *AC = nullptr,
+                                     const Instruction *CxtI = nullptr,
+                                     const DominatorTree *DT = nullptr,
+                                     bool UseInstrInfo = true);
 
-LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
-                            const SimplifyQuery &Q);
+LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero,
+                                     unsigned Depth, const SimplifyQuery &Q);
 
 LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI);
 
@@ -134,14 +140,15 @@ LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
 /// specified, perform context-sensitive analysis and return true if the
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
-LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
+LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q,
+                             unsigned Depth = 0);
 
 /// Return true if the two given values are negation.
 /// Currently can recoginze Value pair:
 /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
 /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
-LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
-                     bool AllowPoison = true);
+LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y,
+                              bool NeedNSW = false, bool AllowPoison = true);
 
 /// Return true iff:
 /// 1. X is poison implies Y is poison.
@@ -152,22 +159,22 @@ LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y);
 
 /// Returns true if the give value is known to be non-negative.
 LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
-                        unsigned Depth = 0);
+                                 unsigned Depth = 0);
 
 /// Returns true if the given value is known be positive (i.e. non-negative
 /// and non-zero).
 LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
-                     unsigned Depth = 0);
+                              unsigned Depth = 0);
 
 /// Returns true if the given value is known be negative (i.e. non-positive
 /// and non-zero).
 LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
-                     unsigned Depth = 0);
+                              unsigned Depth = 0);
 
 /// Return true if the given values are known to be non-equal when defined.
 /// Supports scalar integer types only.
-LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ,
-                     unsigned Depth = 0);
+LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2,
+                              const SimplifyQuery &SQ, unsigned Depth = 0);
 
 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
 /// simplify operations downstream. Mask is known to be zero for bits that V
@@ -179,7 +186,7 @@ LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQu
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask,
-                       const SimplifyQuery &SQ, unsigned Depth = 0);
+                                const SimplifyQuery &SQ, unsigned Depth = 0);
 
 /// Return the number of times the sign bit of the register is replicated into
 /// the other bits. We know that at least 1 bit is always equal to the sign
@@ -189,30 +196,32 @@ LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask,
 /// sign bits for the vector element with the mininum number of known sign
 /// bits.
 LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
-                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
-                            const Instruction *CxtI = nullptr,
-                            const DominatorTree *DT = nullptr,
-                            bool UseInstrInfo = true);
+                                     unsigned Depth = 0,
+                                     AssumptionCache *AC = nullptr,
+                                     const Instruction *CxtI = nullptr,
+                                     const DominatorTree *DT = nullptr,
+                                     bool UseInstrInfo = true);
 
 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
 /// Similar to the APInt::getSignificantBits function.
-LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
-                                   unsigned Depth = 0,
-                                   AssumptionCache *AC = nullptr,
-                                   const Instruction *CxtI = nullptr,
-                                   const DominatorTree *DT = nullptr);
+LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op,
+                                            const DataLayout &DL,
+                                            unsigned Depth = 0,
+                                            AssumptionCache *AC = nullptr,
+                                            const Instruction *CxtI = nullptr,
+                                            const DominatorTree *DT = nullptr);
 
 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
 /// intrinsics are treated as-if they were intrinsics.
 LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
-                                      const TargetLibraryInfo *TLI);
+                                               const TargetLibraryInfo *TLI);
 
 /// Given an exploded icmp instruction, return true if the comparison only
 /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
 /// the result of the comparison is true when the input value is signed.
 LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
-                    bool &TrueIfSigned);
+                             bool &TrueIfSigned);
 
 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
 /// same result as an fcmp with the given operands.
@@ -222,14 +231,12 @@ LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
 ///
 /// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
 /// element will always be LHS.
-LLVM_ABI std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
-                                                const Function &F, Value *LHS,
-                                                Value *RHS,
-                                                bool LookThroughSrc = true);
-LLVM_ABI std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
-                                                const Function &F, Value *LHS,
-                                                const APFloat *ConstRHS,
-                                                bool LookThroughSrc = true);
+LLVM_ABI std::pair<Value *, FPClassTest>
+fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS,
+                Value *RHS, bool LookThroughSrc = true);
+LLVM_ABI std::pair<Value *, FPClassTest>
+fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS,
+                const APFloat *ConstRHS, bool LookThroughSrc = true);
 
 /// Compute the possible floating-point classes that \p LHS could be based on
 /// fcmp \Pred \p LHS, \p RHS.
@@ -267,37 +274,39 @@ fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
 /// point classes should be queried. Queries not specified in \p
 /// InterestedClasses should be reliable if they are determined during the
 /// query.
-LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
-                                 FPClassTest InterestedClasses, unsigned Depth,
-                                 const SimplifyQuery &SQ);
-
-LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
-                                 unsigned Depth, const SimplifyQuery &SQ);
-
-LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const DataLayout &DL,
-                                 FPClassTest InterestedClasses = fcAllFlags,
-                                 unsigned Depth = 0,
-                                 const TargetLibraryInfo *TLI = nullptr,
-                                 AssumptionCache *AC = nullptr,
-                                 const Instruction *CxtI = nullptr,
-                                 const DominatorTree *DT = nullptr,
-                                 bool UseInstrInfo = true);
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
+                                          const APInt &DemandedElts,
+                                          FPClassTest InterestedClasses,
+                                          unsigned Depth,
+                                          const SimplifyQuery &SQ);
+
+LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
+                                          FPClassTest InterestedClasses,
+                                          unsigned Depth,
+                                          const SimplifyQuery &SQ);
+
+LLVM_ABI KnownFPClass computeKnownFPClass(
+    const Value *V, const DataLayout &DL,
+    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
+    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
+    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
+    bool UseInstrInfo = true);
 
 /// Wrapper to account for known fast math flags at the use instruction.
-LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
-                                 FastMathFlags FMF,
-                                 FPClassTest InterestedClasses, unsigned Depth,
-                                 const SimplifyQuery &SQ);
+LLVM_ABI KnownFPClass computeKnownFPClass(
+    const Value *V, const APInt &DemandedElts, FastMathFlags FMF,
+    FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ);
 
 LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
-                                 FPClassTest InterestedClasses, unsigned Depth,
-                                 const SimplifyQuery &SQ);
+                                          FPClassTest InterestedClasses,
+                                          unsigned Depth,
+                                          const SimplifyQuery &SQ);
 
 /// Return true if we can prove that the specified FP value is never equal to
 /// -0.0. Users should use caution when considering PreserveSign
 /// denormal-fp-math.
 LLVM_ABI bool cannotBeNegativeZero(const Value *V, unsigned Depth,
-                          const SimplifyQuery &SQ);
+                                   const SimplifyQuery &SQ);
 
 /// Return true if we can prove that the specified FP value is either NaN or
 /// never less than -0.0.
@@ -308,28 +317,29 @@ LLVM_ABI bool cannotBeNegativeZero(const Value *V, unsigned Depth,
 ///   x > +0 --> true
 ///   x < -0 --> false
 LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
-                                 const SimplifyQuery &SQ);
+                                          const SimplifyQuery &SQ);
 
 /// Return true if the floating-point scalar value is not an infinity or if
 /// the floating-point vector value has no infinities. Return false if a value
 /// could ever be infinity.
 LLVM_ABI bool isKnownNeverInfinity(const Value *V, unsigned Depth,
-                          const SimplifyQuery &SQ);
+                                   const SimplifyQuery &SQ);
 
 /// Return true if the floating-point value can never contain a NaN or infinity.
 LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
-                          const SimplifyQuery &SQ);
+                                   const SimplifyQuery &SQ);
 
 /// Return true if the floating-point scalar value is not a NaN or if the
 /// floating-point vector value has no NaN elements. Return false if a value
 /// could ever be NaN.
-LLVM_ABI bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ);
+LLVM_ABI bool isKnownNeverNaN(const Value *V, unsigned Depth,
+                              const SimplifyQuery &SQ);
 
 /// Return false if we can prove that the specified FP value's sign bit is 0.
 /// Return true if we can prove that the specified FP value's sign bit is 1.
 /// Otherwise return std::nullopt.
-LLVM_ABI std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
-                                          const SimplifyQuery &SQ);
+LLVM_ABI std::optional<bool>
+computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ);
 
 /// If the specified value can be set by repeating the same byte in memory,
 /// return the i8 value that it is represented with. This is true for all i8
@@ -374,7 +384,8 @@ GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
 
 /// Returns true if the GEP is based on a pointer to a string (array of
 // \p CharSize integers) and is indexing into this string.
-LLVM_ABI bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
+LLVM_ABI bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
+                                          unsigned CharSize = 8);
 
 /// Represents offset+length into a ConstantDataArray.
 struct ConstantDataArraySlice {
@@ -404,8 +415,10 @@ struct ConstantDataArraySlice {
 /// Returns true if the value \p V is a pointer into a ConstantDataArray.
 /// If successful \p Slice will point to a ConstantDataArray info object
 /// with an appropriate offset.
-LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
-                              unsigned ElementSize, uint64_t Offset = 0);
+LLVM_ABI bool getConstantDataArrayInfo(const Value *V,
+                                       ConstantDataArraySlice &Slice,
+                                       unsigned ElementSize,
+                                       uint64_t Offset = 0);
 
 /// This function computes the length of a null-terminated C string pointed to
 /// by V. If successful, it returns true and returns the string in Str. If
@@ -414,7 +427,7 @@ LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &S
 /// trailing null characters as well as any other characters that come after
 /// it.
 LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str,
-                           bool TrimAtNul = true);
+                                    bool TrimAtNul = true);
 
 /// If we can compute the length of the string pointed to by the specified
 /// pointer, return 'len+1'.  If we can't, return 0.
@@ -424,8 +437,9 @@ LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
 /// aliasing rules. You CAN'T use it to replace one value with another. If
 /// \p MustPreserveNullness is true, the call must preserve the nullness of
 /// the pointer.
-LLVM_ABI const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
-                                                  bool MustPreserveNullness);
+LLVM_ABI const Value *
+getArgumentAliasingToReturnedPointer(const CallBase *Call,
+                                     bool MustPreserveNullness);
 inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                    bool MustPreserveNullness) {
   return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
@@ -446,7 +460,8 @@ LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
 /// original object being addressed. Note that the returned value has pointer
 /// type if the specified value does. If the \p MaxLookup value is non-zero, it
 /// limits the number of instructions to be stripped off.
-LLVM_ABI const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
+LLVM_ABI const Value *getUnderlyingObject(const Value *V,
+                                          unsigned MaxLookup = 6);
 inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
   // Force const to avoid infinite recursion.
   const Value *VConst = V;
@@ -486,13 +501,14 @@ LLVM_ABI const Value *getUnderlyingObjectAggressive(const Value *V);
 /// should not assume that Curr and Prev share the same underlying object thus
 /// it shouldn't look through the phi above.
 LLVM_ABI void getUnderlyingObjects(const Value *V,
-                          SmallVectorImpl<const Value *> &Objects,
-                          const LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
+                                   SmallVectorImpl<const Value *> &Objects,
+                                   const LoopInfo *LI = nullptr,
+                                   unsigned MaxLookup = 6);
 
 /// This is a wrapper around getUnderlyingObjects and adds support for basic
 /// ptrtoint+arithmetic+inttoptr sequences.
 LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V,
-                                    SmallVectorImpl<Value *> &Objects);
+                                             SmallVectorImpl<Value *> &Objects);
 
 /// Returns unique alloca where the value comes from, or nullptr.
 /// If OffsetZero is true check that V points to the begining of the alloca.
@@ -548,13 +564,11 @@ LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I);
 ///
 /// This method can return true for instructions that read memory;
 /// for such instructions, moving them may change the resulting value.
-LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I,
-                                  const Instruction *CtxI = nullptr,
-                                  AssumptionCache *AC = nullptr,
-                                  const DominatorTree *DT = nullptr,
-                                  const TargetLibraryInfo *TLI = nullptr,
-                                  bool UseVariableInfo = true,
-                                  bool IgnoreUBImplyingAttrs = true);
+LLVM_ABI bool isSafeToSpeculativelyExecute(
+    const Instruction *I, const Instruction *CtxI = nullptr,
+    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
+    const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true,
+    bool IgnoreUBImplyingAttrs = true);
 
 inline bool isSafeToSpeculativelyExecute(const Instruction *I,
                                          BasicBlock::iterator CtxI,
@@ -621,9 +635,10 @@ LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I);
 /// to optimize away its argument. If the caller can ensure that this won't
 /// happen, it can call with AllowEphemerals set to true to get more valid
 /// assumptions.
-LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
-                             const DominatorTree *DT = nullptr,
-                             bool AllowEphemerals = false);
+LLVM_ABI bool isValidAssumeForContext(const Instruction *I,
+                                      const Instruction *CxtI,
+                                      const DominatorTree *DT = nullptr,
+                                      bool AllowEphemerals = false);
 
 enum class OverflowResult {
   /// Always overflows in the direction of signed/unsigned min value.
@@ -636,31 +651,34 @@ enum class OverflowResult {
   NeverOverflows,
 };
 
-LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
-                                             const SimplifyQuery &SQ,
-                                             bool IsNSW = false);
-LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
-                                           const SimplifyQuery &SQ);
-LLVM_ABI OverflowResult
-computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
-                              const WithCache<const Value *> &RHS,
-                              const SimplifyQuery &SQ);
-LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
-                                           const WithCache<const Value *> &RHS,
-                                           const SimplifyQuery &SQ);
+LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
+                                                      const Value *RHS,
+                                                      const SimplifyQuery &SQ,
+                                                      bool IsNSW = false);
+LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS,
+                                                    const Value *RHS,
+                                                    const SimplifyQuery &SQ);
+LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(
+    const WithCache<const Value *> &LHS, const WithCache<const Value *> &RHS,
+    const SimplifyQuery &SQ);
+LLVM_ABI OverflowResult computeOverflowForSignedAdd(
+    const WithCache<const Value *> &LHS, const WithCache<const Value *> &RHS,
+    const SimplifyQuery &SQ);
 /// This version also leverages the sign bit of Add if known.
 LLVM_ABI OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
-                                           const SimplifyQuery &SQ);
-LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
-                                             const SimplifyQuery &SQ);
-LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
-                                           const SimplifyQuery &SQ);
+                                                    const SimplifyQuery &SQ);
+LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
+                                                      const Value *RHS,
+                                                      const SimplifyQuery &SQ);
+LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS,
+                                                    const Value *RHS,
+                                                    const SimplifyQuery &SQ);
 
 /// Returns true if the arithmetic part of the \p WO 's result is
 /// used only along the paths control dependent on the computation
 /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
 LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
-                               const DominatorTree &DT);
+                                        const DominatorTree &DT);
 
 /// Determine the possible constant range of vscale with the given bit width,
 /// based on the vscale_range function attribute.
@@ -669,16 +687,15 @@ LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
 /// Determine the possible constant range of an integer or vector of integer
 /// value. This is intended as a cheap, non-recursive check.
 LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned,
-                                   bool UseInstrInfo = true,
-                                   AssumptionCache *AC = nullptr,
-                                   const Instruction *CtxI = nullptr,
-                                   const DominatorTree *DT = nullptr,
-                                   unsigned Depth = 0);
+                                            bool UseInstrInfo = true,
+                                            AssumptionCache *AC = nullptr,
+                                            const Instruction *CtxI = nullptr,
+                                            const DominatorTree *DT = nullptr,
+                                            unsigned Depth = 0);
 
 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
-LLVM_ABI ConstantRange
-computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
-                                       bool ForSigned, const SimplifyQuery &SQ);
+LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(
+    const WithCache<const Value *> &V, bool ForSigned, const SimplifyQuery &SQ);
 
 /// Return true if this function can prove that the instruction I will
 /// always transfer execution to one of its successors (including the next
@@ -705,9 +722,10 @@ LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
 /// Return true if every instruction in the range (Begin, End) is
 /// guaranteed to transfer execution to its static successor. \p ScanLimit
 /// bounds the search to avoid scanning huge blocks.
-LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
-    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
-    unsigned ScanLimit = 32);
+LLVM_ABI bool
+isGuaranteedToTransferExecutionToSuccessor(BasicBlock::const_iterator Begin,
+                                           BasicBlock::const_iterator End,
+                                           unsigned ScanLimit = 32);
 
 /// Same as previous, but with range expressed via iterator_range.
 LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
@@ -718,7 +736,7 @@ LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
 ///
 /// Note that this currently only considers the loop header.
 LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
-                                            const Loop *L);
+                                                     const Loop *L);
 
 /// Return true if \p PoisonOp's user yields poison or raises UB if its
 /// operand \p PoisonOp is poison.
@@ -735,7 +753,7 @@ LLVM_ABI bool propagatesPoison(const Use &PoisonOp);
 /// when I is executed with any operands which appear in KnownPoison holding
 /// a poison value at the point of execution.
 LLVM_ABI bool mustTriggerUB(const Instruction *I,
-                   const SmallPtrSetImpl<const Value *> &KnownPoison);
+                            const SmallPtrSetImpl<const Value *> &KnownPoison);
 
 /// Return true if this function can prove that if Inst is executed
 /// and yields a poison value or undef bits, then that will trigger
@@ -765,8 +783,9 @@ LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst);
 /// canCreatePoison returns true if Op can create poison from non-poison
 /// operands.
 LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op,
-                            bool ConsiderFlagsAndMetadata = true);
-LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
+                                     bool ConsiderFlagsAndMetadata = true);
+LLVM_ABI bool canCreatePoison(const Operator *Op,
+                              bool ConsiderFlagsAndMetadata = true);
 
 /// Return true if V is poison given that ValAssumedPoison is already poison.
 /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
@@ -782,17 +801,18 @@ LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
 /// If CtxI and DT are specified this method performs flow-sensitive analysis
 /// and returns true if it is guaranteed to be never undef or poison
 /// immediately before the CtxI.
-LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
-                                      AssumptionCache *AC = nullptr,
-                                      const Instruction *CtxI = nullptr,
-                                      const DominatorTree *DT = nullptr,
-                                      unsigned Depth = 0);
+LLVM_ABI bool
+isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC = nullptr,
+                                 const Instruction *CtxI = nullptr,
+                                 const DominatorTree *DT = nullptr,
+                                 unsigned Depth = 0);
 
 /// Returns true if V cannot be poison, but may be undef.
-LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
-                               const Instruction *CtxI = nullptr,
-                               const DominatorTree *DT = nullptr,
-                               unsigned Depth = 0);
+LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V,
+                                        AssumptionCache *AC = nullptr,
+                                        const Instruction *CtxI = nullptr,
+                                        const DominatorTree *DT = nullptr,
+                                        unsigned Depth = 0);
 
 inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                       BasicBlock::iterator CtxI,
@@ -804,10 +824,11 @@ inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
 }
 
 /// Returns true if V cannot be undef, but may be poison.
-LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
-                              const Instruction *CtxI = nullptr,
-                              const DominatorTree *DT = nullptr,
-                              unsigned Depth = 0);
+LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V,
+                                       AssumptionCache *AC = nullptr,
+                                       const Instruction *CtxI = nullptr,
+                                       const DominatorTree *DT = nullptr,
+                                       unsigned Depth = 0);
 
 /// Return true if undefined behavior would provable be executed on the path to
 /// OnPathTo if Root produced a posion result.  Note that this doesn't say
@@ -817,8 +838,8 @@ LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nul
 /// immediately before it) without introducing UB which didn't previously
 /// exist.  Note that a false result conveys no information.
 LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
-                                   Instruction *OnPathTo,
-                                   DominatorTree *DT);
+                                            Instruction *OnPathTo,
+                                            DominatorTree *DT);
 
 /// Convert an integer comparison with a constant RHS into an equivalent
 /// form with the strictness flipped predicate. Return the new predicate and
@@ -883,9 +904,9 @@ struct SelectPatternResult {
 ///
 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
 ///
-LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
-                                       Instruction::CastOps *CastOp = nullptr,
-                                       unsigned Depth = 0);
+LLVM_ABI SelectPatternResult
+matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
+                   Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
 
 inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                               const Value *&RHS) {
@@ -904,14 +925,14 @@ LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(
     Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
 
 /// Determine the pattern for predicate `X Pred Y ? X : Y`.
-LLVM_ABI SelectPatternResult
-getSelectPattern(CmpInst::Predicate Pred,
-                 SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
-                 bool Ordered = false);
+LLVM_ABI SelectPatternResult getSelectPattern(
+    CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
+    bool Ordered = false);
 
 /// Return the canonical comparison predicate for the specified
 /// minimum/maximum flavor.
-LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);
+LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
+                                          bool Ordered = false);
 
 /// Convert given `SPF` to equivalent min/max intrinsic.
 /// Caller must ensure `SPF` is an integer min or max pattern.
@@ -959,12 +980,12 @@ canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
 ///
 /// NOTE: This is intentional simple.  If you want the ability to analyze
 /// non-trivial loop conditons, see ScalarEvolution instead.
-LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
-                           Value *&Step);
+LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
+                                    Value *&Start, Value *&Step);
 
 /// Analogous to the above, but starting from the binary operator
-LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
-                           Value *&Step);
+LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
+                                    Value *&Start, Value *&Step);
 
 /// Return true if RHS is known to be implied true by LHS.  Return false if
 /// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt if
@@ -976,31 +997,29 @@ LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value
 ///  T | T | F
 ///  F | T | T
 /// (A)
-LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
-                                       const DataLayout &DL,
-                                       bool LHSIsTrue = true,
-                                       unsigned Depth = 0);
-LLVM_ABI std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
-                                       const Value *RHSOp0, const Value *RHSOp1,
-                                       const DataLayout &DL,
-                                       bool LHSIsTrue = true,
-                                       unsigned Depth = 0);
+LLVM_ABI std::optional<bool>
+isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL,
+                   bool LHSIsTrue = true, unsigned Depth = 0);
+LLVM_ABI std::optional<bool>
+isImpliedCondition(const Value *LHS, CmpPredicate RHSPred, const Value *RHSOp0,
+                   const Value *RHSOp1, const DataLayout &DL,
+                   bool LHSIsTrue = true, unsigned Depth = 0);
 
 /// Return the boolean condition value in the context of the given instruction
 /// if it is known based on dominating conditions.
-LLVM_ABI std::optional<bool> isImpliedByDomCondition(const Value *Cond,
-                                            const Instruction *ContextI,
-                                            const DataLayout &DL);
-LLVM_ABI std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
-                                            const Value *RHS,
-                                            const Instruction *ContextI,
-                                            const DataLayout &DL);
+LLVM_ABI std::optional<bool>
+isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI,
+                        const DataLayout &DL);
+LLVM_ABI std::optional<bool>
+isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS, const Value *RHS,
+                        const Instruction *ContextI, const DataLayout &DL);
 
 /// Call \p InsertAffected on all Values whose known bits / value may be
 /// affected by the condition \p Cond. Used by AssumptionCache and
 /// DomConditionCache.
-LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
-                                   function_ref<void(Value *)> InsertAffected);
+LLVM_ABI void
+findValuesAffectedByCondition(Value *Cond, bool IsAssume,
+                              function_ref<void(Value *)> InsertAffected);
 
 } // end namespace llvm
 
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 7d10bfc32ccec..52fe6f6cf43f2 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -13,7 +13,6 @@
 #ifndef LLVM_ANALYSIS_VECTORUTILS_H
 #define LLVM_ANALYSIS_VECTORUTILS_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
@@ -21,6 +20,7 @@
 #include "llvm/IR/VFABIDemangler.h"
 #include "llvm/IR/VectorTypeUtils.h"
 #include "llvm/Support/CheckedArithmetic.h"
+#include "llvm/Support/Compiler.h"
 
 namespace llvm {
 class TargetLibraryInfo;
@@ -145,20 +145,23 @@ LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID);
 /// intrinsic is redundant, but we want to implement scalarization of the
 /// vector. To prevent the requirement that an intrinsic also implements
 /// vectorization we provide this seperate function.
-LLVM_ABI bool isTriviallyScalarizable(Intrinsic::ID ID, const TargetTransformInfo *TTI);
+LLVM_ABI bool isTriviallyScalarizable(Intrinsic::ID ID,
+                                      const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic has a scalar operand.
 /// \p TTI is used to consider target specific intrinsics, if no target specific
 /// intrinsics will be considered then it is appropriate to pass in nullptr.
-LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx,
-                                        const TargetTransformInfo *TTI);
+LLVM_ABI bool
+isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx,
+                                   const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic is overloaded on the type of
 /// the operand at index \p OpdIdx, or on the return type if \p OpdIdx is -1.
 /// \p TTI is used to consider target specific intrinsics, if no target specific
 /// intrinsics will be considered then it is appropriate to pass in nullptr.
-LLVM_ABI bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx,
-                                            const TargetTransformInfo *TTI);
+LLVM_ABI bool
+isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx,
+                                       const TargetTransformInfo *TTI);
 
 /// Identifies if the vector form of the intrinsic that returns a struct is
 /// overloaded at the struct element index \p RetIdx. /// \p TTI is used to
@@ -170,8 +173,8 @@ LLVM_ABI bool isVectorIntrinsicWithStructReturnOverloadAtField(
 /// Returns intrinsic ID for call.
 /// For the input call instruction it finds mapping intrinsic and returns
 /// its intrinsic ID, in case it does not found it return not_intrinsic.
-LLVM_ABI Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
-                                          const TargetLibraryInfo *TLI);
+LLVM_ABI Intrinsic::ID
+getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI);
 
 /// Given a vector and an element number, see if the scalar value is
 /// already around as a register, for example if it were inserted then extracted
@@ -201,8 +204,9 @@ LLVM_ABI bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);
 /// Both \p DemandedLHS and \p DemandedRHS are initialised to [SrcWidth].
 /// \p AllowUndefElts permits "-1" indices to be treated as undef.
 LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
-                            const APInt &DemandedElts, APInt &DemandedLHS,
-                            APInt &DemandedRHS, bool AllowUndefElts = false);
+                                     const APInt &DemandedElts,
+                                     APInt &DemandedLHS, APInt &DemandedRHS,
+                                     bool AllowUndefElts = false);
 
 /// Does this shuffle mask represent either one slide shuffle or a pair of
 /// two slide shuffles, combined with a select on some constant vector mask?
@@ -211,7 +215,7 @@ LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
 /// will be matched a slide by 0.  The output parameter provides the source
 /// (-1 means no source), and slide direction for each slide.
 LLVM_ABI bool isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
-                       std::array<std::pair<int, int>, 2> &SrcInfo);
+                                std::array<std::pair<int, int>, 2> &SrcInfo);
 
 /// Replace each shuffle mask index with the scaled sequential indices for an
 /// equivalent mask of narrowed elements. Mask elements that are less than 0
@@ -225,7 +229,7 @@ LLVM_ABI bool isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
 /// succeeds because the indexes can always be multiplied (scaled up) to map to
 /// narrower vector elements.
 LLVM_ABI void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
-                           SmallVectorImpl<int> &ScaledMask);
+                                    SmallVectorImpl<int> &ScaledMask);
 
 /// Try to transform a shuffle mask by replacing elements with the scaled index
 /// for an equivalent mask of widened elements. If all mask elements that would
@@ -243,13 +247,14 @@ LLVM_ABI void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
 /// succeeds. This transform is not always possible because indexes may not
 /// divide evenly (scale down) to map to wider vector elements.
 LLVM_ABI bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
-                          SmallVectorImpl<int> &ScaledMask);
+                                   SmallVectorImpl<int> &ScaledMask);
 
 /// A variant of the previous method which is specialized for Scale=2, and
 /// treats -1 as undef and allows widening when a wider element is partially
 /// undef in the narrow form of the mask.  This transformation discards
 /// information about which bytes in the original shuffle were undef.
-LLVM_ABI bool widenShuffleMaskElts(ArrayRef<int> M, SmallVectorImpl<int> &NewMask);
+LLVM_ABI bool widenShuffleMaskElts(ArrayRef<int> M,
+                                   SmallVectorImpl<int> &NewMask);
 
 /// Attempt to narrow/widen the \p Mask shuffle mask to the \p NumDstElts target
 /// width. Internally this will call narrowShuffleMaskElts/widenShuffleMaskElts.
@@ -257,12 +262,12 @@ LLVM_ABI bool widenShuffleMaskElts(ArrayRef<int> M, SmallVectorImpl<int> &NewMas
 /// vice-versa). Returns false on failure, and ScaledMask will be in an
 /// undefined state.
 LLVM_ABI bool scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
-                          SmallVectorImpl<int> &ScaledMask);
+                                   SmallVectorImpl<int> &ScaledMask);
 
 /// Repetitively apply `widenShuffleMaskElts()` for as long as it succeeds,
 /// to get the shuffle mask with widest possible elements.
 LLVM_ABI void getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
-                                  SmallVectorImpl<int> &ScaledMask);
+                                           SmallVectorImpl<int> &ScaledMask);
 
 /// Splits and processes shuffle mask depending on the number of input and
 /// output registers. The function does 2 main things: 1) splits the
@@ -296,9 +301,9 @@ LLVM_ABI void processShuffleMasks(
 /// \param DemandedLHS    the demanded elements mask for the left operand
 /// \param DemandedRHS    the demanded elements mask for the right operand
 LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
-                                         const APInt &DemandedElts,
-                                         APInt &DemandedLHS,
-                                         APInt &DemandedRHS);
+                                                  const APInt &DemandedElts,
+                                                  APInt &DemandedLHS,
+                                                  APInt &DemandedRHS);
 
 /// Compute a map of integer instructions to their minimum legal type
 /// size.
@@ -334,10 +339,9 @@ LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
 ///
 /// If the optional TargetTransformInfo is provided, this function tries harder
 /// to do less work by only looking at illegal types.
-LLVM_ABI MapVector<Instruction*, uint64_t>
-computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
-                         DemandedBits &DB,
-                         const TargetTransformInfo *TTI=nullptr);
+LLVM_ABI MapVector<Instruction *, uint64_t>
+computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
+                         const TargetTransformInfo *TTI = nullptr);
 
 /// Compute the union of two access-group lists.
 ///
@@ -352,7 +356,7 @@ LLVM_ABI MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
 /// If the list contains just one access group, it is returned directly. If the
 /// list is empty, returns nullptr.
 LLVM_ABI MDNode *intersectAccessGroups(const Instruction *Inst1,
-                              const Instruction *Inst2);
+                                       const Instruction *Inst2);
 
 /// Add metadata from \p Inst to \p Metadata, if it can be preserved after
 /// vectorization. It can be preserved after vectorization if the kind is one of
@@ -383,8 +387,9 @@ LLVM_ABI Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
 /// Note: The result is a mask of 0's and 1's, as opposed to the other
 /// create[*]Mask() utilities which create a shuffle mask (mask that
 /// consists of indices).
-LLVM_ABI Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
-                               const InterleaveGroup<Instruction> &Group);
+LLVM_ABI Constant *
+createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
+                     const InterleaveGroup<Instruction> &Group);
 
 /// Create a mask with replicated elements.
 ///
@@ -398,8 +403,8 @@ LLVM_ABI Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
 ///
 ///   <0,0,0,1,1,1,2,2,2,3,3,3>
-LLVM_ABI llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFactor,
-                                                unsigned VF);
+LLVM_ABI llvm::SmallVector<int, 16>
+createReplicatedMask(unsigned ReplicationFactor, unsigned VF);
 
 /// Create an interleave shuffle mask.
 ///
@@ -412,7 +417,8 @@ LLVM_ABI llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFac
 /// For example, the mask for VF = 4 and NumVecs = 2 is:
 ///
 ///   <0, 4, 1, 5, 2, 6, 3, 7>.
-LLVM_ABI llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned NumVecs);
+LLVM_ABI llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF,
+                                                         unsigned NumVecs);
 
 /// Create a stride shuffle mask.
 ///
@@ -426,8 +432,8 @@ LLVM_ABI llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned N
 /// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
 ///
 ///   <0, 2, 4, 6>
-LLVM_ABI llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
-                                            unsigned VF);
+LLVM_ABI llvm::SmallVector<int, 16>
+createStrideMask(unsigned Start, unsigned Stride, unsigned VF);
 
 /// Create a sequential shuffle mask.
 ///
@@ -447,7 +453,7 @@ createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs);
 /// mask assuming both operands are identical. This assumes that the unary
 /// shuffle will use elements from operand 0 (operand 1 will be unused).
 LLVM_ABI llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
-                                           unsigned NumElts);
+                                                    unsigned NumElts);
 
 /// Concatenate a list of vectors.
 ///
@@ -456,7 +462,8 @@ LLVM_ABI llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
 /// their element types should be the same. The number of elements in the
 /// vectors should also be the same; however, if the last vector has fewer
 /// elements, it will be padded with undefs.
-LLVM_ABI Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);
+LLVM_ABI Value *concatenateVectors(IRBuilderBase &Builder,
+                                   ArrayRef<Value *> Vecs);
 
 /// Given a mask vector of i1, Return true if all of the elements of this
 /// predicate mask are known to be false or undef.  That is, return true if all
diff --git a/llvm/include/llvm/Analysis/WithCache.h b/llvm/include/llvm/Analysis/WithCache.h
index fccd1a718ab3b..82c230a32297c 100644
--- a/llvm/include/llvm/Analysis/WithCache.h
+++ b/llvm/include/llvm/Analysis/WithCache.h
@@ -14,16 +14,16 @@
 #ifndef LLVM_ANALYSIS_WITHCACHE_H
 #define LLVM_ANALYSIS_WITHCACHE_H
 
-#include "llvm/Support/Compiler.h"
 #include "llvm/ADT/PointerIntPair.h"
 #include "llvm/IR/Value.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/KnownBits.h"
 #include <type_traits>
 
 namespace llvm {
 struct SimplifyQuery;
 LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                           const SimplifyQuery &Q);
+                                    const SimplifyQuery &Q);
 
 template <typename Arg> class WithCache {
   static_assert(std::is_pointer_v<Arg>, "WithCache requires a pointer type!");
diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp
index d70a60615d714..81c9604fe8098 100644
--- a/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -49,13 +49,16 @@ AnalysisKey ShouldNotRunFunctionPassesAnalysis::Key;
 
 // Explicit instantiations for the core proxy templates.
 template class LLVM_EXPORT_TEMPLATE AllAnalysesOn<LazyCallGraph::SCC>;
-template class LLVM_EXPORT_TEMPLATE AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
+template class LLVM_EXPORT_TEMPLATE
+    AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
 template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                            LazyCallGraph &, CGSCCUpdateResult &>;
-template class LLVM_EXPORT_TEMPLATE InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
-template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<ModuleAnalysisManager,
-                                         LazyCallGraph::SCC, LazyCallGraph &>;
-template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
+template class LLVM_EXPORT_TEMPLATE
+    InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
+template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<
+    ModuleAnalysisManager, LazyCallGraph::SCC, LazyCallGraph &>;
+template class LLVM_EXPORT_TEMPLATE
+    OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
 
 /// Explicitly specialize the pass manager run method to handle call graph
 /// updates.
diff --git a/llvm/lib/Analysis/DomTreeUpdater.cpp b/llvm/lib/Analysis/DomTreeUpdater.cpp
index 7bf83afb3dad1..9376fdef23104 100644
--- a/llvm/lib/Analysis/DomTreeUpdater.cpp
+++ b/llvm/lib/Analysis/DomTreeUpdater.cpp
@@ -22,8 +22,8 @@
 
 namespace llvm {
 
-template class LLVM_EXPORT_TEMPLATE GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
-                                     PostDominatorTree>;
+template class LLVM_EXPORT_TEMPLATE
+    GenericDomTreeUpdater<DomTreeUpdater, DominatorTree, PostDominatorTree>;
 
 template LLVM_EXPORT_TEMPLATE void
 GenericDomTreeUpdater<DomTreeUpdater, DominatorTree,
diff --git a/llvm/lib/Analysis/LoopAnalysisManager.cpp b/llvm/lib/Analysis/LoopAnalysisManager.cpp
index 06980b91c39ef..9c8f98129100d 100644
--- a/llvm/lib/Analysis/LoopAnalysisManager.cpp
+++ b/llvm/lib/Analysis/LoopAnalysisManager.cpp
@@ -22,10 +22,12 @@ namespace llvm {
 // Explicit template instantiations and specialization definitions for core
 // template typedefs.
 template class LLVM_EXPORT_TEMPLATE AllAnalysesOn<Loop>;
-template class LLVM_EXPORT_TEMPLATE AnalysisManager<Loop, LoopStandardAnalysisResults &>;
-template class LLVM_EXPORT_TEMPLATE InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
-template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
-                                         LoopStandardAnalysisResults &>;
+template class LLVM_EXPORT_TEMPLATE
+    AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+template class LLVM_EXPORT_TEMPLATE
+    InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
+template class LLVM_EXPORT_TEMPLATE OuterAnalysisManagerProxy<
+    FunctionAnalysisManager, Loop, LoopStandardAnalysisResults &>;
 
 bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
     Function &F, const PreservedAnalyses &PA,
diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
index 4c5f57ca91b08..ee2342d164e8c 100644
--- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
+++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp
@@ -25,7 +25,8 @@ using namespace llvm::memprof;
 
 LLVM_ABI extern cl::opt<float> MemProfLifetimeAccessDensityColdThreshold;
 LLVM_ABI extern cl::opt<unsigned> MemProfAveLifetimeColdThreshold;
-LLVM_ABI extern cl::opt<unsigned> MemProfMinAveLifetimeAccessDensityHotThreshold;
+LLVM_ABI extern cl::opt<unsigned>
+    MemProfMinAveLifetimeAccessDensityHotThreshold;
 LLVM_ABI extern cl::opt<bool> MemProfUseHotHints;
 LLVM_ABI extern cl::opt<bool> MemProfKeepAllNotColdContexts;
 



More information about the llvm-commits mailing list