[llvm] 19b5495 - Port Swift's merge function pass to llvm: merging functions that differ in constants (#68235)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 3 11:14:03 PDT 2023


Author: Manman Ren
Date: 2023-11-03T11:13:58-07:00
New Revision: 19b5495b653a00da7a250f48b4f739fcf2bbe82f

URL: https://github.com/llvm/llvm-project/commit/19b5495b653a00da7a250f48b4f739fcf2bbe82f
DIFF: https://github.com/llvm/llvm-project/commit/19b5495b653a00da7a250f48b4f739fcf2bbe82f.diff

LOG: Port Swift's merge function pass to llvm: merging functions that differ in constants (#68235)

See RFC for details:
https://discourse.llvm.org/t/rfc-for-moving-swift-s-merge-function-pass-to-llvm/73778

We will need to refactor the extensions to FunctionComparator/FunctionHash
into StructuralHash. This patch adds a new pass that is ported from Swift;
how to migrate Swift's pass over will need to be discussed after this lands
in llvm.

This PR is created to get some early review on the patch.
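
For anyone trying the patch out: the pass is registered under the module pass
name mergefunc-ignoring-const and is also gated behind a hidden flag in the
LTO/ThinLTO pipelines, so a minimal sketch of exercising it with opt looks
like this (input.ll is just a placeholder input file):

  opt -passes=mergefunc-ignoring-const -S input.ll
  opt -passes='lto<O2>' -enable-merge-func-ignoring-const -S input.ll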

---------

Co-authored-by: Manman Ren <mren at meta.com>

Added: 
    llvm/include/llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h
    llvm/include/llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h
    llvm/include/llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h
    llvm/lib/Transforms/IPO/MergeFunctionsIgnoringConst.cpp
    llvm/lib/Transforms/Utils/FunctionComparatorIgnoringConst.cpp
    llvm/test/Transforms/MergeFuncIgnoringConst/merge_func.ll
    llvm/test/Transforms/MergeFuncIgnoringConst/merge_with_exception.ll

Modified: 
    llvm/include/llvm/Transforms/Utils/FunctionComparator.h
    llvm/lib/Passes/PassBuilder.cpp
    llvm/lib/Passes/PassBuilderPipelines.cpp
    llvm/lib/Passes/PassRegistry.def
    llvm/lib/Transforms/IPO/CMakeLists.txt
    llvm/lib/Transforms/Utils/CMakeLists.txt

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h b/llvm/include/llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h
new file mode 100644
index 000000000000000..638d009abf2bffc
--- /dev/null
+++ b/llvm/include/llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h
@@ -0,0 +1,42 @@
+//===- MergeFunctionsIgnoringConst.h - Merge Functions ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass merges functions that differ only by constants in certain
+// instructions. The differing constants are turned into parameters of a
+// single merged function, and the original functions become thunks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_MERGEFUNCTIONSIGNORINGCONST_H
+#define LLVM_TRANSFORMS_IPO_MERGEFUNCTIONSIGNORINGCONST_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+/// Merge functions that differ by constants.
+class MergeFuncIgnoringConstPass
+    : public PassInfoMixin<MergeFuncIgnoringConstPass> {
+  bool PtrAuthEnabled = false;
+  unsigned PtrAuthKey = 0;
+  std::string MergeFuncSuffix = ".Tm";
+
+public:
+  MergeFuncIgnoringConstPass() {}
+  MergeFuncIgnoringConstPass(bool PtrAuthEnabled, unsigned PtrAuthKey,
+                             std::string Suffix)
+      : PtrAuthEnabled(PtrAuthEnabled), PtrAuthKey(PtrAuthKey),
+        MergeFuncSuffix(Suffix) {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_IPO_MERGEFUNCTIONSIGNORINGCONST_H
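
For downstream code that wants to run the new pass outside the default
pipelines, it is a regular new-PM module pass; below is a minimal sketch under
that assumption (the function name runMergeIgnoringConst is illustrative, not
part of this patch). The same placement can also be obtained via a PassBuilder
extension point such as registerFullLinkTimeOptimizationLastEPCallback.

  #include "llvm/Analysis/CGSCCPassManager.h"
  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h"

  using namespace llvm;

  // Run MergeFuncIgnoringConstPass on a module with freshly built analyses.
  void runMergeIgnoringConst(Module &M) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM;
    // Default construction: no pointer authentication, ".Tm" merge suffix.
    // The three-argument constructor overrides PtrAuthEnabled, PtrAuthKey and
    // the suffix appended to merged functions.
    MPM.addPass(MergeFuncIgnoringConstPass());
    MPM.run(M, MAM);
  }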

diff --git a/llvm/include/llvm/Transforms/Utils/FunctionComparator.h b/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
index c28f868039a1f7b..1a314b481c72c61 100644
--- a/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
+++ b/llvm/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -379,6 +379,7 @@ class FunctionComparator {
   /// But, we are still not able to compare operands of PHI nodes, since those
   /// could be operands from further BBs we didn't scan yet.
   /// So it's impossible to use dominance properties in general.
+protected:
   mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;
 
   // The global state we will use

diff --git a/llvm/include/llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h b/llvm/include/llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h
new file mode 100644
index 000000000000000..9c7fe3baf2fa0db
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h
@@ -0,0 +1,58 @@
+//===- FunctionComparatorIgnoringConst.h - Function Comparator --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionComparatorIgnoringConst class which is used by
+// the MergeFuncIgnoringConst pass for comparing functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATORIGNORINGCONST_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATORIGNORINGCONST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Transforms/Utils/FunctionComparator.h"
+#include <set>
+
+namespace llvm {
+
+/// FunctionComparatorIgnoringConst - Compares two functions to determine
+/// whether or not they match when certain constants are ignored.
+class FunctionComparatorIgnoringConst : public FunctionComparator {
+public:
+  FunctionComparatorIgnoringConst(const Function *F1, const Function *F2,
+                                  GlobalNumberState *GN)
+      : FunctionComparator(F1, F2, GN) {}
+
+  int cmpOperandsIgnoringConsts(const Instruction *L, const Instruction *R,
+                                unsigned opIdx);
+
+  int cmpBasicBlocksIgnoringConsts(
+      const BasicBlock *BBL, const BasicBlock *BBR,
+      const std::set<std::pair<int, int>> *InstOpndIndex = nullptr);
+
+  int compareIgnoringConsts(
+      const std::set<std::pair<int, int>> *InstOpndIndex = nullptr);
+
+  int compareConstants(const Constant *L, const Constant *R) const {
+    return cmpConstants(L, R);
+  }
+
+private:
+  /// Scratch index for instruction in order during cmpOperandsIgnoringConsts.
+  int Index = 0;
+};
+
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATORIGNORINGCONST_H
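
As a usage sketch, this mirrors how the pass itself drives the comparator in
its sanity check; equivalentIgnoringConsts is an illustrative helper, not part
of the patch:

  #include "llvm/IR/Function.h"
  #include "llvm/Transforms/Utils/FunctionComparator.h"
  #include "llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h"

  using namespace llvm;

  // Returns true if F1 and F2 are structurally identical once constants in
  // eligible instruction operands are ignored.
  static bool equivalentIgnoringConsts(Function *F1, Function *F2,
                                       GlobalNumberState &GN) {
    FunctionComparatorIgnoringConst Cmp(F1, F2, &GN);
    // Follows the FunctionComparator convention: negative/zero/positive,
    // with 0 meaning "equal modulo the ignored constants".
    return Cmp.compareIgnoringConsts() == 0;
  }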

diff --git a/llvm/include/llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h b/llvm/include/llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h
new file mode 100644
index 000000000000000..e63afbb6bbf1718
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h
@@ -0,0 +1,29 @@
+//===- MergeFunctionsIgnoringConst.h - Merge Functions ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helpers used in the MergeFunctionsIgnoringConst.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MERGEFUNCTIONSIGNORINGCONST_H
+#define LLVM_TRANSFORMS_UTILS_MERGEFUNCTIONSIGNORINGCONST_H
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+
+using namespace llvm;
+
+bool isEligibleInstrunctionForConstantSharing(const Instruction *I);
+
+bool isEligibleOperandForConstantSharing(const Instruction *I, unsigned OpIdx);
+
+bool isEligibleFunction(Function *F);
+
+Value *createCast(IRBuilder<> &Builder, Value *V, Type *DestTy);
+#endif // LLVM_TRANSFORMS_UTILS_MERGEFUNCTIONSIGNORINGCONST_H
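
A small sketch of how these helpers compose, matching the shape of the operand
scan done later in MergeFunctionsIgnoringConst.cpp (the function name
collectParameterizableOperands is illustrative only):

  #include "llvm/IR/Function.h"
  #include "llvm/IR/InstIterator.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h"
  #include <utility>
  #include <vector>

  // Collect (instruction, operand index) pairs whose constant operands could
  // be turned into parameters of a merged function.
  std::vector<std::pair<Instruction *, unsigned>>
  collectParameterizableOperands(Function &F) {
    std::vector<std::pair<Instruction *, unsigned>> Result;
    if (!isEligibleFunction(&F))
      return Result;
    for (Instruction &I : instructions(F))
      for (unsigned OpIdx = 0, E = I.getNumOperands(); OpIdx != E; ++OpIdx)
        if (isEligibleOperandForConstantSharing(&I, OpIdx))
          Result.push_back({&I, OpIdx});
    return Result;
  }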

diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 0d7cac19d44c3a8..789ddfcbf529879 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -123,6 +123,7 @@
 #include "llvm/Transforms/IPO/LowerTypeTests.h"
 #include "llvm/Transforms/IPO/MemProfContextDisambiguation.h"
 #include "llvm/Transforms/IPO/MergeFunctions.h"
+#include "llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h"
 #include "llvm/Transforms/IPO/ModuleInliner.h"
 #include "llvm/Transforms/IPO/OpenMPOpt.h"
 #include "llvm/Transforms/IPO/PartialInlining.h"

diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index baea2913338cda7..20dbd3952beb60f 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -60,6 +60,7 @@
 #include "llvm/Transforms/IPO/LowerTypeTests.h"
 #include "llvm/Transforms/IPO/MemProfContextDisambiguation.h"
 #include "llvm/Transforms/IPO/MergeFunctions.h"
+#include "llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h"
 #include "llvm/Transforms/IPO/ModuleInliner.h"
 #include "llvm/Transforms/IPO/OpenMPOpt.h"
 #include "llvm/Transforms/IPO/PartialInlining.h"
@@ -176,6 +177,10 @@ static cl::opt<bool> EnableMergeFunctions(
     "enable-merge-functions", cl::init(false), cl::Hidden,
     cl::desc("Enable function merging as part of the optimization pipeline"));
 
+static cl::opt<bool> EnableMergeFuncIgnoringConst(
+    "enable-merge-func-ignoring-const", cl::init(false), cl::Hidden,
+    cl::desc("Enable function merger that ignores constants"));
+
 static cl::opt<bool> EnablePostPGOLoopRotation(
     "enable-post-pgo-loop-rotation", cl::init(true), cl::Hidden,
     cl::desc("Run the loop rotation transformation after PGO instrumentation"));
@@ -1633,6 +1638,9 @@ ModulePassManager PassBuilder::buildThinLTODefaultPipeline(
   MPM.addPass(buildModuleOptimizationPipeline(
       Level, ThinOrFullLTOPhase::ThinLTOPostLink));
 
+  if (EnableMergeFuncIgnoringConst)
+    MPM.addPass(MergeFuncIgnoringConstPass());
+
   // Emit annotation remarks.
   addAnnotationRemarksPass(MPM);
 
@@ -1958,6 +1966,9 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
 
   invokeFullLinkTimeOptimizationLastEPCallbacks(MPM, Level);
 
+  if (EnableMergeFuncIgnoringConst)
+    MPM.addPass(MergeFuncIgnoringConstPass());
+
   // Emit annotation remarks.
   addAnnotationRemarksPass(MPM);
 

diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index eb51ccef68c827d..ba32c64d18423b9 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -87,6 +87,7 @@ MODULE_PASS("lower-ifunc", LowerIFuncPass())
 MODULE_PASS("lowertypetests", LowerTypeTestsPass())
 MODULE_PASS("metarenamer", MetaRenamerPass())
 MODULE_PASS("mergefunc", MergeFunctionsPass())
+MODULE_PASS("mergefunc-ignoring-const", MergeFuncIgnoringConstPass())
 MODULE_PASS("name-anon-globals", NameAnonGlobalPass())
 MODULE_PASS("no-op-module", NoOpModulePass())
 MODULE_PASS("objc-arc-apelim", ObjCARCAPElimPass())

diff --git a/llvm/lib/Transforms/IPO/CMakeLists.txt b/llvm/lib/Transforms/IPO/CMakeLists.txt
index 034f1587ae8df44..4dac04d3369950f 100644
--- a/llvm/lib/Transforms/IPO/CMakeLists.txt
+++ b/llvm/lib/Transforms/IPO/CMakeLists.txt
@@ -30,6 +30,7 @@ add_llvm_component_library(LLVMipo
   LowerTypeTests.cpp
   MemProfContextDisambiguation.cpp
   MergeFunctions.cpp
+  MergeFunctionsIgnoringConst.cpp
   ModuleInliner.cpp
   OpenMPOpt.cpp
   PartialInlining.cpp

diff --git a/llvm/lib/Transforms/IPO/MergeFunctionsIgnoringConst.cpp b/llvm/lib/Transforms/IPO/MergeFunctionsIgnoringConst.cpp
new file mode 100644
index 000000000000000..d6ae788ddb9e1a1
--- /dev/null
+++ b/llvm/lib/Transforms/IPO/MergeFunctionsIgnoringConst.cpp
@@ -0,0 +1,1399 @@
+//===--- MergeFunctionsIgnoringConst.cpp - Merge functions ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass looks for similar functions that are mergeable and folds them.
+// The implementation is similar to LLVM's MergeFunctions pass. Instead of
+// merging identical functions, it merges functions which only differ by a few
+// constants in certain instructions.
+// This is copied from Swift's implementation.
+//
+// This pass should run after LLVM's MergeFunctions pass, because it works best
+// if there are no _identical_ functions in the module.
+// Note: it would also work for identical functions but could produce more
+// code overhead than the LLVM pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StableHashing.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/StructuralHash.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h"
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mergefunc-ignoring-const"
+
+STATISTIC(NumFunctionsMergedIgnoringConst, "Number of functions merged");
+STATISTIC(NumThunksWrittenIgnoringConst, "Number of thunks generated");
+
+static cl::opt<bool> EnableAggressiveMergeFunc(
+    "enable-aggressive-mergefunc-ignoringconst", cl::init(false), cl::Hidden,
+    cl::desc("Enable more aggressive function merger"));
+
+static cl::opt<unsigned> NumFunctionsIgnoringConstForSanityCheck(
+    "mergefunc-ignoringconst-sanity",
+    cl::desc("How many functions in module could be used for "
+             "MergeFunctionsIgnoringConst pass sanity check. "
+             "'0' disables this check. Works only with '-debug' key."),
+    cl::init(0), cl::Hidden);
+
+static cl::opt<unsigned> IgnoringConstMergeThreshold(
+    "mergefunc-ignoringconst-threshold",
+    cl::desc("Functions larger than the threshold are considered for merging."
+             "'0' disables function merging at all."),
+    cl::init(15), cl::Hidden);
+
+cl::opt<bool> UseLinkOnceODRLinkageMerging(
+    "use-linkonceodr-linkage-merging", cl::init(false), cl::Hidden,
+    cl::desc(
+        "Use LinkeOnceODR linkage to deduplicate the identical merged function "
+        "(default = off)"));
+
+cl::opt<bool> NoInlineForMergedFunction(
+    "no-inline-merged-function", cl::init(false), cl::Hidden,
+    cl::desc("set noinline for merged function (default = off)"));
+
+static cl::opt<bool>
+    CastArrayType("merge-cast-array-type", cl::init(false), cl::Hidden,
+                  cl::desc("support for casting array type (default = off)"));
+
+static cl::opt<bool> IgnoreMusttailFunction(
+    "ignore-musttail-function", cl::init(false), cl::Hidden,
+    cl::desc(
+        "ignore functions containing callsites with musttail (default = off)"));
+
+static cl::opt<bool> AlwaysCallThunk(
+    "merge-always-call-thunk", cl::init(false), cl::Hidden,
+    cl::desc(
+        "do not replace callsites and always emit a thunk (default = off)"));
+
+static cl::list<std::string> MergeBlockRegexFilters(
+    "merge-block-regex", cl::Optional,
+    cl::desc("Block functions from merging if they match the given "
+             "regular expression"),
+    cl::ZeroOrMore);
+
+static cl::list<std::string> MergeAllowRegexFilters(
+    "merge-allow-regex", cl::Optional,
+    cl::desc("Allow functions from merging if they match the given "
+             "regular expression"),
+    cl::ZeroOrMore);
+
+bool isEligibleInstrunctionForConstantSharing(const Instruction *I) {
+  switch (I->getOpcode()) {
+  case Instruction::Load:
+  case Instruction::Store:
+  case Instruction::Call:
+    return true;
+  default: {
+    if (EnableAggressiveMergeFunc && I->getOpcode() == Instruction::Invoke)
+      return true;
+    return false;
+  }
+  }
+}
+
+/// Returns true if the \p OpIdx operand of \p CI is the callee operand.
+static bool isCalleeOperand(const CallBase *CI, unsigned OpIdx) {
+  return &CI->getCalledOperandUse() == &CI->getOperandUse(OpIdx);
+}
+
+static bool canParameterizeCallOperand(const CallBase *CI, unsigned OpIdx) {
+  if (CI->isInlineAsm())
+    return false;
+  Function *Callee = CI->getCalledOperand()
+                         ? dyn_cast_or_null<Function>(
+                               CI->getCalledOperand()->stripPointerCasts())
+                         : nullptr;
+  if (Callee) {
+    if (Callee->isIntrinsic())
+      return false;
+    // objc_msgSend stubs must be called, and can't have their address taken.
+    if (Callee->getName().startswith("objc_msgSend$"))
+      return false;
+  }
+  if (isCalleeOperand(CI, OpIdx) &&
+      CI->getOperandBundle(LLVMContext::OB_ptrauth).has_value()) {
+    // The operand is the callee and it has already been signed. Ignore this
+    // because we cannot add another ptrauth bundle to the call instruction.
+    return false;
+  }
+  return true;
+}
+
+bool isEligibleOperandForConstantSharing(const Instruction *I, unsigned OpIdx) {
+  assert(OpIdx < I->getNumOperands() && "Invalid operand index");
+
+  if (!isEligibleInstrunctionForConstantSharing(I))
+    return false;
+
+  auto Opnd = I->getOperand(OpIdx);
+  if (!isa<Constant>(Opnd))
+    return false;
+
+  if (const auto *CI = dyn_cast<CallBase>(I))
+    return canParameterizeCallOperand(CI, OpIdx);
+
+  return true;
+}
+
+namespace {
+
+/// MergeFuncIgnoringConst finds functions which only differ by constants in
+/// certain instructions, e.g. resulting from specialized functions of layout
+/// compatible types.
+/// Such functions are merged by replacing the differing constants by a
+/// parameter. The original functions are replaced by thunks which call the
+/// merged function with the specific argument constants.
+///
+class MergeFuncIgnoringConstImpl {
+public:
+  MergeFuncIgnoringConstImpl(bool PtrAuthEnabled, unsigned PtrAuthKey,
+                             std::string Suffix)
+      : FnTree(FunctionNodeCmp(&GlobalNumbers)), PtrAuthEnabled(PtrAuthEnabled),
+        PtrAuthKey(PtrAuthKey), MergeFuncSuffix(Suffix) {}
+
+  bool runImpl(Module &M);
+
+private:
+  struct FunctionEntry;
+
+  /// Describes the set of functions which are considered as "equivalent" (i.e.
+  /// only differing by some constants).
+  struct EquivalenceClass {
+    /// The single-linked list of all functions which are a member of this
+    /// equivalence class.
+    FunctionEntry *First;
+
+    /// A very cheap hash, used to early exit if functions do not match.
+    llvm::IRHash Hash;
+
+  public:
+    // Note the hash is recalculated potentially multiple times, but it is
+    // cheap.
+    EquivalenceClass(FunctionEntry *First)
+        : First(First), Hash(StructuralHash(*First->F)) {
+      assert(!First->Next);
+    }
+  };
+
+  /// The function comparison operator is provided here so that FunctionNodes do
+  /// not need to become larger with another pointer.
+  class FunctionNodeCmp {
+    GlobalNumberState *GlobalNumbers;
+
+  public:
+    FunctionNodeCmp(GlobalNumberState *GN) : GlobalNumbers(GN) {}
+    bool operator()(const EquivalenceClass &LHS,
+                    const EquivalenceClass &RHS) const {
+      // Order first by hashes, then full function comparison.
+      if (LHS.Hash != RHS.Hash)
+        return LHS.Hash < RHS.Hash;
+      FunctionComparatorIgnoringConst FCmp(LHS.First->F, RHS.First->F,
+                                           GlobalNumbers);
+      return FCmp.compareIgnoringConsts() == -1;
+    }
+  };
+  using FnTreeType = std::set<EquivalenceClass, FunctionNodeCmp>;
+
+  /// An entry for a single function that is a candidate for merging.
+  struct FunctionEntry {
+    FunctionEntry(Function *F, FnTreeType::iterator I)
+        : F(F), Next(nullptr), NumUnhandledCallees(0), TreeIter(I),
+          IsMerged(false) {}
+
+    /// Back-link to the function.
+    AssertingVH<Function> F;
+
+    /// The next function in its equivalence class.
+    FunctionEntry *Next;
+
+    /// The number of not-yet merged callees. Used to process the merging in
+    /// bottom-up call order.
+    /// This is only valid in the first entry of an equivalence class. The
+    /// counts of all functions in an equivalence class are accumulated in the
+    /// first entry.
+    int NumUnhandledCallees;
+
+    /// The iterator of the function's equivalence class in the FnTree.
+    /// It's FnTree.end() if the function is not in an equivalence class.
+    FnTreeType::iterator TreeIter;
+
+    /// True if this function is already a thunk, calling the merged function.
+    bool IsMerged;
+  };
+
+  /// Describes an operand of a specific instruction.
+  struct OpLocation {
+    Instruction *I;
+    unsigned OpIndex;
+  };
+
+  /// Information for a function. Used during merging.
+  struct FunctionInfo {
+
+    FunctionInfo(Function *F)
+        : F(F), CurrentInst(nullptr), NumParamsNeeded(0) {}
+
+    void init() {
+      CurrentInst = &*F->begin()->begin();
+      NumParamsNeeded = 0;
+    }
+
+    /// Advances the current instruction to the next instruction.
+    void nextInst() {
+      assert(CurrentInst);
+      if (CurrentInst->isTerminator()) {
+        auto BlockIter = std::next(CurrentInst->getParent()->getIterator());
+        if (BlockIter == F->end()) {
+          CurrentInst = nullptr;
+          return;
+        }
+        CurrentInst = &*BlockIter->begin();
+        return;
+      }
+      CurrentInst = &*std::next(CurrentInst->getIterator());
+    }
+
+    /// Returns true if the operand \p OpIdx of the current instruction is the
+    /// callee of a call, which needs to be signed if passed as a parameter.
+    bool needsPointerSigning(unsigned OpIdx) const {
+      if (auto *CI = dyn_cast<CallInst>(CurrentInst))
+        return isCalleeOperand(CI, OpIdx);
+      return false;
+    }
+
+    Function *F;
+
+    /// The current instruction while iterating over all instructions.
+    Instruction *CurrentInst;
+
+    /// Roughly the number of parameters needed if this function would be
+    /// merged with the first function of the equivalence class.
+    int NumParamsNeeded;
+  };
+
+  using FunctionInfos = SmallVector<FunctionInfo, 8>;
+
+  /// Describes a parameter which we create to parameterize the merged function.
+  struct ParamInfo {
+    /// The value of the parameter for all the functions in the equivalence
+    /// class.
+    SmallVector<Constant *, 8> Values;
+
+    /// All uses of the parameter in the merged function.
+    SmallVector<OpLocation, 16> Uses;
+
+    /// The Discriminator for pointer signing.
+    /// Only not null if needsPointerSigning is true.
+    ConstantInt *Discriminator = nullptr;
+
+    /// True if the value is a callee function, which needs to be signed if
+    /// passed as a parameter.
+    bool NeedsPointerSigning = false;
+
+    /// Checks if this parameter can be used to describe an operand in all
+    /// functions of the equivalence class. Returns true if all values match
+    /// the specific instruction operands in all functions.
+    bool matches(const FunctionInfos &FInfos, unsigned OpIdx,
+                 bool PtrAuthEnabled) const {
+      unsigned NumFuncs = FInfos.size();
+      assert(Values.size() == NumFuncs);
+      if (PtrAuthEnabled &&
+          NeedsPointerSigning != FInfos[0].needsPointerSigning(OpIdx)) {
+        return false;
+      }
+      for (unsigned Idx = 0; Idx < NumFuncs; ++Idx) {
+        const FunctionInfo &FI = FInfos[Idx];
+        Constant *C = cast<Constant>(FI.CurrentInst->getOperand(OpIdx));
+        if (Values[Idx] != C)
+          return false;
+      }
+      return true;
+    }
+
+    /// Computes the Discriminator for pointer signing.
+    void computeDiscriminator(LLVMContext &Context) {
+      assert(NeedsPointerSigning);
+      assert(!Discriminator);
+
+      /// Get a hash from the concatenated function names.
+      /// The hash is deterministic, because the order of values depends on the
+      /// order of functions in the module, which is itself deterministic.
+      /// Note that the hash is not part of the ABI, because it's purely used
+      /// for pointer authentication between a module-private caller-callee
+      /// pair.
+      std::string concatenatedCalleeNames;
+      for (Constant *value : Values) {
+        if (auto *GO = dyn_cast<GlobalObject>(value))
+          concatenatedCalleeNames += GO->getName();
+      }
+      uint64_t rawHash = stable_hash_combine_string(concatenatedCalleeNames);
+      IntegerType *discrTy = Type::getInt64Ty(Context);
+      Discriminator = ConstantInt::get(discrTy, (rawHash % 0xFFFF) + 1);
+    }
+  };
+
+  using ParamInfos = SmallVector<ParamInfo, 16>;
+
+  Module *CurrentModule = nullptr;
+
+  GlobalNumberState GlobalNumbers;
+
+  /// A work queue of functions that may have been modified and should be
+  /// analyzed again.
+  std::vector<WeakTrackingVH> Deferred;
+
+  /// The set of all distinct functions. Use the insert() and remove() methods
+  /// to modify it. The map allows efficient lookup and deferring of Functions.
+  FnTreeType FnTree;
+
+  ValueMap<Function *, FunctionEntry *> FuncEntries;
+
+  // Maps a function-pointer / Discriminator pair to a corresponding global in
+  // the llvm.ptrauth section.
+  // This map is used as a cache to not create ptrauth globals twice.
+  DenseMap<std::pair<Constant *, ConstantInt *>, Constant *> PtrAuthGlobals;
+
+  /// True if the architecture has pointer authentication enabled.
+  bool PtrAuthEnabled = false;
+
+  /// The key for pointer authentication.
+  unsigned PtrAuthKey = 0;
+
+  std::string MergeFuncSuffix = ".Tm";
+
+  FunctionEntry *getEntry(Function *F) const { return FuncEntries.lookup(F); }
+
+  bool isInEquivalenceClass(FunctionEntry *FE) const {
+    if (FE->TreeIter != FnTree.end()) {
+      return true;
+    }
+    assert(!FE->Next);
+    assert(FE->NumUnhandledCallees == 0);
+    return false;
+  }
+
+  /// Checks the rules of the order relation introduced among the set of
+  /// functions. Returns true if the sanity check passed, and false if it failed.
+  bool doSanityCheck(std::vector<WeakTrackingVH> &Worklist);
+
+  /// Updates the NumUnhandledCallees of all user functions of the equivalence
+  /// class containing \p FE by \p Delta.
+  void updateUnhandledCalleeCount(FunctionEntry *FE, int Delta);
+
+  bool tryMergeEquivalenceClass(FunctionEntry *FirstInClass);
+
+  FunctionInfo removeFuncWithMostParams(FunctionInfos &FInfos);
+
+  bool deriveParams(ParamInfos &Params, FunctionInfos &FInfos,
+                    unsigned maxParams);
+
+  bool numOperandsDiffer(FunctionInfos &FInfos);
+
+  bool constsDiffer(const FunctionInfos &FInfos, unsigned OpIdx);
+
+  bool tryMapToParameter(FunctionInfos &FInfos, unsigned OpIdx,
+                         ParamInfos &Params, unsigned maxParams);
+
+  void replaceCallWithAddedPtrAuth(CallInst *origCall, Value *newCallee,
+                                   ConstantInt *Discriminator);
+
+  void mergeWithParams(const FunctionInfos &FInfos, ParamInfos &Params);
+  static void dumpMergeInfo(const FunctionInfos &FInfos, unsigned);
+
+  void removeEquivalenceClassFromTree(FunctionEntry *FE);
+
+  void writeThunk(Function *ToFunc, Function *Thunk, const ParamInfos &Params,
+                  unsigned FuncIdx);
+
+  bool isPtrAuthEnabled() const {
+    // TODO: fix pointer authentication
+    return PtrAuthEnabled;
+  }
+
+  ConstantInt *getPtrAuthKey() {
+    // TODO: fix pointer authentication
+    return ConstantInt::get(Type::getInt32Ty(CurrentModule->getContext()),
+                            PtrAuthKey);
+  }
+
+  /// Returns the value of function \p FuncIdx, and signs it if required.
+  Constant *getSignedValue(const ParamInfo &PI, unsigned FuncIdx) {
+    Constant *value = PI.Values[FuncIdx];
+    if (!PI.NeedsPointerSigning)
+      return value;
+
+    auto lookupKey = std::make_pair(value, PI.Discriminator);
+    Constant *&ptrAuthGlobal = PtrAuthGlobals[lookupKey];
+    if (!ptrAuthGlobal) {
+      // TODO: fix pointer authentication
+    }
+    return ptrAuthGlobal;
+  }
+
+  /// Replace all direct calls of Old with calls of New. Will bitcast New if
+  /// necessary to make types match.
+  bool replaceDirectCallers(Function *Old, Function *New,
+                            const ParamInfos &Params, unsigned FuncIdx);
+};
+
+} // end anonymous namespace
+
+bool MergeFuncIgnoringConstImpl::doSanityCheck(
+    std::vector<WeakTrackingVH> &Worklist) {
+  if (const unsigned Max = NumFunctionsIgnoringConstForSanityCheck) {
+    unsigned TripleNumber = 0;
+    bool Valid = true;
+
+    dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";
+
+    unsigned i = 0;
+    for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(),
+                                               E = Worklist.end();
+         I != E && i < Max; ++I, ++i) {
+      unsigned j = i;
+      for (std::vector<WeakTrackingVH>::iterator J = I; J != E && j < Max;
+           ++J, ++j) {
+        Function *F1 = cast<Function>(*I);
+        Function *F2 = cast<Function>(*J);
+        int Res1 = FunctionComparatorIgnoringConst(F1, F2, &GlobalNumbers)
+                       .compareIgnoringConsts();
+        int Res2 = FunctionComparatorIgnoringConst(F2, F1, &GlobalNumbers)
+                       .compareIgnoringConsts();
+
+        // If F1 <= F2, then F2 >= F1, otherwise report failure.
+        if (Res1 != -Res2) {
+          dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
+                 << "\n";
+          LLVM_DEBUG(F1->dump());
+          LLVM_DEBUG(F2->dump());
+          Valid = false;
+        }
+
+        if (Res1 == 0)
+          continue;
+
+        unsigned k = j;
+        for (std::vector<WeakTrackingVH>::iterator K = J; K != E && k < Max;
+             ++k, ++K, ++TripleNumber) {
+          if (K == J)
+            continue;
+
+          Function *F3 = cast<Function>(*K);
+          int Res3 = FunctionComparatorIgnoringConst(F1, F3, &GlobalNumbers)
+                         .compareIgnoringConsts();
+          int Res4 = FunctionComparatorIgnoringConst(F2, F3, &GlobalNumbers)
+                         .compareIgnoringConsts();
+
+          bool Transitive = true;
+
+          if (Res1 != 0 && Res1 == Res4) {
+            // F1 > F2, F2 > F3 => F1 > F3
+            Transitive = Res3 == Res1;
+          } else if (Res3 != 0 && Res3 == -Res4) {
+            // F1 > F3, F3 > F2 => F1 > F2
+            Transitive = Res3 == Res1;
+          } else if (Res4 != 0 && -Res3 == Res4) {
+            // F2 > F3, F3 > F1 => F2 > F1
+            Transitive = Res4 == -Res1;
+          }
+
+          if (!Transitive) {
+            dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
+                   << TripleNumber << "\n";
+            dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
+                   << Res4 << "\n";
+            LLVM_DEBUG(F1->dump());
+            LLVM_DEBUG(F2->dump());
+            LLVM_DEBUG(F3->dump());
+            Valid = false;
+          }
+        }
+      }
+    }
+
+    dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
+    return Valid;
+  }
+  return true;
+}
+
+/// Returns true if functions containing calls to \p F may be merged together.
+static bool mayMergeCallsToFunction(Function &F) {
+  StringRef Name = F.getName();
+
+  // Calls to dtrace probes must generate unique patchpoints.
+  if (Name.startswith("__dtrace"))
+    return false;
+
+  return true;
+}
+
+/// Returns the benefit, which is approximately the size of the function.
+/// Returns 0 if the function should not be merged.
+static unsigned getBenefit(Function *F) {
+  unsigned Benefit = 0;
+
+  // We don't want to merge very small functions, because the overhead of
+  // creating thunks and/or adding parameters to the call sites
+  // outweighs the benefit.
+  for (BasicBlock &BB : *F) {
+    for (Instruction &I : BB) {
+      if (CallBase *CB = dyn_cast<CallBase>(&I)) {
+        Function *Callee = CB->getCalledFunction();
+        if (Callee && !mayMergeCallsToFunction(*Callee))
+          return 0;
+        if (!Callee || !Callee->isIntrinsic()) {
+          Benefit += 5;
+          continue;
+        }
+      }
+      Benefit += 1;
+    }
+  }
+  return Benefit;
+}
+
+/// Returns true if function \p F is eligible for merging.
+bool isEligibleFunction(Function *F) {
+  if (F->isDeclaration())
+    return false;
+
+  if (F->hasFnAttribute(llvm::Attribute::NoMerge))
+    return false;
+
+  if (F->hasAvailableExternallyLinkage()) {
+    return false;
+  }
+
+  if (F->getFunctionType()->isVarArg()) {
+    return false;
+  }
+
+  // Check against blocklist.
+  if (!MergeBlockRegexFilters.empty()) {
+    StringRef FuncName = F->getName();
+    for (const auto &tRegex : MergeBlockRegexFilters)
+      if (Regex(tRegex).match(FuncName)) {
+        return false;
+      }
+  }
+  // Check against allowlist
+  if (!MergeAllowRegexFilters.empty()) {
+    StringRef FuncName = F->getName();
+    bool found = false;
+    for (const auto &tRegex : MergeAllowRegexFilters)
+      if (Regex(tRegex).match(FuncName)) {
+        found = true;
+        break;
+      }
+    if (!found)
+      return false;
+  }
+
+  if (F->getCallingConv() == CallingConv::SwiftTail)
+    return false;
+
+  // If the function contains callsites with musttail and we merge it, the
+  // merged function will still contain the musttail callsite, but the number
+  // of parameters can change, so the callsite's parameter count would no
+  // longer match the merged function itself.
+  if (IgnoreMusttailFunction) {
+    for (const BasicBlock &BB : *F) {
+      for (const Instruction &I : BB) {
+        const auto *CB = dyn_cast<CallBase>(&I);
+        if (CB && CB->isMustTailCall())
+          return false;
+      }
+    }
+  }
+
+  unsigned Benefit = getBenefit(F);
+  if (Benefit < IgnoringConstMergeThreshold) {
+    return false;
+  }
+
+  return true;
+}
+
+bool MergeFuncIgnoringConstImpl::runImpl(Module &M) {
+  if (IgnoringConstMergeThreshold == 0)
+    return false;
+
+  CurrentModule = &M;
+
+  // TODO: fix pointer authentication
+
+  bool Changed = false;
+
+  // All functions in the module, ordered by hash. Functions with a unique
+  // hash value are easily eliminated.
+  std::vector<std::pair<llvm::IRHash, Function *>> HashedFuncs;
+
+  for (Function &Func : M) {
+    if (isEligibleFunction(&Func)) {
+      HashedFuncs.push_back({StructuralHash(Func), &Func});
+    }
+  }
+
+  std::stable_sort(HashedFuncs.begin(), HashedFuncs.end(),
+                   [](const std::pair<llvm::IRHash, Function *> &a,
+                      const std::pair<llvm::IRHash, Function *> &b) {
+                     return a.first < b.first;
+                   });
+
+  std::vector<FunctionEntry> FuncEntryStorage;
+  FuncEntryStorage.reserve(HashedFuncs.size());
+
+  auto S = HashedFuncs.begin();
+  for (auto I = HashedFuncs.begin(), IE = HashedFuncs.end(); I != IE; ++I) {
+
+    Function *F = I->second;
+    FuncEntryStorage.push_back(FunctionEntry(F, FnTree.end()));
+    FunctionEntry &FE = FuncEntryStorage.back();
+    FuncEntries[F] = &FE;
+
+    // If the hash value matches the previous value or the next one, we must
+    // consider merging it. Otherwise it is dropped and never considered again.
+    if ((I != S && std::prev(I)->first == I->first) ||
+        (std::next(I) != IE && std::next(I)->first == I->first)) {
+      Deferred.push_back(WeakTrackingVH(F));
+    }
+  }
+
+  do {
+    std::vector<WeakTrackingVH> Worklist;
+    Deferred.swap(Worklist);
+
+    LLVM_DEBUG(dbgs() << "======\nbuild tree: worklist-size=" << Worklist.size()
+                      << '\n');
+    LLVM_DEBUG(doSanityCheck(Worklist));
+
+    SmallVector<FunctionEntry *, 8> FuncsToMerge;
+
+    // Insert all candidates into the Worklist.
+    for (WeakTrackingVH &I : Worklist) {
+      if (!I)
+        continue;
+      Function *F = cast<Function>(I);
+      FunctionEntry *FE = getEntry(F);
+      assert(!isInEquivalenceClass(FE));
+
+      std::pair<FnTreeType::iterator, bool> Result = FnTree.insert(FE);
+
+      FE->TreeIter = Result.first;
+      const EquivalenceClass &Eq = *Result.first;
+
+      if (Result.second) {
+        assert(Eq.First == FE);
+        LLVM_DEBUG(dbgs() << "  new in tree: " << F->getName() << '\n');
+      } else {
+        assert(Eq.First != FE);
+        LLVM_DEBUG(dbgs() << "  add to existing: " << F->getName() << '\n');
+        // Add the function to the existing equivalence class.
+        FE->Next = Eq.First->Next;
+        Eq.First->Next = FE;
+        // Schedule for merging if the function's equivalence class reaches the
+        // size of 2.
+        if (!FE->Next)
+          FuncsToMerge.push_back(Eq.First);
+      }
+    }
+    LLVM_DEBUG(dbgs() << "merge functions: tree-size=" << FnTree.size()
+                      << '\n');
+
+    // Figure out the leaf functions. We want to do the merging in bottom-up
+    // call order. This ensures that we don't parameterize on callee function
+    // names if we don't have to (because the callee may be merged).
+    // Note that "leaf functions" refer to the sub-call-graph of functions which
+    // are in the FnTree.
+    for (FunctionEntry *ToMerge : FuncsToMerge) {
+      assert(isInEquivalenceClass(ToMerge));
+      updateUnhandledCalleeCount(ToMerge, 1);
+    }
+
+    // Check if there are any leaf functions at all.
+    bool LeafFound = false;
+    for (FunctionEntry *ToMerge : FuncsToMerge) {
+      if (ToMerge->NumUnhandledCallees == 0)
+        LeafFound = true;
+    }
+    for (FunctionEntry *ToMerge : FuncsToMerge) {
+      if (isInEquivalenceClass(ToMerge)) {
+        // Only merge leaf functions (or all functions if all functions are in
+        // a call cycle).
+        if (ToMerge->NumUnhandledCallees == 0 || !LeafFound) {
+          updateUnhandledCalleeCount(ToMerge, -1);
+          Changed |= tryMergeEquivalenceClass(ToMerge);
+        } else {
+          // Non-leaf functions (i.e. functions in a call cycle) may become
+          // leaf functions in the next iteration.
+          removeEquivalenceClassFromTree(ToMerge);
+        }
+      }
+    }
+  } while (!Deferred.empty());
+
+  FnTree.clear();
+  GlobalNumbers.clear();
+  FuncEntries.clear();
+  PtrAuthGlobals.clear();
+
+  return Changed;
+}
+
+void MergeFuncIgnoringConstImpl::updateUnhandledCalleeCount(FunctionEntry *FE,
+                                                            int Delta) {
+  // Iterate over all functions of FE's equivalence class.
+  do {
+    for (Use &U : FE->F->uses()) {
+      if (auto *I = dyn_cast<Instruction>(U.getUser())) {
+        FunctionEntry *CallerFE = getEntry(I->getFunction());
+        if (CallerFE && CallerFE->TreeIter != FnTree.end()) {
+          // Accumulate the count in the first entry of the equivalence class.
+          FunctionEntry *Head = CallerFE->TreeIter->First;
+          Head->NumUnhandledCallees += Delta;
+        }
+      }
+    }
+    FE = FE->Next;
+  } while (FE);
+}
+
+bool MergeFuncIgnoringConstImpl::tryMergeEquivalenceClass(
+    FunctionEntry *FirstInClass) {
+  // Build the FInfos vector from all functions in the equivalence class.
+  FunctionInfos FInfos;
+  FunctionEntry *FE = FirstInClass;
+  do {
+    FInfos.push_back(FunctionInfo(FE->F));
+    FE->IsMerged = true;
+    FE = FE->Next;
+  } while (FE);
+  assert(FInfos.size() >= 2);
+
+  // Merged or not: in any case we remove the equivalence class from the FnTree.
+  removeEquivalenceClassFromTree(FirstInClass);
+
+  // Contains functions which differ too much from the first function (i.e.
+  // would need too many parameters).
+  FunctionInfos Removed;
+
+  bool Changed = false;
+  int Try = 0;
+
+  unsigned Benefit = getBenefit(FirstInClass->F);
+
+  // The bigger the function, the more parameters are allowed.
+  unsigned maxParams = std::max(4u, Benefit / 100);
+
+  // We need multiple tries if there are some functions in FInfos which differ
+  // too much from the first function in FInfos. But we limit the number of
+  // tries to a small number, because this is quadratic.
+  while (FInfos.size() >= 2 && Try++ < 4) {
+    ParamInfos Params;
+    bool Merged = deriveParams(Params, FInfos, maxParams);
+    if (Merged) {
+      mergeWithParams(FInfos, Params);
+      Changed = true;
+    } else {
+      // We ran out of parameters. Remove the function from the set which
+      // differs most from the first function.
+      Removed.push_back(removeFuncWithMostParams(FInfos));
+    }
+    if (Merged || FInfos.size() < 2) {
+      // Try again with the functions which were removed from the original set.
+      FInfos.swap(Removed);
+      Removed.clear();
+    }
+  }
+  return Changed;
+}
+
+/// Remove the function from \p FInfos which needs the most parameters and
+/// return the removed function.
+MergeFuncIgnoringConstImpl::FunctionInfo
+MergeFuncIgnoringConstImpl::removeFuncWithMostParams(FunctionInfos &FInfos) {
+  FunctionInfos::iterator MaxIter = FInfos.end();
+  for (auto Iter = FInfos.begin(), End = FInfos.end(); Iter != End; ++Iter) {
+    if (MaxIter == FInfos.end() ||
+        Iter->NumParamsNeeded > MaxIter->NumParamsNeeded) {
+      MaxIter = Iter;
+    }
+  }
+  FunctionInfo Removed = *MaxIter;
+  FInfos.erase(MaxIter);
+  return Removed;
+}
+
+/// Finds the set of parameters which are required to merge the functions in
+/// \p FInfos.
+/// Returns true on success, i.e. the functions in \p FInfos can be merged with
+/// the parameters returned in \p Params.
+bool MergeFuncIgnoringConstImpl::deriveParams(ParamInfos &Params,
+                                              FunctionInfos &FInfos,
+                                              unsigned maxParams) {
+  for (FunctionInfo &FI : FInfos)
+    FI.init();
+
+  FunctionInfo &FirstFI = FInfos.front();
+
+  // Iterate over all instructions synchronously in all functions.
+  do {
+    if (isEligibleInstrunctionForConstantSharing(FirstFI.CurrentInst)) {
+
+      // Here we handle a rare corner case which needs to be explained:
+      // Usually the number of operands match, because otherwise the functions
+      // in FInfos would not be in the same equivalence class. There is only one
+      // exception to that: If the current instruction is a call to a function,
+      // which was merged in the previous iteration (in
+      // tryMergeEquivalenceClass) then the call could be replaced and has more
+      // arguments than the original call.
+      if (numOperandsDiffer(FInfos)) {
+        assert(isa<CallInst>(FirstFI.CurrentInst) &&
+               "only calls are expected to 
diff er in number of operands");
+        return false;
+      }
+
+      for (unsigned OpIdx = 0, NumOps = FirstFI.CurrentInst->getNumOperands();
+           OpIdx != NumOps; ++OpIdx) {
+
+        if (constsDiffer(FInfos, OpIdx)) {
+          // This instruction has operands which differ in at least some
+          // functions. So we need to parameterize it.
+          if (!tryMapToParameter(FInfos, OpIdx, Params, maxParams)) {
+            // We ran out of parameters.
+            return false;
+          }
+        }
+      }
+    }
+    // Go to the next instruction in all functions.
+    for (FunctionInfo &FI : FInfos)
+      FI.nextInst();
+  } while (FirstFI.CurrentInst);
+
+  return true;
+}
+
+/// Returns true if the number of operands of the current instruction differs.
+bool MergeFuncIgnoringConstImpl::numOperandsDiffer(FunctionInfos &FInfos) {
+  unsigned numOps = FInfos[0].CurrentInst->getNumOperands();
+  for (const FunctionInfo &FI : ArrayRef<FunctionInfo>(FInfos).drop_front(1)) {
+    if (FI.CurrentInst->getNumOperands() != numOps)
+      return true;
+  }
+  return false;
+}
+
+/// Returns true if the \p OpIdx's constant operand in the current instruction
+/// does differ in any of the functions in \p FInfos.
+bool MergeFuncIgnoringConstImpl::constsDiffer(const FunctionInfos &FInfos,
+                                              unsigned OpIdx) {
+  Constant *CommonConst = nullptr;
+
+  for (const FunctionInfo &FI : FInfos) {
+    Value *Op = FI.CurrentInst->getOperand(OpIdx);
+    if (auto *C = dyn_cast<Constant>(Op)) {
+      if (!CommonConst) {
+        CommonConst = C;
+      } else if (EnableAggressiveMergeFunc &&
+                 isa<ConstantPointerNull>(CommonConst) &&
+                 isa<ConstantPointerNull>(C)) {
+        // if both are null pointers, and if they are different constants
+        // due to type, still treat them as the same.
+      } else if (C != CommonConst) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+/// Create a new parameter for differing operands or try to reuse an existing
+/// parameter.
+/// Returns true if a parameter could be created or found without exceeding the
+/// maximum number of parameters.
+bool MergeFuncIgnoringConstImpl::tryMapToParameter(FunctionInfos &FInfos,
+                                                   unsigned OpIdx,
+                                                   ParamInfos &Params,
+                                                   unsigned maxParams) {
+  ParamInfo *Matching = nullptr;
+  // Try to find an existing parameter which exactly matches the differing
+  // operands of the current instruction.
+  for (ParamInfo &PI : Params) {
+    if (PI.matches(FInfos, OpIdx, isPtrAuthEnabled())) {
+      Matching = &PI;
+      break;
+    }
+  }
+  if (!Matching) {
+    // We need a new parameter.
+    // Check if we are within the limit.
+    if (Params.size() >= maxParams)
+      return false;
+
+    Params.resize(Params.size() + 1);
+    Matching = &Params.back();
+    // Store the constant values into the new parameter.
+    Constant *FirstC = cast<Constant>(FInfos[0].CurrentInst->getOperand(OpIdx));
+    for (FunctionInfo &FI : FInfos) {
+      Constant *C = cast<Constant>(FI.CurrentInst->getOperand(OpIdx));
+      Matching->Values.push_back(C);
+      if (C != FirstC)
+        FI.NumParamsNeeded += 1;
+    }
+    if (isPtrAuthEnabled())
+      Matching->NeedsPointerSigning = FInfos[0].needsPointerSigning(OpIdx);
+  }
+  /// Remember where the parameter is needed when we build our merged function.
+  Matching->Uses.push_back({FInfos[0].CurrentInst, OpIdx});
+  return true;
+}
+
+/// Copy \p origCall with a \p newCallee and add a ptrauth bundle with \p
+/// Discriminator.
+void MergeFuncIgnoringConstImpl::replaceCallWithAddedPtrAuth(
+    CallInst *origCall, Value *newCallee, ConstantInt *Discriminator) {
+  SmallVector<llvm::OperandBundleDef, 4> bundles;
+  origCall->getOperandBundlesAsDefs(bundles);
+  ConstantInt *key = getPtrAuthKey();
+  llvm::Value *bundleArgs[] = {key, Discriminator};
+  bundles.emplace_back("ptrauth", bundleArgs);
+
+  SmallVector<llvm::Value *, 4> copiedArgs;
+  for (Value *op : origCall->args()) {
+    copiedArgs.push_back(op);
+  }
+
+  auto *newCall =
+      CallInst::Create(origCall->getFunctionType(), newCallee, copiedArgs,
+                       bundles, origCall->getName(), origCall);
+  newCall->setAttributes(origCall->getAttributes());
+  newCall->setTailCallKind(origCall->getTailCallKind());
+  newCall->setCallingConv(origCall->getCallingConv());
+  origCall->replaceAllUsesWith(newCall);
+  origCall->eraseFromParent();
+}
+
+void MergeFuncIgnoringConstImpl::dumpMergeInfo(const FunctionInfos &FInfos,
+                                               unsigned paramSize) {
+  std::set<llvm::IRHash> oHashes;
+  std::vector<std::string> funcLocs;
+  Function *OrigFunc = nullptr;
+  for (const auto &FInfo : FInfos) {
+    OrigFunc = FInfo.F;
+
+    llvm::IRHash origHash = StructuralHash(*OrigFunc);
+    oHashes.insert(origHash);
+
+    // Print debug location.
+    std::string Result;
+    raw_string_ostream DbgLocOS(Result);
+    if (DISubprogram *DIS = OrigFunc->getSubprogram()) {
+      DebugLoc FuncDbgLoc =
+          DILocation::get(DIS->getContext(), DIS->getScopeLine(), 0, DIS);
+      FuncDbgLoc.print(DbgLocOS);
+      DbgLocOS.flush();
+    }
+    std::string singleLine =
+        "# functionLoc " +
+        std::to_string(GlobalValue::getGUID(OrigFunc->getName())) + " " +
+        Result + " " + std::string(OrigFunc->getName()) + "\n";
+    funcLocs.push_back(singleLine);
+  }
+}
+
+/// Merge all functions in \p FInfos by creating thunks which call the single
+/// merged function with additional parameters.
+void MergeFuncIgnoringConstImpl::mergeWithParams(const FunctionInfos &FInfos,
+                                                 ParamInfos &Params) {
+  // We reuse the body of the first function for the new merged function.
+  Function *FirstF = FInfos.front().F;
+
+  // Build the type for the merged function. This will be the type of the
+  // original function (FirstF) but with the additional parameters which are
+  // needed to parameterize the merged function.
+  FunctionType *OrigTy = FirstF->getFunctionType();
+  SmallVector<Type *, 8> ParamTypes(OrigTy->param_begin(), OrigTy->param_end());
+
+  for (const ParamInfo &PI : Params) {
+    ParamTypes.push_back(PI.Values[0]->getType());
+  }
+
+  FunctionType *funcType =
+      FunctionType::get(OrigTy->getReturnType(), ParamTypes, false);
+
+  // Create the new function.
+  Function *NewFunction = Function::Create(funcType, FirstF->getLinkage(),
+                                           FirstF->getName() + MergeFuncSuffix);
+  if (auto *SP = FirstF->getSubprogram())
+    NewFunction->setSubprogram(SP);
+  NewFunction->copyAttributesFrom(FirstF);
+  // NOTE: this function is not externally available, so ensure that we reset
+  // the DLL storage class.
+  NewFunction->setDLLStorageClass(GlobalValue::DefaultStorageClass);
+  if (UseLinkOnceODRLinkageMerging)
+    NewFunction->setLinkage(GlobalValue::LinkOnceODRLinkage);
+  else
+    NewFunction->setLinkage(GlobalValue::InternalLinkage);
+  if (NoInlineForMergedFunction)
+    NewFunction->addFnAttr(Attribute::NoInline);
+
+  // Insert the new function after the last function in the equivalence class.
+  FirstF->getParent()->getFunctionList().insert(
+      std::next(FInfos[1].F->getIterator()), NewFunction);
+
+  LLVM_DEBUG(dbgs() << "  Merge into " << NewFunction->getName() << '\n');
+
+  // Move the body of FirstF into the NewFunction.
+  NewFunction->splice(NewFunction->begin(), FirstF);
+
+  auto NewArgIter = NewFunction->arg_begin();
+  for (Argument &OrigArg : FirstF->args()) {
+    Argument &NewArg = *NewArgIter++;
+    OrigArg.replaceAllUsesWith(&NewArg);
+  }
+  unsigned numOrigArgs = FirstF->arg_size();
+
+  SmallPtrSet<Function *, 8> SelfReferencingFunctions;
+
+  // Replace all differing operands with a parameter.
+  for (unsigned paramIdx = 0; paramIdx < Params.size(); ++paramIdx) {
+    const ParamInfo &PI = Params[paramIdx];
+    Argument *NewArg = NewFunction->getArg(numOrigArgs + paramIdx);
+
+    if (!PI.NeedsPointerSigning) {
+      for (const OpLocation &OL : PI.Uses) {
+        OL.I->setOperand(OL.OpIndex, NewArg);
+      }
+    }
+    // Collect all functions which are referenced by any parameter.
+    for (Value *V : PI.Values) {
+      if (auto *F = dyn_cast<Function>(V))
+        SelfReferencingFunctions.insert(F);
+    }
+  }
+
+  // Replace all differing operands, which need pointer signing, with a
+  // parameter.
+  // We need to do that after all other parameters, because here we replace
+  // call instructions, which must still be live in case they have another
+  // constant to be replaced.
+  for (unsigned paramIdx = 0; paramIdx < Params.size(); ++paramIdx) {
+    ParamInfo &PI = Params[paramIdx];
+    if (PI.NeedsPointerSigning) {
+      PI.computeDiscriminator(NewFunction->getContext());
+      for (const OpLocation &OL : PI.Uses) {
+        auto *origCall = cast<CallInst>(OL.I);
+        Argument *newCallee = NewFunction->getArg(numOrigArgs + paramIdx);
+        replaceCallWithAddedPtrAuth(origCall, newCallee, PI.Discriminator);
+      }
+    }
+  }
+
+  for (unsigned FIdx = 0, NumFuncs = FInfos.size(); FIdx < NumFuncs; ++FIdx) {
+    Function *OrigFunc = FInfos[FIdx].F;
+    // Don't try to replace all callers of functions which are used as
+    // parameters because we must not delete such functions.
+    if (SelfReferencingFunctions.count(OrigFunc) == 0 &&
+        replaceDirectCallers(OrigFunc, NewFunction, Params, FIdx)) {
+      // We could replace all uses (and the function is not externally visible),
+      // so we can delete the original function.
+      auto Iter = FuncEntries.find(OrigFunc);
+      assert(Iter != FuncEntries.end());
+      assert(!isInEquivalenceClass(&*Iter->second));
+      Iter->second->F = nullptr;
+      FuncEntries.erase(Iter);
+      LLVM_DEBUG(dbgs() << "    Erase " << OrigFunc->getName() << '\n');
+      OrigFunc->eraseFromParent();
+    } else {
+      // Otherwise we need a thunk which calls the merged function.
+      writeThunk(NewFunction, OrigFunc, Params, FIdx);
+    }
+    ++NumFunctionsMergedIgnoringConst;
+  }
+}
+
+/// Remove all functions of \p FE's equivalence class from FnTree. Add them to
+/// Deferred so that we'll look at them in the next round.
+void MergeFuncIgnoringConstImpl::removeEquivalenceClassFromTree(
+    FunctionEntry *FE) {
+  if (!isInEquivalenceClass(FE))
+    return;
+
+  FnTreeType::iterator Iter = FE->TreeIter;
+  FunctionEntry *Unlink = Iter->First;
+  Unlink->NumUnhandledCallees = 0;
+  while (Unlink) {
+    LLVM_DEBUG(dbgs() << "    remove from tree: " << Unlink->F->getName()
+                      << '\n');
+    if (!Unlink->IsMerged)
+      Deferred.emplace_back(Unlink->F);
+    Unlink->TreeIter = FnTree.end();
+    assert(Unlink->NumUnhandledCallees == 0);
+    FunctionEntry *NextEntry = Unlink->Next;
+    Unlink->Next = nullptr;
+    Unlink = NextEntry;
+  }
+  FnTree.erase(Iter);
+}
+
+// Helper for writeThunk: selects the proper bitcast operation,
+// but is a bit simpler than CastInst::getCastOpcode.
+Value *createCast(IRBuilder<> &Builder, Value *V, Type *DestTy) {
+  Type *SrcTy = V->getType();
+  if (SrcTy->isStructTy()) {
+    assert(DestTy->isStructTy());
+    assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements());
+    Value *Result = UndefValue::get(DestTy);
+    for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) {
+      Value *Element =
+          createCast(Builder, Builder.CreateExtractValue(V, ArrayRef(I)),
+                     DestTy->getStructElementType(I));
+
+      Result = Builder.CreateInsertValue(Result, Element, ArrayRef(I));
+    }
+    return Result;
+  }
+  assert(!DestTy->isStructTy());
+  if (CastArrayType) {
+    if (auto *SrcAT = dyn_cast<ArrayType>(SrcTy)) {
+      auto *DestAT = dyn_cast<ArrayType>(DestTy);
+      assert(DestAT);
+      assert(SrcAT->getNumElements() == DestAT->getNumElements());
+      Value *Result = UndefValue::get(DestTy);
+      for (unsigned int I = 0, E = SrcAT->getNumElements(); I < E; ++I) {
+        Value *Element =
+            createCast(Builder, Builder.CreateExtractValue(V, ArrayRef(I)),
+                       DestAT->getElementType());
+
+        Result = Builder.CreateInsertValue(Result, Element, ArrayRef(I));
+      }
+      return Result;
+    }
+    assert(!DestTy->isArrayTy());
+  }
+  if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
+    return Builder.CreateIntToPtr(V, DestTy);
+  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
+    return Builder.CreatePtrToInt(V, DestTy);
+  else
+    return Builder.CreateBitCast(V, DestTy);
+}
+
+/// Replace \p Thunk with a simple tail call to \p ToFunc. Also add parameters
+/// to the call to \p ToFunc, which are defined by \p FuncIdx's entry in
+/// \p Params.
+void MergeFuncIgnoringConstImpl::writeThunk(Function *ToFunc, Function *Thunk,
+                                            const ParamInfos &Params,
+                                            unsigned FuncIdx) {
+  // Delete the existing content of Thunk.
+  Thunk->dropAllReferences();
+
+  BasicBlock *BB = BasicBlock::Create(Thunk->getContext(), "", Thunk);
+  IRBuilder<> Builder(BB);
+
+  SmallVector<Value *, 16> Args;
+  unsigned ParamIdx = 0;
+  FunctionType *ToFuncTy = ToFunc->getFunctionType();
+
+  // Add arguments which are passed through Thunk.
+  for (Argument &AI : Thunk->args()) {
+    Args.push_back(createCast(Builder, &AI, ToFuncTy->getParamType(ParamIdx)));
+    ++ParamIdx;
+  }
+  // Add new arguments defined by Params.
+  for (const ParamInfo &PI : Params) {
+    assert(ParamIdx < ToFuncTy->getNumParams());
+    Constant *param = getSignedValue(PI, FuncIdx);
+    Args.push_back(
+        createCast(Builder, param, ToFuncTy->getParamType(ParamIdx)));
+    ++ParamIdx;
+  }
+
+  CallInst *CI = Builder.CreateCall(ToFunc, Args);
+  bool isSwiftTailCall = ToFunc->getCallingConv() == CallingConv::SwiftTail &&
+                         Thunk->getCallingConv() == CallingConv::SwiftTail;
+  CI->setTailCallKind(isSwiftTailCall ? llvm::CallInst::TCK_MustTail
+                                      : llvm::CallInst::TCK_Tail);
+  CI->setCallingConv(ToFunc->getCallingConv());
+  CI->setAttributes(ToFunc->getAttributes());
+  if (Thunk->getReturnType()->isVoidTy()) {
+    Builder.CreateRetVoid();
+  } else {
+    Builder.CreateRet(createCast(Builder, CI, Thunk->getReturnType()));
+  }
+
+  LLVM_DEBUG(dbgs() << "    writeThunk: " << Thunk->getName() << '\n');
+  ++NumThunksWrittenIgnoringConst;
+}
+
+static llvm::AttributeList
+fixUpTypesInByValAndStructRetAttributes(llvm::FunctionType *fnType,
+                                        llvm::AttributeList attrList) {
+  auto &context = fnType->getContext();
+  if (!context.supportsTypedPointers())
+    return attrList;
+
+  for (unsigned i = 0; i < fnType->getNumParams(); ++i) {
+    auto paramTy = fnType->getParamType(i);
+    auto attrListIndex = llvm::AttributeList::FirstArgIndex + i;
+    if (attrList.hasParamAttr(i, llvm::Attribute::StructRet) &&
+        paramTy->getNonOpaquePointerElementType() !=
+            attrList.getParamStructRetType(i))
+      attrList = attrList.replaceAttributeTypeAtIndex(
+          context, attrListIndex, llvm::Attribute::StructRet,
+          paramTy->getNonOpaquePointerElementType());
+    if (attrList.hasParamAttr(i, llvm::Attribute::ByVal) &&
+        paramTy->getNonOpaquePointerElementType() !=
+            attrList.getParamByValType(i))
+      attrList = attrList.replaceAttributeTypeAtIndex(
+          context, attrListIndex, llvm::Attribute::ByVal,
+          paramTy->getNonOpaquePointerElementType());
+  }
+  return attrList;
+}
+
+/// Replace direct callers of \p Old with \p New. Also add parameters to the
+/// call to \p New, which are defined by \p FuncIdx's entry in \p Params.
+bool MergeFuncIgnoringConstImpl::replaceDirectCallers(Function *Old,
+                                                      Function *New,
+                                                      const ParamInfos &Params,
+                                                      unsigned FuncIdx) {
+  bool AllReplaced = true;
+
+  SmallVector<CallInst *, 8> Callers;
+
+  for (Use &U : Old->uses()) {
+    auto *I = dyn_cast<Instruction>(U.getUser());
+    if (!I) {
+      AllReplaced = false;
+      continue;
+    }
+    FunctionEntry *FE = getEntry(I->getFunction());
+    if (FE)
+      removeEquivalenceClassFromTree(FE);
+
+    auto *CI = dyn_cast<CallInst>(I);
+    if (!CI || CI->getCalledOperand() != Old) {
+      AllReplaced = false;
+      continue;
+    }
+    Callers.push_back(CI);
+  }
+  if (!AllReplaced)
+    return false;
+
+  // When AlwaysCallThunk is true, return false so that a thunk will be
+  // emitted, and do not replace the call sites.
+  if (AlwaysCallThunk)
+    return false;
+
+  for (CallInst *CI : Callers) {
+    auto &Context = New->getContext();
+    auto NewPAL = New->getAttributes();
+
+    SmallVector<Type *, 8> OldParamTypes;
+    SmallVector<Value *, 16> NewArgs;
+    SmallVector<AttributeSet, 8> NewArgAttrs;
+    IRBuilder<> Builder(CI);
+
+    FunctionType *NewFuncTy = New->getFunctionType();
+    (void)NewFuncTy;
+    unsigned ParamIdx = 0;
+
+    // Add the existing parameters.
+    for (Value *OldArg : CI->args()) {
+      NewArgAttrs.push_back(NewPAL.getParamAttrs(ParamIdx));
+      NewArgs.push_back(OldArg);
+      OldParamTypes.push_back(OldArg->getType());
+      ++ParamIdx;
+    }
+    // Add the new parameters.
+    for (const ParamInfo &PI : Params) {
+      assert(ParamIdx < NewFuncTy->getNumParams());
+      Constant *ArgValue = getSignedValue(PI, FuncIdx);
+      assert(ArgValue != Old && "should not try to replace all callers of self "
+                                "referencing functions");
+      NewArgs.push_back(ArgValue);
+      OldParamTypes.push_back(ArgValue->getType());
+      ++ParamIdx;
+    }
+
+    auto *FType = FunctionType::get(Old->getFunctionType()->getReturnType(),
+                                    OldParamTypes, false);
+    auto *FPtrType = PointerType::get(
+        FType, cast<PointerType>(New->getType())->getAddressSpace());
+
+    Value *Callee = ConstantExpr::getBitCast(New, FPtrType);
+    CallInst *NewCI;
+    if (objcarc::hasAttachedCallOpBundle(CI)) {
+      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(CI)};
+      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
+      NewCI = Builder.CreateCall(FType, Callee, NewArgs, {OB});
+    } else {
+      NewCI = Builder.CreateCall(FType, Callee, NewArgs);
+    }
+    NewCI->setCallingConv(CI->getCallingConv());
+    // Don't transfer function attributes from the merged function to the call
+    // site. Function attributes typically aren't relevant to the calling
+    // convention or ABI.
+    auto newAttrList = AttributeList::get(Context, /*FnAttrs=*/AttributeSet(),
+                                          NewPAL.getRetAttrs(), NewArgAttrs);
+    newAttrList = fixUpTypesInByValAndStructRetAttributes(FType, newAttrList);
+    NewCI->setAttributes(newAttrList);
+    if (IgnoreMusttailFunction && CI->isMustTailCall()) {
+      // We are replacing a call site that is musttail; report it.
+      llvm::errs() << "callsite has musttail in newF " << New->getName()
+                   << "\n";
+    }
+    NewCI->copyMetadata(*CI);
+    CI->replaceAllUsesWith(NewCI);
+    CI->eraseFromParent();
+  }
+  assert(Old->use_empty() && "should have replaced all uses of old function");
+  return Old->hasLocalLinkage();
+}
+
+PreservedAnalyses MergeFuncIgnoringConstPass::run(Module &M,
+                                                  ModuleAnalysisManager &MAM) {
+  if (MergeFuncIgnoringConstImpl(PtrAuthEnabled, PtrAuthKey, MergeFuncSuffix)
+          .runImpl(M))
+    return PreservedAnalyses::none();
+  return PreservedAnalyses::all();
+}
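
For anyone who wants to try the pass out of tree before this lands, the sketch below is a minimal, hypothetical driver (not part of the patch) that runs the new module pass with the new pass manager. Only MergeFuncIgnoringConstPass and its run(Module &, ModuleAnalysisManager &) entry point above come from the patch; the helper name runMergeIgnoringConst and the PassBuilder boilerplate are illustrative. From opt, the equivalent is -passes=mergefunc-ignoring-const, as the tests below exercise.

  #include "llvm/Analysis/CGSCCPassManager.h"
  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Transforms/IPO/MergeFunctionsIgnoringConst.h"

  using namespace llvm;

  // Hypothetical helper: run MergeFuncIgnoringConstPass over M with freshly
  // built analysis managers wired together through PassBuilder.
  static PreservedAnalyses runMergeIgnoringConst(Module &M) {
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;

    PassBuilder PB;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM;
    MPM.addPass(MergeFuncIgnoringConstPass());
    return MPM.run(M, MAM);
  }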

diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt
index 51e8821773c3af3..9c320beb09711af 100644
--- a/llvm/lib/Transforms/Utils/CMakeLists.txt
+++ b/llvm/lib/Transforms/Utils/CMakeLists.txt
@@ -27,6 +27,7 @@ add_llvm_component_library(LLVMTransformUtils
   FixIrreducible.cpp
   FlattenCFG.cpp
   FunctionComparator.cpp
+  FunctionComparatorIgnoringConst.cpp
   FunctionImportUtils.cpp
   GlobalStatus.cpp
   GuardUtils.cpp

diff --git a/llvm/lib/Transforms/Utils/FunctionComparatorIgnoringConst.cpp b/llvm/lib/Transforms/Utils/FunctionComparatorIgnoringConst.cpp
new file mode 100644
index 000000000000000..9cfd95345598083
--- /dev/null
+++ b/llvm/lib/Transforms/Utils/FunctionComparatorIgnoringConst.cpp
@@ -0,0 +1,107 @@
+//===--- FunctionComparatorIgnoringConst.cpp - Function Comparator --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a function comparator that allows eligible constant
+// operands to differ. It is used by the MergeFunctionsIgnoringConst pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Transforms/Utils/MergeFunctionsIgnoringConst.h"
+
+using namespace llvm;
+
+int FunctionComparatorIgnoringConst::cmpOperandsIgnoringConsts(
+    const Instruction *L, const Instruction *R, unsigned opIdx) {
+  Value *OpL = L->getOperand(opIdx);
+  Value *OpR = R->getOperand(opIdx);
+
+  int Res = cmpValues(OpL, OpR);
+  if (Res == 0)
+    return Res;
+
+  if (!isa<Constant>(OpL) || !isa<Constant>(OpR))
+    return Res;
+
+  if (!isEligibleOperandForConstantSharing(L, opIdx) ||
+      !isEligibleOperandForConstantSharing(R, opIdx))
+    return Res;
+
+  if (cmpTypes(OpL->getType(), OpR->getType()))
+    return Res;
+
+  return 0;
+}
+
+// Test whether two basic blocks have equivalent behavior.
+int FunctionComparatorIgnoringConst::cmpBasicBlocksIgnoringConsts(
+    const BasicBlock *BBL, const BasicBlock *BBR,
+    const std::set<std::pair<int, int>> *InstOpndIndex) {
+  BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
+  BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
+
+  do {
+    bool needToCmpOperands = true;
+    if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
+      return Res;
+    if (needToCmpOperands) {
+      assert(InstL->getNumOperands() == InstR->getNumOperands());
+
+      for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
+        // When a set of (instruction, operand) index pairs is given, we only
+        // ignore constants located at those indices. Otherwise, we compare
+        // the operands exactly.
+        if (InstOpndIndex && !InstOpndIndex->count(std::make_pair(Index, i))) {
+          Value *OpL = InstL->getOperand(i);
+          Value *OpR = InstR->getOperand(i);
+          if (int Res = cmpValues(OpL, OpR))
+            return Res;
+        }
+        if (int Res = cmpOperandsIgnoringConsts(&*InstL, &*InstR, i))
+          return Res;
+        // cmpValues should ensure this is true.
+        assert(cmpTypes(InstL->getOperand(i)->getType(),
+                        InstR->getOperand(i)->getType()) == 0);
+      }
+    }
+    ++Index;
+    ++InstL, ++InstR;
+  } while (InstL != InstLE && InstR != InstRE);
+
+  if (InstL != InstLE && InstR == InstRE)
+    return 1;
+  if (InstL == InstLE && InstR != InstRE)
+    return -1;
+  return 0;
+}
+
+// Test whether the two functions have equivalent behavior.
+int FunctionComparatorIgnoringConst::compareIgnoringConsts(
+    const std::set<std::pair<int, int>> *InstOpndIndex) {
+  beginCompare();
+  Index = 0;
+
+  if (int Res = compareSignature())
+    return Res;
+
+  Function::const_iterator LIter = FnL->begin(), LEnd = FnL->end();
+  Function::const_iterator RIter = FnR->begin(), REnd = FnR->end();
+
+  do {
+    const BasicBlock *BBL = &*LIter;
+    const BasicBlock *BBR = &*RIter;
+
+    if (int Res = cmpValues(BBL, BBR))
+      return Res;
+
+    if (int Res = cmpBasicBlocksIgnoringConsts(BBL, BBR, InstOpndIndex))
+      return Res;
+
+    ++LIter, ++RIter;
+  } while (LIter != LEnd && RIter != REnd);
+
+  return 0;
+}
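
A rough usage sketch (not part of the patch) of how this comparator is meant to be driven. The constructor is an assumption here, mirroring FunctionComparator's (F1, F2, GlobalNumberState *) parameters, and the helper name is made up; what is taken from the code above is the contract of compareIgnoringConsts: it returns 0 when the functions match up to constants, and the optional set of (instruction index, operand index) pairs restricts which constant operands may differ.

  #include <set>
  #include <utility>

  #include "llvm/Transforms/Utils/FunctionComparatorIgnoringConst.h"

  using namespace llvm;

  // Hypothetical helper, not from this patch. Assumes the derived comparator
  // keeps FunctionComparator's (F1, F2, GlobalNumberState *) constructor.
  static bool equivalentIgnoringConstsAt(
      const Function *F1, const Function *F2,
      const std::set<std::pair<int, int>> &InstOpndIndex) {
    GlobalNumberState GN;                             // from FunctionComparator.h
    FunctionComparatorIgnoringConst Cmp(F1, F2, &GN); // assumed constructor
    // 0 means "equal up to constants at the listed positions"; any non-zero
    // value is an ordering result, as with FunctionComparator::compare().
    return Cmp.compareIgnoringConsts(&InstOpndIndex) == 0;
  }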

diff --git a/llvm/test/Transforms/MergeFuncIgnoringConst/merge_func.ll b/llvm/test/Transforms/MergeFuncIgnoringConst/merge_func.ll
new file mode 100644
index 000000000000000..1d84340da417235
--- /dev/null
+++ b/llvm/test/Transforms/MergeFuncIgnoringConst/merge_func.ll
@@ -0,0 +1,532 @@
+; RUN: opt -S -mergefunc-ignoringconst-threshold=4 -passes=mergefunc-ignoring-const %s | FileCheck %s
+
+ at g1 = external global i32
+ at g2 = external global i32
+ at g3 = external global i32
+ at g4 = external global i32
+ at g5 = external global i32
+
+; Test the most trivial example.
+
+; CHECK-LABEL: define i32 @simple_func1(i32 %x, i32 %y)
+; CHECK: %1 = tail call i32 @simple_func1.Tm(i32 %x, i32 %y, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @simple_func1(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %l = load i32, i32* @g1, align 4
+  %sum3 = add i32 %sum2, %y
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define i32 @simple_func2(i32 %x, i32 %y)
+; CHECK: %1 = tail call i32 @simple_func1.Tm(i32 %x, i32 %y, ptr @g2)
+; CHECK: ret i32 %1
+define i32 @simple_func2(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %l = load i32, i32* @g2, align 4
+  %sum3 = add i32 %sum2, %y
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define internal i32 @simple_func1.Tm(i32 %0, i32 %1, ptr %2)
+; CHECK: %l = load i32, ptr %2
+; CHECK: ret
+
+
+; Merge 3 functions with 3 types of differing instructions: load, store and call.
+
+; CHECK-LABEL: define i32 @func1_of_3(i32 %x)
+; CHECK: %1 = tail call i32 @func1_of_3.Tm(i32 %x, ptr @g1, ptr @g1, ptr @callee1)
+; CHECK: ret i32 %1
+define i32 @func1_of_3(i32 %x) {
+  %l1 = load i32, i32* @g1, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g1, align 4
+  %sum2 = add i32 %sum, %l2
+  store i32 %sum2, i32 *@g1, align 4
+  call void @callee1(i32 %sum2)
+  %sum3 = add i32 %sum2, %l2
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define i32 @func2_of_3(i32 %x)
+; CHECK: %1 = tail call i32 @func1_of_3.Tm(i32 %x, ptr @g2, ptr @g2, ptr @callee2)
+; CHECK: ret i32 %1
+define i32 @func2_of_3(i32 %x) {
+  %l1 = load i32, i32* @g2, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l2
+  store i32 %sum2, i32 *@g2, align 4
+  call void @callee2(i32 %sum2)
+  %sum3 = add i32 %sum2, %l2
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define i32 @func3_of_3(i32 %x)
+; CHECK: %1 = tail call i32 @func1_of_3.Tm(i32 %x, ptr @g3, ptr @g1, ptr @callee3)
+; CHECK: ret i32 %1
+define i32 @func3_of_3(i32 %x) {
+  %l1 = load i32, i32* @g3, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g1, align 4
+  %sum2 = add i32 %sum, %l2
+  store i32 %sum2, i32 *@g3, align 4
+  call void @callee3(i32 %sum2)
+  %sum3 = add i32 %sum2, %l2
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define internal i32 @func1_of_3.Tm(i32 %0, ptr %1, ptr %2, ptr %3)
+; CHECK: %l1 = load i32, ptr %1
+; CHECK: %l2 = load i32, ptr %2
+; CHECK: store i32 %sum2, ptr %1
+; CHECK: call void %3(i32 %sum2)
+; CHECK: ret
+
+declare void @callee1(i32 %x)
+declare void @callee2(i32 %x)
+declare void @callee3(i32 %x)
+
+; Preserve attributes
+
+; CHECK-LABEL: define void @sret_func1(ptr sret(i32) %p, i32 %x, i32 %y)
+; CHECK: tail call void @sret_func1.Tm(ptr sret(i32) %p, i32 %x, i32 %y, ptr @g1)
+; CHECK: ret void
+define void @sret_func1(i32* sret(i32) %p, i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %l = load i32, i32* @g1, align 4
+  %sum2 = add i32 %sum, %l
+  store i32 %sum2, i32* %p
+  ret void
+}
+
+; CHECK-LABEL: define void @sret_func2(ptr sret(i32) %p, i32 %x, i32 %y)
+; CHECK: tail call void @sret_func1.Tm(ptr sret(i32) %p, i32 %x, i32 %y, ptr @g2)
+; CHECK: ret void
+define void @sret_func2(i32* sret(i32) %p, i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %l = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l
+  store i32 %sum2, i32* %p
+  ret void
+}
+
+; CHECK-LABEL: define internal void @sret_func1.Tm(ptr sret(i32) %0, i32 %1, i32 %2, ptr %3)
+; CHECK: %l = load i32, ptr %3, align 4
+; CHECK: store i32 %sum2, ptr %0
+; CHECK: ret
+
+
+; Don't merge all functions, because we would generate too many parameters.
+; Instead merge those functions which match best.
+
+; CHECK-LABEL: define i32 @func1_merged_with3(i32 %x)
+; CHECK: %1 = tail call i32 @func1_merged_with3.Tm(i32 %x, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @func1_merged_with3(i32 %x) {
+  %l1 = load i32, i32* @g1, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g3, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g4, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g5, align 4
+  %sum5 = add i32 %sum4, %l2
+  ret i32 %sum5
+}
+
+; CHECK-LABEL: define i32 @func2_merged_with4(i32 %x)
+; CHECK: %1 = tail call i32 @func2_merged_with4.Tm(i32 %x, ptr @g2)
+; CHECK: ret i32 %1
+define i32 @func2_merged_with4(i32 %x) {
+  %l1 = load i32, i32* @g2, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g3, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g4, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g5, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g1, align 4
+  %sum5 = add i32 %sum4, %l2
+  ret i32 %sum5
+}
+
+; CHECK-LABEL: define i32 @func3_merged_with1(i32 %x)
+; CHECK: %1 = tail call i32 @func1_merged_with3.Tm(i32 %x, ptr @g2)
+; CHECK: ret i32 %1
+define i32 @func3_merged_with1(i32 %x) {
+  %l1 = load i32, i32* @g2, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g3, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g4, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g5, align 4
+  %sum5 = add i32 %sum4, %l2
+  ret i32 %sum5
+}
+
+; CHECK-LABEL: define internal i32 @func1_merged_with3.Tm(i32 %0, ptr %1)
+; CHECK: load i32, ptr %1, align 4
+; CHECK: load i32, ptr @g2, align 4
+; CHECK: load i32, ptr @g3, align 4
+; CHECK: load i32, ptr @g4, align 4
+; CHECK: load i32, ptr @g5, align 4
+; CHECK: ret i32
+
+; CHECK-LABEL: define i32 @func4_merged_with2(i32 %x) {
+; CHECK: %1 = tail call i32 @func2_merged_with4.Tm(i32 %x, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @func4_merged_with2(i32 %x) {
+  %l1 = load i32, i32* @g1, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g3, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g4, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g5, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g1, align 4
+  %sum5 = add i32 %sum4, %l2
+  ret i32 %sum5
+}
+
+
+; The same example as above, but we cannot merge func2 with func4, because
+; func4 calls func1 (which is merged with func3 in the first iteration).
+
+declare i32 @get_int(i32 %x)
+
+; CHECK-LABEL: define i32 @Function1_merged_with_3(i32 %x)
+; CHECK: %1 = tail call i32 @Function1_merged_with_3.Tm(i32 %x, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @Function1_merged_with_3(i32 %x) {
+  %l1 = load i32, i32* @g1, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g3, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g4, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g5, align 4
+  %sum5 = add i32 %sum4, %l2
+  %c = call fastcc i32 @get_int(i32 %sum5)
+  ret i32 %c
+}
+
+; CHECK-LABEL: define i32 @Function2_not_merged(i32 %x)
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: %c = call fastcc i32 @get_int
+; CHECK: ret i32 %c
+define i32 @Function2_not_merged(i32 %x) {
+  %l1 = load i32, i32* @g2, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g3, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g4, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g5, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g1, align 4
+  %sum5 = add i32 %sum4, %l2
+  %c = call fastcc i32 @get_int(i32 %sum5)
+  ret i32 %c
+}
+
+; CHECK-LABEL: define i32 @Function3_merged_with_1(i32 %x)
+; CHECK: %1 = tail call i32 @Function1_merged_with_3.Tm(i32 %x, ptr @g2)
+; CHECK: ret i32 %1
+define i32 @Function3_merged_with_1(i32 %x) {
+  %l1 = load i32, i32* @g2, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g2, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g3, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g4, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g5, align 4
+  %sum5 = add i32 %sum4, %l2
+  %c = call fastcc i32 @get_int(i32 %sum5)
+  ret i32 %c
+}
+
+; CHECK-LABEL: define internal i32 @Function1_merged_with_3.Tm(i32 %0, ptr %1)
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: %c = call fastcc i32 @get_int
+; CHECK: ret i32 %c
+
+; CHECK-LABEL: define i32 @Function4_not_merged(i32 %x) {
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: load
+; CHECK: %1 = call fastcc i32 @Function1_merged_with_3.Tm(i32 %sum5, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @Function4_not_merged(i32 %x) {
+  %l1 = load i32, i32* @g1, align 4
+  %sum = add i32 %x, %l1
+  %l2 = load i32, i32* @g3, align 4
+  %sum2 = add i32 %sum, %l2
+  %l3 = load i32, i32* @g4, align 4
+  %sum3 = add i32 %sum2, %l2
+  %l4 = load i32, i32* @g5, align 4
+  %sum4 = add i32 %sum3, %l2
+  %l5 = load i32, i32* @g1, align 4
+  %sum5 = add i32 %sum4, %l2
+  %c = call fastcc i32 @Function1_merged_with_3(i32 %sum5)
+  ret i32 %c
+}
+
+
+; Test a call chain: caller -> callee1 -> callee2.
+; Functions should be merged in bottom-up order: callee2, callee1, caller.
+; Also check that the calling convention is preserved.
+
+; CHECK-LABEL: define fastcc i32 @callee1_a(i32 %x, i32 %y)
+; CHECK: %1 = tail call fastcc i32 @callee1_a.Tm(i32 %x, i32 %y, ptr @g1)
+; CHECK: ret i32 %1
+define fastcc i32 @callee1_a(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %c = call i32 @callee2_a(i32 %sum2, i32 %y)
+  %sum3 = add i32 %sum2, %c
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define fastcc i32 @callee1_b(i32 %x, i32 %y)
+; CHECK: %1 = tail call fastcc i32 @callee1_a.Tm(i32 %x, i32 %y, ptr @g2)
+; CHECK: ret i32 %1
+define fastcc i32 @callee1_b(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %c = call i32 @callee2_b(i32 %sum2, i32 %y)
+  %sum3 = add i32 %sum2, %c
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define internal fastcc i32 @callee1_a.Tm(i32 %0, i32 %1, ptr %2)
+; CHECK: call i32 @callee2_a.Tm(i32 %sum2, i32 %1, ptr %2)
+; CHECK: ret
+
+; CHECK-NOT: @callee2_a(
+define internal i32 @callee2_a(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = sub i32 %sum, %y
+  %l = load i32, i32* @g1, align 4
+  %sum3 = add i32 %sum2, %y
+  ret i32 %sum3
+}
+
+; CHECK-NOT: @callee2_b(
+define internal i32 @callee2_b(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = sub i32 %sum, %y
+  %l = load i32, i32* @g2, align 4
+  %sum3 = add i32 %sum2, %y
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define i32 @caller_a(i32 %x, i32 %y)
+; CHECK: %1 = tail call i32 @caller_a.Tm(i32 %x, i32 %y, ptr @g1)
+; CHECK: ret i32 %1
+define i32 @caller_a(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %c = call fastcc i32 @callee1_a(i32 %sum2, i32 %y)
+  %sum3 = add i32 %sum2, %c
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define i32 @caller_b(i32 %x, i32 %y)
+; CHECK: %1 = tail call i32 @caller_a.Tm(i32 %x, i32 %y, ptr @g2)
+; CHECK: ret i32 %1
+define i32 @caller_b(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %c = call fastcc i32 @callee1_b(i32 %sum2, i32 %y)
+  %sum3 = add i32 %sum2, %c
+  ret i32 %sum3
+}
+
+; CHECK-LABEL: define internal i32 @caller_a.Tm(i32 %0, i32 %1, ptr %2)
+; CHECK: call fastcc i32 @callee1_a.Tm(i32 %sum2, i32 %1, ptr %2)
+; CHECK: ret
+
+
+; Ensure that we do not merge functions that are identical with the
+; exception of the order of the incoming blocks to a phi.
+
+; CHECK-LABEL: define linkonce_odr hidden i1 @first(i2 %0)
+define linkonce_odr hidden i1 @first(i2) {
+entry:
+; CHECK: switch i2
+  switch i2 %0, label %default [
+    i2 0, label %L1
+    i2 1, label %L2
+    i2 -2, label %L3
+  ]
+default:
+  unreachable
+L1:
+  br label %done
+L2:
+  br label %done
+L3:
+  br label %done
+done:
+  %result = phi i1 [ true, %L1 ], [ false, %L2 ], [ false, %L3 ]
+; CHECK: ret i1
+  ret i1 %result
+}
+
+; CHECK-LABEL: define linkonce_odr hidden i1 @second(i2 %0)
+define linkonce_odr hidden i1 @second(i2) {
+entry:
+; CHECK: switch i2
+  switch i2 %0, label %default [
+    i2 0, label %L1
+    i2 1, label %L2
+    i2 -2, label %L3
+  ]
+default:
+  unreachable
+L1:
+  br label %done
+L2:
+  br label %done
+L3:
+  br label %done
+done:
+  %result = phi i1 [ true, %L3 ], [ false, %L2 ], [ false, %L1 ]
+; CHECK: ret i1
+  ret i1 %result
+}
+
+; Check self-recursive functions
+
+; CHECK-LABEL: define internal void @recursive1(i32 %x, i32 %y)
+; CHECK: tail call void @recursive1.Tm(i32 %x, i32 %y, ptr @g1, ptr @recursive1)
+; CHECK: ret void
+define internal void @recursive1(i32 %x, i32 %y) {
+  br i1 undef, label %bb1, label %bb2
+
+bb1:
+  %l = load i32, i32* @g1, align 4
+  call void @recursive1(i32 %x, i32 %y)
+  br label %bb2
+
+bb2:
+  ret void
+}
+
+; CHECK-LABEL: define internal void @recursive2(i32 %x, i32 %y)
+; CHECK: tail call void @recursive1.Tm(i32 %x, i32 %y, ptr @g2, ptr @recursive2)
+; CHECK: ret void
+define internal void @recursive2(i32 %x, i32 %y) {
+  br i1 undef, label %bb1, label %bb2
+
+bb1:
+  %l = load i32, i32* @g2, align 4
+  call void @recursive2(i32 %x, i32 %y)
+  br label %bb2
+
+bb2:
+  ret void
+}
+; CHECK-LABEL: define internal void @recursive1.Tm(i32 %0, i32 %1, ptr %2, ptr %3)
+; CHECK: load i32, ptr %2
+; CHECK: call void %3(i32 %0, i32 %1)
+; CHECK: ret void
+
+
+; CHECK-LABEL: define internal void @another_recursive_func(i32 %x)
+; CHECK: tail call void @another_recursive_func.Tm(i32 %x, ptr @g1, ptr @another_recursive_func)
+; CHECK: ret void
+define internal void @another_recursive_func(i32 %x) {
+  br i1 undef, label %bb1, label %bb2
+
+bb1:
+  store i32 %x, i32 *@g1, align 4
+  call void @another_recursive_func(i32 %x)
+  br label %bb2
+
+bb2:
+  ret void
+}
+; CHECK-NOT: @not_really_recursive(
+
+; CHECK-LABEL: define internal void @another_recursive_func.Tm(i32 %0, ptr %1, ptr %2)
+; CHECK: store i32 %0, ptr %1
+; CHECK: call void %2(i32 %0)
+; CHECK: ret void
+define internal void @not_really_recursive(i32 %x) {
+  br i1 undef, label %bb1, label %bb2
+
+bb1:
+  store i32 %x, i32 *@g2, align 4
+  call void @callee1(i32 %x)
+  br label %bb2
+
+bb2:
+  ret void
+}
+; CHECK-NOT: @not_really_recursive(
+
+; CHECK-LABEL: define void @call_recursive_funcs(i32 %x)
+; CHECK: call void @recursive1(i32 %x, i32 %x)
+; CHECK: call void @recursive2(i32 %x, i32 %x)
+; CHECK: call void @another_recursive_func(i32 %x)
+; CHECK: call void @another_recursive_func.Tm(i32 %x, ptr @g2, ptr @callee1)
+; CHECK: ret void
+define void @call_recursive_funcs(i32 %x) {
+  call void @recursive1(i32 %x, i32 %x)
+  call void @recursive2(i32 %x, i32 %x)
+  call void @another_recursive_func(i32 %x)
+  call void @not_really_recursive(i32 %x)
+  ret void
+}
+
+; Ensure that we do not merge functions which make use of distinct dtrace
+; probes. Each call to a dtrace probe must resolve to a unique patchpoint.
+
+declare void @"__dtrace_probe$Apple$Probe1$v1$696e74"(i32) local_unnamed_addr
+
+; CHECK-LABEL: define i32 @use_dtrace_probe1
+; CHECK: call void @"__dtrace_probe$Apple$Probe1$v1$696e74"
+define i32 @use_dtrace_probe1(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %l = load i32, i32* @g1, align 4
+  %sum3 = add i32 %sum2, %y
+  tail call void @"__dtrace_probe$Apple$Probe1$v1$696e74"(i32 undef)
+  ret i32 %sum3
+}
+
+declare void @"__dtrace_probe$Apple$Probe2$v1$696e74"(i32) local_unnamed_addr
+
+; CHECK-LABEL: define i32 @use_dtrace_probe2
+; CHECK: call void @"__dtrace_probe$Apple$Probe2$v1$696e74"
+define i32 @use_dtrace_probe2(i32 %x, i32 %y) {
+  %sum = add i32 %x, %y
+  %sum2 = add i32 %sum, %y
+  %l = load i32, i32* @g2, align 4
+  %sum3 = add i32 %sum2, %y
+  tail call void @"__dtrace_probe$Apple$Probe2$v1$696e74"(i32 undef)
+  ret i32 %sum3
+}

diff --git a/llvm/test/Transforms/MergeFuncIgnoringConst/merge_with_exception.ll b/llvm/test/Transforms/MergeFuncIgnoringConst/merge_with_exception.ll
new file mode 100644
index 000000000000000..c5c8b898c046e51
--- /dev/null
+++ b/llvm/test/Transforms/MergeFuncIgnoringConst/merge_with_exception.ll
@@ -0,0 +1,190 @@
+; RUN: opt -S -enable-aggressive-mergefunc-ignoringconst -passes=mergefunc-ignoring-const %s -o - | FileCheck %s
+
+%4 = type opaque
+%10 = type opaque
+%"struct.SearchSpec::State" = type { %4* }
+%"struct.PointerList" = type { i8*, i8*, i8*, i8*, i8* }
+%"struct.DynamicCallback" = type { %10* }
+
+; CHECK: define ptr @invoke_foo(ptr nocapture readonly %.block_descriptor, ptr %stateWrapper)
+; CHECK: %1 = {{.*}}call ptr @invoke_foo.Tm
+; CHECK: define ptr @invoke_bar(ptr nocapture readonly %.block_descriptor, ptr %stateWrapper) {
+; CHECK: %1 = {{.*}}call ptr @invoke_foo.Tm
+; CHECK: define {{.*}}.Tm(ptr nocapture readonly %0, ptr %1, ptr %2, ptr %3)
+
+; Function Attrs: minsize optsize ssp uwtable
+define i8* @invoke_foo(i8* nocapture readonly %.block_descriptor, i8* %stateWrapper) #1 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+  %state = alloca %"struct.SearchSpec::State", align 8
+  %agg.tmp = alloca %"struct.PointerList", align 8
+  %0 = tail call i8* @llvm.objc.retain(i8* %stateWrapper) #2
+  %1 = bitcast %"struct.SearchSpec::State"* %state to i8*
+  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %1) #2
+  %2 = getelementptr inbounds i8, i8* %stateWrapper, i64 16
+  %3 = bitcast i8* %2 to %"struct.SearchSpec::State"* (i8*)**
+  %4 = load %"struct.SearchSpec::State"* (i8*)*, %"struct.SearchSpec::State"* (i8*)** %3, align 8
+  %call.i4 = invoke nonnull align 8 dereferenceable(8) %"struct.SearchSpec::State"* %4(i8* nonnull %stateWrapper) #31
+          to label %invoke.cont unwind label %lpad
+
+invoke.cont:                                      ; preds = %entry
+  %initialText.i.i = getelementptr inbounds %"struct.SearchSpec::State", %"struct.SearchSpec::State"* %state, i64 0, i32 0
+  %initialText2.i.i = getelementptr inbounds %"struct.SearchSpec::State", %"struct.SearchSpec::State"* %call.i4, i64 0, i32 0
+  %5 = load %4*, %4** %initialText2.i.i, align 8
+  %6 = bitcast %4* %5 to i8*
+  %7 = tail call i8* @llvm.objc.retain(i8* %6) #2
+  store %4* %5, %4** %initialText.i.i, align 8
+  %block.capture.addr = getelementptr inbounds i8, i8* %.block_descriptor, i64 32
+  %8 = bitcast i8* %block.capture.addr to i8**
+  %9 = load i8*, i8** %8, align 8
+  invoke void @callee2(%"struct.PointerList"* nonnull sret(%"struct.PointerList") align 8 %agg.tmp, i8* %9, i1 zeroext false) #31
+          to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2:                                     ; preds = %invoke.cont
+  %block.capture.addr3 = getelementptr inbounds i8, i8* %.block_descriptor, i64 40
+  %10 = bitcast i8* %block.capture.addr3 to %4**
+  %agg.tmp6.sroa.3.0..sroa_idx12 = getelementptr inbounds %"struct.PointerList", %"struct.PointerList"* %agg.tmp, i64 0, i32 3
+  %agg.tmp6.sroa.3.0.copyload = load i8*, i8** %agg.tmp6.sroa.3.0..sroa_idx12, align 8
+  %11 = load %4*, %4** %10, align 8
+  invoke void @callee1(%"struct.SearchSpec::State"* nonnull align 8 dereferenceable(8) %state, %4* %11) #31
+          to label %invoke.cont4 unwind label %lpad.i
+
+lpad.i:                                           ; preds = %invoke.cont2
+  %12 = landingpad { i8*, i32 }
+          cleanup
+  call void @llvm.objc.release(i8* %agg.tmp6.sroa.3.0.copyload) #2
+  %.phi.trans.insert = bitcast %"struct.SearchSpec::State"* %state to i8**
+  %.pre = load i8*, i8** %.phi.trans.insert, align 8
+  br label %lpad1.body
+
+invoke.cont4:                                     ; preds = %invoke.cont2
+  call void @llvm.objc.release(i8* %agg.tmp6.sroa.3.0.copyload) #2
+  %13 = load %4*, %4** %initialText.i.i, align 8
+  store %4* null, %4** %initialText.i.i, align 8
+  %call78 = call fastcc i8* @callee3(%4* %13) #31 [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %call78) #2
+  %14 = bitcast %"struct.SearchSpec::State"* %state to i8**
+  %15 = load i8*, i8** %14, align 8
+  call void @llvm.objc.release(i8* %15) #2
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %1) #2
+  call void @llvm.objc.release(i8* nonnull %stateWrapper) #2, !clang.imprecise_release !1
+  %16 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call78) #2
+  ret i8* %call78
+
+lpad:                                             ; preds = %entry
+  %17 = landingpad { i8*, i32 }
+          cleanup
+  br label %ehcleanup
+
+lpad1:                                            ; preds = %invoke.cont
+  %18 = landingpad { i8*, i32 }
+          cleanup
+  br label %lpad1.body
+
+lpad1.body:                                       ; preds = %lpad1, %lpad.i
+  %19 = phi i8* [ %6, %lpad1 ], [ %.pre, %lpad.i ]
+  %eh.lpad-body = phi { i8*, i32 } [ %18, %lpad1 ], [ %12, %lpad.i ]
+  call void @llvm.objc.release(i8* %19) #2
+  br label %ehcleanup
+
+ehcleanup:                                        ; preds = %lpad1.body, %lpad
+  %.pn = phi { i8*, i32 } [ %eh.lpad-body, %lpad1.body ], [ %17, %lpad ]
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %1) #2
+  call void @llvm.objc.release(i8* nonnull %stateWrapper) #2, !clang.imprecise_release !1
+  resume { i8*, i32 } %.pn
+}
+
+; Function Attrs: minsize optsize ssp uwtable
+define i8* @invoke_bar(i8* nocapture readonly %.block_descriptor, i8* %stateWrapper) #1 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+  %state = alloca %"struct.DynamicCallback", align 8
+  %agg.tmp = alloca %"struct.PointerList", align 8
+  %0 = tail call i8* @llvm.objc.retain(i8* %stateWrapper) #2
+  %1 = bitcast %"struct.DynamicCallback"* %state to i8*
+  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %1) #2
+  %2 = getelementptr inbounds i8, i8* %stateWrapper, i64 16
+  %3 = bitcast i8* %2 to %"struct.DynamicCallback"* (i8*)**
+  %4 = load %"struct.DynamicCallback"* (i8*)*, %"struct.DynamicCallback"* (i8*)** %3, align 8
+  %call.i4 = invoke nonnull align 8 dereferenceable(8) %"struct.DynamicCallback"* %4(i8* nonnull %stateWrapper) #31
+          to label %invoke.cont unwind label %lpad
+
+invoke.cont:                                      ; preds = %entry
+  %call.i.i = getelementptr inbounds %"struct.DynamicCallback", %"struct.DynamicCallback"* %state, i64 0, i32 0
+  %call2.i.i = getelementptr inbounds %"struct.DynamicCallback", %"struct.DynamicCallback"* %call.i4, i64 0, i32 0
+  %5 = load %10*, %10** %call2.i.i, align 8
+  %6 = bitcast %10* %5 to i8*
+  %7 = tail call i8* @llvm.objc.retain(i8* %6) #2
+  store %10* %5, %10** %call.i.i, align 8
+  %block.capture.addr = getelementptr inbounds i8, i8* %.block_descriptor, i64 32
+  %8 = bitcast i8* %block.capture.addr to i8**
+  %9 = load i8*, i8** %8, align 8
+  invoke void @callee2(%"struct.PointerList"* nonnull sret(%"struct.PointerList") align 8 %agg.tmp, i8* %9, i1 zeroext false) #31
+          to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2:                                     ; preds = %invoke.cont
+  %block.capture.addr3 = getelementptr inbounds i8, i8* %.block_descriptor, i64 40
+  %10 = bitcast i8* %block.capture.addr3 to %10**
+  %agg.tmp6.sroa.3.0..sroa_idx12 = getelementptr inbounds %"struct.PointerList", %"struct.PointerList"* %agg.tmp, i64 0, i32 3
+  %agg.tmp6.sroa.3.0.copyload = load i8*, i8** %agg.tmp6.sroa.3.0..sroa_idx12, align 8
+  %11 = load %10*, %10** %10, align 8
+  invoke void @callee5(%"struct.DynamicCallback"* nonnull align 8 dereferenceable(8) %state, %10* %11) #31
+          to label %invoke.cont4 unwind label %lpad.i
+
+lpad.i:                                           ; preds = %invoke.cont2
+  %12 = landingpad { i8*, i32 }
+          cleanup
+  call void @llvm.objc.release(i8* %agg.tmp6.sroa.3.0.copyload) #2
+  %.phi.trans.insert = bitcast %"struct.DynamicCallback"* %state to i8**
+  %.pre = load i8*, i8** %.phi.trans.insert, align 8
+  br label %lpad1.body
+
+invoke.cont4:                                     ; preds = %invoke.cont2
+  call void @llvm.objc.release(i8* %agg.tmp6.sroa.3.0.copyload) #2
+  %13 = load %10*, %10** %call.i.i, align 8
+  store %10* null, %10** %call.i.i, align 8
+  %call78 = call fastcc i8* @callee4(%10* %13) #31 [ "clang.arc.attachedcall"(i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %call78) #2
+  %14 = bitcast %"struct.DynamicCallback"* %state to i8**
+  %15 = load i8*, i8** %14, align 8
+  call void @llvm.objc.release(i8* %15) #2
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %1) #2
+  call void @llvm.objc.release(i8* nonnull %stateWrapper) #2, !clang.imprecise_release !1
+  %16 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call78) #2
+  ret i8* %call78
+
+lpad:                                             ; preds = %entry
+  %17 = landingpad { i8*, i32 }
+          cleanup
+  br label %ehcleanup
+
+lpad1:                                            ; preds = %invoke.cont
+  %18 = landingpad { i8*, i32 }
+          cleanup
+  br label %lpad1.body
+
+lpad1.body:                                       ; preds = %lpad1, %lpad.i
+  %19 = phi i8* [ %6, %lpad1 ], [ %.pre, %lpad.i ]
+  %eh.lpad-body = phi { i8*, i32 } [ %18, %lpad1 ], [ %12, %lpad.i ]
+  call void @llvm.objc.release(i8* %19) #2
+  br label %ehcleanup
+
+ehcleanup:                                        ; preds = %lpad1.body, %lpad
+  %.pn = phi { i8*, i32 } [ %eh.lpad-body, %lpad1.body ], [ %17, %lpad ]
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %1) #2
+  call void @llvm.objc.release(i8* nonnull %stateWrapper) #2, !clang.imprecise_release !1
+  resume { i8*, i32 } %.pn
+}
+declare void @callee1(%"struct.SearchSpec::State"* nonnull align 8 dereferenceable(8), %4*)
+declare void @callee2(%"struct.PointerList"* sret(%"struct.PointerList") align 8, i8*, i1 zeroext)
+declare i8* @callee3(%4* %state.coerce)
+declare i8* @callee4(%10* %state.coerce)
+declare void @callee5(%"struct.DynamicCallback"* nonnull align 8 dereferenceable(8), %10*)
+declare i32 @__gxx_personality_v0(...)
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare void @llvm.objc.clang.arc.noop.use(...)
+declare void @llvm.objc.release(i8*)
+declare i8* @llvm.objc.retain(i8*)
+declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+
+!1 = !{}


        

