[llvm] [ValueTracking] Make Depth last default arg (NFC) (PR #142384)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 2 08:26:46 PDT 2025


https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/142384

From f2d3329d2b30fd58cc686dd1f49ed46be2fd2708 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Sat, 24 May 2025 17:44:27 +0100
Subject: [PATCH] [ValueTracking] Make Depth last default arg (NFC)

Having a finite Depth (or recursion limit) for computeKnownBits is very
limiting, but it is currently a load-bearing necessity, as all KnownBits
are recomputed on each call and there is no caching. As a prerequisite
for an effort to remove the recursion limit altogether, either by using
a clever caching technique or by writing an easily-invalidable KnownBits
analysis, make the Depth argument in the ValueTracking APIs uniformly
the last argument, with a default value. This will make it easier to
remove the argument when the time comes; many callers that previously
passed 0 explicitly have been updated to omit the argument altogether.
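
For illustration, a typical call site changes as shown below (a minimal
sketch; V, DL, AC, CxtI, and DT stand for suitably typed values already
in scope at the caller):

  // Before: Depth came before the analysis context, so callers had to
  // spell out a 0 to reach the later parameters.
  KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, AC, CxtI, DT);

  // After: Depth is the trailing default argument and can be omitted;
  // omitting it is equivalent to passing 0.
  KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);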
---
 llvm/include/llvm/Analysis/ValueTracking.h    |  95 +-
 llvm/include/llvm/Analysis/WithCache.h        |   6 +-
 .../Transforms/InstCombine/InstCombiner.h     |  44 +-
 llvm/lib/Analysis/BasicAliasAnalysis.cpp      |   3 +-
 llvm/lib/Analysis/DemandedBits.cpp            |   4 +-
 llvm/lib/Analysis/IVDescriptors.cpp           |   2 +-
 llvm/lib/Analysis/InstructionSimplify.cpp     |  72 +-
 llvm/lib/Analysis/Lint.cpp                    |   3 +-
 llvm/lib/Analysis/ScalarEvolution.cpp         |  15 +-
 llvm/lib/Analysis/ValueTracking.cpp           | 877 +++++++++---------
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    |  22 +-
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |   3 +-
 .../AMDGPU/AMDGPULateCodeGenPrepare.cpp       |   2 +-
 llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp     |   6 +-
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      |   2 +-
 .../Target/Hexagon/HexagonVectorCombine.cpp   |   4 +-
 llvm/lib/Target/X86/X86PartialReduction.cpp   |   5 +-
 .../AggressiveInstCombine.cpp                 |   3 +-
 .../AggressiveInstCombineInternal.h           |   5 +-
 .../InstCombine/InstCombineAddSub.cpp         |  10 +-
 .../InstCombine/InstCombineAndOrXor.cpp       |  33 +-
 .../InstCombine/InstCombineCalls.cpp          |  21 +-
 .../InstCombine/InstCombineCasts.cpp          |  40 +-
 .../InstCombine/InstCombineCompares.cpp       |  53 +-
 .../InstCombine/InstCombineInternal.h         |  22 +-
 .../InstCombineLoadStoreAlloca.cpp            |   2 +-
 .../InstCombine/InstCombineMulDivRem.cpp      |  18 +-
 .../InstCombine/InstCombineSelect.cpp         |  16 +-
 .../InstCombine/InstCombineShifts.cpp         |  20 +-
 .../InstCombineSimplifyDemanded.cpp           | 175 ++--
 .../InstCombine/InstCombineVectorOps.cpp      |   2 +-
 .../InstCombine/InstructionCombining.cpp      |   4 +-
 llvm/lib/Transforms/Scalar/InferAlignment.cpp |   2 +-
 llvm/lib/Transforms/Utils/Local.cpp           |   2 +-
 llvm/lib/Transforms/Utils/LowerSwitch.cpp     |   2 +-
 llvm/lib/Transforms/Utils/SimplifyCFG.cpp     |   4 +-
 .../lib/Transforms/Utils/SimplifyLibCalls.cpp |  13 +-
 .../Vectorize/LoadStoreVectorizer.cpp         |   4 +-
 .../Transforms/Vectorize/SLPVectorizer.cpp    |  26 +-
 llvm/unittests/Analysis/ValueTrackingTest.cpp |  91 +-
 40 files changed, 859 insertions(+), 874 deletions(-)

diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index feb7a1fa2cb35..b05b8f349b8d5 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -54,36 +54,37 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known,
-                               const DataLayout &DL, unsigned Depth = 0,
+                               const DataLayout &DL,
                                AssumptionCache *AC = nullptr,
                                const Instruction *CxtI = nullptr,
                                const DominatorTree *DT = nullptr,
-                               bool UseInstrInfo = true);
+                               bool UseInstrInfo = true, unsigned Depth = 0);
 
 /// Returns the known bits rather than passing by reference.
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
-                                    unsigned Depth = 0,
                                     AssumptionCache *AC = nullptr,
                                     const Instruction *CxtI = nullptr,
                                     const DominatorTree *DT = nullptr,
-                                    bool UseInstrInfo = true);
+                                    bool UseInstrInfo = true,
+                                    unsigned Depth = 0);
 
 /// Returns the known bits rather than passing by reference.
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                                    const DataLayout &DL, unsigned Depth = 0,
+                                    const DataLayout &DL,
                                     AssumptionCache *AC = nullptr,
                                     const Instruction *CxtI = nullptr,
                                     const DominatorTree *DT = nullptr,
-                                    bool UseInstrInfo = true);
+                                    bool UseInstrInfo = true,
+                                    unsigned Depth = 0);
 
 LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
-                                    unsigned Depth, const SimplifyQuery &Q);
+                                    const SimplifyQuery &Q, unsigned Depth = 0);
 
-LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                                    const SimplifyQuery &Q);
+LLVM_ABI KnownBits computeKnownBits(const Value *V, const SimplifyQuery &Q,
+                                    unsigned Depth = 0);
 
-LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                               const SimplifyQuery &Q);
+LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known,
+                               const SimplifyQuery &Q, unsigned Depth = 0);
 
 /// Compute known bits from the range metadata.
 /// \p KnownZero the set of bits that are known to be zero
@@ -93,22 +94,22 @@ LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
 
 /// Merge bits known from context-dependent facts into Known.
 LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
-                                          unsigned Depth,
-                                          const SimplifyQuery &Q);
+                                          const SimplifyQuery &Q,
+                                          unsigned Depth = 0);
 
 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
 LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                                 const KnownBits &KnownLHS,
                                                 const KnownBits &KnownRHS,
-                                                unsigned Depth,
-                                                const SimplifyQuery &SQ);
+                                                const SimplifyQuery &SQ,
+                                                unsigned Depth = 0);
 
 /// Adjust \p Known for the given select \p Arm to include information from the
 /// select \p Cond.
 LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
                                           Value *Arm, bool Invert,
-                                          unsigned Depth,
-                                          const SimplifyQuery &Q);
+                                          const SimplifyQuery &Q,
+                                          unsigned Depth = 0);
 
 /// Return true if LHS and RHS have no common bits set.
 LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
@@ -121,14 +122,16 @@ LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
 /// vectors of integers. If 'OrZero' is set, then return true if the given
 /// value is either a power of two or zero.
 LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
-                                     bool OrZero = false, unsigned Depth = 0,
+                                     bool OrZero = false,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CxtI = nullptr,
                                      const DominatorTree *DT = nullptr,
-                                     bool UseInstrInfo = true);
+                                     bool UseInstrInfo = true,
+                                     unsigned Depth = 0);
 
 LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero,
-                                     unsigned Depth, const SimplifyQuery &Q);
+                                     const SimplifyQuery &Q,
+                                     unsigned Depth = 0);
 
 LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI);
 
@@ -196,21 +199,21 @@ LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask,
 /// sign bits for the vector element with the mininum number of known sign
 /// bits.
 LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
-                                     unsigned Depth = 0,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CxtI = nullptr,
                                      const DominatorTree *DT = nullptr,
-                                     bool UseInstrInfo = true);
+                                     bool UseInstrInfo = true,
+                                     unsigned Depth = 0);
 
 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
 /// Similar to the APInt::getSignificantBits function.
 LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op,
                                             const DataLayout &DL,
-                                            unsigned Depth = 0,
                                             AssumptionCache *AC = nullptr,
                                             const Instruction *CxtI = nullptr,
-                                            const DominatorTree *DT = nullptr);
+                                            const DominatorTree *DT = nullptr,
+                                            unsigned Depth = 0);
 
 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
 /// intrinsics are treated as-if they were intrinsics.
@@ -236,36 +239,36 @@ LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
 LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
                                           const APInt &DemandedElts,
                                           FPClassTest InterestedClasses,
-                                          unsigned Depth,
-                                          const SimplifyQuery &SQ);
+                                          const SimplifyQuery &SQ,
+                                          unsigned Depth = 0);
 
 LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
                                           FPClassTest InterestedClasses,
-                                          unsigned Depth,
-                                          const SimplifyQuery &SQ);
+                                          const SimplifyQuery &SQ,
+                                          unsigned Depth = 0);
 
 LLVM_ABI KnownFPClass computeKnownFPClass(
     const Value *V, const DataLayout &DL,
-    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
+    FPClassTest InterestedClasses = fcAllFlags,
     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
-    bool UseInstrInfo = true);
+    bool UseInstrInfo = true, unsigned Depth = 0);
 
 /// Wrapper to account for known fast math flags at the use instruction.
 LLVM_ABI KnownFPClass computeKnownFPClass(
     const Value *V, const APInt &DemandedElts, FastMathFlags FMF,
-    FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ);
+    FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth = 0);
 
 LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                           FPClassTest InterestedClasses,
-                                          unsigned Depth,
-                                          const SimplifyQuery &SQ);
+                                          const SimplifyQuery &SQ,
+                                          unsigned Depth = 0);
 
 /// Return true if we can prove that the specified FP value is never equal to
 /// -0.0. Users should use caution when considering PreserveSign
 /// denormal-fp-math.
-LLVM_ABI bool cannotBeNegativeZero(const Value *V, unsigned Depth,
-                                   const SimplifyQuery &SQ);
+LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ,
+                                   unsigned Depth = 0);
 
 /// Return true if we can prove that the specified FP value is either NaN or
 /// never less than -0.0.
@@ -275,30 +278,32 @@ LLVM_ABI bool cannotBeNegativeZero(const Value *V, unsigned Depth,
 ///       -0 --> true
 ///   x > +0 --> true
 ///   x < -0 --> false
-LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
-                                          const SimplifyQuery &SQ);
+LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V,
+                                          const SimplifyQuery &SQ,
+                                          unsigned Depth = 0);
 
 /// Return true if the floating-point scalar value is not an infinity or if
 /// the floating-point vector value has no infinities. Return false if a value
 /// could ever be infinity.
-LLVM_ABI bool isKnownNeverInfinity(const Value *V, unsigned Depth,
-                                   const SimplifyQuery &SQ);
+LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ,
+                                   unsigned Depth = 0);
 
 /// Return true if the floating-point value can never contain a NaN or infinity.
-LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
-                                   const SimplifyQuery &SQ);
+LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ,
+                                   unsigned Depth = 0);
 
 /// Return true if the floating-point scalar value is not a NaN or if the
 /// floating-point vector value has no NaN elements. Return false if a value
 /// could ever be NaN.
-LLVM_ABI bool isKnownNeverNaN(const Value *V, unsigned Depth,
-                              const SimplifyQuery &SQ);
+LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ,
+                              unsigned Depth = 0);
 
 /// Return false if we can prove that the specified FP value's sign bit is 0.
 /// Return true if we can prove that the specified FP value's sign bit is 1.
 /// Otherwise return std::nullopt.
-LLVM_ABI std::optional<bool>
-computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ);
+LLVM_ABI std::optional<bool> computeKnownFPSignBit(const Value *V,
+                                                   const SimplifyQuery &SQ,
+                                                   unsigned Depth = 0);
 
 /// Return true if the sign bit of the FP value can be ignored by the user when
 /// the value is zero.
diff --git a/llvm/include/llvm/Analysis/WithCache.h b/llvm/include/llvm/Analysis/WithCache.h
index 82c230a32297c..3bf35a889bbf2 100644
--- a/llvm/include/llvm/Analysis/WithCache.h
+++ b/llvm/include/llvm/Analysis/WithCache.h
@@ -22,8 +22,8 @@
 
 namespace llvm {
 struct SimplifyQuery;
-LLVM_ABI KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                                    const SimplifyQuery &Q);
+LLVM_ABI KnownBits computeKnownBits(const Value *V, const SimplifyQuery &Q,
+                                    unsigned Depth);
 
 template <typename Arg> class WithCache {
   static_assert(std::is_pointer_v<Arg>, "WithCache requires a pointer type!");
@@ -45,7 +45,7 @@ template <typename Arg> class WithCache {
   mutable KnownBits Known;
 
   void calculateKnownBits(const SimplifyQuery &Q) const {
-    Known = computeKnownBits(Pointer.getPointer(), 0, Q);
+    Known = computeKnownBits(Pointer.getPointer(), Q, 0);
     Pointer.setInt(true);
   }
 
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index fa6b60cba15aa..fa313f5290773 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -430,36 +430,39 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
   /// methods should return the value returned by this function.
   virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
 
-  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                        const Instruction *CxtI) const {
-    llvm::computeKnownBits(V, Known, Depth, SQ.getWithInstruction(CxtI));
+  void computeKnownBits(const Value *V, KnownBits &Known,
+                        const Instruction *CxtI, unsigned Depth = 0) const {
+    llvm::computeKnownBits(V, Known, SQ.getWithInstruction(CxtI), Depth);
   }
 
-  KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                             const Instruction *CxtI) const {
-    return llvm::computeKnownBits(V, Depth, SQ.getWithInstruction(CxtI));
+  KnownBits computeKnownBits(const Value *V, const Instruction *CxtI,
+                             unsigned Depth = 0) const {
+    return llvm::computeKnownBits(V, SQ.getWithInstruction(CxtI), Depth);
   }
 
   bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
-                              unsigned Depth = 0,
-                              const Instruction *CxtI = nullptr) {
-    return llvm::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
-                                        SQ.getWithInstruction(CxtI));
+                              const Instruction *CxtI = nullptr,
+                              unsigned Depth = 0) {
+    return llvm::isKnownToBeAPowerOfTwo(V, OrZero, SQ.getWithInstruction(CxtI),
+                                        Depth);
   }
 
-  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
-                         const Instruction *CxtI = nullptr) const {
+  bool MaskedValueIsZero(const Value *V, const APInt &Mask,
+                         const Instruction *CxtI = nullptr,
+                         unsigned Depth = 0) const {
     return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth);
   }
 
-  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
-                              const Instruction *CxtI = nullptr) const {
-    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
+  unsigned ComputeNumSignBits(const Value *Op,
+                              const Instruction *CxtI = nullptr,
+                              unsigned Depth = 0) const {
+    return llvm::ComputeNumSignBits(Op, DL, &AC, CxtI, &DT, Depth);
   }
 
-  unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
-                                     const Instruction *CxtI = nullptr) const {
-    return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
+  unsigned ComputeMaxSignificantBits(const Value *Op,
+                                     const Instruction *CxtI = nullptr,
+                                     unsigned Depth = 0) const {
+    return llvm::ComputeMaxSignificantBits(Op, DL, &AC, CxtI, &DT, Depth);
   }
 
   OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
@@ -507,12 +510,13 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
 
   virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                     const APInt &DemandedMask, KnownBits &Known,
-                                    unsigned Depth, const SimplifyQuery &Q) = 0;
+                                    const SimplifyQuery &Q,
+                                    unsigned Depth = 0) = 0;
 
   bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                             const APInt &DemandedMask, KnownBits &Known) {
     return SimplifyDemandedBits(I, OpNo, DemandedMask, Known,
-                                /*Depth=*/0, SQ.getWithInstruction(I));
+                                SQ.getWithInstruction(I));
   }
 
   virtual Value *
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index bcc9a71917aaf..b110c2017b9eb 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1261,8 +1261,7 @@ AliasResult BasicAAResult::aliasGEP(
 
     ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                             true, &AC, Index.CxtI);
-    KnownBits Known =
-        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
+    KnownBits Known = computeKnownBits(Index.Val.V, DL, &AC, Index.CxtI, DT);
     CR = CR.intersectWith(
         ConstantRange::fromKnownBits(Known, /* Signed */ true),
         ConstantRange::Signed);
diff --git a/llvm/lib/Analysis/DemandedBits.cpp b/llvm/lib/Analysis/DemandedBits.cpp
index b538e16f25859..d7e2a3fa4fc59 100644
--- a/llvm/lib/Analysis/DemandedBits.cpp
+++ b/llvm/lib/Analysis/DemandedBits.cpp
@@ -70,11 +70,11 @@ void DemandedBits::determineLiveOperandBits(
 
         const DataLayout &DL = UserI->getDataLayout();
         Known = KnownBits(BitWidth);
-        computeKnownBits(V1, Known, DL, 0, &AC, UserI, &DT);
+        computeKnownBits(V1, Known, DL, &AC, UserI, &DT);
 
         if (V2) {
           Known2 = KnownBits(BitWidth);
-          computeKnownBits(V2, Known2, DL, 0, &AC, UserI, &DT);
+          computeKnownBits(V2, Known2, DL, &AC, UserI, &DT);
         }
       };
 
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index b7c7bcab168cc..d13f2e139ee4a 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -111,7 +111,7 @@ static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
     // If demanded bits wasn't able to limit the bit width, we can try to use
     // value tracking instead. This can be the case, for example, if the value
     // may be negative.
-    auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
+    auto NumSignBits = ComputeNumSignBits(Exit, DL, AC, nullptr, DT);
     auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
     MaxBitWidth = NumTypeBits - NumSignBits;
     KnownBits Bits = computeKnownBits(Exit, DL);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 12c84c718d4fd..e397a228afee0 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -770,7 +770,7 @@ static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
     if (IsNUW)
       return Constant::getNullValue(Op0->getType());
 
-    KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
+    KnownBits Known = computeKnownBits(Op1, Q);
     if (Known.Zero.isMaxSignedValue()) {
       // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
       // Op1 must be 0 because negating the minimum signed value is undefined.
@@ -1029,8 +1029,7 @@ static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
   // TODO: Convert this (and above) to range analysis
   //      ("computeConstantRangeIncludingKnownBits")?
   const APInt *C;
-  if (match(Y, m_APInt(C)) &&
-      computeKnownBits(X, /* Depth */ 0, Q).getMaxValue().ult(*C))
+  if (match(Y, m_APInt(C)) && computeKnownBits(X, Q).getMaxValue().ult(*C))
     return true;
 
   // Try again for any divisor:
@@ -1079,7 +1078,7 @@ static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
   if (Op0 == Op1)
     return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
 
-  KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
+  KnownBits Known = computeKnownBits(Op1, Q);
   // X / 0 -> poison
   // X % 0 -> poison
   // If the divisor is known to be zero, just return poison. This can happen in
@@ -1149,7 +1148,7 @@ static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
     // have at least as many trailing zeros as the divisor to divide evenly. If
     // it has less trailing zeros, then the result must be poison.
     if (DivC->countr_zero()) {
-      KnownBits KnownOp0 = computeKnownBits(Op0, /* Depth */ 0, Q);
+      KnownBits KnownOp0 = computeKnownBits(Op0, Q);
       if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
         return PoisonValue::get(Op0->getType());
     }
@@ -1336,7 +1335,7 @@ static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
 
   // If any bits in the shift amount make that value greater than or equal to
   // the number of bits in the type, the shift is undefined.
-  KnownBits KnownAmt = computeKnownBits(Op1, /* Depth */ 0, Q);
+  KnownBits KnownAmt = computeKnownBits(Op1, Q);
   if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
     return PoisonValue::get(Op0->getType());
 
@@ -1349,7 +1348,7 @@ static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
   // Check for nsw shl leading to a poison value.
   if (IsNSW) {
     assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
-    KnownBits KnownVal = computeKnownBits(Op0, /* Depth */ 0, Q);
+    KnownBits KnownVal = computeKnownBits(Op0, Q);
     KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
 
     if (KnownVal.Zero.isSignBitSet())
@@ -1385,7 +1384,7 @@ static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
   // The low bit cannot be shifted out of an exact shift if it is set.
   // TODO: Generalize by counting trailing zeros (see fold for exact division).
   if (IsExact) {
-    KnownBits Op0Known = computeKnownBits(Op0, /* Depth */ 0, Q);
+    KnownBits Op0Known = computeKnownBits(Op0, Q);
     if (Op0Known.One[0])
       return Op0;
   }
@@ -1457,7 +1456,7 @@ static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
   if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
       match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
       *ShRAmt == *ShLAmt) {
-    const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
+    const KnownBits YKnown = computeKnownBits(Y, Q);
     const unsigned EffWidthY = YKnown.countMaxActiveBits();
     if (ShRAmt->uge(EffWidthY))
       return X;
@@ -1492,7 +1491,7 @@ static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
     return X;
 
   // Arithmetic shifting an all-sign-bit value is a no-op.
-  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, Q.AC, Q.CxtI, Q.DT);
   if (NumSignBits == Op0->getType()->getScalarSizeInBits())
     return Op0;
 
@@ -1996,13 +1995,13 @@ static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
 
   // -A & A = A if A is a power of two or zero.
   if (match(Op0, m_Neg(m_Specific(Op1))) &&
-      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
+      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
     return Op1;
 
   // This is a similar pattern used for checking if a value is a power-of-2:
   // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
   if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
-      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
+      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
     return Constant::getNullValue(Op1->getType());
 
   // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
@@ -2010,8 +2009,7 @@ static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
   const APInt *Shift1, *Shift2;
   if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
       match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
-      isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, /*Depth*/ 0, Q.AC,
-                             Q.CxtI) &&
+      isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI) &&
       Shift1->uge(*Shift2))
     return Constant::getNullValue(Op0->getType());
 
@@ -2080,9 +2078,9 @@ static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
   Value *Shift;
   if (match(Op1, m_Power2(PowerC)) &&
       match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
-      isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
+      isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI,
                              Q.DT)) {
-    KnownBits Known = computeKnownBits(Shift, /* Depth */ 0, Q);
+    KnownBits Known = computeKnownBits(Shift, Q);
     // Use getActiveBits() to make use of the additional power of two knowledge
     if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
       return ConstantInt::getNullValue(Op1->getType());
@@ -2146,10 +2144,10 @@ static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                         m_Value(Y)))) {
     const unsigned Width = Op0->getType()->getScalarSizeInBits();
     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
-    const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
+    const KnownBits YKnown = computeKnownBits(Y, Q);
     const unsigned EffWidthY = YKnown.countMaxActiveBits();
     if (EffWidthY <= ShftCnt) {
-      const KnownBits XKnown = computeKnownBits(X, /* Depth */ 0, Q);
+      const KnownBits XKnown = computeKnownBits(X, Q);
       const unsigned EffWidthX = XKnown.countMaxActiveBits();
       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
@@ -2946,7 +2944,7 @@ static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
       return getTrue(ITy);
     break;
   case ICmpInst::ICMP_SLT: {
-    KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
+    KnownBits LHSKnown = computeKnownBits(LHS, Q);
     if (LHSKnown.isNegative())
       return getTrue(ITy);
     if (LHSKnown.isNonNegative())
@@ -2954,7 +2952,7 @@ static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
     break;
   }
   case ICmpInst::ICMP_SLE: {
-    KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
+    KnownBits LHSKnown = computeKnownBits(LHS, Q);
     if (LHSKnown.isNegative())
       return getTrue(ITy);
     if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
@@ -2962,7 +2960,7 @@ static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
     break;
   }
   case ICmpInst::ICMP_SGE: {
-    KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
+    KnownBits LHSKnown = computeKnownBits(LHS, Q);
     if (LHSKnown.isNegative())
       return getFalse(ITy);
     if (LHSKnown.isNonNegative())
@@ -2970,7 +2968,7 @@ static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
     break;
   }
   case ICmpInst::ICMP_SGT: {
-    KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
+    KnownBits LHSKnown = computeKnownBits(LHS, Q);
     if (LHSKnown.isNegative())
       return getFalse(ITy);
     if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
@@ -3112,8 +3110,8 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
   // icmp pred (or X, Y), X
   if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
-      KnownBits RHSKnown = computeKnownBits(RHS, /* Depth */ 0, Q);
-      KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
+      KnownBits RHSKnown = computeKnownBits(RHS, Q);
+      KnownBits YKnown = computeKnownBits(Y, Q);
       if (RHSKnown.isNonNegative() && YKnown.isNegative())
         return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
       if (RHSKnown.isNegative() || YKnown.isNonNegative())
@@ -3128,7 +3126,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
       break;
     case ICmpInst::ICMP_SGT:
     case ICmpInst::ICMP_SGE: {
-      KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
+      KnownBits Known = computeKnownBits(RHS, Q);
       if (!Known.isNonNegative())
         break;
       [[fallthrough]];
@@ -3139,7 +3137,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
       return getFalse(ITy);
     case ICmpInst::ICMP_SLT:
     case ICmpInst::ICMP_SLE: {
-      KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
+      KnownBits Known = computeKnownBits(RHS, Q);
       if (!Known.isNonNegative())
         break;
       [[fallthrough]];
@@ -4108,10 +4106,8 @@ static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
   // This catches the 2 variable input case, constants are handled below as a
   // class-like compare.
   if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
-    KnownFPClass RHSClass =
-        computeKnownFPClass(RHS, fcAllFlags, /*Depth=*/0, Q);
-    KnownFPClass LHSClass =
-        computeKnownFPClass(LHS, fcAllFlags, /*Depth=*/0, Q);
+    KnownFPClass RHSClass = computeKnownFPClass(RHS, fcAllFlags, Q);
+    KnownFPClass LHSClass = computeKnownFPClass(LHS, fcAllFlags, Q);
 
     if (FMF.noNaNs() ||
         (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
@@ -4131,7 +4127,7 @@ static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
                                                      fcAllFlags) {
     if (FullKnownClassLHS)
       return *FullKnownClassLHS;
-    return computeKnownFPClass(LHS, FMF, InterestedFlags, 0, Q);
+    return computeKnownFPClass(LHS, FMF, InterestedFlags, Q);
   };
 
   if (C && Q.CxtI) {
@@ -5711,7 +5707,7 @@ simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
   // fadd X, 0 ==> X, when we know X is not -0
   if (canIgnoreSNaN(ExBehavior, FMF))
     if (match(Op1, m_PosZeroFP()) &&
-        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
+        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
       return Op0;
 
   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
@@ -5773,7 +5769,7 @@ simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
   // fsub X, -0 ==> X, when we know X is not -0
   if (canIgnoreSNaN(ExBehavior, FMF))
     if (match(Op1, m_NegZeroFP()) &&
-        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, /*Depth=*/0, Q)))
+        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
       return Op0;
 
   // fsub -0.0, (fsub -0.0, X) ==> X
@@ -5841,8 +5837,7 @@ static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
     if (FMF.noNaNs() && FMF.noSignedZeros())
       return ConstantFP::getZero(Op0->getType());
 
-    KnownFPClass Known =
-        computeKnownFPClass(Op0, FMF, fcInf | fcNan, /*Depth=*/0, Q);
+    KnownFPClass Known = computeKnownFPClass(Op0, FMF, fcInf | fcNan, Q);
     if (Known.isKnownNever(fcInf | fcNan)) {
       // if nsz is set, return 0.0
       if (FMF.noSignedZeros())
@@ -6306,7 +6301,7 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
   Value *X;
   switch (IID) {
   case Intrinsic::fabs:
-    if (computeKnownFPSignBit(Op0, /*Depth=*/0, Q) == false)
+    if (computeKnownFPSignBit(Op0, Q) == false)
       return Op0;
     break;
   case Intrinsic::bswap:
@@ -6321,8 +6316,7 @@ static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
     break;
   case Intrinsic::ctpop: {
     // ctpop(X) -> 1 iff X is non-zero power of 2.
-    if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
-                               Q.DT))
+    if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI, Q.DT))
       return ConstantInt::get(Op0->getType(), 1);
     // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
     // ctpop(and X, 1) --> and X, 1
@@ -6511,7 +6505,7 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
 
     Constant *C;
     if (match(Op1, m_ImmConstant(C))) {
-      KnownBits PtrKnown = computeKnownBits(Op0, /*Depth=*/0, Q);
+      KnownBits PtrKnown = computeKnownBits(Op0, Q);
       // See if we only masking off bits we know are already zero due to
       // alignment.
       APInt IrrelevantPtrBits =
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 7e540ea907893..1168005f48c0e 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -551,8 +551,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
 
   VectorType *VecTy = dyn_cast<VectorType>(V->getType());
   if (!VecTy) {
-    KnownBits Known =
-        computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
+    KnownBits Known = computeKnownBits(V, DL, AC, dyn_cast<Instruction>(V), DT);
     return Known.isZero();
   }
 
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 56cdfabccb66f..2dfe625eb0dcc 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6367,7 +6367,7 @@ APInt ScalarEvolution::getConstantMultipleImpl(const SCEV *S) {
     // ask ValueTracking for known bits
     const SCEVUnknown *U = cast<SCEVUnknown>(S);
     unsigned Known =
-        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT)
+        computeKnownBits(U->getValue(), getDataLayout(), &AC, nullptr, &DT)
             .countMinTrailingZeros();
     return GetShiftedByZeros(Known);
   }
@@ -6485,8 +6485,8 @@ getRangeForUnknownRecurrence(const SCEVUnknown *U) {
   if (!TC || TC >= BitWidth)
     return FullSet;
 
-  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
-  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
+  auto KnownStart = computeKnownBits(Start, DL, &AC, nullptr, &DT);
+  auto KnownStep = computeKnownBits(Step, DL, &AC, nullptr, &DT);
   assert(KnownStart.getBitWidth() == BitWidth &&
          KnownStep.getBitWidth() == BitWidth);
 
@@ -6863,13 +6863,13 @@ const ConstantRange &ScalarEvolution::getRangeRef(
 
     // See if ValueTracking can give us a useful range.
     const DataLayout &DL = getDataLayout();
-    KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, &DT);
+    KnownBits Known = computeKnownBits(V, DL, &AC, nullptr, &DT);
     if (Known.getBitWidth() != BitWidth)
       Known = Known.zextOrTrunc(BitWidth);
 
     // ValueTracking may be able to compute a tighter result for the number of
     // sign bits than for the value of those sign bits.
-    unsigned NS = ComputeNumSignBits(V, DL, 0, &AC, nullptr, &DT);
+    unsigned NS = ComputeNumSignBits(V, DL, &AC, nullptr, &DT);
     if (U->getType()->isPointerTy()) {
       // If the pointer size is larger than the index size type, this can cause
       // NS to be larger than BitWidth. So compensate for this.
@@ -7818,8 +7818,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
         unsigned TZ = A.countr_zero();
         unsigned BitWidth = A.getBitWidth();
         KnownBits Known(BitWidth);
-        computeKnownBits(BO->LHS, Known, getDataLayout(),
-                         0, &AC, nullptr, &DT);
+        computeKnownBits(BO->LHS, Known, getDataLayout(), &AC, nullptr, &DT);
 
         APInt EffectiveMask =
             APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -9485,7 +9484,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
     // bitwidth(K) iterations.
     Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
-    KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
+    KnownBits Known = computeKnownBits(FirstValue, DL, &AC,
                                        Predecessor->getTerminator(), &DT);
     auto *Ty = cast<IntegerType>(RHS->getType());
     if (Known.isNonNegative())
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 7f30cd73d57c9..2829294820332 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -132,44 +132,45 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
 }
 
 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
-                             KnownBits &Known, unsigned Depth,
-                             const SimplifyQuery &Q);
+                             KnownBits &Known, const SimplifyQuery &Q,
+                             unsigned Depth);
 
-void llvm::computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                            const SimplifyQuery &Q) {
+void llvm::computeKnownBits(const Value *V, KnownBits &Known,
+                            const SimplifyQuery &Q, unsigned Depth) {
   // Since the number of lanes in a scalable vector is unknown at compile time,
   // we track one bit which is implicitly broadcast to all lanes.  This means
   // that all lanes in a scalable vector are considered demanded.
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  ::computeKnownBits(V, DemandedElts, Known, Depth, Q);
+  ::computeKnownBits(V, DemandedElts, Known, Q, Depth);
 }
 
 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
-                            const DataLayout &DL, unsigned Depth,
-                            AssumptionCache *AC, const Instruction *CxtI,
-                            const DominatorTree *DT, bool UseInstrInfo) {
-  computeKnownBits(
-      V, Known, Depth,
-      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+                            const DataLayout &DL, AssumptionCache *AC,
+                            const Instruction *CxtI, const DominatorTree *DT,
+                            bool UseInstrInfo, unsigned Depth) {
+  computeKnownBits(V, Known,
+                   SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo),
+                   Depth);
 }
 
 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
-                                 unsigned Depth, AssumptionCache *AC,
-                                 const Instruction *CxtI,
-                                 const DominatorTree *DT, bool UseInstrInfo) {
+                                 AssumptionCache *AC, const Instruction *CxtI,
+                                 const DominatorTree *DT, bool UseInstrInfo,
+                                 unsigned Depth) {
   return computeKnownBits(
-      V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+      V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
 }
 
 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
-                                 const DataLayout &DL, unsigned Depth,
-                                 AssumptionCache *AC, const Instruction *CxtI,
-                                 const DominatorTree *DT, bool UseInstrInfo) {
+                                 const DataLayout &DL, AssumptionCache *AC,
+                                 const Instruction *CxtI,
+                                 const DominatorTree *DT, bool UseInstrInfo,
+                                 unsigned Depth) {
   return computeKnownBits(
-      V, DemandedElts, Depth,
-      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+      V, DemandedElts,
+      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
 }
 
 static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
@@ -263,12 +264,13 @@ bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
 }
 
 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
-                                  bool OrZero, unsigned Depth,
-                                  AssumptionCache *AC, const Instruction *CxtI,
-                                  const DominatorTree *DT, bool UseInstrInfo) {
+                                  bool OrZero, AssumptionCache *AC,
+                                  const Instruction *CxtI,
+                                  const DominatorTree *DT, bool UseInstrInfo,
+                                  unsigned Depth) {
   return ::isKnownToBeAPowerOfTwo(
-      V, OrZero, Depth,
-      SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+      V, OrZero, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo),
+      Depth);
 }
 
 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
@@ -276,7 +278,7 @@ static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
 
 bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                               unsigned Depth) {
-  return computeKnownBits(V, Depth, SQ).isNonNegative();
+  return computeKnownBits(V, SQ, Depth).isNonNegative();
 }
 
 bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
@@ -286,19 +288,19 @@ bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
 
   // If `isKnownNonNegative` ever becomes more sophisticated, make sure to keep
   // this updated.
-  KnownBits Known = computeKnownBits(V, Depth, SQ);
+  KnownBits Known = computeKnownBits(V, SQ, Depth);
   return Known.isNonNegative() &&
          (Known.isNonZero() || isKnownNonZero(V, SQ, Depth));
 }
 
 bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                            unsigned Depth) {
-  return computeKnownBits(V, Depth, SQ).isNegative();
+  return computeKnownBits(V, SQ, Depth).isNegative();
 }
 
 static bool isKnownNonEqual(const Value *V1, const Value *V2,
-                            const APInt &DemandedElts, unsigned Depth,
-                            const SimplifyQuery &Q);
+                            const APInt &DemandedElts, const SimplifyQuery &Q,
+                            unsigned Depth);
 
 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                            const SimplifyQuery &Q, unsigned Depth) {
@@ -308,40 +310,41 @@ bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
   auto *FVTy = dyn_cast<FixedVectorType>(V1->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  return ::isKnownNonEqual(V1, V2, DemandedElts, Depth, Q);
+  return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
 }
 
 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                              const SimplifyQuery &SQ, unsigned Depth) {
   KnownBits Known(Mask.getBitWidth());
-  computeKnownBits(V, Known, Depth, SQ);
+  computeKnownBits(V, Known, SQ, Depth);
   return Mask.isSubsetOf(Known.Zero);
 }
 
 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
-                                   unsigned Depth, const SimplifyQuery &Q);
+                                   const SimplifyQuery &Q, unsigned Depth);
 
-static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
-                                   const SimplifyQuery &Q) {
+static unsigned ComputeNumSignBits(const Value *V, const SimplifyQuery &Q,
+                                   unsigned Depth = 0) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
+  return ComputeNumSignBits(V, DemandedElts, Q, Depth);
 }
 
 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
-                                  unsigned Depth, AssumptionCache *AC,
-                                  const Instruction *CxtI,
-                                  const DominatorTree *DT, bool UseInstrInfo) {
+                                  AssumptionCache *AC, const Instruction *CxtI,
+                                  const DominatorTree *DT, bool UseInstrInfo,
+                                  unsigned Depth) {
   return ::ComputeNumSignBits(
-      V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+      V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
 }
 
 unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
-                                         unsigned Depth, AssumptionCache *AC,
+                                         AssumptionCache *AC,
                                          const Instruction *CxtI,
-                                         const DominatorTree *DT) {
-  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
+                                         const DominatorTree *DT,
+                                         unsigned Depth) {
+  unsigned SignBits = ComputeNumSignBits(V, DL, AC, CxtI, DT, Depth);
   return V->getType()->getScalarSizeInBits() - SignBits + 1;
 }
 
@@ -349,24 +352,24 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                    bool NSW, bool NUW,
                                    const APInt &DemandedElts,
                                    KnownBits &KnownOut, KnownBits &Known2,
-                                   unsigned Depth, const SimplifyQuery &Q) {
-  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
+                                   const SimplifyQuery &Q, unsigned Depth) {
+  computeKnownBits(Op1, DemandedElts, KnownOut, Q, Depth + 1);
 
   // If one operand is unknown and we have no nowrap information,
   // the result will be unknown independently of the second operand.
   if (KnownOut.isUnknown() && !NSW && !NUW)
     return;
 
-  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
+  computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1);
   KnownOut = KnownBits::computeForAddSub(Add, NSW, NUW, Known2, KnownOut);
 }
 
 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                 bool NUW, const APInt &DemandedElts,
                                 KnownBits &Known, KnownBits &Known2,
-                                unsigned Depth, const SimplifyQuery &Q) {
-  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
-  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
+                                const SimplifyQuery &Q, unsigned Depth) {
+  computeKnownBits(Op1, DemandedElts, Known, Q, Depth + 1);
+  computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1);
 
   bool isKnownNegative = false;
   bool isKnownNonNegative = false;
@@ -792,15 +795,15 @@ static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp,
 }
 
 static void computeKnownBitsFromCond(const Value *V, Value *Cond,
-                                     KnownBits &Known, unsigned Depth,
-                                     const SimplifyQuery &SQ, bool Invert) {
+                                     KnownBits &Known, const SimplifyQuery &SQ,
+                                     bool Invert, unsigned Depth) {
   Value *A, *B;
   if (Depth < MaxAnalysisRecursionDepth &&
       match(Cond, m_LogicalOp(m_Value(A), m_Value(B)))) {
     KnownBits Known2(Known.getBitWidth());
     KnownBits Known3(Known.getBitWidth());
-    computeKnownBitsFromCond(V, A, Known2, Depth + 1, SQ, Invert);
-    computeKnownBitsFromCond(V, B, Known3, Depth + 1, SQ, Invert);
+    computeKnownBitsFromCond(V, A, Known2, SQ, Invert, Depth + 1);
+    computeKnownBitsFromCond(V, B, Known3, SQ, Invert, Depth + 1);
     if (Invert ? match(Cond, m_LogicalOr(m_Value(), m_Value()))
                : match(Cond, m_LogicalAnd(m_Value(), m_Value())))
       Known2 = Known2.unionWith(Known3);
@@ -831,14 +834,14 @@ static void computeKnownBitsFromCond(const Value *V, Value *Cond,
   }
 
   if (Depth < MaxAnalysisRecursionDepth && match(Cond, m_Not(m_Value(A))))
-    computeKnownBitsFromCond(V, A, Known, Depth + 1, SQ, !Invert);
+    computeKnownBitsFromCond(V, A, Known, SQ, !Invert, Depth + 1);
 }
 
 void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
-                                       unsigned Depth, const SimplifyQuery &Q) {
+                                       const SimplifyQuery &Q, unsigned Depth) {
   // Handle injected condition.
   if (Q.CC && Q.CC->AffectedValues.contains(V))
-    computeKnownBitsFromCond(V, Q.CC->Cond, Known, Depth, Q, Q.CC->Invert);
+    computeKnownBitsFromCond(V, Q.CC->Cond, Known, Q, Q.CC->Invert, Depth);
 
   if (!Q.CxtI)
     return;
@@ -848,13 +851,13 @@ void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
     for (BranchInst *BI : Q.DC->conditionsFor(V)) {
       BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
       if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
-        computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q,
-                                 /*Invert*/ false);
+        computeKnownBitsFromCond(V, BI->getCondition(), Known, Q,
+                                 /*Invert*/ false, Depth);
 
       BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
       if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
-        computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q,
-                                 /*Invert*/ true);
+        computeKnownBitsFromCond(V, BI->getCondition(), Known, Q,
+                                 /*Invert*/ true, Depth);
     }
 
     if (Known.hasConflict())
@@ -953,10 +956,10 @@ void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
 /// combined for all permitted shift amounts.
 static void computeKnownBitsFromShiftOperator(
     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
-    KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
+    KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth,
     function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
-  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
-  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
+  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
+  computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
   // To limit compile-time impact, only query isKnownNonZero() if we know at
   // least something about the shift amount.
   bool ShAmtNonZero =
@@ -969,7 +972,7 @@ static void computeKnownBitsFromShiftOperator(
 static KnownBits
 getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
                          const KnownBits &KnownLHS, const KnownBits &KnownRHS,
-                         unsigned Depth, const SimplifyQuery &Q) {
+                         const SimplifyQuery &Q, unsigned Depth) {
   unsigned BitWidth = KnownLHS.getBitWidth();
   KnownBits KnownOut(BitWidth);
   bool IsAnd = false;
@@ -1026,7 +1029,7 @@ getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
        match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
        match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
     KnownBits KnownY(BitWidth);
-    computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
+    computeKnownBits(Y, DemandedElts, KnownY, Q, Depth + 1);
     if (KnownY.countMinTrailingOnes() > 0) {
       if (IsAnd)
         KnownOut.Zero.setBit(0);
@@ -1038,8 +1041,8 @@ getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
 }
 
 static KnownBits computeKnownBitsForHorizontalOperation(
-    const Operator *I, const APInt &DemandedElts, unsigned Depth,
-    const SimplifyQuery &Q,
+    const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q,
+    unsigned Depth,
     const function_ref<KnownBits(const KnownBits &, const KnownBits &)>
         KnownBitsFunc) {
   APInt DemandedEltsLHS, DemandedEltsRHS;
@@ -1050,8 +1053,8 @@ static KnownBits computeKnownBitsForHorizontalOperation(
   const auto ComputeForSingleOpFunc =
       [Depth, &Q, KnownBitsFunc](const Value *Op, APInt &DemandedEltsOp) {
         return KnownBitsFunc(
-            computeKnownBits(Op, DemandedEltsOp, Depth + 1, Q),
-            computeKnownBits(Op, DemandedEltsOp << 1, Depth + 1, Q));
+            computeKnownBits(Op, DemandedEltsOp, Q, Depth + 1),
+            computeKnownBits(Op, DemandedEltsOp << 1, Q, Depth + 1));
       };
 
   if (DemandedEltsRHS.isZero())
@@ -1067,14 +1070,14 @@ static KnownBits computeKnownBitsForHorizontalOperation(
 KnownBits llvm::analyzeKnownBitsFromAndXorOr(const Operator *I,
                                              const KnownBits &KnownLHS,
                                              const KnownBits &KnownRHS,
-                                             unsigned Depth,
-                                             const SimplifyQuery &SQ) {
+                                             const SimplifyQuery &SQ,
+                                             unsigned Depth) {
   auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
 
-  return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, Depth,
-                                  SQ);
+  return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, SQ,
+                                  Depth);
 }
 
 ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
@@ -1097,15 +1100,15 @@ ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
 }
 
 void llvm::adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
-                                       Value *Arm, bool Invert, unsigned Depth,
-                                       const SimplifyQuery &Q) {
+                                       Value *Arm, bool Invert,
+                                       const SimplifyQuery &Q, unsigned Depth) {
   // If we have a constant arm, we are done.
   if (Known.isConstant())
     return;
 
   // See what condition implies about the bits of the select arm.
   KnownBits CondRes(Known.getBitWidth());
-  computeKnownBitsFromCond(Arm, Cond, CondRes, Depth + 1, Q, Invert);
+  computeKnownBitsFromCond(Arm, Cond, CondRes, Q, Invert, Depth + 1);
   // If we don't get any information from the condition, no reason to
   // proceed.
   if (CondRes.isUnknown())
@@ -1190,8 +1193,9 @@ static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II,
 
 static void computeKnownBitsFromOperator(const Operator *I,
                                          const APInt &DemandedElts,
-                                         KnownBits &Known, unsigned Depth,
-                                         const SimplifyQuery &Q) {
+                                         KnownBits &Known,
+                                         const SimplifyQuery &Q,
+                                         unsigned Depth) {
   unsigned BitWidth = Known.getBitWidth();
 
   KnownBits Known2(BitWidth);
@@ -1203,40 +1207,40 @@ static void computeKnownBitsFromOperator(const Operator *I,
       computeKnownBitsFromRangeMetadata(*MD, Known);
     break;
   case Instruction::And:
-    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
 
-    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
+    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
     break;
   case Instruction::Or:
-    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
 
-    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
+    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
     break;
   case Instruction::Xor:
-    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
 
-    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
+    Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
     break;
   case Instruction::Mul: {
     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
     bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, NUW,
-                        DemandedElts, Known, Known2, Depth, Q);
+                        DemandedElts, Known, Known2, Q, Depth);
     break;
   }
   case Instruction::UDiv: {
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
     Known =
         KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
     break;
   }
   case Instruction::SDiv: {
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
     Known =
         KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
     break;
@@ -1244,8 +1248,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
   case Instruction::Select: {
     auto ComputeForArm = [&](Value *Arm, bool Invert) {
       KnownBits Res(Known.getBitWidth());
-      computeKnownBits(Arm, DemandedElts, Res, Depth + 1, Q);
-      adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Depth, Q);
+      computeKnownBits(Arm, DemandedElts, Res, Q, Depth + 1);
+      adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Q, Depth);
       return Res;
     };
     // Only known if known in both the LHS and RHS.
@@ -1279,7 +1283,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
 
     assert(SrcBitWidth && "SrcBitWidth can't be zero");
     Known = Known.anyextOrTrunc(SrcBitWidth);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
     if (auto *Inst = dyn_cast<PossiblyNonNegInst>(I);
         Inst && Inst->hasNonNeg() && !Known.isNegative())
       Known.makeNonNegative();
@@ -1292,7 +1296,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // TODO: For now, not handling conversions like:
         // (bitcast i64 %x to <2 x i32>)
         !I->getType()->isVectorTy()) {
-      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+      computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
       break;
     }
 
@@ -1302,7 +1306,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         V->getType()->isFPOrFPVectorTy()) {
       Type *FPType = V->getType()->getScalarType();
       KnownFPClass Result =
-          computeKnownFPClass(V, DemandedElts, fcAllFlags, Depth + 1, Q);
+          computeKnownFPClass(V, DemandedElts, fcAllFlags, Q, Depth + 1);
       FPClassTest FPClasses = Result.KnownFPClasses;
 
       // TODO: Treat it as zero/poison if the use of I is unreachable.
@@ -1370,8 +1374,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
 
       KnownBits KnownSrc(SubBitWidth);
       for (unsigned i = 0; i != SubScale; ++i) {
-        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
-                         Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, Q,
+                         Depth + 1);
         unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
         Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
       }
@@ -1383,7 +1387,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
 
     Known = Known.trunc(SrcBitWidth);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
     // If the sign bit of the input is known set or clear, then we know the
     // top bits of the result.
     Known = Known.sext(BitWidth);
@@ -1396,7 +1400,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
                          bool ShAmtNonZero) {
       return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
     };
-    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
+    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
                                       KF);
     // Trailing zeros of a left-shifted constant never decrease.
     const APInt *C;
@@ -1410,7 +1414,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
                       bool ShAmtNonZero) {
       return KnownBits::lshr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
     };
-    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
+    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
                                       KF);
     // Leading zeros of a right-shifted constant never decrease.
     const APInt *C;
@@ -1424,7 +1428,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
                       bool ShAmtNonZero) {
       return KnownBits::ashr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
     };
-    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
+    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
                                       KF);
     break;
   }
@@ -1432,25 +1436,25 @@ static void computeKnownBitsFromOperator(const Operator *I,
     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
     bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, NUW,
-                           DemandedElts, Known, Known2, Depth, Q);
+                           DemandedElts, Known, Known2, Q, Depth);
     break;
   }
   case Instruction::Add: {
     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
     bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, NUW,
-                           DemandedElts, Known, Known2, Depth, Q);
+                           DemandedElts, Known, Known2, Q, Depth);
     break;
   }
   case Instruction::SRem:
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
     Known = KnownBits::srem(Known, Known2);
     break;
 
   case Instruction::URem:
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+    computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
     Known = KnownBits::urem(Known, Known2);
     break;
   case Instruction::Alloca:
@@ -1459,7 +1463,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
   case Instruction::GetElementPtr: {
     // Analyze all of the subscripts of this getelementptr instruction
     // to determine if we can prove known low zero bits.
-    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+    computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
     // Accumulate the constant indices in a separate variable
     // to minimize the number of calls to computeForAddSub.
     unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
@@ -1527,7 +1531,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
       }
 
       KnownBits IndexBits =
-          computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+          computeKnownBits(Index, Q, Depth + 1).sextOrTrunc(IndexWidth);
       KnownBits ScalingFactor(IndexWidth);
       // Multiply by current sizeof type.
       // &A[i] == A + i * sizeof(*A[i]).
@@ -1582,7 +1586,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // add sufficient tests to cover.
         SimplifyQuery RecQ = Q.getWithoutCondContext();
         RecQ.CxtI = P;
-        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
+        computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1);
         switch (Opcode) {
         case Instruction::Shl:
           // A shl recurrence will only increase the trailing zeros
@@ -1625,12 +1629,12 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // Ok, we have a PHI of the form L op= R. Check for low
         // zero bits.
         RecQ.CxtI = RInst;
-        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
+        computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1);
 
         // We need to take the minimum number of known bits
         KnownBits Known3(BitWidth);
         RecQ.CxtI = LInst;
-        computeKnownBits(L, DemandedElts, Known3, Depth + 1, RecQ);
+        computeKnownBits(L, DemandedElts, Known3, RecQ, Depth + 1);
 
         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                        Known3.countMinTrailingZeros()));
@@ -1720,8 +1724,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
         // want to waste time spinning around in loops.
         // TODO: See if we can base recursion limiter on number of incoming phi
         // edges so we don't overly clamp analysis.
-        computeKnownBits(IncValue, DemandedElts, Known2,
-                         MaxAnalysisRecursionDepth - 1, RecQ);
+        computeKnownBits(IncValue, DemandedElts, Known2, RecQ,
+                         MaxAnalysisRecursionDepth - 1);
 
         // See if we can further use a conditional branch into the phi
         // to help us determine the range of the value.
@@ -1781,7 +1785,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
 
     if (const Value *RV = CB->getReturnedArgOperand()) {
       if (RV->getType() == I->getType()) {
-        computeKnownBits(RV, Known2, Depth + 1, Q);
+        computeKnownBits(RV, Known2, Q, Depth + 1);
         Known = Known.unionWith(Known2);
         // If the function doesn't return properly for all input values
         // (e.g. unreachable exits) then there might be conflicts between the
@@ -1796,23 +1800,23 @@ static void computeKnownBitsFromOperator(const Operator *I,
       default:
         break;
       case Intrinsic::abs: {
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
         Known = Known2.abs(IntMinIsPoison);
         break;
       }
       case Intrinsic::bitreverse:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         Known.Zero |= Known2.Zero.reverseBits();
         Known.One |= Known2.One.reverseBits();
         break;
       case Intrinsic::bswap:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         Known.Zero |= Known2.Zero.byteSwap();
         Known.One |= Known2.One.byteSwap();
         break;
       case Intrinsic::ctlz: {
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         // If we have a known 1, its position is our upper bound.
         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
         // If this call is poison for 0 input, the result will be less than 2^n.
@@ -1823,7 +1827,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
       case Intrinsic::cttz: {
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         // If we have a known 1, its position is our upper bound.
         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
         // If this call is poison for 0 input, the result will be less than 2^n.
@@ -1834,7 +1838,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
       case Intrinsic::ctpop: {
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
         // We can bound the space the count needs.  Also, bits known to be zero
         // can't contribute to the population.
         unsigned BitsPossiblySet = Known2.countMaxPopulation();
@@ -1856,8 +1860,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
           ShiftAmt = BitWidth - ShiftAmt;
 
         KnownBits Known3(BitWidth);
-        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known3, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known3, Q, Depth + 1);
 
         Known.Zero =
             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
@@ -1866,29 +1870,29 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
       case Intrinsic::uadd_sat:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::uadd_sat(Known, Known2);
         break;
       case Intrinsic::usub_sat:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::usub_sat(Known, Known2);
         break;
       case Intrinsic::sadd_sat:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::sadd_sat(Known, Known2);
         break;
       case Intrinsic::ssub_sat:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::ssub_sat(Known, Known2);
         break;
         // Vec reverse preserves bits from input vec.
       case Intrinsic::vector_reverse:
-        computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known,
-                         Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known, Q,
+                         Depth + 1);
         break;
         // for min/max/and/or reduce, any bit common to each element in the
         // input vec is set in the output.
@@ -1898,10 +1902,10 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::vector_reduce_umin:
       case Intrinsic::vector_reduce_smax:
       case Intrinsic::vector_reduce_smin:
-        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
         break;
       case Intrinsic::vector_reduce_xor: {
-        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
         // The zeros common to all vecs are zero in the output.
         // If the number of elements is odd, then the common ones remain. If the
         // number of elements is even, then the common ones become zeros.
@@ -1916,33 +1920,33 @@ static void computeKnownBitsFromOperator(const Operator *I,
         break;
       }
       case Intrinsic::umin:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::umin(Known, Known2);
         break;
       case Intrinsic::umax:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::umax(Known, Known2);
         break;
       case Intrinsic::smin:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::smin(Known, Known2);
         unionWithMinMaxIntrinsicClamp(II, Known);
         break;
       case Intrinsic::smax:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::smax(Known, Known2);
         unionWithMinMaxIntrinsicClamp(II, Known);
         break;
       case Intrinsic::ptrmask: {
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
 
         const Value *Mask = I->getOperand(1);
         Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
-        computeKnownBits(Mask, DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(Mask, DemandedElts, Known2, Q, Depth + 1);
         // TODO: 1-extend would be more precise.
         Known &= Known2.anyextOrTrunc(BitWidth);
         break;
@@ -1950,15 +1954,15 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::x86_sse2_pmulh_w:
       case Intrinsic::x86_avx2_pmulh_w:
       case Intrinsic::x86_avx512_pmulh_w_512:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::mulhs(Known, Known2);
         break;
       case Intrinsic::x86_sse2_pmulhu_w:
       case Intrinsic::x86_avx2_pmulhu_w:
       case Intrinsic::x86_avx512_pmulhu_w_512:
-        computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
-        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
+        computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
         Known = KnownBits::mulhu(Known, Known2);
         break;
       case Intrinsic::x86_sse42_crc32_64_64:
@@ -1969,7 +1973,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::x86_avx2_phadd_d:
       case Intrinsic::x86_avx2_phadd_w: {
         Known = computeKnownBitsForHorizontalOperation(
-            I, DemandedElts, Depth, Q,
+            I, DemandedElts, Q, Depth,
             [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
               return KnownBits::add(KnownLHS, KnownRHS);
             });
@@ -1977,8 +1981,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
       }
       case Intrinsic::x86_ssse3_phadd_sw_128:
       case Intrinsic::x86_avx2_phadd_sw: {
-        Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth,
-                                                       Q, KnownBits::sadd_sat);
+        Known = computeKnownBitsForHorizontalOperation(
+            I, DemandedElts, Q, Depth, KnownBits::sadd_sat);
         break;
       }
       case Intrinsic::x86_ssse3_phsub_d_128:
@@ -1986,7 +1990,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
       case Intrinsic::x86_avx2_phsub_d:
       case Intrinsic::x86_avx2_phsub_w: {
         Known = computeKnownBitsForHorizontalOperation(
-            I, DemandedElts, Depth, Q,
+            I, DemandedElts, Q, Depth,
             [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
               return KnownBits::sub(KnownLHS, KnownRHS);
             });
@@ -1994,8 +1998,8 @@ static void computeKnownBitsFromOperator(const Operator *I,
       }
       case Intrinsic::x86_ssse3_phsub_sw_128:
       case Intrinsic::x86_avx2_phsub_sw: {
-        Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth,
-                                                       Q, KnownBits::ssub_sat);
+        Known = computeKnownBitsForHorizontalOperation(
+            I, DemandedElts, Q, Depth, KnownBits::ssub_sat);
         break;
       }
       case Intrinsic::riscv_vsetvli:
@@ -2049,14 +2053,14 @@ static void computeKnownBitsFromOperator(const Operator *I,
     Known.Zero.setAllBits();
     if (!!DemandedLHS) {
       const Value *LHS = Shuf->getOperand(0);
-      computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
+      computeKnownBits(LHS, DemandedLHS, Known, Q, Depth + 1);
       // If we don't know any bits, early out.
       if (Known.isUnknown())
         break;
     }
     if (!!DemandedRHS) {
       const Value *RHS = Shuf->getOperand(1);
-      computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
+      computeKnownBits(RHS, DemandedRHS, Known2, Q, Depth + 1);
       Known = Known.intersectWith(Known2);
     }
     break;
@@ -2081,14 +2085,14 @@ static void computeKnownBitsFromOperator(const Operator *I,
     Known.One.setAllBits();
     Known.Zero.setAllBits();
     if (NeedsElt) {
-      computeKnownBits(Elt, Known, Depth + 1, Q);
+      computeKnownBits(Elt, Known, Q, Depth + 1);
       // If we don't know any bits, early out.
       if (Known.isUnknown())
         break;
     }
 
     if (!DemandedVecElts.isZero()) {
-      computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
+      computeKnownBits(Vec, DemandedVecElts, Known2, Q, Depth + 1);
       Known = Known.intersectWith(Known2);
     }
     break;
@@ -2108,7 +2112,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
     APInt DemandedVecElts = APInt::getAllOnes(NumElts);
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
-    computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
+    computeKnownBits(Vec, DemandedVecElts, Known, Q, Depth + 1);
     break;
   }
   case Instruction::ExtractValue:
@@ -2122,18 +2126,18 @@ static void computeKnownBitsFromOperator(const Operator *I,
         case Intrinsic::sadd_with_overflow:
           computeKnownBitsAddSub(
               true, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
-              /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q);
+              /* NUW=*/false, DemandedElts, Known, Known2, Q, Depth);
           break;
         case Intrinsic::usub_with_overflow:
         case Intrinsic::ssub_with_overflow:
           computeKnownBitsAddSub(
               false, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
-              /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q);
+              /* NUW=*/false, DemandedElts, Known, Known2, Q, Depth);
           break;
         case Intrinsic::umul_with_overflow:
         case Intrinsic::smul_with_overflow:
           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
-                              false, DemandedElts, Known, Known2, Depth, Q);
+                              false, DemandedElts, Known, Known2, Q, Depth);
           break;
         }
       }
@@ -2142,7 +2146,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
   case Instruction::Freeze:
     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
                                   Depth + 1))
-      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
+      computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
     break;
   }
 }
@@ -2150,18 +2154,18 @@ static void computeKnownBitsFromOperator(const Operator *I,
 /// Determine which bits of V are known to be either zero or one and return
 /// them.
 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
-                                 unsigned Depth, const SimplifyQuery &Q) {
+                                 const SimplifyQuery &Q, unsigned Depth) {
   KnownBits Known(getBitWidth(V->getType(), Q.DL));
-  ::computeKnownBits(V, DemandedElts, Known, Depth, Q);
+  ::computeKnownBits(V, DemandedElts, Known, Q, Depth);
   return Known;
 }
 
 /// Determine which bits of V are known to be either zero or one and return
 /// them.
-KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth,
-                                 const SimplifyQuery &Q) {
+KnownBits llvm::computeKnownBits(const Value *V, const SimplifyQuery &Q,
+                                 unsigned Depth) {
   KnownBits Known(getBitWidth(V->getType(), Q.DL));
-  computeKnownBits(V, Known, Depth, Q);
+  computeKnownBits(V, Known, Q, Depth);
   return Known;
 }
 
@@ -2181,8 +2185,8 @@ KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth,
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the demanded elements in the vector specified by DemandedElts.
 void computeKnownBits(const Value *V, const APInt &DemandedElts,
-                      KnownBits &Known, unsigned Depth,
-                      const SimplifyQuery &Q) {
+                      KnownBits &Known, const SimplifyQuery &Q,
+                      unsigned Depth) {
   if (!DemandedElts) {
     // No demanded elts, better to assume we don't know anything.
     Known.resetAll();
@@ -2296,12 +2300,12 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
   // the bits of its aliasee.
   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
     if (!GA->isInterposable())
-      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
+      computeKnownBits(GA->getAliasee(), Known, Q, Depth + 1);
     return;
   }
 
   if (const Operator *I = dyn_cast<Operator>(V))
-    computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
+    computeKnownBitsFromOperator(I, DemandedElts, Known, Q, Depth);
   else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
     if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
       Known = CR->toKnownBits();
@@ -2317,13 +2321,13 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
   // Therefore, we run them after computeKnownBitsFromOperator.
 
   // Check whether we can determine known bits from context such as assumes.
-  computeKnownBitsFromContext(V, Known, Depth, Q);
+  computeKnownBitsFromContext(V, Known, Q, Depth);
 }
 
 /// Try to detect a recurrence where the value of the induction variable is
 /// always a power of two (or zero).
 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
-                                   unsigned Depth, SimplifyQuery &Q) {
+                                   SimplifyQuery &Q, unsigned Depth) {
   BinaryOperator *BO = nullptr;
   Value *Start = nullptr, *Step = nullptr;
   if (!matchSimpleRecurrence(PN, BO, Start, Step))
@@ -2335,7 +2339,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
       // Initial value comes from a different BB, need to adjust context
       // instruction for analysis.
       Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
-      if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
+      if (!isKnownToBeAPowerOfTwo(Start, OrZero, Q, Depth))
         return false;
     }
   }
@@ -2351,7 +2355,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
     // Power of two is closed under multiplication.
     return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
             Q.IIQ.hasNoSignedWrap(BO)) &&
-           isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
+           isKnownToBeAPowerOfTwo(Step, OrZero, Q, Depth);
   case Instruction::SDiv:
     // Start value must not be signmask for signed division, so simply being a
     // power of two is not sufficient, and it has to be a constant.
@@ -2363,7 +2367,7 @@ static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
     // If OrZero is false, we cannot guarantee that the induction variable is
     // non-zero after division (same for Shr), unless the division is exact.
     return (OrZero || Q.IIQ.isExact(BO)) &&
-           isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
+           isKnownToBeAPowerOfTwo(Step, false, Q, Depth);
   case Instruction::Shl:
     return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
   case Instruction::AShr:
@@ -2400,8 +2404,8 @@ static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero,
 /// bit set when defined. For vectors return true if every element is known to
 /// be a power of two when defined. Supports values with integer or pointer
 /// types and vectors of integers.
-bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
-                                  const SimplifyQuery &Q) {
+bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero,
+                                  const SimplifyQuery &Q, unsigned Depth) {
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
 
   if (isa<Constant>(V))
@@ -2469,30 +2473,30 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
 
   switch (I->getOpcode()) {
   case Instruction::ZExt:
-    return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
+    return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
   case Instruction::Trunc:
-    return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
+    return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
   case Instruction::Shl:
     if (OrZero || Q.IIQ.hasNoUnsignedWrap(I) || Q.IIQ.hasNoSignedWrap(I))
-      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
+      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
     return false;
   case Instruction::LShr:
     if (OrZero || Q.IIQ.isExact(cast<BinaryOperator>(I)))
-      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
+      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
     return false;
   case Instruction::UDiv:
     if (Q.IIQ.isExact(cast<BinaryOperator>(I)))
-      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
+      return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
     return false;
   case Instruction::Mul:
-    return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
-           isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q) &&
+    return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth) &&
+           isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth) &&
            (OrZero || isKnownNonZero(I, Q, Depth));
   case Instruction::And:
     // A power of two and'd with anything is a power of two or zero.
     if (OrZero &&
-        (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Depth, Q) ||
-         isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Depth, Q)))
+        (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Q, Depth) ||
+         isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Q, Depth)))
       return true;
     // X & (-X) is always a power of two or zero.
     if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) ||
@@ -2507,19 +2511,19 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
         Q.IIQ.hasNoSignedWrap(VOBO)) {
       if (match(I->getOperand(0),
                 m_c_And(m_Specific(I->getOperand(1)), m_Value())) &&
-          isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q))
+          isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth))
         return true;
       if (match(I->getOperand(1),
                 m_c_And(m_Specific(I->getOperand(0)), m_Value())) &&
-          isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q))
+          isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth))
         return true;
 
       unsigned BitWidth = V->getType()->getScalarSizeInBits();
       KnownBits LHSBits(BitWidth);
-      computeKnownBits(I->getOperand(0), LHSBits, Depth, Q);
+      computeKnownBits(I->getOperand(0), LHSBits, Q, Depth);
 
       KnownBits RHSBits(BitWidth);
-      computeKnownBits(I->getOperand(1), RHSBits, Depth, Q);
+      computeKnownBits(I->getOperand(1), RHSBits, Q, Depth);
       // If i8 V is a power of two or zero:
       //  ZeroBits: 1 1 1 0 1 1 1 1
       // ~ZeroBits: 0 0 0 1 0 0 0 0
@@ -2537,8 +2541,8 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
     return false;
   }
   case Instruction::Select:
-    return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
-           isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Depth, Q);
+    return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth) &&
+           isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Q, Depth);
   case Instruction::PHI: {
     // A PHI node is power of two if all incoming values are power of two, or if
     // it is an induction variable where in each step its value is a power of
@@ -2547,7 +2551,7 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
     SimplifyQuery RecQ = Q.getWithoutCondContext();
 
     // Check if it is an induction variable and always power of two.
-    if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
+    if (isPowerOfTwoRecurrence(PN, OrZero, RecQ, Depth))
       return true;
 
     // Recursively check all incoming values. Limit recursion to 2 levels, so
@@ -2561,7 +2565,7 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
       // Change the context instruction to the incoming block where it is
       // evaluated.
       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
-      return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
+      return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
     });
   }
   case Instruction::Invoke:
@@ -2572,18 +2576,18 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
       case Intrinsic::smax:
       case Intrinsic::umin:
       case Intrinsic::smin:
-        return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Depth, Q) &&
-               isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
+        return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Q, Depth) &&
+               isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
       // bswap/bitreverse just move around bits, but don't change any 1s/0s
         // thus don't change pow2/non-pow2 status.
       case Intrinsic::bitreverse:
       case Intrinsic::bswap:
-        return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
+        return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
       case Intrinsic::fshr:
       case Intrinsic::fshl:
         // If Op0 == Op1, this is a rotate. is_pow2(rotate(x, y)) == is_pow2(x)
         if (II->getArgOperand(0) == II->getArgOperand(1))
-          return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
+          return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
         break;
       default:
         break;
@@ -2602,8 +2606,8 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
 /// to be non-null.
 ///
 /// Currently this routine does not support vector GEPs.
-static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
-                              const SimplifyQuery &Q) {
+static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q,
+                              unsigned Depth) {
   const Function *F = nullptr;
   if (const Instruction *I = dyn_cast<Instruction>(GEP))
     F = I->getFunction();
@@ -2817,9 +2821,9 @@ static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {
                                                 m_Specific(Op0), m_Zero())));
 }
 
-static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
-                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
-                         Value *Y, bool NSW, bool NUW) {
+static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q,
+                         unsigned BitWidth, Value *X, Value *Y, bool NSW,
+                         bool NUW, unsigned Depth) {
   // (X + (X != 0)) is non zero
   if (matchOpWithOpEqZero(X, Y))
     return true;
@@ -2828,8 +2832,8 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
     return isKnownNonZero(Y, DemandedElts, Q, Depth) ||
            isKnownNonZero(X, DemandedElts, Q, Depth);
 
-  KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
-  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
+  KnownBits XKnown = computeKnownBits(X, DemandedElts, Q, Depth);
+  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Q, Depth);
 
   // If X and Y are both non-negative (as signed values) then their sum is not
   // zero unless both X and Y are zero.
@@ -2854,18 +2858,18 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
 
   // The sum of a non-negative number and a power of two is not zero.
   if (XKnown.isNonNegative() &&
-      isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
+      isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Q, Depth))
     return true;
   if (YKnown.isNonNegative() &&
-      isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
+      isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Q, Depth))
     return true;
 
   return KnownBits::add(XKnown, YKnown, NSW, NUW).isNonZero();
 }
 
-static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
-                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
-                         Value *Y) {
+static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q,
+                         unsigned BitWidth, Value *X, Value *Y,
+                         unsigned Depth) {
   // (X - (X != 0)) is non zero
   // ((X != 0) - X) is non zero
   if (matchOpWithOpEqZero(X, Y))
@@ -2876,12 +2880,12 @@ static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
     if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth))
       return true;
 
-  return ::isKnownNonEqual(X, Y, DemandedElts, Depth, Q);
+  return ::isKnownNonEqual(X, Y, DemandedElts, Q, Depth);
 }
 
-static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth,
-                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
-                         Value *Y, bool NSW, bool NUW) {
+static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q,
+                         unsigned BitWidth, Value *X, Value *Y, bool NSW,
+                         bool NUW, unsigned Depth) {
   // If X and Y are non-zero then so is X * Y as long as the multiplication
   // does not overflow.
   if (NSW || NUW)
@@ -2890,11 +2894,11 @@ static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth,
 
   // If either X or Y is odd, then if the other is non-zero the result can't
   // be zero.
-  KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
+  KnownBits XKnown = computeKnownBits(X, DemandedElts, Q, Depth);
   if (XKnown.One[0])
     return isKnownNonZero(Y, DemandedElts, Q, Depth);
 
-  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
+  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Q, Depth);
   if (YKnown.One[0])
     return XKnown.isNonZero() || isKnownNonZero(X, DemandedElts, Q, Depth);
 
@@ -2908,8 +2912,8 @@ static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth,
 }
 
 static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
-                           unsigned Depth, const SimplifyQuery &Q,
-                           const KnownBits &KnownVal) {
+                           const SimplifyQuery &Q, const KnownBits &KnownVal,
+                           unsigned Depth) {
   auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
     switch (I->getOpcode()) {
     case Instruction::Shl:
@@ -2939,7 +2943,7 @@ static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
     return false;
 
   KnownBits KnownCnt =
-      computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
+      computeKnownBits(I->getOperand(1), DemandedElts, Q, Depth);
   APInt MaxShift = KnownCnt.getMaxValue();
   unsigned NumBits = KnownVal.getBitWidth();
   if (MaxShift.uge(NumBits))
@@ -2960,7 +2964,7 @@ static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
 
 static bool isKnownNonZeroFromOperator(const Operator *I,
                                        const APInt &DemandedElts,
-                                       unsigned Depth, const SimplifyQuery &Q) {
+                                       const SimplifyQuery &Q, unsigned Depth) {
   unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL);
   switch (I->getOpcode()) {
   case Instruction::Alloca:
@@ -2968,7 +2972,7 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     return I->getType()->getPointerAddressSpace() == 0;
   case Instruction::GetElementPtr:
     if (I->getType()->isPointerTy())
-      return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
+      return isGEPKnownNonNull(cast<GEPOperator>(I), Q, Depth);
     break;
   case Instruction::BitCast: {
     // We need to be a bit careful here. We can only peek through the bitcast
@@ -3028,8 +3032,8 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     break;
 
   case Instruction::Sub:
-    return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
-                        I->getOperand(1));
+    return isNonZeroSub(DemandedElts, Q, BitWidth, I->getOperand(0),
+                        I->getOperand(1), Depth);
   case Instruction::Xor:
     // (X ^ (X != 0)) is non zero
     if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
@@ -3056,11 +3060,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
     // if the lowest bit is shifted off the end.
     KnownBits Known(BitWidth);
-    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
+    computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth);
     if (Known.One[0])
       return true;
 
-    return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
+    return isNonZeroShift(I, DemandedElts, Q, Known, Depth);
   }
   case Instruction::LShr:
   case Instruction::AShr: {
@@ -3072,11 +3076,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
     // defined if the sign bit is shifted off the end.
     KnownBits Known =
-        computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Q, Depth);
     if (Known.isNegative())
       return true;
 
-    return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
+    return isNonZeroShift(I, DemandedElts, Q, Known, Depth);
   }
   case Instruction::UDiv:
   case Instruction::SDiv: {
@@ -3086,14 +3090,14 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
       return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
 
     KnownBits XKnown =
-        computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
+        computeKnownBits(I->getOperand(0), DemandedElts, Q, Depth);
     // If X is fully unknown we won't be able to figure anything out so don't
     // bother computing knownbits for Y.
     if (XKnown.isUnknown())
       return false;
 
     KnownBits YKnown =
-        computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
+        computeKnownBits(I->getOperand(1), DemandedElts, Q, Depth);
     if (I->getOpcode() == Instruction::SDiv) {
       // For signed division need to compare abs value of the operands.
       XKnown = XKnown.abs(/*IntMinIsPoison*/ false);
@@ -3111,15 +3115,15 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
     // If Add has nuw wrap flag, then if either X or Y is non-zero the result is
     // non-zero.
     auto *BO = cast<OverflowingBinaryOperator>(I);
-    return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
+    return isNonZeroAdd(DemandedElts, Q, BitWidth, I->getOperand(0),
                         I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
-                        Q.IIQ.hasNoUnsignedWrap(BO));
+                        Q.IIQ.hasNoUnsignedWrap(BO), Depth);
   }
   case Instruction::Mul: {
     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I);
-    return isNonZeroMul(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
+    return isNonZeroMul(DemandedElts, Q, BitWidth, I->getOperand(0),
                         I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
-                        Q.IIQ.hasNoUnsignedWrap(BO));
+                        Q.IIQ.hasNoUnsignedWrap(BO), Depth);
   }
   case Instruction::Select: {
     // (C ? X : Y) != 0 if X != 0 and Y != 0.
@@ -3265,17 +3269,17 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
       default:
         break;
       case Instruction::Add:
-        return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
-                            WO->getArgOperand(0), WO->getArgOperand(1),
+        return isNonZeroAdd(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
+                            WO->getArgOperand(1),
                             /*NSW=*/false,
-                            /*NUW=*/false);
+                            /*NUW=*/false, Depth);
       case Instruction::Sub:
-        return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
-                            WO->getArgOperand(0), WO->getArgOperand(1));
+        return isNonZeroSub(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
+                            WO->getArgOperand(1), Depth);
       case Instruction::Mul:
-        return isNonZeroMul(DemandedElts, Depth, Q, BitWidth,
-                            WO->getArgOperand(0), WO->getArgOperand(1),
-                            /*NSW=*/false, /*NUW=*/false);
+        return isNonZeroMul(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
+                            WO->getArgOperand(1),
+                            /*NSW=*/false, /*NUW=*/false, Depth);
         break;
       }
     }
@@ -3314,12 +3318,12 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
         // NB: We don't do usub_sat here as in any case we can prove it's
         // non-zero, we will fold it to `sub nuw` in InstCombine.
       case Intrinsic::ssub_sat:
-        return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
-                            II->getArgOperand(0), II->getArgOperand(1));
+        return isNonZeroSub(DemandedElts, Q, BitWidth, II->getArgOperand(0),
+                            II->getArgOperand(1), Depth);
       case Intrinsic::sadd_sat:
-        return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
-                            II->getArgOperand(0), II->getArgOperand(1),
-                            /*NSW=*/true, /* NUW=*/false);
+        return isNonZeroAdd(DemandedElts, Q, BitWidth, II->getArgOperand(0),
+                            II->getArgOperand(1),
+                            /*NSW=*/true, /* NUW=*/false, Depth);
         // Vec reverse preserves zero/non-zero status from input vec.
       case Intrinsic::vector_reverse:
         return isKnownNonZero(II->getArgOperand(0), DemandedElts.reverseBits(),
@@ -3353,12 +3357,12 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
         // Avoid re-computing isKnownNonZero.
         std::optional<bool> Op0NonZero, Op1NonZero;
         KnownBits Op1Known =
-            computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
+            computeKnownBits(II->getArgOperand(1), DemandedElts, Q, Depth);
         if (Op1Known.isNonNegative() &&
             IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
           return true;
         KnownBits Op0Known =
-            computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
+            computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth);
         if (Op0Known.isNonNegative() &&
             IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
           return true;
@@ -3369,11 +3373,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
         // If either arg is negative the result is non-zero. Otherwise
         // the result is non-zero if both ops are non-zero.
         KnownBits Op1Known =
-            computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
+            computeKnownBits(II->getArgOperand(1), DemandedElts, Q, Depth);
         if (Op1Known.isNegative())
           return true;
         KnownBits Op0Known =
-            computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
+            computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth);
         if (Op0Known.isNegative())
           return true;
 
@@ -3385,10 +3389,10 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
         return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth) &&
                isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth);
       case Intrinsic::cttz:
-        return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
+        return computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth)
             .Zero[0];
       case Intrinsic::ctlz:
-        return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
+        return computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth)
             .isNonNegative();
       case Intrinsic::fshr:
       case Intrinsic::fshl:
@@ -3411,7 +3415,7 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
   }
 
   KnownBits Known(BitWidth);
-  computeKnownBits(I, DemandedElts, Known, Depth, Q);
+  computeKnownBits(I, DemandedElts, Known, Q, Depth);
   return Known.One != 0;
 }
 
@@ -3506,7 +3510,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
   }
 
   if (const auto *I = dyn_cast<Operator>(V))
-    if (isKnownNonZeroFromOperator(I, DemandedElts, Depth, Q))
+    if (isKnownNonZeroFromOperator(I, DemandedElts, Q, Depth))
       return true;
 
   if (!isa<Constant>(V) &&
@@ -3648,8 +3652,8 @@ getInvertibleOperands(const Operator *Op1,
 /// Only handle a small subset of binops where (binop V2, X) with non-zero X
 /// implies V2 != V1.
 static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2,
-                                      const APInt &DemandedElts, unsigned Depth,
-                                      const SimplifyQuery &Q) {
+                                      const APInt &DemandedElts,
+                                      const SimplifyQuery &Q, unsigned Depth) {
   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
   if (!BO)
     return false;
@@ -3677,8 +3681,8 @@ static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2,
 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
 /// the multiplication is nuw or nsw.
 static bool isNonEqualMul(const Value *V1, const Value *V2,
-                          const APInt &DemandedElts, unsigned Depth,
-                          const SimplifyQuery &Q) {
+                          const APInt &DemandedElts, const SimplifyQuery &Q,
+                          unsigned Depth) {
   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
     const APInt *C;
     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
@@ -3692,8 +3696,8 @@ static bool isNonEqualMul(const Value *V1, const Value *V2,
 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
 /// the shift is nuw or nsw.
 static bool isNonEqualShl(const Value *V1, const Value *V2,
-                          const APInt &DemandedElts, unsigned Depth,
-                          const SimplifyQuery &Q) {
+                          const APInt &DemandedElts, const SimplifyQuery &Q,
+                          unsigned Depth) {
   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
     const APInt *C;
     return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
@@ -3704,8 +3708,8 @@ static bool isNonEqualShl(const Value *V1, const Value *V2,
 }
 
 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
-                           const APInt &DemandedElts, unsigned Depth,
-                           const SimplifyQuery &Q) {
+                           const APInt &DemandedElts, const SimplifyQuery &Q,
+                           unsigned Depth) {
   // Check two PHIs are in the same block.
   if (PN1->getParent() != PN2->getParent())
     return false;
@@ -3727,7 +3731,7 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
 
     SimplifyQuery RecQ = Q.getWithoutCondContext();
     RecQ.CxtI = IncomBB->getTerminator();
-    if (!isKnownNonEqual(IV1, IV2, DemandedElts, Depth + 1, RecQ))
+    if (!isKnownNonEqual(IV1, IV2, DemandedElts, RecQ, Depth + 1))
       return false;
     UsedFullRecursion = true;
   }
@@ -3735,8 +3739,8 @@ static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
 }
 
 static bool isNonEqualSelect(const Value *V1, const Value *V2,
-                             const APInt &DemandedElts, unsigned Depth,
-                             const SimplifyQuery &Q) {
+                             const APInt &DemandedElts, const SimplifyQuery &Q,
+                             unsigned Depth) {
   const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
   if (!SI1)
     return false;
@@ -3746,12 +3750,12 @@ static bool isNonEqualSelect(const Value *V1, const Value *V2,
     const Value *Cond2 = SI2->getCondition();
     if (Cond1 == Cond2)
       return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(),
-                             DemandedElts, Depth + 1, Q) &&
+                             DemandedElts, Q, Depth + 1) &&
              isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(),
-                             DemandedElts, Depth + 1, Q);
+                             DemandedElts, Q, Depth + 1);
   }
-  return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Depth + 1, Q) &&
-         isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Depth + 1, Q);
+  return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Q, Depth + 1) &&
+         isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Q, Depth + 1);
 }
 
 // Check to see if A is both a GEP and the incoming value for a PHI in the
@@ -3807,7 +3811,7 @@ static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B,
 }
 
 static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2,
-                                       unsigned Depth, const SimplifyQuery &Q) {
+                                       const SimplifyQuery &Q, unsigned Depth) {
   if (!Q.CxtI)
     return false;
 
@@ -3865,8 +3869,8 @@ static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2,
 
 /// Return true if it is known that V1 != V2.
 static bool isKnownNonEqual(const Value *V1, const Value *V2,
-                            const APInt &DemandedElts, unsigned Depth,
-                            const SimplifyQuery &Q) {
+                            const APInt &DemandedElts, const SimplifyQuery &Q,
+                            unsigned Depth) {
   if (V1 == V2)
     return false;
   if (V1->getType() != V2->getType())
@@ -3883,44 +3887,44 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2,
   auto *O2 = dyn_cast<Operator>(V2);
   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
     if (auto Values = getInvertibleOperands(O1, O2))
-      return isKnownNonEqual(Values->first, Values->second, DemandedElts,
-                             Depth + 1, Q);
+      return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
+                             Depth + 1);
 
     if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
       const PHINode *PN2 = cast<PHINode>(V2);
       // FIXME: This is missing a generalization to handle the case where one
       // is a PHI and the other isn't.
-      if (isNonEqualPHIs(PN1, PN2, DemandedElts, Depth, Q))
+      if (isNonEqualPHIs(PN1, PN2, DemandedElts, Q, Depth))
         return true;
     };
   }
 
-  if (isModifyingBinopOfNonZero(V1, V2, DemandedElts, Depth, Q) ||
-      isModifyingBinopOfNonZero(V2, V1, DemandedElts, Depth, Q))
+  if (isModifyingBinopOfNonZero(V1, V2, DemandedElts, Q, Depth) ||
+      isModifyingBinopOfNonZero(V2, V1, DemandedElts, Q, Depth))
     return true;
 
-  if (isNonEqualMul(V1, V2, DemandedElts, Depth, Q) ||
-      isNonEqualMul(V2, V1, DemandedElts, Depth, Q))
+  if (isNonEqualMul(V1, V2, DemandedElts, Q, Depth) ||
+      isNonEqualMul(V2, V1, DemandedElts, Q, Depth))
     return true;
 
-  if (isNonEqualShl(V1, V2, DemandedElts, Depth, Q) ||
-      isNonEqualShl(V2, V1, DemandedElts, Depth, Q))
+  if (isNonEqualShl(V1, V2, DemandedElts, Q, Depth) ||
+      isNonEqualShl(V2, V1, DemandedElts, Q, Depth))
     return true;
 
   if (V1->getType()->isIntOrIntVectorTy()) {
     // Are any known bits in V1 contradictory to known bits in V2? If V1
     // has a known zero where V2 has a known one, they must not be equal.
-    KnownBits Known1 = computeKnownBits(V1, DemandedElts, Depth, Q);
+    KnownBits Known1 = computeKnownBits(V1, DemandedElts, Q, Depth);
     if (!Known1.isUnknown()) {
-      KnownBits Known2 = computeKnownBits(V2, DemandedElts, Depth, Q);
+      KnownBits Known2 = computeKnownBits(V2, DemandedElts, Q, Depth);
       if (Known1.Zero.intersects(Known2.One) ||
           Known2.Zero.intersects(Known1.One))
         return true;
     }
   }
 
-  if (isNonEqualSelect(V1, V2, DemandedElts, Depth, Q) ||
-      isNonEqualSelect(V2, V1, DemandedElts, Depth, Q))
+  if (isNonEqualSelect(V1, V2, DemandedElts, Q, Depth) ||
+      isNonEqualSelect(V2, V1, DemandedElts, Q, Depth))
     return true;
 
   if (isNonEqualPointersWithRecursiveGEP(V1, V2, Q) ||
@@ -3932,9 +3936,9 @@ static bool isKnownNonEqual(const Value *V1, const Value *V2,
   // Check PtrToInt type matches the pointer size.
   if (match(V1, m_PtrToIntSameSize(Q.DL, m_Value(A))) &&
       match(V2, m_PtrToIntSameSize(Q.DL, m_Value(B))))
-    return isKnownNonEqual(A, B, DemandedElts, Depth + 1, Q);
+    return isKnownNonEqual(A, B, DemandedElts, Q, Depth + 1);
 
-  if (isKnownNonEqualFromContext(V1, V2, Depth, Q))
+  if (isKnownNonEqualFromContext(V1, V2, Q, Depth))
     return true;
 
   return false;
@@ -3969,11 +3973,11 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V,
 
 static unsigned ComputeNumSignBitsImpl(const Value *V,
                                        const APInt &DemandedElts,
-                                       unsigned Depth, const SimplifyQuery &Q);
+                                       const SimplifyQuery &Q, unsigned Depth);
 
 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
-                                   unsigned Depth, const SimplifyQuery &Q) {
-  unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
+                                   const SimplifyQuery &Q, unsigned Depth) {
+  unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Q, Depth);
   assert(Result > 0 && "At least one sign bit needs to be present!");
   return Result;
 }
@@ -3987,7 +3991,7 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
 /// elements in the vector specified by DemandedElts.
 static unsigned ComputeNumSignBitsImpl(const Value *V,
                                        const APInt &DemandedElts,
-                                       unsigned Depth, const SimplifyQuery &Q) {
+                                       const SimplifyQuery &Q, unsigned Depth) {
   Type *Ty = V->getType();
 #ifndef NDEBUG
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
@@ -4042,7 +4046,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       if (isa<FixedVectorType>(Ty)) {
         // Fast case - sign splat can be simply split across the small elements.
         // This works for both vector and scalar sources
-        Tmp = ComputeNumSignBits(Src, Depth + 1, Q);
+        Tmp = ComputeNumSignBits(Src, Q, Depth + 1);
         if (Tmp == SrcBits)
           return TyBits;
       }
@@ -4050,7 +4054,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
     }
     case Instruction::SExt:
       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
-      return ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q) +
+      return ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1) +
              Tmp;
 
     case Instruction::SDiv: {
@@ -4064,7 +4068,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
 
         // Calculate the incoming numerator bits.
         unsigned NumBits =
-            ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+            ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
 
         // Add floor(log2(C)) bits to the numerator bits.
         return std::min(TyBits, NumBits + Denominator->logBase2());
@@ -4073,7 +4077,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
     }
 
     case Instruction::SRem: {
-      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
 
       const APInt *Denominator;
       // srem X, C -> we know that the result is within [-C+1,C) when C is a
@@ -4104,7 +4108,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
     }
 
     case Instruction::AShr: {
-      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
       // ashr X, C   -> adds C sign bits.  Vectors too.
       const APInt *ShAmt;
       if (match(U->getOperand(1), m_APInt(ShAmt))) {
@@ -4127,11 +4131,11 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
         // all extended bits are shifted out.
         if (match(U->getOperand(0), m_ZExt(m_Value(X))) &&
             ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) {
-          Tmp = ComputeNumSignBits(X, DemandedElts, Depth + 1, Q);
+          Tmp = ComputeNumSignBits(X, DemandedElts, Q, Depth + 1);
           Tmp += TyBits - X->getType()->getScalarSizeInBits();
         } else
           Tmp =
-              ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+              ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
         if (ShAmt->uge(Tmp))
           break; // Shifted all sign bits out.
         Tmp2 = ShAmt->getZExtValue();
@@ -4143,9 +4147,9 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
     case Instruction::Or:
     case Instruction::Xor: // NOT is handled here.
       // Logical binary ops preserve the number of sign bits at the worst.
-      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
       if (Tmp != 1) {
-        Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
+        Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
         FirstAnswer = std::min(Tmp, Tmp2);
         // We computed what we know about the sign bits as our first
         // answer. Now proceed to the generic code that uses
@@ -4161,24 +4165,24 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
 
-      Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
       if (Tmp == 1)
         break;
-      Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Depth + 1, Q);
+      Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Q, Depth + 1);
       return std::min(Tmp, Tmp2);
     }
 
     case Instruction::Add:
       // Add can have at most one carry bit.  Thus we know that the output
       // is, at worst, one more bit than the inputs.
-      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
       if (Tmp == 1) break;
 
       // Special case decrementing a value (ADD X, -1):
       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
         if (CRHS->isAllOnesValue()) {
           KnownBits Known(TyBits);
-          computeKnownBits(U->getOperand(0), DemandedElts, Known, Depth + 1, Q);
+          computeKnownBits(U->getOperand(0), DemandedElts, Known, Q, Depth + 1);
 
           // If the input is known to be 0 or 1, the output is 0/-1, which is
           // all sign bits set.
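+          // E.g. an i8 input of 0 or 1 gives 0 + (-1) = -1 (0xff) or
+          // 1 + (-1) = 0, so every bit of the result equals the sign bit.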
@@ -4191,13 +4195,13 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
             return Tmp;
         }
 
-      Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
+      Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
       if (Tmp2 == 1)
         break;
       return std::min(Tmp, Tmp2) - 1;
 
     case Instruction::Sub:
-      Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
+      Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
       if (Tmp2 == 1)
         break;
 
@@ -4205,7 +4209,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
         if (CLHS->isNullValue()) {
           KnownBits Known(TyBits);
-          computeKnownBits(U->getOperand(1), DemandedElts, Known, Depth + 1, Q);
+          computeKnownBits(U->getOperand(1), DemandedElts, Known, Q, Depth + 1);
           // If the input is known to be 0 or 1, the output is 0/-1, which is
           // all sign bits set.
           if ((Known.Zero | 1).isAllOnes())
@@ -4222,7 +4226,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
 
       // Sub can have at most one carry bit.  Thus we know that the output
       // is, at worst, one more bit than the inputs.
-      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
       if (Tmp == 1)
         break;
       return std::min(Tmp, Tmp2) - 1;
@@ -4231,11 +4235,11 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       // The output of the Mul can be at most twice the valid bits in the
       // inputs.
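+      // E.g. two i16 operands with 10 sign bits each carry 7 value bits each
+      // (including the sign), so the product fits in 14 bits and at least
+      // 16 - 14 + 1 = 3 sign bits remain.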
       unsigned SignBitsOp0 =
-          ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+          ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
       if (SignBitsOp0 == 1)
         break;
       unsigned SignBitsOp1 =
-          ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
+          ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
       if (SignBitsOp1 == 1)
         break;
       unsigned OutValidBits =
@@ -4259,7 +4263,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
         if (Tmp == 1) return Tmp;
         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
         Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i),
-                                               DemandedElts, Depth + 1, RecQ));
+                                               DemandedElts, RecQ, Depth + 1));
       }
       return Tmp;
     }
@@ -4268,7 +4272,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       // If the input contained enough sign bits that some remain after the
       // truncation, then we can make use of that. Otherwise we don't know
       // anything.
-      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+      Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
       unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
       if (Tmp > (OperandTyBits - TyBits))
         return Tmp - (OperandTyBits - TyBits);
@@ -4281,7 +4285,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       // skip tracking the specific element. But at least we might find
       // information valid for all elements of the vector (for example if the
       // vector is sign extended, shifted, etc.).
-      return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+      return ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
 
     case Instruction::ShuffleVector: {
       // Collect the minimum number of sign bits that are shared by every vector
@@ -4299,7 +4303,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
       Tmp = std::numeric_limits<unsigned>::max();
       if (!!DemandedLHS) {
         const Value *LHS = Shuf->getOperand(0);
-        Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
+        Tmp = ComputeNumSignBits(LHS, DemandedLHS, Q, Depth + 1);
       }
       // If we don't know anything, early out and try computeKnownBits
       // fall-back.
@@ -4307,7 +4311,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
         break;
       if (!!DemandedRHS) {
         const Value *RHS = Shuf->getOperand(1);
-        Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
+        Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Q, Depth + 1);
         Tmp = std::min(Tmp, Tmp2);
       }
       // If we don't know anything, early out and try computeKnownBits
@@ -4324,7 +4328,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
           break;
         case Intrinsic::abs:
           Tmp =
-              ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
+              ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
           if (Tmp == 1)
             break;
 
@@ -4352,7 +4356,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
     return VecSignBits;
 
   KnownBits Known(TyBits);
-  computeKnownBits(V, DemandedElts, Known, Depth, Q);
+  computeKnownBits(V, DemandedElts, Known, Q, Depth);
 
   // If we know that the sign bit is either zero or one, determine the number of
   // identical bits in the top of the input value.
@@ -4546,22 +4550,23 @@ bool llvm::isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
 }
 
 static void computeKnownFPClassFromCond(const Value *V, Value *Cond,
-                                        unsigned Depth, bool CondIsTrue,
+                                        bool CondIsTrue,
                                         const Instruction *CxtI,
-                                        KnownFPClass &KnownFromContext) {
+                                        KnownFPClass &KnownFromContext,
+                                        unsigned Depth = 0) {
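+  // Depth is a recursion counter: external callers rely on the default of 0,
+  // and the recursive calls below pass Depth + 1, bounded by
+  // MaxAnalysisRecursionDepth.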
   Value *A, *B;
   if (Depth < MaxAnalysisRecursionDepth &&
       (CondIsTrue ? match(Cond, m_LogicalAnd(m_Value(A), m_Value(B)))
                   : match(Cond, m_LogicalOr(m_Value(A), m_Value(B))))) {
-    computeKnownFPClassFromCond(V, A, Depth + 1, CondIsTrue, CxtI,
-                                KnownFromContext);
-    computeKnownFPClassFromCond(V, B, Depth + 1, CondIsTrue, CxtI,
-                                KnownFromContext);
+    computeKnownFPClassFromCond(V, A, CondIsTrue, CxtI, KnownFromContext,
+                                Depth + 1);
+    computeKnownFPClassFromCond(V, B, CondIsTrue, CxtI, KnownFromContext,
+                                Depth + 1);
     return;
   }
   if (Depth < MaxAnalysisRecursionDepth && match(Cond, m_Not(m_Value(A)))) {
-    computeKnownFPClassFromCond(V, A, Depth + 1, !CondIsTrue, CxtI,
-                                KnownFromContext);
+    computeKnownFPClassFromCond(V, A, !CondIsTrue, CxtI, KnownFromContext,
+                                Depth + 1);
     return;
   }
   CmpPredicate Pred;
@@ -4604,13 +4609,13 @@ static KnownFPClass computeKnownFPClassFromContext(const Value *V,
 
       BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
       if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
-        computeKnownFPClassFromCond(V, Cond, /*Depth=*/0, /*CondIsTrue=*/true,
-                                    Q.CxtI, KnownFromContext);
+        computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/true, Q.CxtI,
+                                    KnownFromContext);
 
       BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
       if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
-        computeKnownFPClassFromCond(V, Cond, /*Depth=*/0, /*CondIsTrue=*/false,
-                                    Q.CxtI, KnownFromContext);
+        computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/false, Q.CxtI,
+                                    KnownFromContext);
     }
   }
 
@@ -4632,7 +4637,7 @@ static KnownFPClass computeKnownFPClassFromContext(const Value *V,
     if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
       continue;
 
-    computeKnownFPClassFromCond(V, I->getArgOperand(0), /*Depth=*/0,
+    computeKnownFPClassFromCond(V, I->getArgOperand(0),
                                 /*CondIsTrue=*/true, Q.CxtI, KnownFromContext);
   }
 
@@ -4641,29 +4646,30 @@ static KnownFPClass computeKnownFPClassFromContext(const Value *V,
 
 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                          FPClassTest InterestedClasses, KnownFPClass &Known,
-                         unsigned Depth, const SimplifyQuery &Q);
+                         const SimplifyQuery &Q, unsigned Depth);
 
 static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
-                                FPClassTest InterestedClasses, unsigned Depth,
-                                const SimplifyQuery &Q) {
+                                FPClassTest InterestedClasses,
+                                const SimplifyQuery &Q, unsigned Depth) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q);
+  computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Q, Depth);
 }
 
 static void computeKnownFPClassForFPTrunc(const Operator *Op,
                                           const APInt &DemandedElts,
                                           FPClassTest InterestedClasses,
-                                          KnownFPClass &Known, unsigned Depth,
-                                          const SimplifyQuery &Q) {
+                                          KnownFPClass &Known,
+                                          const SimplifyQuery &Q,
+                                          unsigned Depth) {
   if ((InterestedClasses &
        (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone)
     return;
 
   KnownFPClass KnownSrc;
   computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                      KnownSrc, Depth + 1, Q);
+                      KnownSrc, Q, Depth + 1);
 
   // Sign should be preserved
   // TODO: Handle cannot be ordered greater than zero
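+  // (e.g. truncating a negative double may underflow to -0.0 or overflow to
+  // -inf, but the sign bit is unchanged).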
@@ -4677,7 +4683,7 @@ static void computeKnownFPClassForFPTrunc(const Operator *Op,
 
 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                          FPClassTest InterestedClasses, KnownFPClass &Known,
-                         unsigned Depth, const SimplifyQuery &Q) {
+                         const SimplifyQuery &Q, unsigned Depth) {
   assert(Known.isUnknown() && "should not be called with known information");
 
   if (!DemandedElts) {
@@ -4787,7 +4793,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   switch (Opc) {
   case Instruction::FNeg: {
     computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q);
+                        Known, Q, Depth + 1);
     Known.fneg();
     break;
   }
@@ -4833,11 +4839,11 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     KnownFPClass Known2;
     computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
-                        Depth + 1, Q);
+                        Q, Depth + 1);
     Known.KnownFPClasses &= FilterLHS;
 
     computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
-                        Known2, Depth + 1, Q);
+                        Known2, Q, Depth + 1);
     Known2.KnownFPClasses &= FilterRHS;
 
     Known |= Known2;
@@ -4852,7 +4858,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         // If we only care about the sign bit we don't need to inspect the
         // operand.
         computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                            InterestedClasses, Known, Depth + 1, Q);
+                            InterestedClasses, Known, Q, Depth + 1);
       }
 
       Known.fabs();
@@ -4862,9 +4868,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       KnownFPClass KnownSign;
 
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          Known, Depth + 1, Q);
+                          Known, Q, Depth + 1);
       computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
-                          KnownSign, Depth + 1, Q);
+                          KnownSign, Q, Depth + 1);
       Known.copysign(KnownSign);
       break;
     }
@@ -4882,7 +4888,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // x * x + y is non-negative if y is non-negative.
       KnownFPClass KnownAddend;
       computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses,
-                          KnownAddend, Depth + 1, Q);
+                          KnownAddend, Q, Depth + 1);
 
       if (KnownAddend.cannotBeOrderedLessThanZero())
         Known.knownNot(fcNegative);
@@ -4896,7 +4902,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
 
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
 
       if (KnownSrc.isKnownNeverPosInfinity())
         Known.knownNot(fcPosInf);
@@ -4928,7 +4934,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // Return NaN on infinite inputs.
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
       Known.knownNot(fcInf);
       if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
         Known.knownNot(fcNan);
@@ -4942,9 +4948,9 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     case Intrinsic::maximumnum: {
       KnownFPClass KnownLHS, KnownRHS;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          KnownLHS, Depth + 1, Q);
+                          KnownLHS, Q, Depth + 1);
       computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
-                          KnownRHS, Depth + 1, Q);
+                          KnownRHS, Q, Depth + 1);
 
       bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
       Known = KnownLHS | KnownRHS;
@@ -5035,7 +5041,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     case Intrinsic::canonicalize: {
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
 
       // This is essentially a stronger form of
       // propagateCanonicalizingSrc. Other "canonicalizing" operations don't
@@ -5086,7 +5092,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // reduce min/max will choose an element from one of the vector elements,
       // so we can infer any class information that is common to all elements.
       Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(),
-                                  InterestedClasses, Depth + 1, Q);
+                                  InterestedClasses, Q, Depth + 1);
       // Can only propagate sign if output is never NaN.
       if (!Known.isKnownNeverNaN())
         Known.SignBit.reset();
@@ -5096,7 +5102,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     case Intrinsic::vector_reverse:
       Known = computeKnownFPClass(
           II->getArgOperand(0), DemandedElts.reverseBits(),
-          II->getFastMathFlags(), InterestedClasses, Depth + 1, Q);
+          II->getFastMathFlags(), InterestedClasses, Q, Depth + 1);
       break;
     case Intrinsic::trunc:
     case Intrinsic::floor:
@@ -5112,7 +5118,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       if (InterestedSrcs & fcNegFinite)
         InterestedSrcs |= fcNegFinite;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
 
       // Integer results cannot be subnormal.
       Known.knownNot(fcSubnormal);
@@ -5145,7 +5151,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
       if (KnownSrc.isKnownNeverNaN()) {
         Known.knownNot(fcNan);
         Known.signBitMustBeZero();
@@ -5155,7 +5161,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     }
     case Intrinsic::fptrunc_round: {
       computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
-                                    Depth, Q);
+                                    Q, Depth);
       break;
     }
     case Intrinsic::log:
@@ -5179,7 +5185,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
 
       if (KnownSrc.isKnownNeverPosInfinity())
         Known.knownNot(fcPosInf);
@@ -5210,7 +5216,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth();
       KnownBits ExponentKnownBits(BitWidth);
       computeKnownBits(Exp, isa<VectorType>(ExpTy) ? DemandedElts : APInt(1, 1),
-                       ExponentKnownBits, Depth + 1, Q);
+                       ExponentKnownBits, Q, Depth + 1);
 
       if (ExponentKnownBits.Zero[0]) { // Is even
         Known.knownNot(fcNegative);
@@ -5227,7 +5233,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       //   pow(-inf, exp) --> -inf if exp is positive odd.
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
       if (KnownSrc.isKnownNever(fcNegative))
         Known.knownNot(fcNegative);
       break;
@@ -5235,7 +5241,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     case Intrinsic::ldexp: {
       KnownFPClass KnownSrc;
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          KnownSrc, Depth + 1, Q);
+                          KnownSrc, Q, Depth + 1);
       Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
 
       // Sign is preserved, but underflows may produce zeroes.
@@ -5298,7 +5304,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     }
     case Intrinsic::arithmetic_fence: {
       computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
-                          Known, Depth + 1, Q);
+                          Known, Q, Depth + 1);
       break;
     }
     case Intrinsic::experimental_constrained_sitofp:
@@ -5341,7 +5347,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     if (InterestedClasses & fcNan)
       InterestedSrcs |= fcInf;
     computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs,
-                        KnownRHS, Depth + 1, Q);
+                        KnownRHS, Q, Depth + 1);
 
     if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
         (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
@@ -5350,7 +5356,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       // RHS is canonically cheaper to compute. Skip inspecting the LHS if
       // there's no point.
       computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs,
-                          KnownLHS, Depth + 1, Q);
+                          KnownLHS, Q, Depth + 1);
       // Adding positive and negative infinity produces NaN.
       // TODO: Check sign of infinities.
       if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
@@ -5409,12 +5415,12 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     KnownFPClass KnownLHS, KnownRHS;
     computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS,
-                        Depth + 1, Q);
+                        Q, Depth + 1);
     if (!KnownRHS.isKnownNeverNaN())
       break;
 
     computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS,
-                        Depth + 1, Q);
+                        Q, Depth + 1);
     if (!KnownLHS.isKnownNeverNaN())
       break;
 
@@ -5472,8 +5478,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     KnownFPClass KnownLHS, KnownRHS;
 
     computeKnownFPClass(Op->getOperand(1), DemandedElts,
-                        fcNan | fcInf | fcZero | fcNegative, KnownRHS,
-                        Depth + 1, Q);
+                        fcNan | fcInf | fcZero | fcNegative, KnownRHS, Q,
+                        Depth + 1);
 
     bool KnowSomethingUseful =
         KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
@@ -5484,8 +5490,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                        : fcNan | fcInf | fcZero | fcSubnormal | fcNegative;
 
       computeKnownFPClass(Op->getOperand(0), DemandedElts,
-                          InterestedClasses & InterestedLHS, KnownLHS,
-                          Depth + 1, Q);
+                          InterestedClasses & InterestedLHS, KnownLHS, Q,
+                          Depth + 1);
     }
 
     const Function *F = cast<Instruction>(Op)->getFunction();
@@ -5534,7 +5540,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
   case Instruction::FPExt: {
     // Infinity, nan and zero propagate from source.
     computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
-                        Known, Depth + 1, Q);
+                        Known, Q, Depth + 1);
 
     const fltSemantics &DstTy =
         Op->getType()->getScalarType()->getFltSemantics();
@@ -5556,8 +5562,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     break;
   }
   case Instruction::FPTrunc: {
-    computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
-                                  Depth, Q);
+    computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known, Q,
+                                  Depth);
     break;
   }
   case Instruction::SIToFP:
@@ -5603,7 +5609,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       if (CIdx && CIdx->getValue().ult(NumElts))
         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
       return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
-                                 Depth + 1, Q);
+                                 Q, Depth + 1);
     }
 
     break;
@@ -5626,7 +5632,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     // Do we demand the inserted element?
     if (NeedsElt) {
-      computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q);
+      computeKnownFPClass(Elt, Known, InterestedClasses, Q, Depth + 1);
       // If we don't know any bits, early out.
       if (Known.isUnknown())
         break;
@@ -5637,8 +5643,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     // Do we need any more elements from Vec?
     if (!DemandedVecElts.isZero()) {
       KnownFPClass Known2;
-      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
-                          Depth + 1, Q);
+      computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, Q,
+                          Depth + 1);
       Known |= Known2;
     }
 
@@ -5654,8 +5660,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     if (!!DemandedLHS) {
       const Value *LHS = Shuf->getOperand(0);
-      computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
-                          Depth + 1, Q);
+      computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, Q,
+                          Depth + 1);
 
       // If we don't know any bits, early out.
       if (Known.isUnknown())
@@ -5667,8 +5673,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
     if (!!DemandedRHS) {
       KnownFPClass Known2;
       const Value *RHS = Shuf->getOperand(1);
-      computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
-                          Depth + 1, Q);
+      computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, Q,
+                          Depth + 1);
       Known |= Known2;
     }
 
@@ -5687,7 +5693,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
           KnownFPClass KnownSrc;
           computeKnownFPClass(II->getArgOperand(0), DemandedElts,
-                              InterestedClasses, KnownSrc, Depth + 1, Q);
+                              InterestedClasses, KnownSrc, Q, Depth + 1);
 
           const Function *F = cast<Instruction>(Op)->getFunction();
           const fltSemantics &FltSem =
@@ -5722,8 +5728,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
       }
     }
 
-    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1,
-                        Q);
+    computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Q,
+                        Depth + 1);
     break;
   }
   case Instruction::PHI: {
@@ -5756,8 +5762,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
         // to waste time spinning around in loops. We need at least depth 2 to
         // detect known sign bits.
         computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
-                            PhiRecursionLimit,
-                            Q.getWithoutCondContext().getWithInstruction(CxtI));
+                            Q.getWithoutCondContext().getWithInstruction(CxtI),
+                            PhiRecursionLimit);
 
         if (First) {
           Known = KnownSrc;
@@ -5781,7 +5787,7 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
     const Type *Ty = Op->getType()->getScalarType();
     KnownBits Bits(Ty->getScalarSizeInBits());
-    computeKnownBits(Src, DemandedElts, Bits, Depth + 1, Q);
+    computeKnownBits(Src, DemandedElts, Bits, Q, Depth + 1);
 
     // Transfer information from the sign bit.
     if (Bits.isNonNegative())
@@ -5837,43 +5843,43 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 KnownFPClass llvm::computeKnownFPClass(const Value *V,
                                        const APInt &DemandedElts,
                                        FPClassTest InterestedClasses,
-                                       unsigned Depth,
-                                       const SimplifyQuery &SQ) {
+                                       const SimplifyQuery &SQ,
+                                       unsigned Depth) {
   KnownFPClass KnownClasses;
-  ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
-                        SQ);
+  ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, SQ,
+                        Depth);
   return KnownClasses;
 }
 
 KnownFPClass llvm::computeKnownFPClass(const Value *V,
                                        FPClassTest InterestedClasses,
-                                       unsigned Depth,
-                                       const SimplifyQuery &SQ) {
+                                       const SimplifyQuery &SQ,
+                                       unsigned Depth) {
   KnownFPClass Known;
-  ::computeKnownFPClass(V, Known, InterestedClasses, Depth, SQ);
+  ::computeKnownFPClass(V, Known, InterestedClasses, SQ, Depth);
   return Known;
 }
 
 KnownFPClass llvm::computeKnownFPClass(
     const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
-    unsigned Depth, const TargetLibraryInfo *TLI, AssumptionCache *AC,
-    const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
-  return computeKnownFPClass(
-      V, InterestedClasses, Depth,
-      SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo));
+    const TargetLibraryInfo *TLI, AssumptionCache *AC, const Instruction *CxtI,
+    const DominatorTree *DT, bool UseInstrInfo, unsigned Depth) {
+  return computeKnownFPClass(V, InterestedClasses,
+                             SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo),
+                             Depth);
 }
 
 KnownFPClass
 llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                           FastMathFlags FMF, FPClassTest InterestedClasses,
-                          unsigned Depth, const SimplifyQuery &SQ) {
+                          const SimplifyQuery &SQ, unsigned Depth) {
   if (FMF.noNaNs())
     InterestedClasses &= ~fcNan;
   if (FMF.noInfs())
     InterestedClasses &= ~fcInf;
 
   KnownFPClass Result =
-      computeKnownFPClass(V, DemandedElts, InterestedClasses, Depth, SQ);
+      computeKnownFPClass(V, DemandedElts, InterestedClasses, SQ, Depth);
 
   if (FMF.noNaNs())
     Result.KnownFPClasses &= ~fcNan;
@@ -5884,56 +5890,57 @@ llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts,
 
 KnownFPClass llvm::computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                        FPClassTest InterestedClasses,
-                                       unsigned Depth,
-                                       const SimplifyQuery &SQ) {
+                                       const SimplifyQuery &SQ,
+                                       unsigned Depth) {
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
   APInt DemandedElts =
       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
-  return computeKnownFPClass(V, DemandedElts, FMF, InterestedClasses, Depth,
-                             SQ);
+  return computeKnownFPClass(V, DemandedElts, FMF, InterestedClasses, SQ,
+                             Depth);
 }
 
-bool llvm::cannotBeNegativeZero(const Value *V, unsigned Depth,
-                                const SimplifyQuery &SQ) {
-  KnownFPClass Known = computeKnownFPClass(V, fcNegZero, Depth, SQ);
+bool llvm::cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ,
+                                unsigned Depth) {
+  KnownFPClass Known = computeKnownFPClass(V, fcNegZero, SQ, Depth);
   return Known.isKnownNeverNegZero();
 }
 
-bool llvm::cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
-                                       const SimplifyQuery &SQ) {
+bool llvm::cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ,
+                                       unsigned Depth) {
   KnownFPClass Known =
-      computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, Depth, SQ);
+      computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, SQ, Depth);
   return Known.cannotBeOrderedLessThanZero();
 }
 
-bool llvm::isKnownNeverInfinity(const Value *V, unsigned Depth,
-                                const SimplifyQuery &SQ) {
-  KnownFPClass Known = computeKnownFPClass(V, fcInf, Depth, SQ);
+bool llvm::isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ,
+                                unsigned Depth) {
+  KnownFPClass Known = computeKnownFPClass(V, fcInf, SQ, Depth);
   return Known.isKnownNeverInfinity();
 }
 
 /// Return true if the floating-point value can never contain a NaN or infinity.
-bool llvm::isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
-                                const SimplifyQuery &SQ) {
-  KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, Depth, SQ);
+bool llvm::isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ,
+                                unsigned Depth) {
+  KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, SQ, Depth);
   return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
 }
 
 /// Return true if the floating-point scalar value is not a NaN or if the
 /// floating-point vector value has no NaN elements. Return false if a value
 /// could ever be NaN.
-bool llvm::isKnownNeverNaN(const Value *V, unsigned Depth,
-                           const SimplifyQuery &SQ) {
-  KnownFPClass Known = computeKnownFPClass(V, fcNan, Depth, SQ);
+bool llvm::isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ,
+                           unsigned Depth) {
+  KnownFPClass Known = computeKnownFPClass(V, fcNan, SQ, Depth);
   return Known.isKnownNeverNaN();
 }
 
 /// Return false if we can prove that the specified FP value's sign bit is 0.
 /// Return true if we can prove that the specified FP value's sign bit is 1.
 /// Otherwise return std::nullopt.
-std::optional<bool> llvm::computeKnownFPSignBit(const Value *V, unsigned Depth,
-                                                const SimplifyQuery &SQ) {
-  KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, Depth, SQ);
+std::optional<bool> llvm::computeKnownFPSignBit(const Value *V,
+                                                const SimplifyQuery &SQ,
+                                                unsigned Depth) {
+  KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, SQ, Depth);
   return Known.SignBit;
 }
 
@@ -7063,8 +7070,8 @@ OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                    const Value *RHS,
                                                    const SimplifyQuery &SQ,
                                                    bool IsNSW) {
-  KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
-  KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
+  KnownBits LHSKnown = computeKnownBits(LHS, SQ);
+  KnownBits RHSKnown = computeKnownBits(RHS, SQ);
 
   // mul nsw of two non-negative numbers is also nuw.
   if (IsNSW && LHSKnown.isNonNegative() && RHSKnown.isNonNegative())
@@ -7089,7 +7096,7 @@ OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
   // Note that underestimating the number of sign bits gives a more
   // conservative answer.
   unsigned SignBits =
-      ::ComputeNumSignBits(LHS, 0, SQ) + ::ComputeNumSignBits(RHS, 0, SQ);
+      ::ComputeNumSignBits(LHS, SQ) + ::ComputeNumSignBits(RHS, SQ);
 
   // First handle the easy case: if we have enough sign bits there's
   // definitely no overflow.
@@ -7106,8 +7113,8 @@ OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
     // product is exactly the minimum negative number.
     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
     // For simplicity we just check if at least one side is not negative.
-    KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
-    KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
+    KnownBits LHSKnown = computeKnownBits(LHS, SQ);
+    KnownBits RHSKnown = computeKnownBits(RHS, SQ);
     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
       return OverflowResult::NeverOverflows;
   }
@@ -7147,8 +7154,7 @@ computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
   //
   // Since the carry into the most significant position is always equal to
   // the carry out of the addition, there is no signed overflow.
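+  // E.g. for i8, two sign bits mean each operand lies in [-64, 63], so the
+  // sum lies in [-128, 126] and cannot wrap.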
-  if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
-      ::ComputeNumSignBits(RHS, 0, SQ) > 1)
+  if (::ComputeNumSignBits(LHS, SQ) > 1 && ::ComputeNumSignBits(RHS, SQ) > 1)
     return OverflowResult::NeverOverflows;
 
   ConstantRange LHSRange =
@@ -7175,7 +7181,7 @@ computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
     KnownBits AddKnown(LHSRange.getBitWidth());
-    computeKnownBitsFromContext(Add, AddKnown, /*Depth=*/0, SQ);
+    computeKnownBitsFromContext(Add, AddKnown, SQ);
     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
       return OverflowResult::NeverOverflows;
@@ -7235,8 +7241,7 @@ OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
 
   // If LHS and RHS each have at least two sign bits, the subtraction
   // cannot overflow.
-  if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
-      ::ComputeNumSignBits(RHS, 0, SQ) > 1)
+  if (::ComputeNumSignBits(LHS, SQ) > 1 && ::ComputeNumSignBits(RHS, SQ) > 1)
     return OverflowResult::NeverOverflows;
 
   ConstantRange LHSRange =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index aa76a0612c0e7..22b921fb2084f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -180,7 +180,7 @@ class AMDGPUCodeGenPrepareImpl
   /// Wrapper to pass all the arguments to computeKnownFPClass
   KnownFPClass computeKnownFPClass(const Value *V, FPClassTest Interested,
                                    const Instruction *CtxI) const {
-    return llvm::computeKnownFPClass(V, DL, Interested, 0, TLI, AC, CtxI, DT);
+    return llvm::computeKnownFPClass(V, DL, Interested, TLI, AC, CtxI, DT);
   }
 
   bool canIgnoreDenormalInput(const Value *V, const Instruction *CtxI) const {
@@ -596,11 +596,11 @@ bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
 }
 
 unsigned AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op) const {
-  return computeKnownBits(Op, DL, 0, AC).countMaxActiveBits();
+  return computeKnownBits(Op, DL, AC).countMaxActiveBits();
 }
 
 unsigned AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op) const {
-  return ComputeMaxSignificantBits(Op, DL, 0, AC);
+  return ComputeMaxSignificantBits(Op, DL, AC);
 }
 
 static void extractValues(IRBuilder<> &Builder,
@@ -1198,13 +1198,13 @@ unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
          Den->getType()->getScalarSizeInBits());
   unsigned SSBits = Num->getType()->getScalarSizeInBits();
   if (IsSigned) {
-    unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
+    unsigned RHSSignBits = ComputeNumSignBits(Den, DL, AC, &I);
     // A sign bit needs to be reserved for shrinking.
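+    // E.g. with SSBits = 32 and 17 known sign bits, the value fits in
+    // 32 - 17 + 1 = 16 bits, so a 16-bit division suffices.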
     unsigned DivBits = SSBits - RHSSignBits + 1;
     if (DivBits > MaxDivBits)
       return SSBits;
 
-    unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
+    unsigned LHSSignBits = ComputeNumSignBits(Num, DL, AC, &I);
 
     unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
     DivBits = SSBits - SignBits + 1;
@@ -1213,7 +1213,7 @@ unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
 
   // All bits are used for unsigned division for Num or Den in range
   // (SignedMax, UnsignedMax].
-  KnownBits Known = computeKnownBits(Den, DL, 0, AC, &I);
+  KnownBits Known = computeKnownBits(Den, DL, AC, &I);
   if (Known.isNegative() || !Known.isNonNegative())
     return SSBits;
   unsigned RHSSignBits = Known.countMinLeadingZeros();
@@ -1221,7 +1221,7 @@ unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
   if (DivBits > MaxDivBits)
     return SSBits;
 
-  Known = computeKnownBits(Num, DL, 0, AC, &I);
+  Known = computeKnownBits(Num, DL, AC, &I);
   if (Known.isNegative() || !Known.isNonNegative())
     return SSBits;
   unsigned LHSSignBits = Known.countMinLeadingZeros();
@@ -1358,7 +1358,7 @@ bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
     // If there's no wider mulhi, there's only a better expansion for powers of
     // two.
     // TODO: Should really know for each vector element.
-    if (isKnownToBeAPowerOfTwo(C, DL, true, 0, AC, &I, DT))
+    if (isKnownToBeAPowerOfTwo(C, DL, true, AC, &I, DT))
       return true;
 
     return false;
@@ -1368,8 +1368,7 @@ bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
     // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
     if (BinOpDen->getOpcode() == Instruction::Shl &&
         isa<Constant>(BinOpDen->getOperand(0)) &&
-        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), DL, true, 0, AC, &I,
-                               DT)) {
+        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), DL, true, AC, &I, DT)) {
       return true;
     }
   }
@@ -2289,8 +2288,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) {
 
   // Match pattern for fract intrinsic in contexts where the nan check has been
   // optimized out (and hope the knowledge the source can't be nan wasn't lost).
-  if (!I.hasNoNaNs() &&
-      !isKnownNeverNaN(FractArg, /*Depth=*/0, SimplifyQuery(DL, TLI)))
+  if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, SimplifyQuery(DL, TLI)))
     return false;
 
   IRBuilder<> Builder(&I);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 16f27533a0c19..f4f391e0a6bab 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -365,8 +365,7 @@ bool GCNTTIImpl::canSimplifyLegacyMulToMul(const Instruction &I,
   }
 
   SimplifyQuery SQ = IC.getSimplifyQuery().getWithInstruction(&I);
-  if (isKnownNeverInfOrNaN(Op0, /*Depth=*/0, SQ) &&
-      isKnownNeverInfOrNaN(Op1, /*Depth=*/0, SQ)) {
+  if (isKnownNeverInfOrNaN(Op0, SQ) && isKnownNeverInfOrNaN(Op1, SQ)) {
     // Neither operand is infinity or NaN.
     return true;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
index f0d63f523088b..df976cf3f7fdb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
@@ -61,7 +61,7 @@ class AMDGPULateCodeGenPrepare
 
   // Check if the specified value is at least DWORD aligned.
   bool isDWORDAligned(const Value *V) const {
-    KnownBits Known = computeKnownBits(V, DL, 0, AC);
+    KnownBits Known = computeKnownBits(V, DL, AC);
     return Known.countMinTrailingZeros() >= 2;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 5c3abd9ce1e37..8767208d20ec9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -614,7 +614,7 @@ static bool isKnownIntegral(const Value *V, const DataLayout &DL,
 
     // Need to check int size cannot produce infinity, which computeKnownFPClass
     // knows how to do already.
-    return isKnownNeverInfinity(I, /*Depth=*/0, SimplifyQuery(DL));
+    return isKnownNeverInfinity(I, SimplifyQuery(DL));
   case Instruction::Call: {
     const CallInst *CI = cast<CallInst>(I);
     switch (CI->getIntrinsicID()) {
@@ -626,7 +626,7 @@ static bool isKnownIntegral(const Value *V, const DataLayout &DL,
     case Intrinsic::round:
     case Intrinsic::roundeven:
       return (FMF.noInfs() && FMF.noNaNs()) ||
-             isKnownNeverInfOrNaN(I, /*Depth=*/0, SimplifyQuery(DL));
+             isKnownNeverInfOrNaN(I, SimplifyQuery(DL));
     default:
       break;
     }
@@ -764,7 +764,7 @@ bool AMDGPULibCalls::fold(CallInst *CI) {
       // TODO: Account for flags on current call
       if (PowrFunc &&
           cannotBeOrderedLessThanZero(
-              FPOp->getOperand(0), /*Depth=*/0,
+              FPOp->getOperand(0),
               SimplifyQuery(M->getDataLayout(), TLInfo, DT, AC, Call))) {
         Call->setCalledFunction(PowrFunc);
         return fold_pow(FPOp, B, PowrInfo) || true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 86a6e49fce027..58bfc0b80b24f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1086,7 +1086,7 @@ Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
         return nullptr;
 
       // TODO: Do we need to thread more context in here?
-      KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
+      KnownBits Known = computeKnownBits(MaskOp, DL, nullptr, II);
       if (Known.countMinLeadingOnes() < 32)
         return nullptr;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index d0f14d8ad7286..3de6df568c9f4 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -2829,13 +2829,13 @@ auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
 auto HexagonVectorCombine::getNumSignificantBits(const Value *V,
                                                  const Instruction *CtxI) const
     -> unsigned {
-  return ComputeMaxSignificantBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
+  return ComputeMaxSignificantBits(V, DL, &AC, CtxI, &DT);
 }
 
 auto HexagonVectorCombine::getKnownBits(const Value *V,
                                         const Instruction *CtxI) const
     -> KnownBits {
-  return computeKnownBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
+  return computeKnownBits(V, DL, &AC, CtxI, &DT);
 }
 
 auto HexagonVectorCombine::isSafeToClone(const Instruction &In) const -> bool {
diff --git a/llvm/lib/Target/X86/X86PartialReduction.cpp b/llvm/lib/Target/X86/X86PartialReduction.cpp
index 1d0815ee830b9..a25e4e0f464a4 100644
--- a/llvm/lib/Target/X86/X86PartialReduction.cpp
+++ b/llvm/lib/Target/X86/X86PartialReduction.cpp
@@ -157,8 +157,7 @@ bool X86PartialReduction::tryMAddReplacement(Instruction *Op,
 
     // If the operation can be freely truncated and has enough sign bits we
     // can shrink.
-    if (IsFreeTruncation(Op) &&
-        ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
+    if (IsFreeTruncation(Op) && ComputeNumSignBits(Op, *DL, nullptr, Mul) > 16)
       return true;
 
     // SelectionDAG has limited support for truncating through an add or sub if
@@ -167,7 +166,7 @@ bool X86PartialReduction::tryMAddReplacement(Instruction *Op,
       if (BO->getParent() == Mul->getParent() &&
           IsFreeTruncation(BO->getOperand(0)) &&
           IsFreeTruncation(BO->getOperand(1)) &&
-          ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
+          ComputeNumSignBits(Op, *DL, nullptr, Mul) > 16)
         return true;
     }
 
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index c128687062ade..e9260b6ab092f 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -422,8 +422,7 @@ static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
   if (TTI.haveFastSqrt(Ty) &&
       (Call->hasNoNaNs() ||
        cannotBeOrderedLessThanZero(
-           Arg, 0,
-           SimplifyQuery(Call->getDataLayout(), &TLI, &DT, &AC, Call)))) {
+           Arg, SimplifyQuery(Call->getDataLayout(), &TLI, &DT, &AC, Call)))) {
     IRBuilder<> Builder(Call);
     Value *NewSqrt =
         Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg, Call, "sqrt");
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
index 28a0181392ef3..0b8a9fa48e342 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombineInternal.h
@@ -107,15 +107,14 @@ class TruncInstCombine {
   Type *getBestTruncatedType();
 
   KnownBits computeKnownBits(const Value *V) const {
-    return llvm::computeKnownBits(V, DL, /*Depth=*/0, &AC,
+    return llvm::computeKnownBits(V, DL, &AC,
                                   /*CtxI=*/cast<Instruction>(CurrentTruncInst),
                                   &DT);
   }
 
   unsigned ComputeNumSignBits(const Value *V) const {
     return llvm::ComputeNumSignBits(
-        V, DL, /*Depth=*/0, &AC, /*CtxI=*/cast<Instruction>(CurrentTruncInst),
-        &DT);
+        V, DL, &AC, /*CtxI=*/cast<Instruction>(CurrentTruncInst), &DT);
   }
 
   /// Given a \p V value and a \p SclTy scalar type return the generated reduced
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 658bbbc569766..a9ac5ff9b9c89 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -949,7 +949,7 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
     // If X has no high-bits set above an xor mask:
     // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
     if (C2->isMask()) {
-      KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
+      KnownBits LHSKnown = computeKnownBits(X, &Add);
       if ((*C2 | LHSKnown.Zero).isAllOnes())
         return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
     }
@@ -965,8 +965,8 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
         ShAmt = BitWidth - C->logBase2() - 1;
       else if (C2->isPowerOf2())
         ShAmt = BitWidth - C2->logBase2() - 1;
-      if (ShAmt && MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt),
-                                     0, &Add)) {
+      if (ShAmt &&
+          MaskedValueIsZero(X, APInt::getHighBitsSet(BitWidth, ShAmt), &Add)) {
         Constant *ShAmtC = ConstantInt::get(Ty, ShAmt);
         Value *NewShl = Builder.CreateShl(X, ShAmtC, "sext");
         return BinaryOperator::CreateAShr(NewShl, ShAmtC);
@@ -2438,7 +2438,7 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
       // zero. We don't use information from dominating conditions so this
       // transform is easier to reverse if necessary.
       KnownBits RHSKnown = llvm::computeKnownBits(
-          Op1, 0, SQ.getWithInstruction(&I).getWithoutDomCondCache());
+          Op1, SQ.getWithInstruction(&I).getWithoutDomCondCache());
       if ((*Op0C | RHSKnown.Zero).isAllOnes())
         return BinaryOperator::CreateXor(Op1, Op0);
     }
@@ -2990,7 +2990,7 @@ Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
   // killed later. We still limit that particular transform with 'hasOneUse'
   // because an fneg is assumed better/cheaper than a generic fsub.
   if (I.hasNoSignedZeros() ||
-      cannotBeNegativeZero(Op0, 0, getSimplifyQuery().getWithInstruction(&I))) {
+      cannotBeNegativeZero(Op0, getSimplifyQuery().getWithInstruction(&I))) {
     if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
       Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
       return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 59b46ebdb72e2..2fb4bfecda8aa 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -697,8 +697,8 @@ static Value *foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd,
   // -> (icmp eq (A & (B|D)), (B|D))
   // iff B and D is known to be a power of two
   if (Mask & Mask_NotAllZeros &&
-      isKnownToBeAPowerOfTwo(B, /*OrZero=*/false, /*Depth=*/0, Q) &&
-      isKnownToBeAPowerOfTwo(D, /*OrZero=*/false, /*Depth=*/0, Q)) {
+      isKnownToBeAPowerOfTwo(B, /*OrZero=*/false, Q) &&
+      isKnownToBeAPowerOfTwo(D, /*OrZero=*/false, Q)) {
     // If this is a logical and/or, then we must prevent propagation of a
     // poison value from the RHS by inserting freeze.
     if (IsLogical)
@@ -759,7 +759,7 @@ Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
   }
 
   // This simplification is only valid if the upper range is not negative.
-  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
+  KnownBits Known = computeKnownBits(RangeEnd, Cmp1);
   if (!Known.isNonNegative())
     return nullptr;
 
@@ -800,8 +800,7 @@ foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
   if (!match(LHS, m_OneUse(m_ICmp(Pred, m_Value(Op), m_Zero()))) ||
       !match(RHS, m_OneUse(m_c_ICmp(Pred, m_Specific(Op), m_Value(Pow2)))) ||
       match(Pow2, m_One()) ||
-      !isKnownToBeAPowerOfTwo(Pow2, Q.DL, /*OrZero=*/true, /*Depth=*/0, Q.AC,
-                              Q.CxtI, Q.DT))
+      !isKnownToBeAPowerOfTwo(Pow2, Q.DL, /*OrZero=*/true, Q.AC, Q.CxtI, Q.DT))
     return nullptr;
 
   Value *And = Builder.CreateAnd(Op, Pow2);
@@ -2447,7 +2446,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
   if (match(&I, m_c_And(m_Value(Y), m_OneUse(m_CombineOr(
                                         m_c_Add(m_Value(X), m_Deferred(Y)),
                                         m_Sub(m_Value(X), m_Deferred(Y)))))) &&
-      isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, /*Depth*/ 0, &I))
+      isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
     return BinaryOperator::CreateAnd(Builder.CreateNot(X), Y);
 
   if (match(Op1, m_APInt(C))) {
@@ -2572,13 +2571,13 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
         match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
       APInt NotAndMask(~(*C));
       BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
-      if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
+      if (MaskedValueIsZero(X, NotAndMask, &I)) {
         // Not masking anything out for the LHS, move mask to RHS.
         // and ({x}or X, Y), C --> {x}or X, (and Y, C)
         Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
         return BinaryOperator::Create(BinOp, X, NewRHS);
       }
-      if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
+      if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, &I)) {
         // Not masking anything out for the RHS, move mask to LHS.
         // and ({x}or X, Y), C --> {x}or (and X, C), Y
         Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
@@ -2613,7 +2612,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
           match(C1, m_Power2())) {
         Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
         Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3);
-        KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr);
+        KnownBits KnownLShrc = computeKnownBits(LshrC, nullptr);
         if (KnownLShrc.getMaxValue().ult(Width)) {
           // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
           // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
@@ -2814,7 +2813,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
                         m_Value(B)))) {
     if (A->getType()->isIntOrIntVectorTy(1))
       return SelectInst::Create(A, Constant::getNullValue(Ty), B);
-    if (computeKnownBits(A, /* Depth */ 0, &I).countMaxActiveBits() <= 1) {
+    if (computeKnownBits(A, &I).countMaxActiveBits() <= 1) {
       return SelectInst::Create(
           Builder.CreateICmpEQ(A, Constant::getNullValue(A->getType())), B,
           Constant::getNullValue(Ty));
@@ -2954,7 +2953,7 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
       // might remove it after this fold). This still doesn't guarantee that the
       // final codegen will match this original pattern.
       if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
-        KnownBits KnownL = computeKnownBits(L, /*Depth*/ 0, &Or);
+        KnownBits KnownL = computeKnownBits(L, &Or);
         return KnownL.getMaxValue().ult(Width) ? L : nullptr;
       }
 
@@ -3725,7 +3724,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   Value *X, *Y;
   const APInt *CV;
   if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
-      !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) {
+      !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, &I)) {
     // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
     // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
     Value *Or = Builder.CreateOr(X, Y);
@@ -3769,14 +3768,14 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
         // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
         // iff (C0 & C1) == 0 and (X & ~C0) == 0
         if (match(A, m_c_Or(m_Value(X), m_Specific(B))) &&
-            MaskedValueIsZero(X, ~*C0, 0, &I)) {
+            MaskedValueIsZero(X, ~*C0, &I)) {
           Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
           return BinaryOperator::CreateAnd(A, C01);
         }
         // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
         // iff (C0 & C1) == 0 and (X & ~C1) == 0
         if (match(B, m_c_Or(m_Value(X), m_Specific(A))) &&
-            MaskedValueIsZero(X, ~*C1, 0, &I)) {
+            MaskedValueIsZero(X, ~*C1, &I)) {
           Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
           return BinaryOperator::CreateAnd(B, C01);
         }
@@ -4156,7 +4155,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
   // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
   if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C1)))) &&
       match(Op1, m_APInt(C2))) {
-    KnownBits KnownX = computeKnownBits(X, /*Depth*/ 0, &I);
+    KnownBits KnownX = computeKnownBits(X, &I);
     if ((KnownX.One & *C2) == *C2)
       return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *C1 | *C2));
   }
@@ -4310,7 +4309,7 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
         LC->isZero() && RC->isZero() && LHS->hasOneUse() && RHS->hasOneUse() &&
         match(LHS0, m_And(m_Value(X), m_Value(Pow2))) &&
         match(RHS0, m_And(m_Value(Y), m_Specific(Pow2))) &&
-        isKnownToBeAPowerOfTwo(Pow2, /*OrZero=*/true, /*Depth=*/0, &I)) {
+        isKnownToBeAPowerOfTwo(Pow2, /*OrZero=*/true, &I)) {
       Value *Xor = Builder.CreateXor(X, Y);
       Value *And = Builder.CreateAnd(Xor, Pow2);
       return Builder.CreateICmp(PredL == PredR ? ICmpInst::ICMP_NE
@@ -4896,7 +4895,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
 
       // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
       if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
-          MaskedValueIsZero(X, *C, 0, &I))
+          MaskedValueIsZero(X, *C, &I))
         return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));
 
       // When X is a power-of-two or zero and zero input is poison:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e101edf4a6208..cfb4af391b540 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -601,7 +601,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
     return BO;
   }
 
-  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
+  KnownBits Known = IC.computeKnownBits(Op0, &II);
 
   // Create a mask for bits above (ctlz) or below (cttz) the first known one.
   unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
@@ -684,7 +684,7 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
   }
 
   KnownBits Known(BitWidth);
-  IC.computeKnownBits(Op0, Known, 0, &II);
+  IC.computeKnownBits(Op0, Known, &II);
 
   // If all bits are zero except for exactly one fixed bit, then the result
   // must be 0 or 1, and we can get that answer by shifting to LSB:
@@ -1099,7 +1099,7 @@ Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
 }
 
 static std::optional<bool> getKnownSign(Value *Op, const SimplifyQuery &SQ) {
-  KnownBits Known = computeKnownBits(Op, /*Depth=*/0, SQ);
+  KnownBits Known = computeKnownBits(Op, SQ);
   if (Known.isNonNegative())
     return false;
   if (Known.isNegative())
@@ -1222,9 +1222,8 @@ Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
 
   // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
   // is usually achieved via a sext from a smaller type.
-  if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
-          NewBitWidth ||
-      ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
+  if (ComputeMaxSignificantBits(AddSub->getOperand(0), AddSub) > NewBitWidth ||
+      ComputeMaxSignificantBits(AddSub->getOperand(1), AddSub) > NewBitWidth)
     return nullptr;
 
   // Finally create and return the sat intrinsic, truncated to the new type
@@ -2160,7 +2159,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       }
     }
 
-    KnownBits Known = computeKnownBits(IIOperand, 0, II);
+    KnownBits Known = computeKnownBits(IIOperand, II);
     uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
     uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
     unsigned BW = Known.getBitWidth();
@@ -2752,7 +2751,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
   case Intrinsic::copysign: {
     Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
     if (std::optional<bool> KnownSignBit = computeKnownFPSignBit(
-            Sign, /*Depth=*/0, getSimplifyQuery().getWithInstruction(II))) {
+            Sign, getSimplifyQuery().getWithInstruction(II))) {
       if (*KnownSignBit) {
         // If we know that the sign argument is negative, reduce to FNABS:
         // copysign Mag, -Sign --> fneg (fabs Mag)
@@ -3100,8 +3099,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
           ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
         break;
       Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
-      uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
-      uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
+      uint64_t Bytes1 = computeKnownBits(Bytes, Op0).One.getZExtValue();
+      uint64_t Mask1 = computeKnownBits(Mask, II).One.getZExtValue();
       // Check if every byte has common bits in Bytes and Mask.
       uint64_t C = Bytes1 & Mask1;
       if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
@@ -3392,7 +3391,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     // If there is a dominating assume with the same condition as this one,
     // then this one is redundant, and should be removed.
     KnownBits Known(1);
-    computeKnownBits(IIOperand, Known, 0, II);
+    computeKnownBits(IIOperand, Known, II);
     if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
       return eraseInstFromFunction(*II);
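
Inside InstCombine the calls go through InstCombiner's member wrappers, and
both flavors move identically: the value-returning form and the in-place form
that fills a caller-provided KnownBits. A sketch, assuming the wrappers
declared in InstCombiner.h and a live IntrinsicInst *II with operand
IIOperand:

    // Value-returning form: only the context instruction remains;
    // the trailing Depth is defaulted away.
    KnownBits Known = computeKnownBits(IIOperand, II);

    // In-place form: the KnownBits out-parameter keeps its slot and the
    // trailing Depth is likewise omitted.
    KnownBits Cond(1);
    computeKnownBits(IIOperand, Cond, II);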
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9088b5bdec78b..d234a0566e191 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -295,8 +295,8 @@ static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
     APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
     // Do not preserve the original context instruction. Simplifying div/rem
     // based on later context may introduce a trap.
-    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, I) &&
-        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, I)) {
+    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, I) &&
+        IC.MaskedValueIsZero(I->getOperand(1), Mask, I)) {
       return canEvaluateTruncated(I->getOperand(0), Ty, IC, I) &&
              canEvaluateTruncated(I->getOperand(1), Ty, IC, I);
     }
@@ -321,7 +321,7 @@ static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
     //       zero - use AmtKnownBits.getMaxValue().
     uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
     uint32_t BitWidth = Ty->getScalarSizeInBits();
-    KnownBits AmtKnownBits = IC.computeKnownBits(I->getOperand(1), 0, CxtI);
+    KnownBits AmtKnownBits = IC.computeKnownBits(I->getOperand(1), CxtI);
     APInt MaxShiftAmt = AmtKnownBits.getMaxValue();
     APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
     if (MaxShiftAmt.ult(BitWidth)) {
@@ -333,7 +333,7 @@ static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
           return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
                  canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
       }
-      if (IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI))
+      if (IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, CxtI))
         return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
                canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
     }
@@ -351,7 +351,7 @@ static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
         llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
     unsigned ShiftedBits = OrigBitWidth - BitWidth;
     if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
-        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
+        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), CxtI))
       return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
              canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
     break;
@@ -595,7 +595,7 @@ Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
   // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
   // truncated, so those do not matter.
   APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
-  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
+  if (!MaskedValueIsZero(ShVal1, HiBitMask, &Trunc))
     return nullptr;
 
   // Adjust the width of ShAmt for narrowed funnel shift operation:
@@ -951,13 +951,13 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
 
   bool Changed = false;
   if (!Trunc.hasNoSignedWrap() &&
-      ComputeMaxSignificantBits(Src, /*Depth=*/0, &Trunc) <= DestWidth) {
+      ComputeMaxSignificantBits(Src, &Trunc) <= DestWidth) {
     Trunc.setHasNoSignedWrap(true);
     Changed = true;
   }
   if (!Trunc.hasNoUnsignedWrap() &&
       MaskedValueIsZero(Src, APInt::getBitsSetFrom(SrcWidth, DestWidth),
-                        /*Depth=*/0, &Trunc)) {
+                        &Trunc)) {
     Trunc.setHasNoUnsignedWrap(true);
     Changed = true;
   }
@@ -1000,7 +1000,7 @@ Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp,
     if (Op1CV->isZero() && Cmp->isEquality()) {
       // Exactly 1 possible 1? But not the high-bit because that is
       // canonicalized to this form.
-      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);
+      KnownBits Known = computeKnownBits(Cmp->getOperand(0), &Zext);
       APInt KnownZeroMask(~Known.Zero);
       uint32_t ShAmt = KnownZeroMask.logBase2();
       bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
@@ -1109,7 +1109,7 @@ static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
       unsigned VSize = V->getType()->getScalarSizeInBits();
       if (IC.MaskedValueIsZero(I->getOperand(1),
                                APInt::getHighBitsSet(VSize, BitsToClear),
-                               0, CxtI)) {
+                               CxtI)) {
         // If this is an And instruction and all of the BitsToClear are
         // known to be zero we can reset BitsToClear.
         if (I->getOpcode() == Instruction::And)
@@ -1229,10 +1229,9 @@ Instruction *InstCombinerImpl::visitZExt(ZExtInst &Zext) {
 
     // If the high bits are already filled with zeros, just replace this
     // cast with the result.
-    if (MaskedValueIsZero(Res,
-                          APInt::getHighBitsSet(DestBitSize,
-                                                DestBitSize - SrcBitsKept),
-                             0, &Zext))
+    if (MaskedValueIsZero(
+            Res, APInt::getHighBitsSet(DestBitSize, DestBitSize - SrcBitsKept),
+            &Zext))
       return replaceInstUsesWith(Zext, Res);
 
     // We need to emit an AND to clear the high bits.
@@ -1369,7 +1368,7 @@ Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *Cmp,
     // the icmp and sext into bitwise/integer operations.
     if (Cmp->hasOneUse() &&
         Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
-      KnownBits Known = computeKnownBits(Op0, 0, &Sext);
+      KnownBits Known = computeKnownBits(Op0, &Sext);
 
       APInt KnownZeroMask(~Known.Zero);
       if (KnownZeroMask.isPowerOf2()) {
@@ -1509,7 +1508,7 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
 
     // If the high bits are already filled with sign bit, just replace this
     // cast with the result.
-    if (ComputeNumSignBits(Res, 0, &Sext) > DestBitSize - SrcBitSize)
+    if (ComputeNumSignBits(Res, &Sext) > DestBitSize - SrcBitSize)
       return replaceInstUsesWith(Sext, Res);
 
     // We need to emit a shl + ashr to do the sign extend.
@@ -1523,7 +1522,7 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
     // If the input has more sign bits than bits truncated, then convert
     // directly to final type.
     unsigned XBitSize = X->getType()->getScalarSizeInBits();
-    if (ComputeNumSignBits(X, 0, &Sext) > XBitSize - SrcBitSize)
+    if (ComputeNumSignBits(X, &Sext) > XBitSize - SrcBitSize)
       return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
 
     // If input is a trunc from the destination type, then convert into shifts.
@@ -1748,7 +1747,7 @@ static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC) {
   // TODO:
   // Try harder to find if the source integer type has less significant bits.
   // For example, compute number of sign bits.
-  KnownBits SrcKnown = IC.computeKnownBits(Src, 0, &I);
+  KnownBits SrcKnown = IC.computeKnownBits(Src, &I);
   int SigBits = (int)SrcTy->getScalarSizeInBits() -
                 SrcKnown.countMinLeadingZeros() -
                 SrcKnown.countMinTrailingZeros();
@@ -2007,9 +2006,8 @@ static Instruction *foldFPtoI(Instruction &FI, InstCombiner &IC) {
   // fpto{u/s}i non-norm --> 0
   FPClassTest Mask =
       FI.getOpcode() == Instruction::FPToUI ? fcPosNormal : fcNormal;
-  KnownFPClass FPClass =
-      computeKnownFPClass(FI.getOperand(0), Mask, /*Depth=*/0,
-                          IC.getSimplifyQuery().getWithInstruction(&FI));
+  KnownFPClass FPClass = computeKnownFPClass(
+      FI.getOperand(0), Mask, IC.getSimplifyQuery().getWithInstruction(&FI));
   if (FPClass.isKnownNever(Mask))
     return IC.replaceInstUsesWith(FI, ConstantInt::getNullValue(FI.getType()));
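
The floating-point queries (computeKnownFPClass, isKnownNeverNaN,
isKnownNeverInfinity, cannotBeNegativeZero, ...) follow the same rule, with
the SimplifyQuery sliding forward into Depth's old slot. A sketch, assuming a
Value *Op and a query Q built via getWithInstruction:

    // Top-level query, as in the fpto{u/s}i hunk above: Depth omitted.
    KnownFPClass FPC = computeKnownFPClass(Op, fcNormal, Q);

    // A recursive analysis instead appends the depth explicitly.
    KnownFPClass Deeper = computeKnownFPClass(Op, fcNormal, Q, /*Depth=*/1);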
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bdc7a49700cfc..c112fae351817 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1119,8 +1119,8 @@ static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
   // This is only really a signed overflow check if the inputs have been
   // sign-extended; check for that condition. For example, if CI2 is 2^31 and
   // the operands of the add are 64 bits wide, we need at least 33 sign bits.
-  if (IC.ComputeMaxSignificantBits(A, 0, &I) > NewWidth ||
-      IC.ComputeMaxSignificantBits(B, 0, &I) > NewWidth)
+  if (IC.ComputeMaxSignificantBits(A, &I) > NewWidth ||
+      IC.ComputeMaxSignificantBits(B, &I) > NewWidth)
     return nullptr;
 
   // In order to replace the original add with a narrower
@@ -1183,7 +1183,7 @@ Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
     return nullptr;
-  if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
+  if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
     return nullptr;
   // This may increase instruction count, we don't enforce that Y is a constant.
   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
@@ -1250,8 +1250,8 @@ Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
   Value *X, *Y;
   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
       ICmpInst::isEquality(Pred)) {
-    KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
-    KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
+    KnownBits XKnown = computeKnownBits(X, &Cmp);
+    KnownBits YKnown = computeKnownBits(Y, &Cmp);
     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
       return new ICmpInst(Pred, X, Cmp.getOperand(1));
   }
@@ -1261,13 +1261,13 @@ Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
   if (match(Cmp.getOperand(0), m_Mul(m_Value(X), m_Value(Y))) &&
       ICmpInst::isEquality(Pred)) {
 
-    KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
+    KnownBits XKnown = computeKnownBits(X, &Cmp);
     // if X % 2 != 0
     //    (icmp eq/ne Y)
     if (XKnown.countMaxTrailingZeros() == 0)
       return new ICmpInst(Pred, Y, Cmp.getOperand(1));
 
-    KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
+    KnownBits YKnown = computeKnownBits(Y, &Cmp);
     // if Y % 2 != 0
     //    (icmp eq/ne X)
     if (YKnown.countMaxTrailingZeros() == 0)
@@ -1487,7 +1487,7 @@ Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
 
     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
     // of the high bits truncated out of x are known.
-    KnownBits Known = computeKnownBits(X, 0, &Cmp);
+    KnownBits Known = computeKnownBits(X, &Cmp);
 
     // If all the high bits are known, we can do this xform.
     if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {
@@ -1808,7 +1808,7 @@ Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
     }
 
     APInt NewC2 = *C2;
-    KnownBits Know = computeKnownBits(And->getOperand(0), 0, And);
+    KnownBits Know = computeKnownBits(And->getOperand(0), And);
     // Set high zeros of C2 to allow matching negated power-of-2.
     NewC2 = *C2 | APInt::getHighBitsSet(C2->getBitWidth(),
                                         Know.countMinLeadingZeros());
@@ -3770,8 +3770,7 @@ static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs,
   if (((I.isEquality() || Pred == ICmpInst::ICMP_UGT) && CRhs == 1) ||
       (Pred == ICmpInst::ICMP_ULT && CRhs == 2)) {
     Value *Op = CtpopLhs->getArgOperand(0);
-    KnownBits OpKnown = computeKnownBits(Op, Q.DL,
-                                         /*Depth*/ 0, Q.AC, Q.CxtI, Q.DT);
+    KnownBits OpKnown = computeKnownBits(Op, Q.DL, Q.AC, Q.CxtI, Q.DT);
     // No need to check for count > 1, that should be already constant folded.
     if (OpKnown.countMinPopulation() == 1) {
       Value *And = Builder.CreateAnd(
@@ -4441,13 +4440,13 @@ static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q,
     // Pow2 - 1 is a Mask.
     if (!Not && match(I->getOperand(1), m_AllOnes()))
       return isKnownToBeAPowerOfTwo(I->getOperand(0), Q.DL, /*OrZero*/ true,
-                                    Depth, Q.AC, Q.CxtI, Q.DT);
+                                    Q.AC, Q.CxtI, Q.DT, Depth);
     break;
   case Instruction::Sub:
     // -Pow2 is a ~Mask.
     if (Not && match(I->getOperand(0), m_Zero()))
       return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero*/ true,
-                                    Depth, Q.AC, Q.CxtI, Q.DT);
+                                    Q.AC, Q.CxtI, Q.DT, Depth);
     break;
   case Instruction::Call: {
     if (auto *II = dyn_cast<IntrinsicInst>(I)) {
@@ -5008,7 +5007,7 @@ static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
   if (!ICmpInst::isSigned(Pred))
     return nullptr;
 
-  KnownBits KnownY = IC.computeKnownBits(A, /*Depth=*/0, &I);
+  KnownBits KnownY = IC.computeKnownBits(A, &I);
   // (X & NegY) spred X --> (X & NegY) upred X
   if (KnownY.isNegative())
     return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
@@ -5440,7 +5439,7 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
          match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y))))) {
       if (ICmpInst::isSigned(Pred)) {
         if (Op0HasNSW && Op1HasNSW) {
-          KnownBits ZKnown = computeKnownBits(Z, 0, &I);
+          KnownBits ZKnown = computeKnownBits(Z, &I);
           if (ZKnown.isStrictlyPositive())
             return new ICmpInst(Pred, X, Y);
           if (ZKnown.isNegative())
@@ -5463,7 +5462,7 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
               isKnownNonEqual(X, Y, SQ))
             return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
 
-          KnownBits ZKnown = computeKnownBits(Z, 0, &I);
+          KnownBits ZKnown = computeKnownBits(Z, &I);
           // if Z % 2 != 0
           //    X * Z eq/ne Y * Z -> X eq/ne Y
           if (ZKnown.countMaxTrailingZeros() == 0)
@@ -6144,12 +6143,12 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
   // 1. A & B_Pow2 != B_Pow2 -> A & B_Pow2 == 0
   // 2. A & B_Pow2 == B_Pow2 -> A & B_Pow2 != 0
   if (match(Op0, m_c_And(m_Specific(Op1), m_Value())) &&
-      isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, 0, &I))
+      isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, &I))
     return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
                         ConstantInt::getNullValue(Op0->getType()));
 
   if (match(Op1, m_c_And(m_Specific(Op0), m_Value())) &&
-      isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, 0, &I))
+      isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, &I))
     return new ICmpInst(CmpInst::getInversePredicate(Pred), Op1,
                         ConstantInt::getNullValue(Op1->getType()));
 
@@ -6187,7 +6186,7 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
                    m_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)), m_Zero())))
       IsZero = true;
 
-    if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, /*Depth*/ 0, &I))
+    if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, &I))
       // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
       //    -> (icmp eq/ne (and X, P2), 0)
       // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
@@ -6840,11 +6839,10 @@ Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
     // (especially IndVarSimplify) may not be able to reliably undo.
     SimplifyQuery Q = SQ.getWithoutDomCondCache().getWithInstruction(&I);
     if (SimplifyDemandedBits(&I, 0, getDemandedBitsLHSMask(I, BitWidth),
-                             Op0Known, /*Depth=*/0, Q))
+                             Op0Known, Q))
       return &I;
 
-    if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known,
-                             /*Depth=*/0, Q))
+    if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, Q))
       return &I;
   }
 
@@ -7755,7 +7753,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
     // and       (X & ~Y) != 0 --> (X & Y) == 0
     // if A is a power of 2.
     if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
-        match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, 0, &I) &&
+        match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, &I) &&
         I.isEquality())
       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
                           Op1);
@@ -8378,10 +8376,9 @@ static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
     // flag then we can assume we do not have that case. Otherwise we might be
     // able to prove that either X or Y is not infinity.
     if (!LHSI->hasNoNaNs() && !LHSI->hasNoInfs() &&
-        !isKnownNeverInfinity(Y, /*Depth=*/0,
+        !isKnownNeverInfinity(Y,
                               CI.getSimplifyQuery().getWithInstruction(&I)) &&
-        !isKnownNeverInfinity(X, /*Depth=*/0,
-                              CI.getSimplifyQuery().getWithInstruction(&I)))
+        !isKnownNeverInfinity(X, CI.getSimplifyQuery().getWithInstruction(&I)))
       break;
 
     [[fallthrough]];
@@ -8531,11 +8528,11 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
   // then canonicalize the operand to 0.0.
   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
     if (!match(Op0, m_PosZeroFP()) &&
-        isKnownNeverNaN(Op0, 0, getSimplifyQuery().getWithInstruction(&I)))
+        isKnownNeverNaN(Op0, getSimplifyQuery().getWithInstruction(&I)))
       return replaceOperand(I, 0, ConstantFP::getZero(OpType));
 
     if (!match(Op1, m_PosZeroFP()) &&
-        isKnownNeverNaN(Op1, 0, getSimplifyQuery().getWithInstruction(&I)))
+        isKnownNeverNaN(Op1, getSimplifyQuery().getWithInstruction(&I)))
       return replaceOperand(I, 1, ConstantFP::getZero(OpType));
   }
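
The isMaskOrZero hunks above are the template for helpers that recurse with a
live depth: nothing about the recursion changes except that Depth now rides
last, after the analysis pointers pulled out of the query. A reduced sketch of
that call shape (hypothetical helper; the real predicate handles many more
cases):

    // Hypothetical recursive predicate threading Depth in its new position.
    static bool isNegPow2Mask(const Instruction *I, const SimplifyQuery &Q,
                              unsigned Depth) {
      // -Pow2 is a ~Mask: forward the current depth as the trailing argument.
      return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero=*/true,
                                    Q.AC, Q.CxtI, Q.DT, Depth);
    }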
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 5e0cd17fb1924..334462d715f95 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -204,8 +204,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                                    const Instruction *CtxI = nullptr,
                                    unsigned Depth = 0) const {
     return llvm::computeKnownFPClass(
-        Val, FMF, Interested, Depth,
-        getSimplifyQuery().getWithInstruction(CtxI));
+        Val, FMF, Interested, getSimplifyQuery().getWithInstruction(CtxI),
+        Depth);
   }
 
   KnownFPClass computeKnownFPClass(Value *Val,
@@ -213,7 +213,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                                    const Instruction *CtxI = nullptr,
                                    unsigned Depth = 0) const {
     return llvm::computeKnownFPClass(
-        Val, Interested, Depth, getSimplifyQuery().getWithInstruction(CtxI));
+        Val, Interested, getSimplifyQuery().getWithInstruction(CtxI), Depth);
   }
 
   /// Check if fmul \p MulVal, +0.0 will yield +0.0 (or signed zero is
@@ -558,20 +558,22 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   /// Attempts to replace I with a simpler value based on the demanded
   /// bits.
   Value *SimplifyDemandedUseBits(Instruction *I, const APInt &DemandedMask,
-                                 KnownBits &Known, unsigned Depth,
-                                 const SimplifyQuery &Q);
+                                 KnownBits &Known, const SimplifyQuery &Q,
+                                 unsigned Depth = 0);
   using InstCombiner::SimplifyDemandedBits;
   bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                             const APInt &DemandedMask, KnownBits &Known,
-                            unsigned Depth, const SimplifyQuery &Q) override;
+                            const SimplifyQuery &Q,
+                            unsigned Depth = 0) override;
 
   /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
   /// bits. It also tries to handle simplifications that can be done based on
   /// DemandedMask, but without modifying the Instruction.
   Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                          const APInt &DemandedMask,
-                                         KnownBits &Known, unsigned Depth,
-                                         const SimplifyQuery &Q);
+                                         KnownBits &Known,
+                                         const SimplifyQuery &Q,
+                                         unsigned Depth = 0);
 
   /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
   /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
@@ -591,8 +593,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   /// Attempts to replace V with a simpler value based on the demanded
   /// floating-point classes
   Value *SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask,
-                                    KnownFPClass &Known, unsigned Depth,
-                                    Instruction *CxtI);
+                                    KnownFPClass &Known, Instruction *CxtI,
+                                    unsigned Depth = 0);
   bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
                                FPClassTest DemandedMask, KnownFPClass &Known,
                                unsigned Depth = 0);
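
This header is the one spot where the reorder is more than call-site churn:
SimplifyDemandedBits is virtual, so the base declaration in InstCombiner.h and
this override have to move Depth in lockstep, and because C++ binds default
arguments statically (by the pointer's static type, not the dynamic one), the
= 0 default should stay identical on both sides. A reduced sketch of the
pattern (names abbreviated; the real classes carry many more members):

    struct Base {
      virtual bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                                        const APInt &DemandedMask,
                                        KnownBits &Known,
                                        const SimplifyQuery &Q,
                                        unsigned Depth = 0) = 0;
    };
    struct Impl final : Base {
      // Override keeps the same order and the same default.
      bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                                const APInt &DemandedMask, KnownBits &Known,
                                const SimplifyQuery &Q,
                                unsigned Depth = 0) override;
    };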
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index c29cba6f675c5..324e6022f3f05 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -918,7 +918,7 @@ static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
   // first non-zero index.
   auto IsAllNonNegative = [&]() {
     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
-      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
+      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), MemI);
       if (Known.isNonNegative())
         continue;
       return false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index c7023eb79b04e..457199a72510e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -66,7 +66,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
   // inexact.  Similarly for <<.
   BinaryOperator *I = dyn_cast<BinaryOperator>(V);
   if (I && I->isLogicalShift() &&
-      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
+      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, &CxtI)) {
     // We know that this is an exact/nuw shift and that the input is a
     // non-zero context as well.
     if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
@@ -987,9 +987,8 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
   // X * -0.0 --> copysign(0.0, -X)
   const APFloat *FPC;
   if (match(Op1, m_APFloatAllowPoison(FPC)) && FPC->isZero() &&
-      ((I.hasNoInfs() &&
-        isKnownNeverNaN(Op0, /*Depth=*/0, SQ.getWithInstruction(&I))) ||
-       isKnownNeverNaN(&I, /*Depth=*/0, SQ.getWithInstruction(&I)))) {
+      ((I.hasNoInfs() && isKnownNeverNaN(Op0, SQ.getWithInstruction(&I))) ||
+       isKnownNeverNaN(&I, SQ.getWithInstruction(&I)))) {
     if (FPC->isNegative())
       Op0 = Builder.CreateFNegFMF(Op0, &I);
     CallInst *CopySign = Builder.CreateIntrinsic(Intrinsic::copysign,
@@ -1720,7 +1719,7 @@ Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
       return Log2;
 
     // Op0 udiv Op1 -> Op0 lshr cttz(Op1), if Op1 is a power of 2.
-    if (isKnownToBeAPowerOfTwo(Denom, /*OrZero=*/true, /*Depth=*/0, &I))
+    if (isKnownToBeAPowerOfTwo(Denom, /*OrZero=*/true, &I))
       // This will increase instruction count but it's okay
       // since bitwise operations are substantially faster than
       // division.
@@ -1830,7 +1829,7 @@ Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
                               ConstantInt::getAllOnesValue(Ty));
   }
 
-  KnownBits KnownDividend = computeKnownBits(Op0, 0, &I);
+  KnownBits KnownDividend = computeKnownBits(Op0, &I);
   if (!I.isExact() &&
       (match(Op1, m_Power2(Op1C)) || match(Op1, m_NegatedPower2(Op1C))) &&
       KnownDividend.countMinTrailingZeros() >= Op1C->countr_zero()) {
@@ -1855,7 +1854,7 @@ Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
       return BinaryOperator::CreateNeg(Shr);
     }
 
-    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
+    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, &I)) {
       // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
       // Safe because the only negative value (1 << Y) can take on is
       // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
@@ -2409,7 +2408,7 @@ Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
   // X urem Y -> X and Y-1, where Y is a power of 2,
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   Type *Ty = I.getType();
-  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
+  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, &I)) {
     // This may increase instruction count, we don't enforce that Y is a
     // constant.
     Constant *N1 = Constant::getAllOnesValue(Ty);
@@ -2492,8 +2491,7 @@ Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
   // If the sign bits of both operands are zero (i.e. we can prove they are
   // unsigned inputs), turn this into a urem.
   APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
-  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
-      MaskedValueIsZero(Op0, Mask, 0, &I)) {
+  if (MaskedValueIsZero(Op1, Mask, &I) && MaskedValueIsZero(Op0, Mask, &I)) {
     // X srem Y -> X urem Y, iff X and Y don't have sign bit set
     return BinaryOperator::CreateURem(Op0, Op1, I.getName());
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index cca5705f6b58d..d7d0431a5b8d0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -100,7 +100,7 @@ static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
   // transform. Bail out if we can not exclude that possibility.
   if (isa<FPMathOperator>(BO))
     if (!BO->hasNoSignedZeros() &&
-        !cannotBeNegativeZero(Y, 0,
+        !cannotBeNegativeZero(Y,
                               IC.getSimplifyQuery().getWithInstruction(&Sel)))
       return nullptr;
 
@@ -2800,9 +2800,8 @@ static Instruction *foldSelectWithFCmpToFabs(SelectInst &SI,
     if (match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(X))) &&
         (cast<FPMathOperator>(CondVal)->hasNoNaNs() || SI.hasNoNaNs() ||
          (SI.hasOneUse() && canIgnoreSignBitOfNaN(*SI.use_begin())) ||
-         isKnownNeverNaN(X, /*Depth=*/0,
-                         IC.getSimplifyQuery().getWithInstruction(
-                             cast<Instruction>(CondVal))))) {
+         isKnownNeverNaN(X, IC.getSimplifyQuery().getWithInstruction(
+                                cast<Instruction>(CondVal))))) {
       if (!Swap && (Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)) {
         Value *Fabs = IC.Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, &SI);
         return IC.replaceInstUsesWith(SI, Fabs);
@@ -3757,8 +3756,7 @@ static Value *foldSelectBitTest(SelectInst &Sel, Value *CondVal, Value *TrueVal,
       assert(ICmpInst::isEquality(Res->Pred) && "Not equality test?");
       AndMask = Res->Mask;
       V = Res->X;
-      KnownBits Known =
-          computeKnownBits(V, /*Depth=*/0, SQ.getWithInstruction(&Sel));
+      KnownBits Known = computeKnownBits(V, SQ.getWithInstruction(&Sel));
       AndMask &= Known.getMaxValue();
       if (!AndMask.isPowerOf2())
         return nullptr;
@@ -4185,7 +4183,7 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
   // the assumption cache, so make sure that is populated.
   if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
     KnownBits Known(1);
-    computeKnownBits(CondVal, Known, 0, &SI);
+    computeKnownBits(CondVal, Known, &SI);
     if (Known.One.isOne())
       return replaceInstUsesWith(SI, TrueVal);
     if (Known.Zero.isOne())
@@ -4340,7 +4338,7 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
     if (!CC.AffectedValues.empty()) {
       if (!isa<Constant>(TrueVal) &&
           hasAffectedValue(TrueVal, CC.AffectedValues, /*Depth=*/0)) {
-        KnownBits Known = llvm::computeKnownBits(TrueVal, /*Depth=*/0, Q);
+        KnownBits Known = llvm::computeKnownBits(TrueVal, Q);
         if (Known.isConstant())
           return replaceOperand(SI, 1,
                                 ConstantInt::get(SelType, Known.getConstant()));
@@ -4349,7 +4347,7 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
       CC.Invert = true;
       if (!isa<Constant>(FalseVal) &&
           hasAffectedValue(FalseVal, CC.AffectedValues, /*Depth=*/0)) {
-        KnownBits Known = llvm::computeKnownBits(FalseVal, /*Depth=*/0, Q);
+        KnownBits Known = llvm::computeKnownBits(FalseVal, Q);
         if (Known.isConstant())
           return replaceOperand(SI, 2,
                                 ConstantInt::get(SelType, Known.getConstant()));
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 7d369a1639a37..550f095b26ba4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -567,7 +567,7 @@ static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
     unsigned MaskShift =
         IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
     APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
-    if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
+    if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, CxtI))
       return true;
   }
 
@@ -1003,13 +1003,13 @@ static bool setShiftFlags(BinaryOperator &I, const SimplifyQuery &Q) {
   }
 
   // Compute what we know about shift count.
-  KnownBits KnownCnt = computeKnownBits(I.getOperand(1), /* Depth */ 0, Q);
+  KnownBits KnownCnt = computeKnownBits(I.getOperand(1), Q);
   unsigned BitWidth = KnownCnt.getBitWidth();
   // Since shift produces a poison value if RHS is equal to or larger than the
   // bit width, we can safely assume that RHS is less than the bit width.
   uint64_t MaxCnt = KnownCnt.getMaxValue().getLimitedValue(BitWidth - 1);
 
-  KnownBits KnownAmt = computeKnownBits(I.getOperand(0), /* Depth */ 0, Q);
+  KnownBits KnownAmt = computeKnownBits(I.getOperand(0), Q);
   bool Changed = false;
 
   if (I.getOpcode() == Instruction::Shl) {
@@ -1021,8 +1021,8 @@ static bool setShiftFlags(BinaryOperator &I, const SimplifyQuery &Q) {
     // If we have more sign bits than maximum shift cnt we have nsw.
     if (!I.hasNoSignedWrap()) {
       if (MaxCnt < KnownAmt.countMinSignBits() ||
-          MaxCnt < ComputeNumSignBits(I.getOperand(0), Q.DL, /*Depth*/ 0, Q.AC,
-                                      Q.CxtI, Q.DT)) {
+          MaxCnt <
+              ComputeNumSignBits(I.getOperand(0), Q.DL, Q.AC, Q.CxtI, Q.DT)) {
         I.setHasNoSignedWrap();
         Changed = true;
       }
@@ -1068,7 +1068,7 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
     if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
       unsigned SrcWidth = X->getType()->getScalarSizeInBits();
       if (ShAmtC < SrcWidth &&
-          MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmtC), 0, &I))
+          MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmtC), &I))
         return new ZExtInst(Builder.CreateShl(X, ShAmtC), Ty);
     }
 
@@ -1311,8 +1311,8 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
 
   // Fold (X + Y) / 2 --> (X & Y) iff (X u<= 1) && (Y u<= 1)
   if (match(Op0, m_Add(m_Value(X), m_Value(Y))) && match(Op1, m_One()) &&
-      computeKnownBits(X, /*Depth=*/0, &I).countMaxActiveBits() <= 1 &&
-      computeKnownBits(Y, /*Depth=*/0, &I).countMaxActiveBits() <= 1)
+      computeKnownBits(X, &I).countMaxActiveBits() <= 1 &&
+      computeKnownBits(Y, &I).countMaxActiveBits() <= 1)
     return BinaryOperator::CreateAnd(X, Y);
 
   // (sub nuw X, (Y << nuw Z)) >>u exact Z --> (X >>u exact Z) sub nuw Y
@@ -1625,7 +1625,7 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
   if (match(Op0, m_Shl(m_Value(Shl0_Op0), m_Value(Shl0_Op1))) &&
       match(Op1, m_Intrinsic<Intrinsic::cttz>(m_BinOp(Shl1))) &&
       match(Shl1, m_Shl(m_Specific(Shl0_Op0), m_Value(Shl1_Op1))) &&
-      isKnownToBeAPowerOfTwo(Shl0_Op0, /*OrZero=*/true, 0, &I)) {
+      isKnownToBeAPowerOfTwo(Shl0_Op0, /*OrZero=*/true, &I)) {
     auto *Shl0 = cast<BinaryOperator>(Op0);
     bool HasNUW = Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap();
     bool HasNSW = Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap();
@@ -1829,7 +1829,7 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
     return R;
 
   // See if we can turn a signed shr into an unsigned shr.
-  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I)) {
+  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), &I)) {
     Instruction *Lshr = BinaryOperator::CreateLShr(Op0, Op1);
     Lshr->setIsExact(I.isExact());
     return Lshr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index b5c1ee0e01cd0..0e3436d12702d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -75,7 +75,7 @@ bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst,
                                                        KnownBits &Known) {
   APInt DemandedMask(APInt::getAllOnes(Known.getBitWidth()));
   Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
-                                     0, SQ.getWithInstruction(&Inst));
+                                     SQ.getWithInstruction(&Inst));
   if (!V) return false;
   if (V == &Inst) return true;
   replaceInstUsesWith(Inst, V);
@@ -94,12 +94,13 @@ bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst) {
 /// change and false otherwise.
 bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                             const APInt &DemandedMask,
-                                            KnownBits &Known, unsigned Depth,
-                                            const SimplifyQuery &Q) {
+                                            KnownBits &Known,
+                                            const SimplifyQuery &Q,
+                                            unsigned Depth) {
   Use &U = I->getOperandUse(OpNo);
   Value *V = U.get();
   if (isa<Constant>(V)) {
-    llvm::computeKnownBits(V, Known, Depth, Q);
+    llvm::computeKnownBits(V, Known, Q, Depth);
     return false;
   }
 
@@ -112,7 +113,7 @@ bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
 
   Instruction *VInst = dyn_cast<Instruction>(V);
   if (!VInst) {
-    llvm::computeKnownBits(V, Known, Depth, Q);
+    llvm::computeKnownBits(V, Known, Q, Depth);
     return false;
   }
 
@@ -122,12 +123,12 @@ bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
   Value *NewVal;
   if (VInst->hasOneUse()) {
     // If the instruction has one use, we can directly simplify it.
-    NewVal = SimplifyDemandedUseBits(VInst, DemandedMask, Known, Depth, Q);
+    NewVal = SimplifyDemandedUseBits(VInst, DemandedMask, Known, Q, Depth);
   } else {
     // If there are multiple uses of this instruction, then we can simplify
     // VInst to some other value, but not modify the instruction.
     NewVal =
-        SimplifyMultipleUseDemandedBits(VInst, DemandedMask, Known, Depth, Q);
+        SimplifyMultipleUseDemandedBits(VInst, DemandedMask, Known, Q, Depth);
   }
   if (!NewVal) return false;
   if (Instruction* OpInst = dyn_cast<Instruction>(U))
@@ -163,8 +164,8 @@ bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
 Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
                                                  const APInt &DemandedMask,
                                                  KnownBits &Known,
-                                                 unsigned Depth,
-                                                 const SimplifyQuery &Q) {
+                                                 const SimplifyQuery &Q,
+                                                 unsigned Depth) {
   assert(I != nullptr && "Null pointer of Value???");
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
   uint32_t BitWidth = DemandedMask.getBitWidth();
@@ -198,9 +199,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     // significant bit and all those below it.
     DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ);
     if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1, Q) ||
+        SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Q, Depth + 1) ||
         ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1, Q)) {
+        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1)) {
       disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
       return true;
     }
@@ -209,17 +210,17 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
   switch (I->getOpcode()) {
   default:
-    llvm::computeKnownBits(I, Known, Depth, Q);
+    llvm::computeKnownBits(I, Known, Q, Depth);
     break;
   case Instruction::And: {
     // If either the LHS or the RHS are Zero, the result is zero.
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1, Q) ||
-        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
-                             Depth + 1, Q))
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
+        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown, Q,
+                             Depth + 1))
       return I;
 
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
+                                         Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -241,16 +242,16 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
   }
   case Instruction::Or: {
     // If either the LHS or the RHS are One, the result is One.
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1, Q) ||
-        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
-                             Depth + 1, Q)) {
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
+        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown, Q,
+                             Depth + 1)) {
      // Disjoint flag may no longer hold.
       I->dropPoisonGeneratingFlags();
       return I;
     }
 
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
+                                         Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -281,8 +282,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   }
   case Instruction::Xor: {
-    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1, Q) ||
-        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1, Q))
+    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Q, Depth + 1) ||
+        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth + 1))
       return I;
     Value *LHS, *RHS;
     if (DemandedMask == 1 &&
@@ -296,7 +297,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     }
 
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
+                                         Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -373,8 +374,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     break;
   }
   case Instruction::Select: {
-    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1, Q) ||
-        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1, Q))
+    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Q, Depth + 1) ||
+        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Q, Depth + 1))
       return I;
 
     // If the operands are constants, see if we can simplify them.
@@ -415,9 +416,9 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
     // Only known if known in both the LHS and RHS.
     adjustKnownBitsForSelectArm(LHSKnown, I->getOperand(0), I->getOperand(1),
-                                /*Invert=*/false, Depth, Q);
+                                /*Invert=*/false, Q, Depth);
     adjustKnownBitsForSelectArm(RHSKnown, I->getOperand(0), I->getOperand(2),
-                                /*Invert=*/true, Depth, Q);
+                                /*Invert=*/true, Q, Depth);
     Known = LHSKnown.intersectWith(RHSKnown);
     break;
   }
@@ -445,8 +446,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
     APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
     KnownBits InputKnown(SrcBitWidth);
-    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1,
-                             Q)) {
+    if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Q,
+                             Depth + 1)) {
       // For zext nneg, we may have dropped the instruction which made the
       // input non-negative.
       I->dropPoisonGeneratingFlags();
@@ -472,7 +473,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       InputDemandedBits.setBit(SrcBitWidth-1);
 
     KnownBits InputKnown(SrcBitWidth);
-    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1, Q))
+    if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Q, Depth + 1))
       return I;
 
     // If the input sign bit is known zero, or if the NewBits are not demanded
@@ -533,7 +534,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     unsigned NLZ = DemandedMask.countl_zero();
     APInt DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ);
     if (ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1, Q))
+        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1))
       return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
 
     // If low order bits are not demanded and known to be zero in one operand,
@@ -543,7 +544,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     APInt DemandedFromLHS = DemandedFromOps;
     DemandedFromLHS.clearLowBits(NTZ);
     if (ShrinkDemandedConstant(I, 0, DemandedFromLHS) ||
-        SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Depth + 1, Q))
+        SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth + 1))
       return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
 
     // If we are known to be adding zeros to every bit below
@@ -576,7 +577,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     unsigned NLZ = DemandedMask.countl_zero();
     APInt DemandedFromOps = APInt::getLowBitsSet(BitWidth, BitWidth - NLZ);
     if (ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
-        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1, Q))
+        SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Q, Depth + 1))
       return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
 
     // If low order bits are not demanded and are known to be zero in RHS,
@@ -586,7 +587,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     APInt DemandedFromLHS = DemandedFromOps;
     DemandedFromLHS.clearLowBits(NTZ);
     if (ShrinkDemandedConstant(I, 0, DemandedFromLHS) ||
-        SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Depth + 1, Q))
+        SimplifyDemandedBits(I, 0, DemandedFromLHS, LHSKnown, Q, Depth + 1))
       return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ);
 
     // If we are known to be subtracting zeros from every bit below
@@ -639,7 +640,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       return InsertNewInstWith(And1, I->getIterator());
     }
 
-    llvm::computeKnownBits(I, Known, Depth, Q);
+    llvm::computeKnownBits(I, Known, Q, Depth);
     break;
   }
   case Instruction::Shl: {
@@ -660,7 +661,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
             auto [IID, FShiftArgs] = *Opt;
             if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
                 FShiftArgs[0] == FShiftArgs[1]) {
-              llvm::computeKnownBits(I, Known, Depth, Q);
+              llvm::computeKnownBits(I, Known, Q, Depth);
               break;
             }
           }
@@ -674,7 +675,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
         if (I->hasNoSignedWrap()) {
           unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero();
           unsigned SignBits =
-              ComputeNumSignBits(I->getOperand(0), Depth + 1, Q.CxtI);
+              ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1);
           if (SignBits > ShiftAmt && SignBits - ShiftAmt >= NumHiDemandedBits)
             return I->getOperand(0);
         }
@@ -706,7 +707,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       else if (IOp->hasNoUnsignedWrap())
         DemandedMaskIn.setHighBits(ShiftAmt);
 
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1, Q))
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1))
         return I;
 
       Known = KnownBits::shl(Known,
@@ -719,13 +720,13 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       // demanding those bits from the pre-shifted operand either.
       if (unsigned CTLZ = DemandedMask.countl_zero()) {
         APInt DemandedFromOp(APInt::getLowBitsSet(BitWidth, BitWidth - CTLZ));
-        if (SimplifyDemandedBits(I, 0, DemandedFromOp, Known, Depth + 1, Q)) {
+        if (SimplifyDemandedBits(I, 0, DemandedFromOp, Known, Q, Depth + 1)) {
           // We can't guarantee that nsw/nuw hold after simplifying the operand.
           I->dropPoisonGeneratingFlags();
           return I;
         }
       }
-      llvm::computeKnownBits(I, Known, Depth, Q);
+      llvm::computeKnownBits(I, Known, Q, Depth);
     }
     break;
   }
@@ -742,7 +743,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
             auto [IID, FShiftArgs] = *Opt;
             if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
                 FShiftArgs[0] == FShiftArgs[1]) {
-              llvm::computeKnownBits(I, Known, Depth, Q);
+              llvm::computeKnownBits(I, Known, Q, Depth);
               break;
             }
           }
@@ -756,7 +757,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
         // need to shift.
         unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero();
         unsigned SignBits =
-            ComputeNumSignBits(I->getOperand(0), Depth + 1, Q.CxtI);
+            ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1);
         if (SignBits >= NumHiDemandedBits)
           return I->getOperand(0);
 
@@ -789,7 +790,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
       // Unsigned shift right.
       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1, Q)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1)) {
        // exact flag may no longer hold.
         I->dropPoisonGeneratingFlags();
         return I;
@@ -799,12 +800,12 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       if (ShiftAmt)
         Known.Zero.setHighBits(ShiftAmt);  // high bits known zero.
     } else {
-      llvm::computeKnownBits(I, Known, Depth, Q);
+      llvm::computeKnownBits(I, Known, Q, Depth);
     }
     break;
   }
   case Instruction::AShr: {
-    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, Q.CxtI);
+    unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Q.CxtI, Depth + 1);
 
     // If we only want bits that already match the signbit then we don't need
     // to shift.
@@ -834,7 +835,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       bool ShiftedInBitsDemanded = DemandedMask.countl_zero() < ShiftAmt;
       if (ShiftedInBitsDemanded)
         DemandedMaskIn.setSignBit();
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1, Q)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Q, Depth + 1)) {
        // exact flag may no longer hold.
         I->dropPoisonGeneratingFlags();
         return I;
@@ -854,7 +855,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
           Known, KnownBits::makeConstant(APInt(BitWidth, ShiftAmt)),
           ShiftAmt != 0, I->isExact());
     } else {
-      llvm::computeKnownBits(I, Known, Depth, Q);
+      llvm::computeKnownBits(I, Known, Q, Depth);
     }
     break;
   }
@@ -866,7 +867,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       unsigned RHSTrailingZeros = SA->countr_zero();
       APInt DemandedMaskIn =
           APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
-      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1, Q)) {
+      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Q, Depth + 1)) {
         // We can't guarantee that "exact" is still true after changing the
         // the dividend.
         I->dropPoisonGeneratingFlags();
@@ -876,7 +877,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
       Known = KnownBits::udiv(LHSKnown, KnownBits::makeConstant(*SA),
                               cast<BinaryOperator>(I)->isExact());
     } else {
-      llvm::computeKnownBits(I, Known, Depth, Q);
+      llvm::computeKnownBits(I, Known, Q, Depth);
     }
     break;
   }
@@ -888,13 +889,13 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 
       APInt LowBits = *Rem - 1;
       APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
-      if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1, Q))
+      if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Q, Depth + 1))
         return I;
       Known = KnownBits::srem(LHSKnown, KnownBits::makeConstant(*Rem));
       break;
     }
 
-    llvm::computeKnownBits(I, Known, Depth, Q);
+    llvm::computeKnownBits(I, Known, Q, Depth);
     break;
   }
   case Instruction::Call: {
@@ -950,10 +951,10 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
         unsigned MaskWidth = I->getOperand(1)->getType()->getScalarSizeInBits();
         RHSKnown = KnownBits(MaskWidth);
         // If either the LHS or the RHS are Zero, the result is zero.
-        if (SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1, Q) ||
+        if (SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Q, Depth + 1) ||
             SimplifyDemandedBits(
                 I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth),
-                RHSKnown, Depth + 1, Q))
+                RHSKnown, Q, Depth + 1))
           return I;
 
         // TODO: Should be 1-extend
@@ -994,7 +995,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
                          m_PtrAdd(m_Value(InnerPtr), m_ConstantInt(GEPIndex)),
                          m_ConstantInt(PtrMaskImmediate)))) {
 
-          LHSKnown = computeKnownBits(InnerPtr, Depth + 1, I);
+          LHSKnown = computeKnownBits(InnerPtr, I, Depth + 1);
           if (!LHSKnown.isZero()) {
             const unsigned trailingZeros = LHSKnown.countMinTrailingZeros();
             uint64_t PointerAlignBits = (uint64_t(1) << trailingZeros) - 1;
@@ -1039,10 +1040,10 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
         APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
         APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
         if (I->getOperand(0) != I->getOperand(1)) {
-          if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1,
-                                   Q) ||
-              SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1,
-                                   Q)) {
+          if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Q,
+                                   Depth + 1) ||
+              SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Q,
+                                   Depth + 1)) {
             // Range attribute may no longer hold.
             I->dropPoisonGeneratingReturnAttributes();
             return I;
@@ -1050,14 +1051,14 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
         } else { // fshl is a rotate
           // Avoid converting rotate into funnel shift.
           // Only simplify if one operand is constant.
-          LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, I);
+          LHSKnown = computeKnownBits(I->getOperand(0), I, Depth + 1);
           if (DemandedMaskLHS.isSubsetOf(LHSKnown.Zero | LHSKnown.One) &&
               !match(I->getOperand(0), m_SpecificInt(LHSKnown.One))) {
             replaceOperand(*I, 0, Constant::getIntegerValue(VTy, LHSKnown.One));
             return I;
           }
 
-          RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, I);
+          RHSKnown = computeKnownBits(I->getOperand(1), I, Depth + 1);
           if (DemandedMaskRHS.isSubsetOf(RHSKnown.Zero | RHSKnown.One) &&
               !match(I->getOperand(1), m_SpecificInt(RHSKnown.One))) {
             replaceOperand(*I, 1, Constant::getIntegerValue(VTy, RHSKnown.One));
@@ -1107,7 +1108,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     }
 
     if (!KnownBitsComputed)
-      llvm::computeKnownBits(I, Known, Depth, Q);
+      llvm::computeKnownBits(I, Known, Q, Depth);
     break;
   }
   }
@@ -1126,7 +1127,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
     return Constant::getIntegerValue(VTy, Known.One);
 
   if (VerifyKnownBits) {
-    KnownBits ReferenceKnown = llvm::computeKnownBits(I, Depth, Q);
+    KnownBits ReferenceKnown = llvm::computeKnownBits(I, Q, Depth);
     if (Known != ReferenceKnown) {
       errs() << "Mismatched known bits for " << *I << " in "
              << I->getFunction()->getName() << "\n";
@@ -1143,8 +1144,8 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
 /// bits. It also tries to handle simplifications that can be done based on
 /// DemandedMask, but without modifying the Instruction.
 Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
-    Instruction *I, const APInt &DemandedMask, KnownBits &Known, unsigned Depth,
-    const SimplifyQuery &Q) {
+    Instruction *I, const APInt &DemandedMask, KnownBits &Known,
+    const SimplifyQuery &Q, unsigned Depth) {
   unsigned BitWidth = DemandedMask.getBitWidth();
   Type *ITy = I->getType();
 
@@ -1157,11 +1158,11 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
   // this instruction has a simpler value in that context.
   switch (I->getOpcode()) {
   case Instruction::And: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, Q);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
-    computeKnownBitsFromContext(I, Known, Depth, Q);
+                                         Q, Depth);
+    computeKnownBitsFromContext(I, Known, Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -1178,11 +1179,11 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
     break;
   }
   case Instruction::Or: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, Q);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
-    computeKnownBitsFromContext(I, Known, Depth, Q);
+                                         Q, Depth);
+    computeKnownBitsFromContext(I, Known, Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -1201,11 +1202,11 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
     break;
   }
   case Instruction::Xor: {
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, Q);
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
     Known = analyzeKnownBitsFromAndXorOr(cast<Operator>(I), LHSKnown, RHSKnown,
-                                         Depth, Q);
-    computeKnownBitsFromContext(I, Known, Depth, Q);
+                                         Q, Depth);
+    computeKnownBitsFromContext(I, Known, Q, Depth);
 
     // If the client is only demanding bits that we know, return the known
     // constant.
@@ -1228,18 +1229,18 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
 
     // If an operand adds zeros to every bit below the highest demanded bit,
     // that operand doesn't change the result. Return the other side.
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
       return I->getOperand(0);
 
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
     if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
       return I->getOperand(1);
 
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
     Known = KnownBits::add(LHSKnown, RHSKnown, NSW, NUW);
-    computeKnownBitsFromContext(I, Known, Depth, Q);
+    computeKnownBitsFromContext(I, Known, Q, Depth);
     break;
   }
   case Instruction::Sub: {
@@ -1248,20 +1249,20 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
 
     // If an operand subtracts zeros from every bit below the highest demanded
     // bit, that operand doesn't change the result. Return the other side.
-    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(1), RHSKnown, Q, Depth + 1);
     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
       return I->getOperand(0);
 
     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
     bool NUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
-    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, Q);
+    llvm::computeKnownBits(I->getOperand(0), LHSKnown, Q, Depth + 1);
     Known = KnownBits::sub(LHSKnown, RHSKnown, NSW, NUW);
-    computeKnownBitsFromContext(I, Known, Depth, Q);
+    computeKnownBitsFromContext(I, Known, Q, Depth);
     break;
   }
   case Instruction::AShr: {
     // Compute the Known bits to simplify things downstream.
-    llvm::computeKnownBits(I, Known, Depth, Q);
+    llvm::computeKnownBits(I, Known, Q, Depth);
 
     // If this user is only demanding bits that we know, return the known
     // constant.
@@ -1288,7 +1289,7 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
   }
   default:
     // Compute the Known bits to simplify things downstream.
-    llvm::computeKnownBits(I, Known, Depth, Q);
+    llvm::computeKnownBits(I, Known, Q, Depth);
 
     // If this user is only demanding bits that we know, return the known
     // constant.
@@ -1967,8 +1968,8 @@ static Constant *getFPClassConstant(Type *Ty, FPClassTest Mask) {
 Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Value *V,
                                                     FPClassTest DemandedMask,
                                                     KnownFPClass &Known,
-                                                    unsigned Depth,
-                                                    Instruction *CxtI) {
+                                                    Instruction *CxtI,
+                                                    unsigned Depth) {
   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
   Type *VTy = V->getType();
 
@@ -2078,7 +2079,7 @@ bool InstCombinerImpl::SimplifyDemandedFPClass(Instruction *I, unsigned OpNo,
                                                unsigned Depth) {
   Use &U = I->getOperandUse(OpNo);
   Value *NewVal =
-      SimplifyDemandedUseFPClass(U.get(), DemandedMask, Known, Depth, I);
+      SimplifyDemandedUseFPClass(U.get(), DemandedMask, Known, I, Depth);
   if (!NewVal)
     return false;
   if (Instruction *OpInst = dyn_cast<Instruction>(U))
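
The helpers in this file now thread Depth through the recursion in the trailing position, as the Depth + 1 calls above show. A schematic stand-in for that convention (simplified, not the real InstCombinerImpl logic):

    // Schematic stand-in, illustration only: a recursive known-bits query
    // following the post-patch convention of a trailing, defaulted Depth.
    #include "llvm/Analysis/SimplifyQuery.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Operator.h"
    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    static KnownBits knownBitsOfAnd(const Operator *AndOp,
                                    const SimplifyQuery &Q,
                                    unsigned Depth = 0) {
      unsigned BW = AndOp->getType()->getScalarSizeInBits();
      if (Depth >= MaxAnalysisRecursionDepth)
        return KnownBits(BW); // recursion limit hit: everything unknown
      // Operand queries bump Depth in the trailing position.
      KnownBits LHS = computeKnownBits(AndOp->getOperand(0), Q, Depth + 1);
      KnownBits RHS = computeKnownBits(AndOp->getOperand(1), Q, Depth + 1);
      return LHS & RHS; // known bits of an 'and' combine via operator&
    }
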
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 116b862228417..f946c3856948b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -2244,7 +2244,7 @@ static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf,
   // have preserved the exact NaN bit-pattern.
   // Avoid the folding if X can have NaN elements.
   if (Shuf.getType()->getElementType()->isFloatingPointTy() &&
-      !isKnownNeverNaN(X, 0, SQ))
+      !isKnownNeverNaN(X, SQ))
     return nullptr;
 
   // Shuffle identity constants into the lanes that return the original value.
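
The FP predicates get the same shape. A tiny hypothetical wrapper (assumed name, not in the patch) over the updated isKnownNeverNaN(V, SQ, Depth = 0):

    // Hypothetical wrapper, illustration only.
    #include "llvm/Analysis/SimplifyQuery.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;

    static bool bothNeverNaN(const Value *A, const Value *B,
                             const SimplifyQuery &SQ) {
      // Depth is the trailing default argument; callers no longer pass 0.
      return isKnownNeverNaN(A, SQ) && isKnownNeverNaN(B, SQ);
    }
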
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 3dc89772676df..439a86d951a83 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3620,7 +3620,7 @@ Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
 
   KnownFPClass KnownClass;
   Value *Simplified =
-      SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
+      SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
   if (!Simplified)
     return nullptr;
 
@@ -3957,7 +3957,7 @@ Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
       return replaceOperand(SI, 0, V);
   }
 
-  KnownBits Known = computeKnownBits(Cond, 0, &SI);
+  KnownBits Known = computeKnownBits(Cond, &SI);
   unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
   unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
 
diff --git a/llvm/lib/Transforms/Scalar/InferAlignment.cpp b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
index ced04fdb5b4e8..0ddc23152d84f 100644
--- a/llvm/lib/Transforms/Scalar/InferAlignment.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
@@ -62,7 +62,7 @@ bool inferAlignment(Function &F, AssumptionCache &AC, DominatorTree &DT) {
     for (Instruction &I : BB) {
       Changed |= tryToImproveAlign(
           DL, &I, [&](Value *PtrOp, Align OldAlign, Align PrefAlign) {
-            KnownBits Known = computeKnownBits(PtrOp, DL, 0, &AC, &I, &DT);
+            KnownBits Known = computeKnownBits(PtrOp, DL, &AC, &I, &DT);
             unsigned TrailZ = std::min(Known.countMinTrailingZeros(),
                                        +Value::MaxAlignmentExponent);
             return Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index a0618726ac0ac..be71cb69ad8cc 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1582,7 +1582,7 @@ Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
   assert(V->getType()->isPointerTy() &&
          "getOrEnforceKnownAlignment expects a pointer!");
 
-  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
+  KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
   unsigned TrailZ = Known.countMinTrailingZeros();
 
   // Avoid trouble with ridiculously large TrailZ values, such as
diff --git a/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index b70310b364598..6acbce884fcc0 100644
--- a/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -406,7 +406,7 @@ void ProcessSwitchInst(SwitchInst *SI,
     //    roughly C icmp's per switch, where C is the number of cases in the
     //    switch, while LowerSwitch only needs to call LVI once per switch.
     const DataLayout &DL = F->getDataLayout();
-    KnownBits Known = computeKnownBits(Val, DL, /*Depth=*/0, AC, SI);
+    KnownBits Known = computeKnownBits(Val, DL, AC, SI);
     // TODO Shouldn't this create a signed range?
     ConstantRange KnownBitsRange =
         ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index b7299e01b0c5f..0a92061f06978 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -5854,13 +5854,13 @@ static bool eliminateDeadSwitchCases(SwitchInst *SI, DomTreeUpdater *DTU,
                                      AssumptionCache *AC,
                                      const DataLayout &DL) {
   Value *Cond = SI->getCondition();
-  KnownBits Known = computeKnownBits(Cond, DL, 0, AC, SI);
+  KnownBits Known = computeKnownBits(Cond, DL, AC, SI);
 
   // We can also eliminate cases by determining that their values are outside of
   // the limited range of the condition based on how many significant (non-sign)
   // bits are in the condition value.
   unsigned MaxSignificantBitsInCond =
-      ComputeMaxSignificantBits(Cond, DL, 0, AC, SI);
+      ComputeMaxSignificantBits(Cond, DL, AC, SI);
 
   // Gather dead cases.
   SmallVector<ConstantInt *, 8> DeadCases;
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 94a79ad824370..737321daa9109 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1001,7 +1001,7 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilderBase &B,
       }
 
       Value *Offset = GEP->getOperand(2);
-      KnownBits Known = computeKnownBits(Offset, DL, 0, nullptr, CI, nullptr);
+      KnownBits Known = computeKnownBits(Offset, DL, nullptr, CI, nullptr);
       uint64_t ArrSize =
              cast<ArrayType>(GEP->getSourceElementType())->getNumElements();
 
@@ -2262,7 +2262,7 @@ Value *LibCallSimplifier::replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B) {
   // errno), but sqrt(-Inf) is required by various standards to set errno.
   if (!Pow->doesNotAccessMemory() && !Pow->hasNoInfs() &&
       !isKnownNeverInfinity(
-          Base, 0, SimplifyQuery(DL, TLI, DT, AC, Pow, true, true, DC)))
+          Base, SimplifyQuery(DL, TLI, DT, AC, Pow, true, true, DC)))
     return nullptr;
 
   Sqrt = getSqrtCall(Base, AttributeList(), Pow->doesNotAccessMemory(), Mod, B,
@@ -2574,8 +2574,7 @@ Value *LibCallSimplifier::optimizeLog(CallInst *Log, IRBuilderBase &B) {
       SimplifyQuery SQ(DL, TLI, DT, AC, Log, true, true, DC);
       KnownFPClass Known = computeKnownFPClass(
           Log->getOperand(0),
-          KnownFPClass::OrderedLessThanZeroMask | fcSubnormal,
-          /*Depth=*/0, SQ);
+          KnownFPClass::OrderedLessThanZeroMask | fcSubnormal, SQ);
       Function *F = Log->getParent()->getParent();
       const fltSemantics &FltSem = Ty->getScalarType()->getFltSemantics();
       IsKnownNoErrno =
@@ -2803,12 +2802,10 @@ Value *LibCallSimplifier::optimizeFMod(CallInst *CI, IRBuilderBase &B) {
   bool IsNoNan = CI->hasNoNaNs();
   if (!IsNoNan) {
     SimplifyQuery SQ(DL, TLI, DT, AC, CI, true, true, DC);
-    KnownFPClass Known0 = computeKnownFPClass(CI->getOperand(0), fcInf,
-                                              /*Depth=*/0, SQ);
+    KnownFPClass Known0 = computeKnownFPClass(CI->getOperand(0), fcInf, SQ);
     if (Known0.isKnownNeverInfinity()) {
       KnownFPClass Known1 =
-          computeKnownFPClass(CI->getOperand(1), fcZero | fcSubnormal,
-                              /*Depth=*/0, SQ);
+          computeKnownFPClass(CI->getOperand(1), fcZero | fcSubnormal, SQ);
       Function *F = CI->getParent()->getParent();
       const fltSemantics &FltSem =
           CI->getType()->getScalarType()->getFltSemantics();
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 53c83a8c256e1..89f63c3b66aad 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1269,8 +1269,8 @@ std::optional<APInt> Vectorizer::getConstantOffsetComplexAddrs(
     // When computing known bits, use the GEPs as context instructions, since
     // they likely are in the same BB as the load/store.
     KnownBits Known(BitWidth);
-    computeKnownBits((IdxDiff.sge(0) ? ValA : OpB), Known, DL, 0, &AC,
-                     ContextInst, &DT);
+    computeKnownBits((IdxDiff.sge(0) ? ValA : OpB), Known, DL, &AC, ContextInst,
+                     &DT);
     APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
     if (Signed)
       BitsAllowedToBeSet.clearBit(BitWidth - 1);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 3e7f4cb56c0b3..6fcd606afaa22 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -10488,7 +10488,7 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
       } else if (ShuffleOrOp == Instruction::SIToFP ||
                  ShuffleOrOp == Instruction::UIToFP) {
         unsigned NumSignBits =
-            ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
+            ComputeNumSignBits(VL0->getOperand(0), *DL, AC, nullptr, DT);
         if (auto *OpI = dyn_cast<Instruction>(VL0->getOperand(0))) {
           APInt Mask = DB->getDemandedBits(OpI);
           NumSignBits = std::max(NumSignBits, Mask.countl_zero());
@@ -10532,12 +10532,12 @@ void BoUpSLP::buildTreeRec(ArrayRef<Value *> VLRef, unsigned Depth,
       buildTreeRec(Operands.back(), Depth + 1, {TE, 1});
       if (ShuffleOrOp == Instruction::ICmp) {
         unsigned NumSignBits0 =
-            ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
+            ComputeNumSignBits(VL0->getOperand(0), *DL, AC, nullptr, DT);
         if (NumSignBits0 * 2 >=
             DL->getTypeSizeInBits(VL0->getOperand(0)->getType()))
           ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
         unsigned NumSignBits1 =
-            ComputeNumSignBits(VL0->getOperand(1), *DL, 0, AC, nullptr, DT);
+            ComputeNumSignBits(VL0->getOperand(1), *DL, AC, nullptr, DT);
         if (NumSignBits1 * 2 >=
             DL->getTypeSizeInBits(VL0->getOperand(1)->getType()))
           ExtraBitWidthNodes.insert(getOperandEntry(TE, 1)->Idx);
@@ -20024,7 +20024,7 @@ bool BoUpSLP::collectValuesToDemote(
       if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
         return true;
     }
-    unsigned NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
+    unsigned NumSignBits = ComputeNumSignBits(V, *DL, AC, nullptr, DT);
     unsigned BitWidth1 = OrigBitWidth - NumSignBits;
     if (IsSignedNode)
       ++BitWidth1;
@@ -20260,8 +20260,8 @@ bool BoUpSLP::collectValuesToDemote(
         KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
         unsigned ShiftedBits = OrigBitWidth - BitWidth;
         return AmtKnownBits.getMaxValue().ult(BitWidth) &&
-               ShiftedBits < ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
-                                                nullptr, DT);
+               ShiftedBits <
+                   ComputeNumSignBits(I->getOperand(0), *DL, AC, nullptr, DT);
       });
     };
     return TryProcessInstruction(
@@ -20324,10 +20324,10 @@ bool BoUpSLP::collectValuesToDemote(
                "Expected min/max intrinsics only.");
         unsigned SignBits = OrigBitWidth - BitWidth;
         APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
-        unsigned Op0SignBits = ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
-                                              nullptr, DT);
-        unsigned Op1SignBits = ComputeNumSignBits(I->getOperand(1), *DL, 0, AC,
-                                              nullptr, DT);
+        unsigned Op0SignBits =
+            ComputeNumSignBits(I->getOperand(0), *DL, AC, nullptr, DT);
+        unsigned Op1SignBits =
+            ComputeNumSignBits(I->getOperand(1), *DL, AC, nullptr, DT);
         return SignBits <= Op0SignBits &&
                ((SignBits != Op0SignBits &&
                  !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
@@ -20346,7 +20346,7 @@ bool BoUpSLP::collectValuesToDemote(
         unsigned SignBits = OrigBitWidth - BitWidth;
         APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
         unsigned Op0SignBits =
-            ComputeNumSignBits(I->getOperand(0), *DL, 0, AC, nullptr, DT);
+            ComputeNumSignBits(I->getOperand(0), *DL, AC, nullptr, DT);
         return SignBits <= Op0SignBits &&
                ((SignBits != Op0SignBits &&
                  !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
@@ -20532,7 +20532,7 @@ void BoUpSLP::computeMinimumValueSizes() {
     for (Value *Root : E.Scalars) {
       if (isa<PoisonValue>(Root))
         continue;
-      unsigned NumSignBits = ComputeNumSignBits(Root, *DL, 0, AC, nullptr, DT);
+      unsigned NumSignBits = ComputeNumSignBits(Root, *DL, AC, nullptr, DT);
       TypeSize NumTypeBits =
           DL->getTypeSizeInBits(Root->getType()->getScalarType());
       unsigned BitWidth1 = NumTypeBits - NumSignBits;
@@ -20623,7 +20623,7 @@ void BoUpSLP::computeMinimumValueSizes() {
       for (Value *V : *UserIgnoreList) {
         if (isa<PoisonValue>(V))
           continue;
-        unsigned NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
+        unsigned NumSignBits = ComputeNumSignBits(V, *DL, AC, nullptr, DT);
         TypeSize NumTypeBits = DL->getTypeSizeInBits(V->getType());
         unsigned BitWidth1 = NumTypeBits - NumSignBits;
         if (!isKnownNonNegative(V, SimplifyQuery(*DL)))
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 8343afd63bed5..e23005b60891d 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2015,27 +2015,27 @@ TEST_F(ComputeKnownFPClassTest, SqrtNszSignBit) {
 
   {
     KnownFPClass UseInstrInfo =
-        computeKnownFPClass(A, M->getDataLayout(), fcAllFlags, 0, nullptr,
-                            nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
+        computeKnownFPClass(A, M->getDataLayout(), fcAllFlags, nullptr, nullptr,
+                            nullptr, nullptr, /*UseInstrInfo=*/true);
     EXPECT_EQ(SqrtMask, UseInstrInfo.KnownFPClasses);
     EXPECT_EQ(std::nullopt, UseInstrInfo.SignBit);
 
     KnownFPClass NoUseInstrInfo =
-        computeKnownFPClass(A, M->getDataLayout(), fcAllFlags, 0, nullptr,
-                            nullptr, nullptr, nullptr, /*UseInstrInfo=*/false);
+        computeKnownFPClass(A, M->getDataLayout(), fcAllFlags, nullptr, nullptr,
+                            nullptr, nullptr, /*UseInstrInfo=*/false);
     EXPECT_EQ(SqrtMask, NoUseInstrInfo.KnownFPClasses);
     EXPECT_EQ(std::nullopt, NoUseInstrInfo.SignBit);
   }
 
   {
     KnownFPClass UseInstrInfoNSZ =
-        computeKnownFPClass(A2, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A2, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
     EXPECT_EQ(NszSqrtMask, UseInstrInfoNSZ.KnownFPClasses);
     EXPECT_EQ(std::nullopt, UseInstrInfoNSZ.SignBit);
 
     KnownFPClass NoUseInstrInfoNSZ =
-        computeKnownFPClass(A2, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A2, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/false);
     EXPECT_EQ(SqrtMask, NoUseInstrInfoNSZ.KnownFPClasses);
     EXPECT_EQ(std::nullopt, NoUseInstrInfoNSZ.SignBit);
@@ -2043,14 +2043,14 @@ TEST_F(ComputeKnownFPClassTest, SqrtNszSignBit) {
 
   {
     KnownFPClass UseInstrInfoNoNan =
-        computeKnownFPClass(A3, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A3, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
     EXPECT_EQ(fcPositive | fcNegZero | fcQNan,
               UseInstrInfoNoNan.KnownFPClasses);
     EXPECT_EQ(std::nullopt, UseInstrInfoNoNan.SignBit);
 
     KnownFPClass NoUseInstrInfoNoNan =
-        computeKnownFPClass(A3, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A3, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/false);
     EXPECT_EQ(fcPositive | fcNegZero | fcQNan,
               NoUseInstrInfoNoNan.KnownFPClasses);
@@ -2059,13 +2059,13 @@ TEST_F(ComputeKnownFPClassTest, SqrtNszSignBit) {
 
   {
     KnownFPClass UseInstrInfoNSZNoNan =
-        computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
     EXPECT_EQ(fcPositive | fcQNan, UseInstrInfoNSZNoNan.KnownFPClasses);
     EXPECT_EQ(std::nullopt, UseInstrInfoNSZNoNan.SignBit);
 
     KnownFPClass NoUseInstrInfoNSZNoNan =
-        computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
+        computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, nullptr,
                             nullptr, nullptr, nullptr, /*UseInstrInfo=*/false);
     EXPECT_EQ(fcPositive | fcNegZero | fcQNan,
               NoUseInstrInfoNSZNoNan.KnownFPClasses);
@@ -2085,8 +2085,7 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
 
   {
     KnownFPClass ConstAggZero = computeKnownFPClass(
-        ConstantAggregateZero::get(V4F32), M->getDataLayout(), fcAllFlags, 0,
-        nullptr, nullptr, nullptr, nullptr);
+        ConstantAggregateZero::get(V4F32), M->getDataLayout(), fcAllFlags);
 
     EXPECT_EQ(fcPosZero, ConstAggZero.KnownFPClasses);
     ASSERT_TRUE(ConstAggZero.SignBit);
@@ -2094,17 +2093,15 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
   }
 
   {
-    KnownFPClass Undef =
-        computeKnownFPClass(UndefValue::get(F32), M->getDataLayout(),
-                            fcAllFlags, 0, nullptr, nullptr, nullptr, nullptr);
+    KnownFPClass Undef = computeKnownFPClass(UndefValue::get(F32),
+                                             M->getDataLayout(), fcAllFlags);
     EXPECT_EQ(fcAllFlags, Undef.KnownFPClasses);
     EXPECT_FALSE(Undef.SignBit);
   }
 
   {
-    KnownFPClass Poison =
-        computeKnownFPClass(PoisonValue::get(F32), M->getDataLayout(),
-                            fcAllFlags, 0, nullptr, nullptr, nullptr, nullptr);
+    KnownFPClass Poison = computeKnownFPClass(PoisonValue::get(F32),
+                                              M->getDataLayout(), fcAllFlags);
     EXPECT_EQ(fcNone, Poison.KnownFPClasses);
     ASSERT_TRUE(Poison.SignBit);
     EXPECT_FALSE(*Poison.SignBit);
@@ -2115,9 +2112,9 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
     Constant *ZeroF32 = ConstantFP::getZero(F32);
     Constant *PoisonF32 = PoisonValue::get(F32);
 
-    KnownFPClass PartiallyPoison = computeKnownFPClass(
-        ConstantVector::get({ZeroF32, PoisonF32}), M->getDataLayout(),
-        fcAllFlags, 0, nullptr, nullptr, nullptr, nullptr);
+    KnownFPClass PartiallyPoison =
+        computeKnownFPClass(ConstantVector::get({ZeroF32, PoisonF32}),
+                            M->getDataLayout(), fcAllFlags);
     EXPECT_EQ(fcPosZero, PartiallyPoison.KnownFPClasses);
     ASSERT_TRUE(PartiallyPoison.SignBit);
     EXPECT_FALSE(*PartiallyPoison.SignBit);
@@ -2128,9 +2125,9 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
     Constant *NegZeroF32 = ConstantFP::getZero(F32, true);
     Constant *PoisonF32 = PoisonValue::get(F32);
 
-    KnownFPClass PartiallyPoison = computeKnownFPClass(
-        ConstantVector::get({NegZeroF32, PoisonF32}), M->getDataLayout(),
-        fcAllFlags, 0, nullptr, nullptr, nullptr, nullptr);
+    KnownFPClass PartiallyPoison =
+        computeKnownFPClass(ConstantVector::get({NegZeroF32, PoisonF32}),
+                            M->getDataLayout(), fcAllFlags);
     EXPECT_EQ(fcNegZero, PartiallyPoison.KnownFPClasses);
     ASSERT_TRUE(PartiallyPoison.SignBit);
     EXPECT_TRUE(*PartiallyPoison.SignBit);
@@ -2141,9 +2138,9 @@ TEST_F(ComputeKnownFPClassTest, Constants) {
     Constant *NegZeroF32 = ConstantFP::getZero(F32, true);
     Constant *PoisonF32 = PoisonValue::get(F32);
 
-    KnownFPClass PartiallyPoison = computeKnownFPClass(
-        ConstantVector::get({PoisonF32, NegZeroF32}), M->getDataLayout(),
-        fcAllFlags, 0, nullptr, nullptr, nullptr, nullptr);
+    KnownFPClass PartiallyPoison =
+        computeKnownFPClass(ConstantVector::get({PoisonF32, NegZeroF32}),
+                            M->getDataLayout(), fcAllFlags);
     EXPECT_EQ(fcNegZero, PartiallyPoison.KnownFPClasses);
     EXPECT_TRUE(PartiallyPoison.SignBit);
   }
@@ -2453,8 +2450,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntTrunc) {
       "}\n"
       "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(
-      A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_TRUE(Known.isUnknown());
 }
 
@@ -2471,8 +2468,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntZext) {
       "}\n"
       "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(
-      A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_TRUE(Known.isUnknown());
 }
 
@@ -2488,8 +2485,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsFreeze) {
                 "declare void @llvm.assume(i1)\n"
                 "declare i32 @any_num()\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC,
-                                     F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), 31u);
   EXPECT_EQ(Known.One.getZExtValue(), 0u);
 }
@@ -2518,12 +2515,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRange) {
                 "}\n"
                 "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC,
-                                     F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 0u);
   Instruction &APlus512 = findInstructionByName(F, "APlus512");
-  Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC,
+  Known = computeKnownBits(&APlus512, M->getDataLayout(), &AC,
                            F->front().getTerminator());
   // We know of one less zero because 512 may have produced a 1 that
   // got carried all the way to the first trailing zero.
@@ -2542,7 +2539,7 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsUnknownVScale) {
                                                       {Builder.getInt32Ty()});
   CallInst *CI = Builder.CreateCall(TheFn, {}, {}, "");
 
-  KnownBits Known = computeKnownBits(CI, M.getDataLayout(), /* Depth */ 0);
+  KnownBits Known = computeKnownBits(CI, M.getDataLayout());
   // There is no parent function so we cannot look up the vscale_range
   // attribute to determine the number of bits.
   EXPECT_EQ(Known.One.getZExtValue(), 0u);
@@ -2550,7 +2547,7 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsUnknownVScale) {
 
   BasicBlock *BB = BasicBlock::Create(Context);
   CI->insertInto(BB, BB->end());
-  Known = computeKnownBits(CI, M.getDataLayout(), /* Depth */ 0);
+  Known = computeKnownBits(CI, M.getDataLayout());
   // There is no parent function so we cannot look up the vscale_range
   // attribute to determine the number of bits.
   EXPECT_EQ(Known.One.getZExtValue(), 0u);
@@ -2573,12 +2570,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRangeNoOverlap) {
                 "}\n"
                 "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC,
-                                     F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 32u);
   Instruction &APlus512 = findInstructionByName(F, "APlus512");
-  Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC,
+  Known = computeKnownBits(&APlus512, M->getDataLayout(), &AC,
                            F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u);
@@ -2600,12 +2597,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRange) {
       "}\n"
       "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC,
-                                     F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 0u);
   Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512");
-  Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC,
+  Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), &AC,
                            F->front().getTerminator());
   // We know of one less zero because 512 may have produced a 1 that
   // got carried all the way to the first trailing zero.
@@ -2633,12 +2630,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRangeNoOverlap) {
       "}\n"
       "declare void @llvm.assume(i1)\n");
   AssumptionCache AC(*F);
-  KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC,
-                                     F->front().getTerminator());
+  KnownBits Known =
+      computeKnownBits(A, M->getDataLayout(), &AC, F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 32u);
   Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512");
-  Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC,
+  Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), &AC,
                            F->front().getTerminator());
   EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1));
   EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u);
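
To close, a hypothetical fragment in the style of the updated unit tests above, showing the common query with the literal Depth gone and only the analyses that matter spelled out:

    // Hypothetical test-style helper, illustration only.
    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    static KnownBits queryAtEntry(Value *A, Function &F) {
      AssumptionCache AC(F);
      const DataLayout &DL = F.getDataLayout();
      return computeKnownBits(A, DL, &AC, F.front().getTerminator());
    }
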


