[llvm] [HashRecognize] Introduce new analysis (PR #139120)
via llvm-commits
llvm-commits at lists.llvm.org
Thu May 8 11:18:46 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-analysis
Author: Ramkumar Ramachandra (artagnon)
<details>
<summary>Changes</summary>
Introduce a new analysis for recognizing polynomial hashes, the rationale being that several targets have specific instructions to optimize operations like CRC and GHASH (e.g. X86 and the RISC-V crypto extension). We limit the scope to polynomial hashes computed in a Galois field of characteristic 2, since this class of operations can also be optimized to use a lookup table in the absence of target-specific instructions.
At the moment, we only recognize the CRC algorithm.
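For illustration (not part of the patch): a minimal sketch of the kind of bit-at-a-time CRC loop the analysis is meant to recognize. The function name and the choice of the CRC-16-CCITT polynomial 0x1021 are assumptions made for this example; the IR shapes actually handled are in the added test file.

```cpp
#include <cstdint>

// Hypothetical example: MSB-first (big-endian) bit-at-a-time CRC-16-CCITT.
// Each iteration performs one step of polynomial division in GF(2): shift,
// and conditionally XOR in the generating polynomial when the MSB is set.
uint16_t crc16_update(uint16_t CRC, uint8_t Data) {
  CRC ^= (uint16_t)Data << 8; // fold the next byte into the top bits
  for (int I = 0; I < 8; ++I)
    CRC = (CRC & 0x8000) ? (uint16_t)((CRC << 1) ^ 0x1021)
                         : (uint16_t)(CRC << 1);
  return CRC;
}
```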
-- 8< --
Depends on #138836.
---
Patch is 81.79 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/139120.diff
11 Files Affected:
- (added) llvm/include/llvm/Analysis/HashRecognize.h (+89)
- (modified) llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h (+48)
- (modified) llvm/lib/Analysis/CMakeLists.txt (+1)
- (added) llvm/lib/Analysis/HashRecognize.cpp (+681)
- (modified) llvm/lib/Analysis/LoopAccessAnalysis.cpp (+13-26)
- (modified) llvm/lib/Analysis/ScalarEvolution.cpp (+8-13)
- (modified) llvm/lib/Passes/PassBuilder.cpp (+1)
- (modified) llvm/lib/Passes/PassRegistry.def (+2)
- (added) llvm/test/Analysis/HashRecognize/cyclic-redundancy-check.ll (+859)
- (modified) llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn (+1)
- (modified) llvm/utils/update_analyze_test_checks.py (+5-2)
``````````diff
diff --git a/llvm/include/llvm/Analysis/HashRecognize.h b/llvm/include/llvm/Analysis/HashRecognize.h
new file mode 100644
index 0000000000000..cc353836118a3
--- /dev/null
+++ b/llvm/include/llvm/Analysis/HashRecognize.h
@@ -0,0 +1,89 @@
+//===- HashRecognize.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the HashRecognize analysis, which identifies hash functions
+// that can be optimized using a lookup-table or with target-specific
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_HASHRECOGNIZE_H
+#define LLVM_ANALYSIS_HASHRECOGNIZE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+/// A tuple of the KnownBits that were computed, the number N of bits that
+/// were expected to be zero, and a boolean indicating whether it is the top
+/// or the bottom N bits that were expected to be zero.
+using ErrBits = std::tuple<KnownBits, unsigned, bool>;
+
+/// A custom std::array with 256 entries that also has a print function.
+struct CRCTable : public std::array<APInt, 256> {
+ void print(raw_ostream &OS) const;
+};
+
+/// The structure that is returned when a polynomial algorithm is recognized by
+/// the analysis. Currently, only the CRC algorithm is recognized.
+struct PolynomialInfo {
+ unsigned TripCount;
+ const Value *LHS;
+ APInt RHS;
+ const Value *ComputedValue;
+ bool ByteOrderSwapped;
+ const Value *LHSAux;
+ PolynomialInfo(unsigned TripCount, const Value *LHS, const APInt &RHS,
+ const Value *ComputedValue, bool ByteOrderSwapped,
+ const Value *LHSAux = nullptr);
+};
+
+/// The analysis.
+class HashRecognize {
+ const Loop &L;
+ ScalarEvolution &SE;
+
+public:
+ HashRecognize(const Loop &L, ScalarEvolution &SE);
+
+ // The main analysis entry point.
+ std::variant<PolynomialInfo, ErrBits, StringRef> recognizeCRC() const;
+
+ // Auxiliary entry point after analysis to interleave the generating
+ // polynomial and return a 256-entry CRC table.
+ CRCTable genSarwateTable(const APInt &GenPoly, bool ByteOrderSwapped) const;
+
+ void print(raw_ostream &OS) const;
+};
+
+class HashRecognizePrinterPass
+ : public PassInfoMixin<HashRecognizePrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit HashRecognizePrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &);
+};
+
+class HashRecognizeAnalysis : public AnalysisInfoMixin<HashRecognizeAnalysis> {
+ friend AnalysisInfoMixin<HashRecognizeAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ using Result = HashRecognize;
+ Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
+};
+} // namespace llvm
+
+#endif
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index 674147ca175ef..536d74f296931 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -58,6 +58,8 @@ template <typename Class> struct class_match {
template <typename ITy> bool match(ITy *V) const { return isa<Class>(V); }
};
+inline class_match<const SCEV> m_SCEV() { return class_match<const SCEV>(); }
+
template <typename Class> struct bind_ty {
Class *&VR;
@@ -93,6 +95,41 @@ struct specificscev_ty {
/// Match if we have a specific specified SCEV.
inline specificscev_ty m_Specific(const SCEV *S) { return S; }
+template <typename Class> struct cst_match {
+ Class CV;
+
+ cst_match(Class Op0) : CV(Op0) {}
+
+ bool match(const SCEV *S) const {
+ assert((isa<SCEVCouldNotCompute>(S) || !S->getType()->isVectorTy()) &&
+ "no vector types expected from SCEVs");
+ auto *C = dyn_cast<SCEVConstant>(S);
+ return C && C->getAPInt() == CV;
+ }
+};
+
+/// Match an SCEV constant with a plain unsigned integer.
+inline cst_match<uint64_t> m_scev_SpecificInt(uint64_t V) { return V; }
+
+struct bind_cst_ty {
+ const APInt *&CR;
+
+ bind_cst_ty(const APInt *&Op0) : CR(Op0) {}
+
+ bool match(const SCEV *S) const {
+ assert((isa<SCEVCouldNotCompute>(S) || !S->getType()->isVectorTy()) &&
+ "no vector types expected from SCEVs");
+ auto *C = dyn_cast<SCEVConstant>(S);
+ if (!C)
+ return false;
+ CR = &C->getAPInt();
+ return true;
+ }
+};
+
+/// Match an SCEV constant and bind it to an APInt.
+inline bind_cst_ty m_scev_APInt(const APInt *&C) { return C; }
+
/// Match a unary SCEV.
template <typename SCEVTy, typename Op0_t> struct SCEVUnaryExpr_match {
Op0_t Op0;
@@ -149,6 +186,17 @@ m_scev_Add(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVAddExpr>(Op0, Op1);
}
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVMulExpr, Op0_t, Op1_t>
+m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVMulExpr>(Op0, Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline SCEVBinaryExpr_match<SCEVUDivExpr, Op0_t, Op1_t>
+m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_scev_Binary<SCEVUDivExpr>(Op0, Op1);
+}
} // namespace SCEVPatternMatch
} // namespace llvm
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index a17a75e6fbcac..b225345e825d9 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -77,6 +77,7 @@ add_llvm_component_library(LLVMAnalysis
FunctionPropertiesAnalysis.cpp
GlobalsModRef.cpp
GuardUtils.cpp
+ HashRecognize.cpp
HeatUtils.cpp
IRSimilarityIdentifier.cpp
IVDescriptors.cpp
diff --git a/llvm/lib/Analysis/HashRecognize.cpp b/llvm/lib/Analysis/HashRecognize.cpp
new file mode 100644
index 0000000000000..5c94be8f5b9ab
--- /dev/null
+++ b/llvm/lib/Analysis/HashRecognize.cpp
@@ -0,0 +1,681 @@
+//===- HashRecognize.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The HashRecognize analysis recognizes unoptimized polynomial hash functions
+// with operations over a Galois field of characteristic 2, also called binary
+// fields, or GF(2^n): this class of hash functions can be optimized using a
+// lookup-table-driven implementation, or with target-specific instructions.
+// Examples:
+//
+// 1. Cyclic redundancy check (CRC), which is a polynomial division in GF(2).
+// 2. Rabin fingerprint, a component of the Rabin-Karp algorithm, which is a
+// rolling hash polynomial division in GF(2).
+// 3. Rijndael MixColumns, a step in AES computation, which is a polynomial
+// multiplication in GF(2^3).
+// 4. GHASH, the authentication mechanism in AES Galois/Counter Mode (GCM),
+// which is a polynomial evaluation in GF(2^128).
+//
+// All of them use an irreducible generating polynomial of degree m,
+//
+// c_m * x^m + c_(m-1) * x^(m-1) + ... + c_0 * x^0
+//
+// where each coefficient c can take values in GF(2^n), where 2^n is termed
+// the order of the Galois field. For GF(2), each coefficient can take values
+// either 0 or 1, and the polynomial is simply represented by m+1 bits,
+// corresponding to the coefficients. The different variants of CRC are named by
+// the degree of the generating polynomial used: so CRC-32 uses a polynomial of
+// degree 32.
+//
+// The reason algorithms on GF(2^n) can be optimized with a lookup-table is the
+// following: in such fields, polynomial addition and subtraction are identical
+// and equivalent to XOR, polynomial multiplication is an AND, and polynomial
+// division is identity: the XOR and AND operations in unoptimized
+// implementations are performed bit-wise, and can be optimized to be performed
+// chunk-wise, by interleaving copies of the generating polynomial, and storing
+// the pre-computed values in a table.
+//
+// A generating polynomial of m bits always has the MSB set, so we usually
+// omit it. An example of a 16-bit polynomial is the CRC-16-CCITT polynomial:
+//
+// (x^16) + x^12 + x^5 + 1 = (1) 0001 0000 0010 0001 = 0x1021
+//
+// Transmissions are either in big-endian or little-endian form, and hash
+// algorithms are written accordingly. For example, IEEE 802 and RS-232
+// specify little-endian transmission.
+//
+//===----------------------------------------------------------------------===//
+//
+// At the moment, we only recognize the CRC algorithm.
+// Documentation on CRC32 from the kernel:
+// https://www.kernel.org/doc/Documentation/crc32.txt
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/HashRecognize.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/KnownBits.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "hash-recognize"
+
+// KnownBits for a PHI node. There are at most two PHI nodes, corresponding to
+// the Simple Recurrence and Conditional Recurrence. The IndVar PHI is not
+// relevant.
+using KnownPhiMap = SmallDenseMap<const PHINode *, KnownBits, 2>;
+
+// A pair of a PHI node along with its incoming value from within a loop.
+using PhiStepPair = std::pair<const PHINode *, const Instruction *>;
+
+/// A much simpler version of ValueTracking: it computes the KnownBits of
+/// values, but unlike ValueTracking, it computes the evolution of KnownBits
+/// across a loop with a given trip count, and its predication is specialized
+/// for a significant-bit check.
+class ValueEvolution {
+ unsigned TripCount;
+ bool ByteOrderSwapped;
+ APInt GenPoly;
+ StringRef ErrStr;
+ unsigned AtIteration;
+
+ KnownBits computeBinOp(const BinaryOperator *I, const KnownPhiMap &KnownPhis);
+ KnownBits computeInstr(const Instruction *I, const KnownPhiMap &KnownPhis);
+ KnownBits compute(const Value *V, const KnownPhiMap &KnownPhis);
+
+public:
+ ValueEvolution(unsigned TripCount, bool ByteOrderSwapped);
+
+ // In case ValueEvolution encounters an error, these are meant to be used for
+ // a precise error message.
+ bool hasError() const;
+ StringRef getError() const;
+
+ // Given a list of PHI nodes along with their incoming value from within the
+ // loop, and the trip-count of the loop, computeEvolutions computes the
+ // KnownBits of each of the PHI nodes on the final iteration.
+ std::optional<KnownPhiMap>
+ computeEvolutions(ArrayRef<PhiStepPair> PhiEvolutions);
+};
+
+ValueEvolution::ValueEvolution(unsigned TripCount, bool ByteOrderSwapped)
+ : TripCount(TripCount), ByteOrderSwapped(ByteOrderSwapped) {}
+
+bool ValueEvolution::hasError() const { return !ErrStr.empty(); }
+StringRef ValueEvolution::getError() const { return ErrStr; }
+
+/// Compute the KnownBits of BinaryOperator \p I.
+KnownBits ValueEvolution::computeBinOp(const BinaryOperator *I,
+ const KnownPhiMap &KnownPhis) {
+ unsigned BitWidth = I->getType()->getScalarSizeInBits();
+
+ KnownBits KnownL(compute(I->getOperand(0), KnownPhis));
+ KnownBits KnownR(compute(I->getOperand(1), KnownPhis));
+
+ switch (I->getOpcode()) {
+ case Instruction::BinaryOps::And:
+ return KnownL & KnownR;
+ case Instruction::BinaryOps::Or:
+ return KnownL | KnownR;
+ case Instruction::BinaryOps::Xor:
+ return KnownL ^ KnownR;
+ case Instruction::BinaryOps::Shl: {
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ return KnownBits::shl(KnownL, KnownR, OBO->hasNoUnsignedWrap(),
+ OBO->hasNoSignedWrap());
+ }
+ case Instruction::BinaryOps::LShr:
+ return KnownBits::lshr(KnownL, KnownR);
+ case Instruction::BinaryOps::AShr:
+ return KnownBits::ashr(KnownL, KnownR);
+ case Instruction::BinaryOps::Add: {
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ return KnownBits::add(KnownL, KnownR, OBO->hasNoUnsignedWrap(),
+ OBO->hasNoSignedWrap());
+ }
+ case Instruction::BinaryOps::Sub: {
+ auto *OBO = cast<OverflowingBinaryOperator>(I);
+ return KnownBits::sub(KnownL, KnownR, OBO->hasNoUnsignedWrap(),
+ OBO->hasNoSignedWrap());
+ }
+ case Instruction::BinaryOps::Mul: {
+ Value *Op0 = I->getOperand(0);
+ Value *Op1 = I->getOperand(1);
+ bool SelfMultiply = Op0 == Op1;
+ if (SelfMultiply)
+ SelfMultiply &= isGuaranteedNotToBeUndef(Op0);
+ return KnownBits::mul(KnownL, KnownR, SelfMultiply);
+ }
+ case Instruction::BinaryOps::UDiv:
+ return KnownBits::udiv(KnownL, KnownR);
+ case Instruction::BinaryOps::SDiv:
+ return KnownBits::sdiv(KnownL, KnownR);
+ case Instruction::BinaryOps::URem:
+ return KnownBits::urem(KnownL, KnownR);
+ case Instruction::BinaryOps::SRem:
+ return KnownBits::srem(KnownL, KnownR);
+ default:
+ ErrStr = "Unknown BinaryOperator";
+ return {BitWidth};
+ }
+}
+
+/// Compute the KnownBits of Instruction \p I.
+KnownBits ValueEvolution::computeInstr(const Instruction *I,
+ const KnownPhiMap &KnownPhis) {
+ using namespace llvm::PatternMatch;
+
+ unsigned BitWidth = I->getType()->getScalarSizeInBits();
+
+ // We look up in the map that contains the KnownBits of the PHI from the
+ // previous iteration.
+ if (const PHINode *P = dyn_cast<PHINode>(I))
+ return KnownPhis.lookup_or(P, {BitWidth});
+
+ // Compute the KnownBits for a Select(Cmp()), forcing it to take the branch
+ // that is predicated on the (least|most)-significant-bit check.
+ CmpPredicate Pred;
+ Value *L, *R, *TV, *FV;
+ if (match(I, m_Select(m_ICmp(Pred, m_Value(L), m_Value(R)), m_Value(TV),
+ m_Value(FV)))) {
+ KnownBits KnownL = compute(L, KnownPhis).zextOrTrunc(BitWidth);
+ KnownBits KnownR = compute(R, KnownPhis).zextOrTrunc(BitWidth);
+ KnownBits KnownTV = compute(TV, KnownPhis);
+ KnownBits KnownFV = compute(FV, KnownPhis);
+ auto LCR = ConstantRange::fromKnownBits(KnownL, false);
+ auto RCR = ConstantRange::fromKnownBits(KnownR, false);
+
+ // We need to check LCR against [0, 2) in the little-endian case, because
+ // the RCR check is too lax: it is simply [0, SMIN).
+ auto CheckLCR = ConstantRange(APInt::getZero(BitWidth), APInt(BitWidth, 2));
+ if (!ByteOrderSwapped && LCR != CheckLCR) {
+ ErrStr = "Bad LHS of significant-bit-check";
+ return {BitWidth};
+ }
+
+ // Check that the predication is on (most|least) significant bit.
+ auto AllowedR = ConstantRange::makeAllowedICmpRegion(Pred, RCR);
+ auto InverseR = ConstantRange::makeAllowedICmpRegion(
+ CmpInst::getInversePredicate(Pred), RCR);
+ ConstantRange LSBRange(APInt::getZero(BitWidth), APInt(BitWidth, 1));
+ ConstantRange MSBRange(APInt::getZero(BitWidth),
+ APInt::getSignedMinValue(BitWidth));
+ const ConstantRange &CheckRCR = ByteOrderSwapped ? MSBRange : LSBRange;
+ if (AllowedR == CheckRCR)
+ return KnownTV;
+ if (AllowedR.inverse() == CheckRCR)
+ return KnownFV;
+
+ ErrStr = "Bad RHS of significant-bit-check";
+ return {BitWidth};
+ }
+
+ if (auto *BO = dyn_cast<BinaryOperator>(I))
+ return computeBinOp(BO, KnownPhis);
+
+ switch (I->getOpcode()) {
+ case Instruction::CastOps::Trunc:
+ return compute(I->getOperand(0), KnownPhis).trunc(BitWidth);
+ case Instruction::CastOps::ZExt:
+ return compute(I->getOperand(0), KnownPhis).zext(BitWidth);
+ case Instruction::CastOps::SExt:
+ return compute(I->getOperand(0), KnownPhis).sext(BitWidth);
+ default:
+ ErrStr = "Unknown Instruction";
+ return {BitWidth};
+ }
+}
+
+/// Compute the KnownBits of Value \p V.
+KnownBits ValueEvolution::compute(const Value *V,
+ const KnownPhiMap &KnownPhis) {
+ using namespace llvm::PatternMatch;
+
+ unsigned BitWidth = V->getType()->getScalarSizeInBits();
+
+ const APInt *C;
+ if (match(V, m_APInt(C)))
+ return KnownBits::makeConstant(*C);
+
+ if (auto *I = dyn_cast<Instruction>(V))
+ return computeInstr(I, KnownPhis);
+
+ return {BitWidth};
+}
+
+// Takes every PHI-step pair in PhiEvolutions, and computes KnownBits on the
+// final iteration, using KnownBits from the previous iteration.
+std::optional<KnownPhiMap>
+ValueEvolution::computeEvolutions(ArrayRef<PhiStepPair> PhiEvolutions) {
+ KnownPhiMap KnownPhis;
+ for (unsigned I = 0; I < TripCount; ++I) {
+ AtIteration = I;
+ for (auto [Phi, Step] : PhiEvolutions) {
+ // Check that the {top, bottom} I bits are zero, with the rest unknown.
+ KnownBits KnownAtIter = computeInstr(Step, KnownPhis);
+ if (KnownAtIter.getBitWidth() < I + 1) {
+ ErrStr = "Loop iterations exceed bitwidth of result";
+ return std::nullopt;
+ }
+ KnownPhis.emplace_or_assign(Phi, KnownAtIter);
+ }
+ }
+
+ // Return the final KnownPhis.
+ return KnownPhis;
+}
+
+/// A Conditional Recurrence is a recurrence of the form:
+///
+/// loop:
+/// %rec = [%start, %entry], [%step, %loop]
+/// ...
+/// %step = select _, %tv, %fv
+///
+/// where %tv and %fv ultimately end up using %rec via the same %BO instruction,
+/// after digging through the use-def chain.
+///
+/// \p ExtraConst is relevant if \p BOWithConstOpToMatch is supplied: when
+/// digging the use-def chain, a BinOp with opcode \p BOWithConstOpToMatch is
+/// matched, and \p ExtraConst is a constant operand of that BinOp. This
+/// peculiarity exists because in a CRC algorithm, the \p BOWithConstOpToMatch is
+/// an XOR, and the \p ExtraConst ends up being the generating polynomial.
+static bool matchConditionalRecurrence(
+ const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step,
+ const Loop &L, const APInt *&ExtraConst,
+ Instruction::BinaryOps BOWithConstOpToMatch = Instruction::BinaryOpsEnd) {
+ if (P->getNumIncomingValues() != 2)
+ return false;
+
+ for (unsigned Idx = 0; Idx != 2; ++Idx) {
+ using namespace llvm::PatternMatch;
+
+ Value *FoundStep = P->getIncomingValue(Idx);
+ Value *FoundStart = P->getIncomingValue(!Idx);
+
+ Instruction *TV, *FV;
+ if (!match(FoundStep,
+ m_Select(m_Cmp(), m_Instruction(TV), m_Instruction(FV))))
+ continue;
+
+ auto DigRecurrence = [&](Instruction *V) -> BinaryOperator * {
+ SmallVector<Instruction *> Worklist;
+ Worklist.push_back(V);
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+
+ // Don't add a PHI's operands to the Worklist.
+ if (isa<PHINode>(I))
+ continue;
+
+ // Find a recurrence over a BinOp, by matching either of its operands
+ // with the PHINode.
+ if (match(I, m_c_BinOp(m_Value(), m_Specific(P))))
+ return cast<BinaryOperator>(I);
+
+ // Bind to ExtraConst, if we match exactly one.
+ if (I->getOpcode() == BOWithConstOpToMatch) {
+ if (ExtraConst)
+ return nullptr;
+ match(I, m_c_BinOp(m_APInt(ExtraConst), m_Value()));
+ }
+
+ // Continue along the use-def chain.
+ for (Use &U : I->operands())
+ if (auto *UI = dyn_cast<Instruction>(U))
+ if (L.contains(UI))
+ Worklist.push_back(UI);
+ }
+ return nullptr;
+ };
+
+ // For a conditional recurrence, both the true and false values of the
+ // select must ultimately end up in the same recurrent BinOp.
+ BinaryOperator *FoundBO = DigRecurrence(TV);
+ BinaryOperator *AltBO = DigRecurrence(FV);...
[truncated]
``````````
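As extra context (not part of the patch): the lookup-table rationale in the HashRecognize.cpp header comment corresponds to the classic Sarwate table-driven CRC, the kind of 256-entry table that `genSarwateTable` is meant to return. Below is a rough sketch under the same assumptions as the example above (CRC-16-CCITT polynomial 0x1021, illustrative names).

```cpp
#include <cstdint>

// Hypothetical sketch: precompute the effect of eight bit-at-a-time reduction
// steps for every possible byte value, then update the CRC one byte per lookup.
static uint16_t Table[256];

void buildTable() {
  for (unsigned B = 0; B < 256; ++B) {
    uint16_t CRC = (uint16_t)(B << 8);
    for (int I = 0; I < 8; ++I)
      CRC = (CRC & 0x8000) ? (uint16_t)((CRC << 1) ^ 0x1021)
                           : (uint16_t)(CRC << 1);
    Table[B] = CRC;
  }
}

uint16_t crc16_byte(uint16_t CRC, uint8_t Data) {
  // One table lookup replaces the eight-iteration inner loop above.
  return (uint16_t)((CRC << 8) ^ Table[((CRC >> 8) ^ Data) & 0xFF]);
}
```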
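Similarly, a hedged usage sketch of the new SCEV pattern matchers added to ScalarEvolutionPatternMatch.h in this patch (`m_scev_Mul`, `m_scev_UDiv`, `m_scev_APInt`, `m_scev_SpecificInt`); the helper names here are made up for the example, and it assumes the header's existing `match` and `m_Specific` entry points.

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"

using namespace llvm;
using namespace llvm::SCEVPatternMatch;

// Hypothetical helper: does S have the form (X /u 2) for this specific X?
static bool isHalfOf(const SCEV *S, const SCEV *X) {
  return match(S, m_scev_UDiv(m_Specific(X), m_scev_SpecificInt(2)));
}

// Hypothetical helper: does S have the form (C * X)? If so, bind C.
static bool isConstMultipleOf(const SCEV *S, const SCEV *X, const APInt *&C) {
  return match(S, m_scev_Mul(m_scev_APInt(C), m_Specific(X)));
}
```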
</details>
https://github.com/llvm/llvm-project/pull/139120