[llvm] [BOLT][X86] Redirect never-taken jumps (PR #113923)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 28 08:18:14 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-bolt
Author: ShatianWang (ShatianWang)
A new BOLT pass that reduces code size on X86 by redirecting never-taken jumps that take 5 or 6 bytes to nearby jumps with the same target and compatible condition codes. Each such redirection saves 3 or 4 bytes, depending on whether the redirected jump is unconditional or conditional, since a short jump takes only 2 bytes. The pass can be turned on with the BOLT option -redirect-never-taken-jumps.
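For illustration, here is a minimal sketch of the transformation (the labels and layout are invented for this example, not taken from the patch; the encodings are the standard x86 short and long jump forms). Before the pass, both jumps target `.Ltarget`, which is more than 127 bytes away, so both need long encodings:

```asm
        jne     .Ltarget        # never taken in the profile; 6 bytes (0f 8x + rel32)
        # ... up to 127 bytes of code ...
.Lnear:
        jmp     .Ltarget        # 5 bytes (e9 + rel32)
```

After the pass, the never-taken jne is redirected to the nearby jmp, which has the same target, so it now fits in a short encoding and 4 bytes are saved:

```asm
        jne     .Lnear          # still reaches .Ltarget via .Lnear; 2 bytes (7x + rel8)
        # ... up to 127 bytes of code ...
.Lnear:
        jmp     .Ltarget        # 5 bytes (e9 + rel32)
```

Because the jne is never taken according to the profile, the extra hop through .Lnear is not expected to cost anything at run time.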
---
Patch is 33.35 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/113923.diff
7 Files Affected:
- (modified) bolt/include/bolt/Core/BinaryBasicBlock.h (+12-2)
- (added) bolt/include/bolt/Passes/RedirectNeverTakenJumps.h (+58)
- (modified) bolt/lib/Core/BinaryBasicBlock.cpp (+73)
- (modified) bolt/lib/Passes/CMakeLists.txt (+1)
- (added) bolt/lib/Passes/RedirectNeverTakenJumps.cpp (+503)
- (modified) bolt/lib/Rewrite/BinaryPassManager.cpp (+9)
- (added) bolt/test/X86/redirect-never-taken-jumps.s (+86)
``````````diff
diff --git a/bolt/include/bolt/Core/BinaryBasicBlock.h b/bolt/include/bolt/Core/BinaryBasicBlock.h
index b4f31cf2bae6f6..df4c3f3a20f23f 100644
--- a/bolt/include/bolt/Core/BinaryBasicBlock.h
+++ b/bolt/include/bolt/Core/BinaryBasicBlock.h
@@ -789,13 +789,23 @@ class BinaryBasicBlock {
return SplitInst;
}
- /// Split basic block at the instruction pointed to by II.
+ /// Split basic block at the instruction pointed to by II. II must not
+ /// come after any branch instruction in the basic block.
/// All iterators pointing after II get invalidated.
///
/// Return the new basic block that starts with the instruction
- /// at the split point.
+ /// at the split point, which has been inserted at the end of the
+ /// current function.
BinaryBasicBlock *splitAt(iterator II);
+ /// Split basic block in place at the instruction pointed to by II.
+ /// All iterators pointing after II get invalidated.
+ ///
+ /// Return the new basic block that starts with the instruction
+ /// at the split point, which has been inserted right after the
+ /// current basic block in the current function.
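+ /// For example, splitting "I1; I2; J1; J2" at J1 leaves "I1; I2" in this
+ /// block and moves "J1; J2" into a new block placed immediately after it.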
+ BinaryBasicBlock *splitInPlaceAt(iterator II);
+
/// Set start offset of this basic block in the input binary.
void setOffset(uint32_t Offset) { InputRange.first = Offset; };
diff --git a/bolt/include/bolt/Passes/RedirectNeverTakenJumps.h b/bolt/include/bolt/Passes/RedirectNeverTakenJumps.h
new file mode 100644
index 00000000000000..d85eb5fb0fdf35
--- /dev/null
+++ b/bolt/include/bolt/Passes/RedirectNeverTakenJumps.h
@@ -0,0 +1,58 @@
+//===- bolt/Passes/RedirectNeverTakenJumps.h - Code size reduction --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass reduces code size on X86 by redirecting never-taken jumps that
+// take 5 or 6 bytes to nearby jumps with the same jump target and compatible
+// condition codes. Each such redirection saves 3 or 4 bytes, depending on
+// whether the redirected jump is unconditional or conditional, since a short
+// jump takes only 2 bytes. The pass can be turned on with the BOLT option
+// -redirect-never-taken-jumps.
+//
+// There are two modes for classifying "never-taken" jumps: aggressive and
+// conservative. The aggressive mode classifies any jump with zero execution
+// count as never-taken, and can be turned on with the BOLT option
+// -aggressive-never-taken. The conservative mode is used by default and
+// accounts for potential errors in the input profile. It infers whether a
+// jump with zero execution count is actually never-taken by checking the gap
+// between the inflow (resp. outflow) and the execution count of each basic
+// block. The conservativeness is controlled by the BOLT option
+// -conservative-never-taken-threshold. The smaller the threshold, the more
+// conservative the classification. In most realistic settings, the value
+// should exceed 1.0. The current default is 1.25.
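+//
+// For example (with invented numbers), under the default threshold of 1.25:
+// if a block executes 100 times but its positive-count incoming edges sum to
+// only 70, then 70 * 1.25 = 87.5 < 100, so the profile around this block is
+// considered unreliable and its zero-count incoming jumps are not classified
+// as never-taken.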
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BOLT_PASSES_REDIRECT_NEVER_TAKEN_JUMPS_H
+#define BOLT_PASSES_REDIRECT_NEVER_TAKEN_JUMPS_H
+
+#include "bolt/Passes/BinaryPasses.h"
+#include <atomic>
+
+namespace llvm {
+namespace bolt {
+
+class RedirectNeverTakenJumps : public BinaryFunctionPass {
+private:
+ std::atomic<uint64_t> TotalHotSizeSavings{0ull};
+ std::atomic<uint64_t> TotalSizeSavings{0ull};
+
+public:
+ explicit RedirectNeverTakenJumps(const cl::opt<bool> &PrintPass)
+ : BinaryFunctionPass(PrintPass) {}
+
+ const char *getName() const override { return "redirect-never-taken-jumps"; }
+
+ Error runOnFunctions(BinaryContext &BC) override;
+
+ void performRedirections(BinaryFunction &Function);
+};
+
+} // namespace bolt
+} // namespace llvm
+
+#endif
diff --git a/bolt/lib/Core/BinaryBasicBlock.cpp b/bolt/lib/Core/BinaryBasicBlock.cpp
index 2a2192b79bb4bf..4e87865f6a210f 100644
--- a/bolt/lib/Core/BinaryBasicBlock.cpp
+++ b/bolt/lib/Core/BinaryBasicBlock.cpp
@@ -572,5 +572,78 @@ BinaryBasicBlock *BinaryBasicBlock::splitAt(iterator II) {
return NewBlock;
}
+BinaryBasicBlock *BinaryBasicBlock::splitInPlaceAt(iterator II) {
+ assert(II != end() && "expected iterator pointing to instruction");
+ if (II == begin())
+ return this;
+ const BinaryContext &BC = Function->getBinaryContext();
+ std::vector<std::unique_ptr<BinaryBasicBlock>> ToAdd;
+ ToAdd.emplace_back(getFunction()->createBasicBlock());
+ BinaryBasicBlock *BBNew = ToAdd.back().get();
+ uint64_t BBNewExecCount = 0;
+
+ // Find the successors of the current block that need to be moved.
+ BinaryBasicBlock *CondSuccessor = nullptr;
+ BinaryBasicBlock::BinaryBranchInfo CondSuccessorBI;
+ BinaryBasicBlock *UncondSuccessor = nullptr;
+ BinaryBasicBlock::BinaryBranchInfo UncondSuccessorBI;
+ auto I = end();
+ while (I != II) {
+ --I;
+ if (BC.MIB->isUnconditionalBranch(*I)) {
+ const MCSymbol *TargetSymbol = BC.MIB->getTargetSymbol(*I);
+ UncondSuccessor = getSuccessor(TargetSymbol, UncondSuccessorBI);
+ } else if (BC.MIB->isConditionalBranch(*I)) {
+ const MCSymbol *TargetSymbol = BC.MIB->getTargetSymbol(*I);
+ CondSuccessor = getSuccessor(TargetSymbol, CondSuccessorBI);
+ }
+ }
+
+ // Adjust successors of the current and the new blocks.
+ if (CondSuccessor != nullptr) {
+ BBNew->addSuccessor(CondSuccessor, CondSuccessorBI);
+ BBNewExecCount +=
+ CondSuccessorBI.Count != BinaryBasicBlock::COUNT_NO_PROFILE
+ ? CondSuccessorBI.Count
+ : 0;
+ removeSuccessor(CondSuccessor);
+ }
+ if (UncondSuccessor != nullptr) {
+ BBNew->addSuccessor(UncondSuccessor, UncondSuccessorBI);
+ BBNewExecCount +=
+ UncondSuccessorBI.Count != BinaryBasicBlock::COUNT_NO_PROFILE
+ ? UncondSuccessorBI.Count
+ : 0;
+ removeSuccessor(UncondSuccessor);
+ } else { // Fall through.
+ BinaryBasicBlock *NextBB =
+ Function->getLayout().getBasicBlockAfter(this, /*IgnoreSplits=*/false);
+ assert(NextBB);
+ if (getSuccessor(NextBB->getLabel())) {
+ const BinaryBasicBlock::BinaryBranchInfo &BI = getBranchInfo(*NextBB);
+ BBNew->addSuccessor(NextBB, BI);
+ BBNewExecCount +=
+ BI.Count != BinaryBasicBlock::COUNT_NO_PROFILE ? BI.Count : 0;
+ removeSuccessor(NextBB);
+ }
+ }
+ addSuccessor(BBNew, BBNewExecCount, 0);
+ BBNew->setExecutionCount(BBNewExecCount);
+
+ // Set correct CFI state for the new block.
+ BBNew->setCFIState(getCFIStateAtInstr(&*II));
+
+ // Move instructions over.
+ adjustNumPseudos(II, end(), -1);
+ BBNew->addInstructions(II, end());
+ Instructions.erase(II, end());
+
+ // Insert new block after the current block.
+ getFunction()->insertBasicBlocks(
+ this, std::move(ToAdd), /*UpdateLayout*/ true, /*UpdateCFIState*/ true,
+ /*RecomputeLandingPads*/ false);
+ return BBNew;
+}
+
} // namespace bolt
} // namespace llvm
diff --git a/bolt/lib/Passes/CMakeLists.txt b/bolt/lib/Passes/CMakeLists.txt
index 1c1273b3d2420d..1b64d3e1d0b9e9 100644
--- a/bolt/lib/Passes/CMakeLists.txt
+++ b/bolt/lib/Passes/CMakeLists.txt
@@ -27,6 +27,7 @@ add_llvm_library(LLVMBOLTPasses
PettisAndHansen.cpp
PLTCall.cpp
ContinuityStats.cpp
+ RedirectNeverTakenJumps.cpp
RegAnalysis.cpp
RegReAssign.cpp
ReorderAlgorithm.cpp
diff --git a/bolt/lib/Passes/RedirectNeverTakenJumps.cpp b/bolt/lib/Passes/RedirectNeverTakenJumps.cpp
new file mode 100644
index 00000000000000..418f055beff306
--- /dev/null
+++ b/bolt/lib/Passes/RedirectNeverTakenJumps.cpp
@@ -0,0 +1,503 @@
+//===- bolt/Passes/RedirectNeverTakenJumps.cpp - Code size reduction ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements RedirectNeverTakenJumps class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "bolt/Passes/RedirectNeverTakenJumps.h"
+#include "bolt/Core/ParallelUtilities.h"
+
+using namespace llvm;
+using namespace bolt;
+
+namespace opts {
+extern cl::OptionCategory BoltOptCategory;
+
+static cl::opt<bool> RedirectNeverTakenJumps(
+ "redirect-never-taken-jumps",
+ cl::desc("Apply a heuristic to redirect never-taken jumps in order to "
+ "reduce hot code size (X86 only)"),
+ cl::Hidden, cl::init(false), cl::cat(BoltOptCategory));
+
+static cl::opt<bool> AggressiveNeverTaken(
+ "aggressive-never-taken",
+ cl::desc("Classify all zero-execution-count jumps as never taken. This "
+ "option ignores the possibility of execution counts of hot jumps "
+ "being incorrectly set to 0 in the input profile"),
+ cl::ReallyHidden, cl::init(false), cl::cat(BoltOptCategory));
+
+static cl::opt<double> ConservativeNeverTakenThreshold(
+ "conservative-never-taken-threshold",
+ cl::desc(
+ "When aggressive-never-taken=0 (default), this value controls how "
+ "conservative the classification of never-taken jumps is. The smaller "
+ "the value, the more conservative the classification. In most realistic "
+ "settings, the value should exceed 1.0. Default 1.25."),
+ cl::ZeroOrMore, cl::init(1.25), cl::ReallyHidden, cl::cat(BoltOptCategory));
+} // namespace opts
+
+namespace {
+/// A jump instruction in the binary.
+struct JumpT {
+ JumpT(const JumpT &) = delete;
+ JumpT(JumpT &&) = default;
+ JumpT &operator=(const JumpT &) = delete;
+ JumpT &operator=(JumpT &&) = default;
+
+ explicit JumpT(MCInst *Inst, unsigned CC, bool IsUnconditional,
+ BinaryBasicBlock *OriginalTargetBB, uint64_t ExecutionCount,
+ BinaryBasicBlock *HomeBB, uint64_t OriginalAddress,
+ uint64_t OriginalInstrSize)
+ : Inst(Inst), CC(CC), IsUnconditional(IsUnconditional),
+ OriginalTargetBB(OriginalTargetBB), ExecutionCount(ExecutionCount),
+ HomeBB(HomeBB), OriginalAddress(OriginalAddress),
+ OriginalInstrSize(OriginalInstrSize) {}
+
+ MCInst *Inst;
+ unsigned CC;
+ bool IsUnconditional;
+ BinaryBasicBlock *OriginalTargetBB;
+ uint64_t ExecutionCount;
+ BinaryBasicBlock *HomeBB;
+ uint64_t OriginalAddress{0};
+ uint8_t OriginalInstrSize{0};
+
+ bool IsLongNeverTaken{false};
+ bool IsRedirectionTarget{false};
+ JumpT *RedirectionTarget{nullptr};
+ JumpT *UncondJumpInSameBlock{nullptr};
+};
+
+using Jumps = std::vector<std::unique_ptr<JumpT>>;
+using JumpPtrs = std::vector<JumpT *>;
+using FlowMapTy = std::unordered_map<const BinaryBasicBlock *, uint64_t>;
+using BlockToJumpsMapTy =
+ std::unordered_map<BinaryBasicBlock *, std::vector<JumpT *>>;
+
+/// Sizes of jump instructions in bytes on X86.
+static constexpr uint8_t ShortJumpSize = 2;
+static constexpr uint8_t LongUncondJumpSize = 5;
+static constexpr uint8_t LongCondJumpSize = 6;
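+// (A short jmp/jcc uses an 8-bit displacement: opcode + rel8 = 2 bytes. A
+// long jmp is e9 + rel32 = 5 bytes; a long jcc is 0f 8x + rel32 = 6 bytes.)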
+
+/// The longest distance for any short jump on X86.
+static constexpr uint8_t ShortJumpBits = 8;
+static constexpr uint8_t ShortestJumpSpan = 1ULL << (ShortJumpBits - 1);
+
+bool isLongJump(const uint64_t JumpStartAddr, const uint64_t JumpEndAddr,
+ const bool SameFragment) {
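+ // JumpStartAddr is the address just past the jump instruction; a rel8
+ // displacement covers [-128, +127] from that point, hence the asymmetric
+ // forward/backward bounds below.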
+ if (!SameFragment)
+ return true;
+ if (JumpEndAddr > JumpStartAddr)
+ return JumpEndAddr - JumpStartAddr > ShortestJumpSpan - 1;
+ else
+ return JumpStartAddr - JumpEndAddr > ShortestJumpSpan;
+}
+
+void createJumps(BinaryFunction &Function, FunctionFragment &Fragment,
+ Jumps &JumpsInFunction, JumpPtrs &JumpsInFragment) {
+ const BinaryContext &BC = Function.getBinaryContext();
+
+ auto createJump = [&](MCInst *Branch, bool IsUnconditional,
+ BinaryBasicBlock *SourceBB, BinaryBasicBlock *TargetBB,
+ const uint8_t OffsetFromBlockEnd) {
+ const BinaryBasicBlock::BinaryBranchInfo &BI =
+ SourceBB->getBranchInfo(*TargetBB);
+ uint64_t ExecCount = 0;
+ if (BI.Count != BinaryBasicBlock::COUNT_NO_PROFILE)
+ ExecCount = BI.Count;
+
+ const uint64_t JumpEndAddr = TargetBB->getOutputStartAddress();
+ const uint64_t JumpStartAddr =
+ SourceBB->getOutputEndAddress() - OffsetFromBlockEnd;
+ const uint8_t LongJumpSize =
+ IsUnconditional ? LongUncondJumpSize : LongCondJumpSize;
+ const uint8_t JumpInstrSize =
+ isLongJump(JumpStartAddr, JumpEndAddr,
+ SourceBB->getFragmentNum() == TargetBB->getFragmentNum())
+ ? LongJumpSize
+ : ShortJumpSize;
+ return std::unique_ptr<JumpT>(new JumpT(
+ Branch, BC.MIB->getCondCode(*Branch), IsUnconditional, TargetBB,
+ ExecCount, SourceBB, JumpStartAddr - JumpInstrSize, JumpInstrSize));
+ };
+
+ for (BinaryBasicBlock *BB : Fragment) {
+ const MCSymbol *TBB = nullptr;
+ const MCSymbol *FBB = nullptr;
+ MCInst *CondBranch = nullptr;
+ MCInst *UncondBranch = nullptr;
+ BinaryBasicBlock *CondSuccessor = nullptr;
+ BinaryBasicBlock *UncondSuccessor = nullptr;
+
+ if (BB->analyzeBranch(TBB, FBB, CondBranch, UncondBranch)) {
+ if (BB->succ_size() == 1) {
+ UncondSuccessor = BB->getSuccessor();
+ if (UncondBranch != nullptr) {
+ std::unique_ptr<JumpT> Jump =
+ createJump(UncondBranch, true, BB, UncondSuccessor, 0);
+ JumpsInFragment.push_back(Jump.get());
+ JumpsInFunction.push_back(std::move(Jump));
+ }
+ } else if (BB->succ_size() == 2) {
+ assert(CondBranch != nullptr);
+ CondSuccessor = BB->getConditionalSuccessor(true);
+ UncondSuccessor = BB->getConditionalSuccessor(false);
+ std::unique_ptr<JumpT> UncondJump = nullptr;
+ std::unique_ptr<JumpT> CondJump = nullptr;
+ uint8_t UncondJumpInstrSize = 0;
+ if (UncondBranch != nullptr) {
+ UncondJump = createJump(UncondBranch, true, BB, UncondSuccessor, 0);
+ UncondJumpInstrSize = UncondJump->OriginalInstrSize;
+ }
+ if (!BC.MIB->isDynamicBranch(*CondBranch)) {
+ CondJump = createJump(CondBranch, false, BB, CondSuccessor,
+ UncondJumpInstrSize);
+ if (UncondJump != nullptr)
+ CondJump->UncondJumpInSameBlock = UncondJump.get();
+ }
+ if (CondJump != nullptr) {
+ JumpsInFragment.push_back(CondJump.get());
+ JumpsInFunction.push_back(std::move(CondJump));
+ }
+ if (UncondJump != nullptr) {
+ JumpsInFragment.push_back(UncondJump.get());
+ JumpsInFunction.push_back(std::move(UncondJump));
+ }
+ }
+ }
+ }
+}
+
+void identifyCandidates(BinaryFunction &Function, JumpPtrs &JumpsInFragment,
+ BlockToJumpsMapTy &TargetsToJumps) {
+ // Identify jumps that are long and never taken.
+ // First check if each jump is long and has zero execution count.
+ auto isLongZeroCount = [&](const JumpT &Jump) {
+ return Jump.ExecutionCount == 0 && Jump.OriginalInstrSize > ShortJumpSize;
+ };
+
+ BlockToJumpsMapTy SourcesToJumps;
+ for (JumpT *Jump : JumpsInFragment) {
+ Jump->IsLongNeverTaken = isLongZeroCount(*Jump);
+ assert(Jump->OriginalTargetBB != nullptr);
+ TargetsToJumps[Jump->OriginalTargetBB].push_back(Jump);
+ SourcesToJumps[Jump->HomeBB].push_back(Jump);
+ }
+
+ // Next identify zero-execution-count jumps that are unlikely to actually be
+ // never-taken by comparing the inflow (resp. outflow) of each basic
+ // block with its block execution count.
+ FlowMapTy IncomingMap;
+ FlowMapTy OutgoingMap;
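+ // IncomingMap[BB] holds the sum of positive-count edges into BB;
+ // OutgoingMap[BB] holds the sum of positive-count edges out of BB.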
+ for (const BinaryBasicBlock &BB : Function) {
+ auto SuccBIIter = BB.branch_info_begin();
+ for (BinaryBasicBlock *Succ : BB.successors()) {
+ const uint64_t Count = SuccBIIter->Count;
+ if (Count == BinaryBasicBlock::COUNT_NO_PROFILE || Count == 0) {
+ ++SuccBIIter;
+ continue;
+ }
+ IncomingMap[Succ] += Count;
+ OutgoingMap[&BB] += Count;
+ ++SuccBIIter;
+ }
+ }
+
+ if (!opts::AggressiveNeverTaken) {
+ for (auto &TargetToJumps : TargetsToJumps) {
+ const BinaryBasicBlock *TargetBB = TargetToJumps.first;
+ if (TargetBB->getKnownExecutionCount() == 0)
+ continue;
+ const uint64_t IncomingCount = IncomingMap[TargetBB];
+ // If there is a noticeable gap between the incoming edge counts and the
+ // BB execution count, then we do not trust the zero-execution-count
+ // incoming edges to actually be never-taken.
+ if (IncomingCount * opts::ConservativeNeverTakenThreshold <
+ TargetBB->getKnownExecutionCount()) {
+ for (JumpT *Jump : TargetToJumps.second) {
+ Jump->IsLongNeverTaken = false;
+ }
+ }
+ }
+
+ for (auto &SourceToJumps : SourcesToJumps) {
+ const BinaryBasicBlock *SourceBB = SourceToJumps.first;
+ if (SourceBB->getKnownExecutionCount() == 0)
+ continue;
+ const uint64_t OutgoingCount = OutgoingMap[SourceBB];
+ // If there is a noticeable gap between the outgoing edge counts and the
+ // BB execution count, then we do not trust the zero-execution-count
+ // outgoing edges to actually be never-taken.
+ if (OutgoingCount * opts::ConservativeNeverTakenThreshold <
+ SourceBB->getKnownExecutionCount()) {
+ for (JumpT *Jump : SourceToJumps.second) {
+ Jump->IsLongNeverTaken = false;
+ }
+ }
+ }
+ }
+}
+
+uint64_t makeRedirectionDecisions(BlockToJumpsMapTy &TargetsToJumps) {
+ uint64_t NumRedirected = 0;
+ for (auto &TargetToJumps : TargetsToJumps) {
+ std::vector<JumpT *> &Jumps = TargetToJumps.second;
+ if (Jumps.size() <= 1)
+ continue;
+ std::unordered_map<unsigned, JumpT *> MostRecentCondJumps;
+ JumpT *MostRecentUncondJump = nullptr;
+
+ // Round 1: redirect each jump to the closest candidate to its right.
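+ // Jumps are visited in reverse layout order, so MostRecentUncondJump and
+ // MostRecentCondJumps always hold the nearest candidate at a higher
+ // address than CurrJump.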
+ for (auto JumpItr = Jumps.rbegin(); JumpItr != Jumps.rend(); ++JumpItr) {
+ JumpT *CurrJump = *JumpItr;
+ if (CurrJump->IsLongNeverTaken) {
+ // Check if we can redirect CurrJump to MostRecentUncondJump.
+ if (MostRecentUncondJump != nullptr) {
+ if (!isLongJump(CurrJump->OriginalAddress + ShortJumpSize,
+ MostRecentUncondJump->OriginalAddress, true)) {
+ // Redirect CurrJump to MostRecentUncondJump if the latter is close
+ // enough.
+ CurrJump->RedirectionTarget = MostRecentUncondJump;
+ MostRecentUncondJump->IsRedirectionTarget = true;
+ NumRedirected++;
+ } else if (!CurrJump->IsUnconditional) {
+ // Otherwise, try to redirect CurrJump to the most recent
+ // conditional jump with the same condition code.
+ JumpT *MostRecentCondJump = MostRecentCondJumps[CurrJump->CC];
+ if (MostRecentCondJump != nullptr &&
+ !isLongJump(CurrJump->OriginalAddress + ShortJumpSize,
+ MostRecentCondJump->OriginalAddress, true)) {
+ CurrJump->RedirectionTarget = MostRecentCondJump;
+ MostRecentCondJump->IsRedirectionTarget = true;
+ NumRedirected++;
+ }
+ }
+ } else if (!CurrJump->IsUnconditional) {
+ // If MostRecentUncondJump does not exist and CurrJump is conditional,
+ // try to redirect CurrJump to the most recent conditional jump with
+ // the same condition code.
+ JumpT *MostRecentCondJump = MostRecentCondJumps[CurrJump->CC];
+ if (MostRecentCondJump != nullptr &&
+ !isLongJump(CurrJump->OriginalAddress + ShortJumpSize,
+ MostRecentCondJump->OriginalAddress, true)) {
+ CurrJump->RedirectionTarget = MostRecentCondJump;
+ MostRecentCondJump->IsRedirectionTarget = true;
+ NumRedirected++;
+ }
+ }
+ }
+
+ // Update most recent jump by condition.
+ if (CurrJump->IsUnconditional)
+ MostRecentUncondJump = CurrJump;
+ else
+ MostRecentCondJumps[CurrJump->CC] = CurrJump;
+ }
+
+ // Ro...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/113923