[llvm] [X86] Set up the framework for optimization of CCMP/CTEST (PR #84603)
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Wed May 8 12:16:37 PDT 2024
================
@@ -0,0 +1,887 @@
+//===-- X86ConditionalCompares.cpp --- CCMP formation for X86 ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the X86ConditionalCompares pass which reduces
+// branching by using the conditional compare instructions CCMP, CTEST.
+//
+// The CFG transformations for forming conditional compares are very similar to
+// if-conversion, and this pass should run immediately before the early
+// if-conversion pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-ccmp"
+
+// Absolute maximum number of instructions allowed per speculated block.
+// This bypasses all other heuristics, so it should be set fairly high.
+static cl::opt<unsigned> BlockInstrLimit(
+ "x86-ccmp-limit", cl::init(30), cl::Hidden,
+ cl::desc("Maximum number of instructions per speculated block."));
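+// Usage sketch (assuming the pass is enabled and the APX conditional-compare
+// feature flag is spelled +ccmp):
+//   llc -mtriple=x86_64 -mattr=+ccmp -x86-ccmp-limit=10 foo.ll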
+
+STATISTIC(NumConsidered, "Number of ccmps considered");
+STATISTIC(NumPhiRejs, "Number of ccmps rejected (PHI)");
+STATISTIC(NumPhysRejs, "Number of ccmps rejected (Physregs)");
+STATISTIC(NumPhi2Rejs, "Number of ccmps rejected (PHI2)");
+STATISTIC(NumHeadBranchRejs, "Number of ccmps rejected (Head branch)");
+STATISTIC(NumCmpBranchRejs, "Number of ccmps rejected (CmpBB branch)");
+STATISTIC(NumCmpTermRejs, "Number of ccmps rejected (CmpBB terminator doesn't read flags)");
+STATISTIC(NumMultEFLAGSUses, "Number of ccmps rejected (EFLAGS used)");
+STATISTIC(NumUnknEFLAGSDefs, "Number of ccmps rejected (EFLAGS def unknown)");
+STATISTIC(NumSpeculateRejs, "Number of ccmps rejected (Can't speculate)");
+STATISTIC(NumConverted, "Number of ccmp instructions created");
+
+//===----------------------------------------------------------------------===//
+// SSACCmpConv
+//===----------------------------------------------------------------------===//
+//
+// The SSACCmpConv class performs ccmp-conversion on SSA form machine code
+// after determining if it is possible. The class contains no heuristics;
+// external code should be used to determine when ccmp-conversion is a good
+// idea.
+//
+// CCmp-formation works on a CFG representing chained conditions, typically
+// from C's short-circuit || and && operators:
+//
+//   From:  Head              To:  Head
+//           / |                   CmpBB
+//          /  |                    /  |
+//         |  CmpBB                /   |
+//         |  / |                Tail  |
+//         | /  |                 |    |
+//         Tail |                 |    |
+//          |   |                 |    |
+//         ... ...               ...  ...
+//
+// The Head block is terminated by a conditional branch, and the CmpBB block
+// contains a compare + conditional branch. Tail must be a successor of both.
+//
+// The cmp-conversion turns the compare instruction in CmpBB into a conditional
+// compare, and merges CmpBB into Head, speculatively executing its
+// instructions. The X86 conditional compare instructions have an operand (dfv)
+// that specifies the flag values to set when the condition is false and the
+// compare isn't executed. This makes it possible to chain compares with
+// different condition codes.
+//
+// Example:
+//
+// void f(int a, int b) {
+// if (a == 5 || b == 17)
+// foo();
+// }
+//
+//    Head:
+//       cmpl  $5, %edi
+//       je Tail
+//    CmpBB:
+//       cmpl  $17, %esi
+//       je Tail
+//    ...
+//    Tail:
+//      call foo
+//
+// Becomes:
+//
+//    Head:
+//       cmpl   $5, %edi
+//       ccmpel {dfv=zf} $17, %esi
+//       je Tail
+//    ...
+//    Tail:
+//      call foo
+//
+// The ccmp condition code is the one that would cause the Head terminator to
+// branch to CmpBB.
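+//
+// Roughly, the conditional compare behaves as follows (a simplified sketch of
+// the APX semantics; see the ISA reference for the precise definition):
+//
+//   if (scc holds on the current EFLAGS)
+//     EFLAGS = flags of (src1 - src2)   // behaves like a regular CMP
+//   else
+//     OF/SF/ZF/CF = dfv                 // the "default flag values" operand
+//
+// so a cmp/ccmp/jcc chain can evaluate both conditions without a branch.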
+
+namespace {
+class SSACCmpConv {
+ MachineFunction *MF;
+ const X86Subtarget *STI;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+ const MachineBranchProbabilityInfo *MBPI;
+
+public:
+ /// The first block containing a conditional branch, dominating everything
+ /// else.
+ MachineBasicBlock *Head;
+
+ /// The block containing cmp+br.cond with a successor shared with Head.
+ MachineBasicBlock *CmpBB;
+
+ /// The common successor for Head and CmpBB.
+ MachineBasicBlock *Tail;
+
+ /// The compare instruction in CmpBB that can be converted to a ccmp.
+ MachineInstr *CmpMI;
+
+private:
+ /// The branch condition in Head as determined by analyzeBranch.
+ SmallVector<MachineOperand, 4> HeadCond;
+
+ /// The condition code that makes Head branch to CmpBB.
+ X86::CondCode HeadCmpBBCC;
+
+ /// The branch condition in CmpBB.
+ SmallVector<MachineOperand, 4> CmpBBCond;
+
+ /// The condition code that makes CmpBB branch to Tail.
+ X86::CondCode CmpBBTailCC;
+
+ /// Check if the Tail PHIs are trivially convertible.
+ bool trivialTailPHIs();
+
+ /// Remove CmpBB from the Tail PHIs.
+ void updateTailPHIs();
+
+ /// Check if an operand defining DstReg is dead.
+ bool isDeadDef(unsigned DstReg);
+
+ /// Find the compare instruction in MBB that controls the conditional branch.
+ /// Return NULL if a convertible instruction can't be found.
+ MachineInstr *findConvertibleCompare(MachineBasicBlock *MBB);
+
+ /// Return true if all non-terminator instructions in MBB can be safely
+ /// speculated.
+ bool canSpeculateInstrs(MachineBasicBlock *MBB, const MachineInstr *CmpMI);
+
+public:
+ /// runOnMachineFunction - Initialize per-function data structures.
+ void runOnMachineFunction(MachineFunction &MF,
+ const MachineBranchProbabilityInfo *MBPI) {
+ this->MF = &MF;
+ this->MBPI = MBPI;
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ }
+
+ /// If the sub-CFG headed by MBB can be cmp-converted, initialize the
+ /// internal state, and return true.
+ bool canConvert(MachineBasicBlock *MBB);
+
+  /// Cmp-convert the last block passed to canConvert(), assuming
+  /// it is possible. Add any erased blocks to RemovedBlocks.
+ void convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks);
+};
+} // end anonymous namespace
+
+// Check that all PHIs in Tail are selecting the same value from Head and CmpBB.
+// This means that no if-conversion is required when merging CmpBB into Head.
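+//
+// Illustrative MIR-like sketch: a PHI such as
+//   %x = PHI %v, %Head, %v, %CmpBB
+// is trivial (same value from both predecessors), whereas
+//   %x = PHI %a, %Head, %b, %CmpBB
+// would require real if-conversion and makes the transform fail here.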
+bool SSACCmpConv::trivialTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ unsigned HeadReg = 0, CmpBBReg = 0;
+ // PHI operands come in (VReg, MBB) pairs.
+ for (unsigned oi = 1, oe = I.getNumOperands(); oi != oe; oi += 2) {
+ MachineBasicBlock *MBB = I.getOperand(oi + 1).getMBB();
+ Register Reg = I.getOperand(oi).getReg();
+ if (MBB == Head) {
+ assert((!HeadReg || HeadReg == Reg) && "Inconsistent PHI operands");
+ HeadReg = Reg;
+ }
+ if (MBB == CmpBB) {
+ assert((!CmpBBReg || CmpBBReg == Reg) && "Inconsistent PHI operands");
+ CmpBBReg = Reg;
+ }
+ }
+ if (HeadReg != CmpBBReg)
+ return false;
+ }
+ return true;
+}
+
+// Assuming that trivialTailPHIs() is true, update the Tail PHIs by simply
+// removing the CmpBB operands. The Head operands will be identical.
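+// For example (sketch), %x = PHI %v, %Head, %v, %CmpBB becomes
+// %x = PHI %v, %Head once the CmpBB pair is dropped.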
+void SSACCmpConv::updateTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ // I is a PHI. It can have multiple entries for CmpBB.
+ for (unsigned Idx = I.getNumOperands(); Idx > 2; Idx -= 2) {
+ // PHI operands are (Reg, MBB) at (Idx-2, Idx-1).
+ if (I.getOperand(Idx - 1).getMBB() == CmpBB) {
+ I.removeOperand(Idx - 1);
+ I.removeOperand(Idx - 2);
+ }
+ }
+ }
+}
+
+bool SSACCmpConv::isDeadDef(unsigned DstReg) {
+ if (!Register::isVirtualRegister(DstReg))
+ return false;
+  // A virtual register def without any uses will be marked dead later and can
+  // safely be discarded.
+ return MRI->use_nodbg_empty(DstReg);
+}
+
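+// Shape of a CmpBB accepted below (illustrative sketch in MIR-like form; the
+// block reference and operand details are simplified):
+//
+//   CmpBB:
+//     %d:gr32 = SUB32ri %x:gr32, 17, implicit-def $eflags   ; %d has no uses
+//     JCC_1 %bb.2, 4 /* COND_E */, implicit $eflags
+//
+// The SUB/CMP/TEST that directly feeds the flag-reading terminator, with no
+// other EFLAGS reads or clobbers in between, is the convertible compare.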
+MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator I = MBB->getFirstTerminator();
+ if (I == MBB->end())
+ return nullptr;
+ // The terminator must be controlled by the flags.
+ if (!I->readsRegister(X86::EFLAGS)) {
+ ++NumCmpTermRejs;
+ LLVM_DEBUG(dbgs() << "Flags not used by terminator: " << *I);
+ return nullptr;
+ }
+
+ // Now find the instruction controlling the terminator.
+ for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
+ I = prev_nodbg(I, MBB->begin());
+ assert(!I->isTerminator() && "Spurious terminator");
+
+ switch (I->getOpcode()) {
+    // This pass runs before peephole optimization, so a SUB whose result is
+    // unused has not been optimized into a CMP yet.
+ case X86::SUB8rr:
+ case X86::SUB16rr:
+ case X86::SUB32rr:
+ case X86::SUB64rr:
+ case X86::SUB8ri:
+ case X86::SUB16ri:
+ case X86::SUB32ri:
+ case X86::SUB64ri32:
+ case X86::SUB8rr_ND:
+ case X86::SUB16rr_ND:
+ case X86::SUB32rr_ND:
+ case X86::SUB64rr_ND:
+ case X86::SUB8ri_ND:
+ case X86::SUB16ri_ND:
+ case X86::SUB32ri_ND:
+ case X86::SUB64ri32_ND: {
+ if (!isDeadDef(I->getOperand(0).getReg()))
+ return nullptr;
+ return STI->hasCCMP() ? &*I : nullptr;
+ }
+ case X86::CMP8rr:
+ case X86::CMP16rr:
+ case X86::CMP32rr:
+ case X86::CMP64rr:
+ case X86::CMP8ri:
+ case X86::CMP16ri:
+ case X86::CMP32ri:
+ case X86::CMP64ri32:
+ case X86::TEST8rr:
+ case X86::TEST16rr:
+ case X86::TEST32rr:
+ case X86::TEST64rr:
+ case X86::TEST8ri:
+ case X86::TEST16ri:
+ case X86::TEST32ri:
+ case X86::TEST64ri32:
+ return STI->hasCCMP() ? &*I : nullptr;
+ default:
+ break;
+ }
+
+ // Check for flag reads and clobbers.
+ PhysRegInfo PRI = AnalyzePhysRegInBundle(*I, X86::EFLAGS, TRI);
+
+ if (PRI.Read) {
+ // The ccmp doesn't produce exactly the same flags as the original
+ // compare, so reject the transform if there are uses of the flags
+ // besides the terminators.
+ LLVM_DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
+ ++NumMultEFLAGSUses;
+ return nullptr;
+ }
+
+ if (PRI.Defined || PRI.Clobbered) {
+ LLVM_DEBUG(dbgs() << "Not convertible compare: " << *I);
+ ++NumUnknEFLAGSDefs;
+ return nullptr;
+ }
+ }
+ LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
+ << '\n');
+ return nullptr;
+}
+
+/// Determine if all the instructions in MBB can safely
+/// be speculated. The terminators are not considered.
+///
+/// Only CmpMI is allowed to clobber the flags.
+///
+bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
+ const MachineInstr *CmpMI) {
+ // Reject any live-in physregs. It's very hard to get right.
+ if (!MBB->livein_empty()) {
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
+ return false;
+ }
+
+ unsigned InstrCount = 0;
+
+ // Check all instructions, except the terminators. It is assumed that
+ // terminators never have side effects or define any used register values.
+ for (auto &I : make_range(MBB->begin(), MBB->getFirstTerminator())) {
+ if (I.isDebugInstr())
+ continue;
+
+ if (++InstrCount > BlockInstrLimit) {
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
+ << BlockInstrLimit << " instructions.\n");
+ return false;
+ }
+
+ // There shouldn't normally be any phis in a single-predecessor block.
+ if (I.isPHI()) {
+ LLVM_DEBUG(dbgs() << "Can't hoist: " << I);
+ return false;
+ }
+
+ // Don't speculate loads. Note that it may be possible and desirable to
+ // speculate GOT or constant pool loads that are guaranteed not to trap,
+ // but we don't support that for now.
+ if (I.mayLoad()) {
+ LLVM_DEBUG(dbgs() << "Won't speculate load: " << I);
+ return false;
+ }
+
+ // We never speculate stores, so an AA pointer isn't necessary.
+ bool DontMoveAcrossStore = true;
+ if (!I.isSafeToMove(nullptr, DontMoveAcrossStore)) {
+ LLVM_DEBUG(dbgs() << "Can't speculate: " << I);
+ return false;
+ }
+
+ // Only CmpMI is allowed to clobber the flags.
+ if (&I != CmpMI && I.modifiesRegister(X86::EFLAGS, TRI)) {
----------------
KanRobert wrote:
It's already considered in `MachineInstr::isSafeToMove`
https://github.com/llvm/llvm-project/pull/84603