[llvm] [X86] Set up the framework for optimization of CCMP/CTEST (PR #84603)
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 8 21:02:22 PST 2024
https://github.com/KanRobert created https://github.com/llvm/llvm-project/pull/84603
SPEC: https://cdrdv2.intel.com/v1/dl/getContent/784266
Blog: https://kanrobert.github.io/rfc/All-about-APX-conditional-ISA
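For illustration, this is the kind of short-circuit condition chain the new pass
targets (mirroring the example in the header comment of the added file; the
register assignment and branch layout below are illustrative, see the
autogenerated checks in llvm/test/CodeGen/X86/apx/ccmp.ll and ctest.ll):

  void foo();
  // The two compares produced by the short-circuit || are currently lowered to
  // two separate compare+branch blocks.
  void f(int a, int b) {
    if (a == 5 || b == 17)
      foo();
  }
  // With -mattr=+ccmp the second compare can be folded into the first block as
  // a conditional compare, removing one branch:
  //   cmpl    $5, %edi
  //   ccmpnel {dfv=zf} $17, %esi   # runs only if a != 5, otherwise forces ZF=1
  //   je      <foo block>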
>From cd0b74c1188217a5d3e1325c32b5c7aa20f614f5 Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sat, 9 Mar 2024 12:23:47 +0800
Subject: [PATCH] [X86] Set up the framework for optimization of CCMP/CTEST
SPEC: https://cdrdv2.intel.com/v1/dl/getContent/784266
Blog: https://kanrobert.github.io/rfc/All-about-APX-conditional-ISA
---
llvm/lib/Target/X86/CMakeLists.txt | 1 +
llvm/lib/Target/X86/X86.h | 4 +
.../lib/Target/X86/X86ConditionalCompares.cpp | 887 ++++++++++++
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +-
llvm/lib/Target/X86/X86TargetMachine.cpp | 2 +
llvm/test/CodeGen/X86/apx/ccmp.ll | 1116 +++++++++++++++
llvm/test/CodeGen/X86/apx/ctest.ll | 1212 +++++++++++++++++
llvm/test/CodeGen/X86/opt-pipeline.ll | 1 +
8 files changed, 3226 insertions(+), 1 deletion(-)
create mode 100644 llvm/lib/Target/X86/X86ConditionalCompares.cpp
create mode 100644 llvm/test/CodeGen/X86/apx/ccmp.ll
create mode 100644 llvm/test/CodeGen/X86/apx/ctest.ll
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 610999f0cc3cf0..17fd0a004147aa 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -30,6 +30,7 @@ set(sources
X86CallingConv.cpp
X86CmovConversion.cpp
X86CodeGenPassBuilder.cpp
+ X86ConditionalCompares.cpp
X86DomainReassignment.cpp
X86DiscriminateMemOps.cpp
X86LowerTileCopy.cpp
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index 21623a805f5568..042704355704ac 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -157,6 +157,9 @@ FunctionPass *createX86InsertX87waitPass();
/// ways.
FunctionPass *createX86PartialReductionPass();
+/// This pass performs the CCMP/CTEST optimization.
+FunctionPass *createX86ConditionalCompares();
+
InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
X86Subtarget &,
X86RegisterBankInfo &);
@@ -194,6 +197,7 @@ void initializeX86LowerAMXTypeLegacyPassPass(PassRegistry &);
void initializeX86LowerTileCopyPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86PartialReductionPass(PassRegistry &);
+void initializeX86ConditionalComparesPass(PassRegistry &);
void initializeX86PreTileConfigPass(PassRegistry &);
void initializeX86ReturnThunksPass(PassRegistry &);
void initializeX86SpeculativeExecutionSideEffectSuppressionPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86ConditionalCompares.cpp b/llvm/lib/Target/X86/X86ConditionalCompares.cpp
new file mode 100644
index 00000000000000..3e7c9d0e3d48f0
--- /dev/null
+++ b/llvm/lib/Target/X86/X86ConditionalCompares.cpp
@@ -0,0 +1,887 @@
+//===-- X86ConditionalCompares.cpp --- CCMP formation for X86 --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the X86ConditionalCompares pass which reduces
+// branching by using the conditional compare instructions CCMP, CTEST.
+//
+// The CFG transformations for forming conditional compares are very similar to
+// if-conversion, and this pass should run immediately before the early
+// if-conversion pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineTraceMetrics.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-ccmp"
+
+// Absolute maximum number of instructions allowed per speculated block.
+// This bypasses all other heuristics, so it should be set fairly high.
+static cl::opt<unsigned> BlockInstrLimit(
+ "x86-ccmp-limit", cl::init(30), cl::Hidden,
+ cl::desc("Maximum number of instructions per speculated block."));
+
+STATISTIC(NumConsidered, "Number of ccmps considered");
+STATISTIC(NumPhiRejs, "Number of ccmps rejected (PHI)");
+STATISTIC(NumPhysRejs, "Number of ccmps rejected (Physregs)");
+STATISTIC(NumPhi2Rejs, "Number of ccmps rejected (PHI2)");
+STATISTIC(NumHeadBranchRejs, "Number of ccmps rejected (Head branch)");
+STATISTIC(NumCmpBranchRejs, "Number of ccmps rejected (CmpBB branch)");
+STATISTIC(NumCmpTermRejs, "Number of ccmps rejected (CmpBB terminator doesn't read flags)");
+STATISTIC(NumMultEFLAGSUses, "Number of ccmps rejected (EFLAGS used)");
+STATISTIC(NumUnknEFLAGSDefs, "Number of ccmps rejected (EFLAGS def unknown)");
+STATISTIC(NumSpeculateRejs, "Number of ccmps rejected (Can't speculate)");
+STATISTIC(NumConverted, "Number of ccmp instructions created");
+
+//===----------------------------------------------------------------------===//
+// SSACCmpConv
+//===----------------------------------------------------------------------===//
+//
+// The SSACCmpConv class performs ccmp-conversion on SSA form machine code
+// after determining if it is possible. The class contains no heuristics;
+// external code should be used to determine when ccmp-conversion is a good
+// idea.
+//
+// CCmp-formation works on a CFG representing chained conditions, typically
+// from C's short-circuit || and && operators:
+//
+//   From:       Head             To:       Head
+//              /    |                      CmpBB
+//             /     |                     /     |
+//            |    CmpBB                  /      |
+//            |    /   |                Tail     |
+//            |   /    |                  |      |
+//           Tail      |                  |      |
+//            |        |                  |      |
+//           ...      ...                ...    ...
+//
+// The Head block is terminated by a conditional branch (Jcc), and the CmpBB
+// block contains a compare plus a conditional branch. Tail must be a successor
+// of both.
+//
+// The cmp-conversion turns the compare instruction in CmpBB into a conditional
+// compare, and merges CmpBB into Head, speculatively executing its
+// instructions. The X86 conditional compare instructions have an operand that
+// specifies the flag values to set when their condition is false and the
+// compare isn't executed. This makes it possible to chain compares with
+// different condition codes.
+//
+// Example:
+//
+// void f(int a, int b) {
+// if (a == 5 || b == 17)
+// foo();
+// }
+//
+// Head:
+//    cmpl $5, %edi
+//    je Tail
+// CmpBB:
+//    cmpl $17, %esi
+//    je Tail
+// ...
+// Tail:
+//    call foo
+//
+// Becomes:
+//
+// Head:
+//    cmpl $5, %edi
+//    ccmpnel {dfv=zf} $17, %esi
+//    je Tail
+// ...
+// Tail:
+//    call foo
+//
+// The ccmp condition code is the one that would cause the Head terminator to
+// branch to CmpBB.
+
+namespace {
+class SSACCmpConv {
+ MachineFunction *MF;
+ const X86Subtarget *STI;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+ const MachineBranchProbabilityInfo *MBPI;
+
+public:
+ /// The first block containing a conditional branch, dominating everything
+ /// else.
+ MachineBasicBlock *Head;
+
+ /// The block containing cmp+br.cond with a successor shared with Head.
+ MachineBasicBlock *CmpBB;
+
+ /// The common successor for Head and CmpBB.
+ MachineBasicBlock *Tail;
+
+ /// The compare instruction in CmpBB that can be converted to a ccmp.
+ MachineInstr *CmpMI;
+
+private:
+ /// The branch condition in Head as determined by analyzeBranch.
+ SmallVector<MachineOperand, 4> HeadCond;
+
+ /// The condition code that makes Head branch to CmpBB.
+ X86::CondCode HeadCmpBBCC;
+
+ /// The branch condition in CmpBB.
+ SmallVector<MachineOperand, 4> CmpBBCond;
+
+ /// The condition code that makes CmpBB branch to Tail.
+ X86::CondCode CmpBBTailCC;
+
+ /// Check if the Tail PHIs are trivially convertible.
+ bool trivialTailPHIs();
+
+ /// Remove CmpBB from the Tail PHIs.
+ void updateTailPHIs();
+
+ /// Check if an operand defining DstReg is dead.
+ bool isDeadDef(unsigned DstReg);
+
+ /// Find the compare instruction in MBB that controls the conditional branch.
+ /// Return NULL if a convertible instruction can't be found.
+ MachineInstr *findConvertibleCompare(MachineBasicBlock *MBB);
+
+ /// Return true if all non-terminator instructions in MBB can be safely
+ /// speculated.
+ bool canSpeculateInstrs(MachineBasicBlock *MBB, const MachineInstr *CmpMI);
+
+public:
+ /// runOnMachineFunction - Initialize per-function data structures.
+ void runOnMachineFunction(MachineFunction &MF,
+ const MachineBranchProbabilityInfo *MBPI) {
+ this->MF = &MF;
+ this->MBPI = MBPI;
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ }
+
+ /// If the sub-CFG headed by MBB can be cmp-converted, initialize the
+ /// internal state, and return true.
+ bool canConvert(MachineBasicBlock *MBB);
+
+ /// Cmp-convert the last block passed to canConvert(), assuming
+ /// it is possible. Add any erased blocks to RemovedBlocks.
+ void convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks);
+};
+} // end anonymous namespace
+
+// Check that all PHIs in Tail are selecting the same value from Head and CmpBB.
+// This means that no if-conversion is required when merging CmpBB into Head.
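+// For example (in pseudo notation), a Tail PHI like
+//   %x = PHI [ %v, Head ], [ %v, CmpBB ]
+// is trivial because both incoming values are %v; differing incoming values
+// would require inserting selects after the merge, which is not handled yet
+// (see the FIXME in canConvert()).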
+bool SSACCmpConv::trivialTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ unsigned HeadReg = 0, CmpBBReg = 0;
+ // PHI operands come in (VReg, MBB) pairs.
+ for (unsigned oi = 1, oe = I.getNumOperands(); oi != oe; oi += 2) {
+ MachineBasicBlock *MBB = I.getOperand(oi + 1).getMBB();
+ Register Reg = I.getOperand(oi).getReg();
+ if (MBB == Head) {
+ assert((!HeadReg || HeadReg == Reg) && "Inconsistent PHI operands");
+ HeadReg = Reg;
+ }
+ if (MBB == CmpBB) {
+ assert((!CmpBBReg || CmpBBReg == Reg) && "Inconsistent PHI operands");
+ CmpBBReg = Reg;
+ }
+ }
+ if (HeadReg != CmpBBReg)
+ return false;
+ }
+ return true;
+}
+
+// Assuming that trivialTailPHIs() is true, update the Tail PHIs by simply
+// removing the CmpBB operands. The Head operands will be identical.
+void SSACCmpConv::updateTailPHIs() {
+ for (auto &I : *Tail) {
+ if (!I.isPHI())
+ break;
+ // I is a PHI. It can have multiple entries for CmpBB.
+ for (unsigned Idx = I.getNumOperands(); Idx > 2; Idx -= 2) {
+ // PHI operands are (Reg, MBB) at (Idx-2, Idx-1).
+ if (I.getOperand(Idx - 1).getMBB() == CmpBB) {
+ I.removeOperand(Idx - 1);
+ I.removeOperand(Idx - 2);
+ }
+ }
+ }
+}
+
+bool SSACCmpConv::isDeadDef(unsigned DstReg) {
+ if (!Register::isVirtualRegister(DstReg))
+ return false;
+ // A virtual register def without any uses will be marked dead later, and
+ // the defining instruction can then be removed.
+ return MRI->use_nodbg_empty(DstReg);
+}
+
+MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator I = MBB->getFirstTerminator();
+ if (I == MBB->end())
+ return nullptr;
+ // The terminator must be controlled by the flags.
+ if (!I->readsRegister(X86::EFLAGS)) {
+ ++NumCmpTermRejs;
+ LLVM_DEBUG(dbgs() << "Flags not used by terminator: " << *I);
+ return nullptr;
+ }
+
+ // Now find the instruction controlling the terminator.
+ for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
+ I = prev_nodbg(I, MBB->begin());
+ assert(!I->isTerminator() && "Spurious terminator");
+
+ switch (I->getOpcode()) {
+ // This pass runs before the peephole optimization pass, so the SUB has not
+ // been optimized to CMP yet.
+ case X86::SUB8rr:
+ case X86::SUB16rr:
+ case X86::SUB32rr:
+ case X86::SUB64rr:
+ case X86::SUB8ri:
+ case X86::SUB16ri:
+ case X86::SUB32ri:
+ case X86::SUB64ri32:
+ case X86::SUB8rr_ND:
+ case X86::SUB16rr_ND:
+ case X86::SUB32rr_ND:
+ case X86::SUB64rr_ND:
+ case X86::SUB8ri_ND:
+ case X86::SUB16ri_ND:
+ case X86::SUB32ri_ND:
+ case X86::SUB64ri32_ND: {
+ if (!isDeadDef(I->getOperand(0).getReg()))
+ return nullptr;
+ return STI->hasCCMP() ? &*I : nullptr;
+ }
+ case X86::CMP8rr:
+ case X86::CMP16rr:
+ case X86::CMP32rr:
+ case X86::CMP64rr:
+ case X86::CMP8ri:
+ case X86::CMP16ri:
+ case X86::CMP32ri:
+ case X86::CMP64ri32:
+ case X86::TEST8rr:
+ case X86::TEST16rr:
+ case X86::TEST32rr:
+ case X86::TEST64rr:
+ case X86::TEST8ri:
+ case X86::TEST16ri:
+ case X86::TEST32ri:
+ case X86::TEST64ri32:
+ return STI->hasCCMP() ? &*I : nullptr;
+ default:
+ break;
+ }
+
+ // Check for flag reads and clobbers.
+ PhysRegInfo PRI = AnalyzePhysRegInBundle(*I, X86::EFLAGS, TRI);
+
+ if (PRI.Read) {
+ // The ccmp doesn't produce exactly the same flags as the original
+ // compare, so reject the transform if there are uses of the flags
+ // besides the terminators.
+ LLVM_DEBUG(dbgs() << "Can't create ccmp with multiple uses: " << *I);
+ ++NumMultEFLAGSUses;
+ return nullptr;
+ }
+
+ if (PRI.Defined || PRI.Clobbered) {
+ LLVM_DEBUG(dbgs() << "Not convertible compare: " << *I);
+ ++NumUnknEFLAGSDefs;
+ return nullptr;
+ }
+ }
+ LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
+ << '\n');
+ return nullptr;
+}
+
+/// Determine if all the instructions in MBB can safely
+/// be speculated. The terminators are not considered.
+///
+/// Only CmpMI is allowed to clobber the flags.
+///
+bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
+ const MachineInstr *CmpMI) {
+ // Reject any live-in physregs. It's very hard to get right.
+ if (!MBB->livein_empty()) {
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n");
+ return false;
+ }
+
+ unsigned InstrCount = 0;
+
+ // Check all instructions, except the terminators. It is assumed that
+ // terminators never have side effects or define any used register values.
+ for (auto &I : make_range(MBB->begin(), MBB->getFirstTerminator())) {
+ if (I.isDebugInstr())
+ continue;
+
+ if (++InstrCount > BlockInstrLimit) {
+ LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has more than "
+ << BlockInstrLimit << " instructions.\n");
+ return false;
+ }
+
+ // There shouldn't normally be any phis in a single-predecessor block.
+ if (I.isPHI()) {
+ LLVM_DEBUG(dbgs() << "Can't hoist: " << I);
+ return false;
+ }
+
+ // Don't speculate loads. Note that it may be possible and desirable to
+ // speculate GOT or constant pool loads that are guaranteed not to trap,
+ // but we don't support that for now.
+ if (I.mayLoad()) {
+ LLVM_DEBUG(dbgs() << "Won't speculate load: " << I);
+ return false;
+ }
+
+ // We never speculate stores, so an AA pointer isn't necessary.
+ bool DontMoveAcrossStore = true;
+ if (!I.isSafeToMove(nullptr, DontMoveAcrossStore)) {
+ LLVM_DEBUG(dbgs() << "Can't speculate: " << I);
+ return false;
+ }
+
+ // Only CmpMI is allowed to clobber the flags.
+ if (&I != CmpMI && I.modifiesRegister(X86::EFLAGS, TRI)) {
+ LLVM_DEBUG(dbgs() << "Clobbers flags: " << I);
+ return false;
+ }
+ }
+ return true;
+}
+
+// Parse a condition code returned by analyzeBranch, and compute the CondCode
+// corresponding to TBB.
+static bool parseCond(ArrayRef<MachineOperand> Cond, X86::CondCode &CC,
+ ArrayRef<X86::CondCode> UnsupportedCCs = {}) {
+ if (Cond.size() != 1)
+ return false;
+
+ CC = static_cast<X86::CondCode>(Cond[0].getImm());
+
+ for (const auto &UnsupportedCC : UnsupportedCCs) {
+ if (CC == UnsupportedCC)
+ return false;
+ }
+
+ return CC != X86::COND_INVALID;
+}
+
+static unsigned getNumOfJcc(const MachineBasicBlock *MBB) {
+ unsigned NumOfJcc = 0;
+ for (auto It = MBB->rbegin(); It != MBB->rend(); ++It) {
+ if (!It->isTerminator())
+ return NumOfJcc;
+ if (It->getOpcode() == X86::JCC_1)
+ ++NumOfJcc;
+ }
+ return NumOfJcc;
+}
+
+/// Analyze the sub-cfg rooted in MBB, and return true if it is a potential
+/// candidate for cmp-conversion. Fill out the internal state.
+///
+bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
+ Head = MBB;
+ Tail = CmpBB = nullptr;
+
+ if (Head->succ_size() != 2)
+ return false;
+ MachineBasicBlock *Succ0 = Head->succ_begin()[0];
+ MachineBasicBlock *Succ1 = Head->succ_begin()[1];
+
+ // CmpBB can only have a single predecessor. Tail is allowed many.
+ if (Succ0->pred_size() != 1)
+ std::swap(Succ0, Succ1);
+
+ // Succ0 is our candidate for CmpBB.
+ if (Succ0->pred_size() != 1 || Succ0->succ_size() != 2)
+ return false;
+
+ CmpBB = Succ0;
+ Tail = Succ1;
+
+ if (!CmpBB->isSuccessor(Tail))
+ return false;
+
+ // The CFG topology checks out.
+ LLVM_DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> "
+ << printMBBReference(*CmpBB) << " -> "
+ << printMBBReference(*Tail) << '\n');
+ ++NumConsidered;
+
+ // Tail is allowed to have many predecessors, but we can't handle PHIs yet.
+ //
+ // FIXME: Real PHIs could be if-converted as long as the CmpBB values are
+ // defined before the CmpBB cmp clobbers the flags. Alternatively, it should
+ // always be safe to sink the ccmp down to immediately before the CmpBB
+ // terminators.
+ if (!trivialTailPHIs()) {
+ LLVM_DEBUG(dbgs() << "Can't handle phis in Tail.\n");
+ ++NumPhiRejs;
+ return false;
+ }
+
+ if (!Tail->livein_empty()) {
+ LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in Tail.\n");
+ ++NumPhysRejs;
+ return false;
+ }
+
+ // CmpBB should never have PHIs since Head is its only predecessor.
+ // FIXME: Clean them up if it happens.
+ if (!CmpBB->empty() && CmpBB->front().isPHI()) {
+ LLVM_DEBUG(dbgs() << "Can't handle phis in CmpBB.\n");
+ ++NumPhi2Rejs;
+ return false;
+ }
+
+ if (!CmpBB->livein_empty()) {
+ LLVM_DEBUG(dbgs() << "Can't handle live-in physregs in CmpBB.\n");
+ ++NumPhysRejs;
+ return false;
+ }
+
+ // The branch we're looking to eliminate must be analyzable.
+ HeadCond.clear();
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ if (TII->analyzeBranch(*Head, TBB, FBB, HeadCond)) {
+ LLVM_DEBUG(dbgs() << "Head branch not analyzable.\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ // CCMP/CTEST overwrites all bits of EFLAGS, so give up if Head ends with
+ // more than one Jcc reading them.
+ unsigned NumOfJcc = getNumOfJcc(Head);
+ if (NumOfJcc > 1) {
+ LLVM_DEBUG(dbgs() << "More than one Jcc in Head.\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ // This is weird, probably some sort of degenerate CFG, or an edge to a
+ // landing pad.
+ if (!TBB || HeadCond.empty()) {
+ LLVM_DEBUG(
+ dbgs() << "analyzeBranch didn't find conditional branch in Head.\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ if (!parseCond(HeadCond, HeadCmpBBCC, {X86::COND_P, X86::COND_NP})) {
+ LLVM_DEBUG(dbgs() << "Unsupported branch type on Head\n");
+ ++NumHeadBranchRejs;
+ return false;
+ }
+
+ // Make sure the branch direction is right.
+ if (TBB != CmpBB) {
+ assert(TBB == Tail && "Unexpected TBB");
+ HeadCmpBBCC = X86::GetOppositeBranchCondition(HeadCmpBBCC);
+ }
+
+ CmpBBCond.clear();
+ TBB = FBB = nullptr;
+ if (TII->analyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
+ LLVM_DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (!TBB || CmpBBCond.empty()) {
+ LLVM_DEBUG(
+ dbgs() << "analyzeBranch didn't find conditional branch in CmpBB.\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (!parseCond(CmpBBCond, CmpBBTailCC)) {
+ LLVM_DEBUG(dbgs() << "Unsupported branch type on CmpBB\n");
+ ++NumCmpBranchRejs;
+ return false;
+ }
+
+ if (TBB != Tail)
+ CmpBBTailCC = X86::GetOppositeBranchCondition(CmpBBTailCC);
+
+ CmpMI = findConvertibleCompare(CmpBB);
+ if (!CmpMI)
+ return false;
+
+ if (!canSpeculateInstrs(CmpBB, CmpMI)) {
+ ++NumSpeculateRejs;
+ return false;
+ }
+
+ return true;
+}
+
+static int getCondFlagsFromCondCode(X86::CondCode CC) {
+ // CCMP/CTEST has two conditional operands:
+ // - SCC: source conditional code (same as CMOV)
+ // - DCF: destination conditional flags, which have 4 valid bits
+ //
+ // +----+----+----+----+
+ // | OF | SF | ZF | CF |
+ // +----+----+----+----+
+ //
+ // If SCC (the source conditional code) evaluates to false, CCMP/CTEST
+ // updates the conditional flags as follows:
+ //
+ // OF = DCF.OF
+ // SF = DCF.SF
+ // ZF = DCF.ZF
+ // CF = DCF.CF
+ // PF = DCF.CF
+ // AF = 0 (Auxiliary Carry Flag)
+ //
+ // Otherwise, the CMP or TEST is executed and it updates the
+ // CSPAZO flags normally.
+ //
+ // NOTE:
+ // If SCC = P, then SCC evaluates to true regardless of the CSPAZO value.
+ // If SCC = NP, then SCC evaluates to false regardless of the CSPAZO value.
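+ //
+ // Worked example: for CmpBBTailCC = COND_E we return ZF, so that when SCC is
+ // false the skipped compare still leaves ZF = 1 and a following JE to Tail is
+ // taken, exactly as if Head had branched to Tail directly. For COND_NE we
+ // return no flags, since ZF = 0 after the DCF update already makes NE true.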
+ enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
+ int Flags = 0;
+ switch (CC) {
+ default:
+ llvm_unreachable("Illegal condition code!");
+ case X86::COND_NO:
+ case X86::COND_NE:
+ case X86::COND_GE:
+ case X86::COND_G:
+ case X86::COND_AE:
+ case X86::COND_A:
+ case X86::COND_NS:
+ case X86::COND_NP:
+ break;
+ case X86::COND_O:
+ Flags |= OF;
+ break;
+ case X86::COND_B:
+ case X86::COND_BE:
+ Flags |= CF;
+ break;
+ case X86::COND_E:
+ case X86::COND_LE:
+ Flags |= ZF;
+ break;
+ case X86::COND_S:
+ case X86::COND_L:
+ Flags |= SF;
+ break;
+ case X86::COND_P:
+ Flags |= PF;
+ break;
+ }
+ return Flags;
+}
+
+void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
+ LLVM_DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into "
+ << printMBBReference(*Head) << ":\n"
+ << *CmpBB);
+
+ // All CmpBB instructions are moved into Head, and CmpBB is deleted.
+ // Update the CFG first.
+ updateTailPHIs();
+
+ // Save successor probabilities before removing CmpBB and Tail from their
+ // parents.
+ BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB);
+ BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail);
+
+ Head->removeSuccessor(CmpBB);
+ CmpBB->removeSuccessor(Tail);
+
+ // If Head and CmpBB had successor probabilities, update the probabilities to
+ // reflect the ccmp-conversion.
+ if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) {
+
+ // Head is allowed two successors. We've removed CmpBB, so the remaining
+ // successor is Tail. We need to increase the successor probability for
+ // Tail to account for the CmpBB path we removed.
+ //
+ // Pr(Tail|Head) += Pr(CmpBB|Head) * Pr(Tail|CmpBB).
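+ //
+ // For example, if Pr(CmpBB|Head) = Pr(Tail|Head) = 0.5 and
+ // Pr(Tail|CmpBB) = 0.4, the merged Head gets
+ // Pr(Tail|Head) = 0.5 + 0.5 * 0.4 = 0.7.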
+ assert(*Head->succ_begin() == Tail && "Head successor is not Tail");
+ BranchProbability Head2Tail = MBPI->getEdgeProbability(Head, Tail);
+ Head->setSuccProbability(Head->succ_begin(),
+ Head2Tail + Head2CmpBB * CmpBB2Tail);
+
+ // We will transfer successors of CmpBB to Head in a moment without
+ // normalizing the successor probabilities. Set the successor probabilities
+ // before doing so.
+ //
+ // Pr(I|Head) = Pr(CmpBB|Head) * Pr(I|CmpBB).
+ for (auto I = CmpBB->succ_begin(), E = CmpBB->succ_end(); I != E; ++I) {
+ BranchProbability CmpBB2I = MBPI->getEdgeProbability(CmpBB, *I);
+ CmpBB->setSuccProbability(I, Head2CmpBB * CmpBB2I);
+ }
+ }
+
+ Head->transferSuccessorsAndUpdatePHIs(CmpBB);
+ DebugLoc TermDL = Head->getFirstTerminator()->getDebugLoc();
+ TII->removeBranch(*Head);
+
+ Head->splice(Head->end(), CmpBB, CmpBB->begin(), CmpBB->end());
+
+ // Now replace CmpMI with a ccmp instruction that also considers the incoming
+ // flags.
+ unsigned Opc = [=]() {
+ switch (CmpMI->getOpcode()) {
+ default:
+ llvm_unreachable("Unknown compare opcode");
+ case X86::SUB8rr:
+ case X86::SUB8rr_ND:
+ return X86::CCMP8rr;
+ case X86::SUB16rr:
+ case X86::SUB16rr_ND:
+ return X86::CCMP16rr;
+ case X86::SUB32rr:
+ case X86::SUB32rr_ND:
+ return X86::CCMP32rr;
+ case X86::SUB64rr:
+ case X86::SUB64rr_ND:
+ return X86::CCMP64rr;
+ case X86::SUB8ri:
+ case X86::SUB8ri_ND:
+ return X86::CCMP8ri;
+ case X86::SUB16ri:
+ case X86::SUB16ri_ND:
+ return X86::CCMP16ri;
+ case X86::SUB32ri:
+ case X86::SUB32ri_ND:
+ return X86::CCMP32ri;
+ case X86::SUB64ri32:
+ case X86::SUB64ri32_ND:
+ return X86::CCMP64ri32;
+ case X86::CMP8rr:
+ return X86::CCMP8rr;
+ case X86::CMP16rr:
+ return X86::CCMP16rr;
+ case X86::CMP32rr:
+ return X86::CCMP32rr;
+ case X86::CMP64rr:
+ return X86::CCMP64rr;
+ case X86::CMP8ri:
+ return X86::CCMP8ri;
+ case X86::CMP16ri:
+ return X86::CCMP16ri;
+ case X86::CMP32ri:
+ return X86::CCMP32ri;
+ case X86::CMP64ri32:
+ return X86::CCMP64ri32;
+ case X86::TEST8rr:
+ return X86::CTEST8rr;
+ case X86::TEST16rr:
+ return X86::CTEST16rr;
+ case X86::TEST32rr:
+ return X86::CTEST32rr;
+ case X86::TEST64rr:
+ return X86::CTEST64rr;
+ case X86::TEST8ri:
+ return X86::CTEST8ri;
+ case X86::TEST16ri:
+ return X86::CTEST16ri;
+ case X86::TEST32ri:
+ return X86::CTEST32ri;
+ case X86::TEST64ri32:
+ return X86::CTEST64ri32;
+ }
+ }();
+ const MCInstrDesc &MCID = TII->get(Opc);
+ unsigned NumDefs = CmpMI->getDesc().getNumDefs();
+ MachineOperand Op0 = CmpMI->getOperand(NumDefs);
+ MachineOperand Op1 = CmpMI->getOperand(NumDefs + 1);
+ BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
+ .add(Op0)
+ .add(Op1)
+ .addImm(getCondFlagsFromCondCode(CmpBBTailCC))
+ .addImm(HeadCmpBBCC);
+ CmpMI->eraseFromParent();
+ Head->updateTerminator(CmpBB->getNextNode());
+
+ RemovedBlocks.push_back(CmpBB);
+ CmpBB->eraseFromParent();
+ LLVM_DEBUG(dbgs() << "Result:\n" << *Head);
+ ++NumConverted;
+}
+
+//===----------------------------------------------------------------------===//
+// X86ConditionalCompares Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+class X86ConditionalCompares : public MachineFunctionPass {
+ const X86Subtarget *STI = nullptr;
+ const MachineBranchProbabilityInfo *MBPI = nullptr;
+ const TargetInstrInfo *TII = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ MachineRegisterInfo *MRI = nullptr;
+ MachineDominatorTree *DomTree = nullptr;
+ MachineLoopInfo *Loops = nullptr;
+ MachineTraceMetrics *Traces = nullptr;
+ SSACCmpConv CmpConv;
+
+public:
+ static char ID;
+ X86ConditionalCompares() : MachineFunctionPass(ID) {
+ initializeX86ConditionalComparesPass(*PassRegistry::getPassRegistry());
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ StringRef getPassName() const override { return "X86 Conditional Compares"; }
+
+private:
+ bool tryConvert(MachineBasicBlock *MBB);
+ void updateDomTree(ArrayRef<MachineBasicBlock *> Removed);
+ void updateLoops(ArrayRef<MachineBasicBlock *> Removed);
+ void invalidateTraces();
+ bool shouldConvert();
+};
+} // end anonymous namespace
+
+char X86ConditionalCompares::ID = 0;
+
+INITIALIZE_PASS_BEGIN(X86ConditionalCompares, "x86-ccmp", "X86 CCMP Pass",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
+INITIALIZE_PASS_END(X86ConditionalCompares, "x86-ccmp", "X86 CCMP Pass", false,
+ false)
+
+FunctionPass *llvm::createX86ConditionalCompares() {
+ return new X86ConditionalCompares();
+}
+
+void X86ConditionalCompares::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<MachineTraceMetrics>();
+ AU.addPreserved<MachineTraceMetrics>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+/// Update the dominator tree after if-conversion erased some blocks.
+void X86ConditionalCompares::updateDomTree(
+ ArrayRef<MachineBasicBlock *> Removed) {
+ // convert() removes CmpBB which was previously dominated by Head.
+ // CmpBB children should be transferred to Head.
+ MachineDomTreeNode *HeadNode = DomTree->getNode(CmpConv.Head);
+ for (MachineBasicBlock *RemovedMBB : Removed) {
+ MachineDomTreeNode *Node = DomTree->getNode(RemovedMBB);
+ assert(Node != HeadNode && "Cannot erase the head node");
+ assert(Node->getIDom() == HeadNode && "CmpBB should be dominated by Head");
+ while (Node->getNumChildren())
+ DomTree->changeImmediateDominator(Node->back(), HeadNode);
+ DomTree->eraseNode(RemovedMBB);
+ }
+}
+
+/// Update LoopInfo after if-conversion.
+void X86ConditionalCompares::updateLoops(
+ ArrayRef<MachineBasicBlock *> Removed) {
+ if (!Loops)
+ return;
+ for (MachineBasicBlock *RemovedMBB : Removed)
+ Loops->removeBlock(RemovedMBB);
+}
+
+/// Invalidate MachineTraceMetrics before if-conversion.
+void X86ConditionalCompares::invalidateTraces() {
+ Traces->invalidate(CmpConv.Head);
+ Traces->invalidate(CmpConv.CmpBB);
+}
+
+/// Apply the cost model and heuristics to the ccmp-conversion in CmpConv.
+/// Return true if the conversion is a good idea.
+///
+bool X86ConditionalCompares::shouldConvert() { return true; }
+
+bool X86ConditionalCompares::tryConvert(MachineBasicBlock *MBB) {
+ bool Changed = false;
+ while (CmpConv.canConvert(MBB) && shouldConvert()) {
+ invalidateTraces();
+ SmallVector<MachineBasicBlock *, 4> RemovedBlocks;
+ CmpConv.convert(RemovedBlocks);
+ Changed = true;
+ updateDomTree(RemovedBlocks);
+ updateLoops(RemovedBlocks);
+ }
+ return Changed;
+}
+
+bool X86ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "********** X86 Conditional Compares **********\n"
+ << "********** Function: " << MF.getName() << '\n');
+ if (skipFunction(MF.getFunction()))
+ return false;
+
+ STI = &MF.getSubtarget<X86Subtarget>();
+ if (!STI->hasCCMP())
+ return false;
+
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getSubtarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ DomTree = &getAnalysis<MachineDominatorTree>();
+ Loops = getAnalysisIfAvailable<MachineLoopInfo>();
+ MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+ Traces = &getAnalysis<MachineTraceMetrics>();
+
+ bool Changed = false;
+ CmpConv.runOnMachineFunction(MF, MBPI);
+
+ // Visit blocks in dominator tree pre-order. The pre-order enables multiple
+ // cmp-conversions from the same head block.
+ // Note that updateDomTree() modifies the children of the DomTree node
+ // currently being visited. The df_iterator supports that; it doesn't look at
+ // child_begin() / child_end() until after a node has been visited.
+ for (auto *I : depth_first(DomTree))
+ if (tryConvert(I->getBlock()))
+ Changed = true;
+
+ return Changed;
+}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e1e6c22eb8cca5..f7920a035e79a0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3377,7 +3377,9 @@ X86TargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
const Value *Lhs,
const Value *Rhs) const {
using namespace llvm::PatternMatch;
- int BaseCost = BrMergingBaseCostThresh.getValue();
+ // Disable condition merging when CCMP is available because we can eliminate
+ // branches in a more efficient way.
+ int BaseCost = Subtarget.hasCCMP() ? -1 : BrMergingBaseCostThresh.getValue();
// a == b && a == c is a fast pattern on x86.
ICmpInst::Predicate Pred;
if (BaseCost >= 0 && Opc == Instruction::And &&
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 276bc7f08d4cd7..2051c2368d687b 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -98,6 +98,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86Target() {
initializeX86LoadValueInjectionRetHardeningPassPass(PR);
initializeX86OptimizeLEAPassPass(PR);
initializeX86PartialReductionPass(PR);
+ initializeX86ConditionalComparesPass(PR);
initializePseudoProbeInserterPass(PR);
initializeX86ReturnThunksPass(PR);
initializeX86DAGToDAGISelPass(PR);
@@ -508,6 +509,7 @@ bool X86PassConfig::addGlobalInstructionSelect() {
}
bool X86PassConfig::addILPOpts() {
+ addPass(createX86ConditionalCompares());
addPass(&EarlyIfConverterID);
if (EnableMachineCombinerPass)
addPass(&MachineCombinerID);
diff --git a/llvm/test/CodeGen/X86/apx/ccmp.ll b/llvm/test/CodeGen/X86/apx/ccmp.ll
new file mode 100644
index 00000000000000..5d6c281404cb53
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ccmp.ll
@@ -0,0 +1,1116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs -show-mc-encoding | FileCheck %s --check-prefix=NDD
+
+define void @ccmp8rr_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; CHECK-NEXT: ccmpneb {dfv=zf} %dl, %sil # encoding: [0x62,0xf4,0x14,0x05,0x38,0xd6]
+; CHECK-NEXT: jne .LBB0_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB0_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp8rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; NDD-NEXT: ccmpneb {dfv=zf} %dl, %sil # encoding: [0x62,0xf4,0x14,0x05,0x38,0xd6]
+; NDD-NEXT: jne .LBB0_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB0_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB0_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16rr_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; CHECK-NEXT: ccmplew {dfv=sf} %dx, %si # encoding: [0x62,0xf4,0x25,0x0e,0x39,0xd6]
+; CHECK-NEXT: jge .LBB1_1 # encoding: [0x7d,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB1_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB1_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; NDD-NEXT: ccmplew {dfv=sf} %dx, %si # encoding: [0x62,0xf4,0x25,0x0e,0x39,0xd6]
+; NDD-NEXT: jge .LBB1_1 # encoding: [0x7d,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB1_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB1_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rr_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32rr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; CHECK-NEXT: ccmpbl {dfv=cf} %edx, %esi # encoding: [0x62,0xf4,0x0c,0x02,0x39,0xd6]
+; CHECK-NEXT: ja .LBB2_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB2_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32rr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; NDD-NEXT: ccmpbl {dfv=cf} %edx, %esi # encoding: [0x62,0xf4,0x0c,0x02,0x39,0xd6]
+; NDD-NEXT: ja .LBB2_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB2_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rr_of(i64 %a, i64 %b) {
+; CHECK-LABEL: ccmp64rr_of:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ccmpneq {dfv=of} %rsi, %rdi # encoding: [0x62,0xf4,0xc4,0x05,0x39,0xf7]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64rr_of:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ccmpneq {dfv=of} %rsi, %rdi # encoding: [0x62,0xf4,0xc4,0x05,0x39,0xf7]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp eq i64 %a, 0
+ br i1 %cond1, label %bb3, label %bb1
+
+bb1: ; preds = %bb
+ %smul = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ %obit = extractvalue {i64, i1} %smul, 1
+ br i1 %obit, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ccmp8ri_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; CHECK-NEXT: ccmpleb {dfv=zf} $123, %sil # encoding: [0x62,0xf4,0x14,0x0e,0x80,0xfe,0x7b]
+; CHECK-NEXT: jne .LBB4_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB4_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB4_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp8ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; NDD-NEXT: ccmpleb {dfv=zf} $123, %sil # encoding: [0x62,0xf4,0x14,0x0e,0x80,0xfe,0x7b]
+; NDD-NEXT: jne .LBB4_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB4_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB4_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp sgt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16ri8_zf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16ri8_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; CHECK-NEXT: ccmplew {dfv=zf} $122, %si # encoding: [0x62,0xf4,0x15,0x0e,0x81,0xfe,0x7a,0x00]
+; CHECK-NEXT: jg .LBB5_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB5_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16ri8_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; NDD-NEXT: ccmplew {dfv=zf} $122, %si # encoding: [0x62,0xf4,0x15,0x0e,0x81,0xfe,0x7a,0x00]
+; NDD-NEXT: jg .LBB5_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB5_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB5_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32ri8_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32ri8_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; CHECK-NEXT: ccmpal {dfv=cf} $123, %esi # encoding: [0x62,0xf4,0x0c,0x07,0x81,0xfe,0x7b,0x00,0x00,0x00]
+; CHECK-NEXT: ja .LBB6_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB6_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32ri8_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; NDD-NEXT: ccmpal {dfv=cf} $123, %esi # encoding: [0x62,0xf4,0x0c,0x07,0x81,0xfe,0x7b,0x00,0x00,0x00]
+; NDD-NEXT: ja .LBB6_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB6_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB6_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp ule i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64ri8_zf(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+; CHECK-LABEL: ccmp64ri8_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; CHECK-NEXT: ccmpleq {dfv=zf} $122, %rsi # encoding: [0x62,0xf4,0x94,0x0e,0x81,0xfe,0x7a,0x00,0x00,0x00]
+; CHECK-NEXT: jg .LBB7_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB7_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB7_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64ri8_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; NDD-NEXT: ccmpleq {dfv=zf} $122, %rsi # encoding: [0x62,0xf4,0x94,0x0e,0x81,0xfe,0x7a,0x00,0x00,0x00]
+; NDD-NEXT: jg .LBB7_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB7_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB7_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16ri_zf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; CHECK-NEXT: movswl %si, %eax # encoding: [0x0f,0xbf,0xc6]
+; CHECK-NEXT: ccmpael {dfv=zf} $1233, %eax # encoding: [0x62,0xf4,0x14,0x03,0x81,0xf8,0xd1,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D1
+; CHECK-NEXT: jg .LBB8_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB8_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB8_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; NDD-NEXT: movswl %si, %eax # encoding: [0x0f,0xbf,0xc6]
+; NDD-NEXT: ccmpael {dfv=zf} $1233, %eax # encoding: [0x62,0xf4,0x14,0x03,0x81,0xf8,0xd1,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D1
+; NDD-NEXT: jg .LBB8_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB8_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB8_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp ult i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32ri_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32ri_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; CHECK-NEXT: ccmpbl {dfv=cf} $123456, %esi # encoding: [0x62,0xf4,0x0c,0x02,0x81,0xfe,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: ja .LBB9_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB9_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB9_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32ri_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; NDD-NEXT: ccmpbl {dfv=cf} $123456, %esi # encoding: [0x62,0xf4,0x0c,0x02,0x81,0xfe,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: ja .LBB9_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB9_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB9_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64ri32_zf(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+; CHECK-LABEL: ccmp64ri32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; CHECK-NEXT: ccmpbeq {dfv=zf} $123455, %rsi # encoding: [0x62,0xf4,0x94,0x06,0x81,0xfe,0x3f,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E23F
+; CHECK-NEXT: jg .LBB10_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB10_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB10_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64ri32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; NDD-NEXT: ccmpbeq {dfv=zf} $123455, %rsi # encoding: [0x62,0xf4,0x94,0x06,0x81,0xfe,0x3f,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E23F
+; NDD-NEXT: jg .LBB10_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB10_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB10_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %cmp = icmp ugt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8rm_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8rm_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; CHECK-NEXT: ccmpneb {dfv=zf} (%rcx), %sil # encoding: [0x62,0xf4,0x14,0x05,0x3a,0x31]
+; CHECK-NEXT: jne .LBB11_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB11_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB11_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp8rm_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil # encoding: [0x40,0x38,0xd7]
+; NDD-NEXT: ccmpneb {dfv=zf} (%rcx), %sil # encoding: [0x62,0xf4,0x14,0x05,0x3a,0x31]
+; NDD-NEXT: jne .LBB11_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB11_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB11_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16rm_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; CHECK-NEXT: ccmplew {dfv=sf} (%rcx), %si # encoding: [0x62,0xf4,0x25,0x0e,0x3b,0x31]
+; CHECK-NEXT: jge .LBB12_1 # encoding: [0x7d,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB12_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB12_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di # encoding: [0x66,0x39,0xd7]
+; NDD-NEXT: ccmplew {dfv=sf} (%rcx), %si # encoding: [0x62,0xf4,0x25,0x0e,0x3b,0x31]
+; NDD-NEXT: jge .LBB12_1 # encoding: [0x7d,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB12_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB12_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rm_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32rm_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; CHECK-NEXT: ccmpgl {dfv=cf} (%rcx), %esi # encoding: [0x62,0xf4,0x0c,0x0f,0x3b,0x31]
+; CHECK-NEXT: ja .LBB13_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB13_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB13_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32rm_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi # encoding: [0x39,0xd7]
+; NDD-NEXT: ccmpgl {dfv=cf} (%rcx), %esi # encoding: [0x62,0xf4,0x0c,0x0f,0x3b,0x31]
+; NDD-NEXT: ja .LBB13_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB13_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB13_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i32, ptr %ptr
+ %cmp = icmp sle i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rm_sf(i64 noundef %a, i64 noundef %b, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; CHECK-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi # encoding: [0x62,0xf4,0xa4,0x0e,0x3b,0x31]
+; CHECK-NEXT: jge .LBB14_1 # encoding: [0x7d,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB14_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB14_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi # encoding: [0x48,0x39,0xd7]
+; NDD-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi # encoding: [0x62,0xf4,0xa4,0x0e,0x3b,0x31]
+; NDD-NEXT: jge .LBB14_1 # encoding: [0x7d,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB14_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB14_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mr_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
+; CHECK-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx) # encoding: [0x62,0xf4,0x14,0x0d,0x38,0x32]
+; CHECK-NEXT: jne .LBB15_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB15_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB15_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp8mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
+; NDD-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx) # encoding: [0x62,0xf4,0x14,0x0d,0x38,0x32]
+; NDD-NEXT: jne .LBB15_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB15_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB15_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp slt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mr_sf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; CHECK-NEXT: ccmplew {dfv=sf} %si, (%rdx) # encoding: [0x62,0xf4,0x25,0x0e,0x39,0x32]
+; CHECK-NEXT: jge .LBB16_1 # encoding: [0x7d,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB16_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB16_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; NDD-NEXT: ccmplew {dfv=sf} %si, (%rdx) # encoding: [0x62,0xf4,0x25,0x0e,0x39,0x32]
+; NDD-NEXT: jge .LBB16_1 # encoding: [0x7d,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB16_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB16_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mr_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; CHECK-NEXT: ccmpll {dfv=cf} %esi, (%rdx) # encoding: [0x62,0xf4,0x0c,0x0c,0x39,0x32]
+; CHECK-NEXT: ja .LBB17_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB17_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB17_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32mr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; NDD-NEXT: ccmpll {dfv=cf} %esi, (%rdx) # encoding: [0x62,0xf4,0x0c,0x0c,0x39,0x32]
+; NDD-NEXT: ja .LBB17_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB17_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB17_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp sge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mr_sf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; CHECK-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx) # encoding: [0x62,0xf4,0xa4,0x0e,0x39,0x32]
+; CHECK-NEXT: jge .LBB18_1 # encoding: [0x7d,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB18_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB18_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; NDD-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx) # encoding: [0x62,0xf4,0xa4,0x0e,0x39,0x32]
+; NDD-NEXT: jge .LBB18_1 # encoding: [0x7d,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB18_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB18_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mi_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
+; CHECK-NEXT: ccmpneb {dfv=zf} $123, (%rdx) # encoding: [0x62,0xf4,0x14,0x05,0x80,0x3a,0x7b]
+; CHECK-NEXT: jne .LBB19_1 # encoding: [0x75,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB19_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB19_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp8mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil # encoding: [0x40,0x38,0xf7]
+; NDD-NEXT: ccmpneb {dfv=zf} $123, (%rdx) # encoding: [0x62,0xf4,0x14,0x05,0x80,0x3a,0x7b]
+; NDD-NEXT: jne .LBB19_1 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB19_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB19_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mi8_zf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mi8_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; CHECK-NEXT: ccmplew {dfv=zf} $122, (%rdx) # encoding: [0x62,0xf4,0x15,0x0e,0x81,0x3a,0x7a,0x00]
+; CHECK-NEXT: jg .LBB20_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB20_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB20_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16mi8_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; NDD-NEXT: ccmplew {dfv=zf} $122, (%rdx) # encoding: [0x62,0xf4,0x15,0x0e,0x81,0x3a,0x7a,0x00]
+; NDD-NEXT: jg .LBB20_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB20_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB20_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mi8_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mi8_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; CHECK-NEXT: ccmpnel {dfv=cf} $123, (%rdx) # encoding: [0x62,0xf4,0x0c,0x05,0x81,0x3a,0x7b,0x00,0x00,0x00]
+; CHECK-NEXT: ja .LBB21_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB21_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB21_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32mi8_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; NDD-NEXT: ccmpnel {dfv=cf} $123, (%rdx) # encoding: [0x62,0xf4,0x0c,0x05,0x81,0x3a,0x7b,0x00,0x00,0x00]
+; NDD-NEXT: ja .LBB21_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB21_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB21_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mi8_zf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mi8_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; CHECK-NEXT: ccmpleq {dfv=zf} $122, (%rdx) # encoding: [0x62,0xf4,0x94,0x0e,0x81,0x3a,0x7a,0x00,0x00,0x00]
+; CHECK-NEXT: jg .LBB22_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB22_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB22_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64mi8_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; NDD-NEXT: ccmpleq {dfv=zf} $122, (%rdx) # encoding: [0x62,0xf4,0x94,0x0e,0x81,0x3a,0x7a,0x00,0x00,0x00]
+; NDD-NEXT: jg .LBB22_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB22_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB22_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mi_zf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; CHECK-NEXT: movswl (%rdx), %eax # encoding: [0x0f,0xbf,0x02]
+; CHECK-NEXT: ccmplel {dfv=zf} $1233, %eax # encoding: [0x62,0xf4,0x14,0x0e,0x81,0xf8,0xd1,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D1
+; CHECK-NEXT: jg .LBB23_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB23_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB23_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp16mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di # encoding: [0x66,0x39,0xf7]
+; NDD-NEXT: movswl (%rdx), %eax # encoding: [0x0f,0xbf,0x02]
+; NDD-NEXT: ccmplel {dfv=zf} $1233, %eax # encoding: [0x62,0xf4,0x14,0x0e,0x81,0xf8,0xd1,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D1
+; NDD-NEXT: jg .LBB23_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB23_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB23_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mi_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mi_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; CHECK-NEXT: ccmpnel {dfv=cf} $123456, (%rdx) # encoding: [0x62,0xf4,0x0c,0x05,0x81,0x3a,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: ja .LBB24_1 # encoding: [0x77,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB24_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB24_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp32mi_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi # encoding: [0x39,0xf7]
+; NDD-NEXT: ccmpnel {dfv=cf} $123456, (%rdx) # encoding: [0x62,0xf4,0x0c,0x05,0x81,0x3a,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: ja .LBB24_1 # encoding: [0x77,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB24_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB24_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mi32_zf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mi32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; CHECK-NEXT: ccmpleq {dfv=zf} $123455, (%rdx) # encoding: [0x62,0xf4,0x94,0x0e,0x81,0x3a,0x3f,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E23F
+; CHECK-NEXT: jg .LBB25_1 # encoding: [0x7f,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB25_1-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: # encoding: [0xeb,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; CHECK-NEXT: .LBB25_1: # %if.end
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ccmp64mi32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi # encoding: [0x48,0x39,0xf7]
+; NDD-NEXT: ccmpleq {dfv=zf} $123455, (%rdx) # encoding: [0x62,0xf4,0x94,0x0e,0x81,0x3a,0x3f,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E23F
+; NDD-NEXT: jg .LBB25_1 # encoding: [0x7f,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB25_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: # encoding: [0xeb,A]
+; NDD-NEXT: # fixup A - offset: 1, value: foo-1, kind: FK_PCRel_1
+; NDD-NEXT: .LBB25_1: # %if.end
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+declare dso_local void @foo(...)
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/apx/ctest.ll b/llvm/test/CodeGen/X86/apx/ctest.ll
new file mode 100644
index 00000000000000..41d689487e1cb7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ctest.ll
@@ -0,0 +1,1212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs -show-mc-encoding | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs -show-mc-encoding | FileCheck %s --check-prefix=NDD
+
+define void @ctest8rr_zf(i8 %a, i8 %b) {
+; CHECK-LABEL: ctest8rr_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctesteb {dfv=zf} %sil, %dil # encoding: [0x62,0xf4,0x14,0x04,0x84,0xf7]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8rr_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: je .LBB0_1 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB0_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+; NDD-NEXT: .LBB0_1: # %bb1
+; NDD-NEXT: andb %sil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x20,0xf7]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp eq i8 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i8 %a, %b
+ %cond2 = icmp sgt i8 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest16rr_sf(i16 %a, i16 %b) {
+; CHECK-LABEL: ctest16rr_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestew {dfv=sf} %si, %di # encoding: [0x62,0xf4,0x25,0x04,0x85,0xf7]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16rr_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: jne .LBB1_2 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB1_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %bb1
+; NDD-NEXT: andw %si, %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x21,0xf7]
+; NDD-NEXT: .LBB1_2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp ule i16 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i16 %a, %b
+ %cond2 = icmp sge i16 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest32rr_zf(i32 %a, i32 %b) {
+; CHECK-LABEL: ctest32rr_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movb $1, %al # encoding: [0xb0,0x01]
+; CHECK-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; CHECK-NEXT: ctestel {dfv=zf} %esi, %edi # encoding: [0x62,0xf4,0x14,0x04,0x85,0xf7]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32rr_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: movb $1, %al # encoding: [0xb0,0x01]
+; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0]
+; NDD-NEXT: jne .LBB2_2 # encoding: [0x75,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %bb1
+; NDD-NEXT: andl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x21,0xf7]
+; NDD-NEXT: .LBB2_2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp ult i32 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i32 %a, %b
+ %cond2 = icmp ugt i32 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest64rr_sf(i64 %a, i64 %b) {
+; CHECK-LABEL: ctest64rr_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestsq {dfv=sf} %rsi, %rdi # encoding: [0x62,0xf4,0xa4,0x08,0x85,0xf7]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64rr_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: js .LBB3_1 # encoding: [0x78,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB3_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+; NDD-NEXT: .LBB3_1: # %bb1
+; NDD-NEXT: andq %rsi, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x21,0xf7]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp slt i64 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i64 %a, %b
+ %cond2 = icmp sge i64 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest8ri_zf(i8 %a) {
+; CHECK-LABEL: ctest8ri_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctestsb {dfv=zf} $123, %dil # encoding: [0x62,0xf4,0x14,0x08,0xf6,0xc7,0x7b]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8ri_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctestsb {dfv=zf} $123, %dil # encoding: [0x62,0xf4,0x14,0x08,0xf6,0xc7,0x7b]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp slt i8 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i8 %a, 123
+ %cond2 = icmp ugt i8 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest16ri_sf(i16 %a) {
+; CHECK-LABEL: ctest16ri_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestnel {dfv=sf} $1234, %edi # encoding: [0x62,0xf4,0x24,0x05,0xf7,0xc7,0xd2,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D2
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16ri_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestnel {dfv=sf} $1234, %edi # encoding: [0x62,0xf4,0x24,0x05,0xf7,0xc7,0xd2,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D2
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp ne i16 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i16 %a, 1234
+ %cond2 = icmp sge i16 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest32ri_zf(i32 %a) {
+; CHECK-LABEL: ctest32ri_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestel {dfv=zf} $123456, %edi # encoding: [0x62,0xf4,0x14,0x04,0xf7,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32ri_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestel {dfv=zf} $123456, %edi # encoding: [0x62,0xf4,0x14,0x04,0xf7,0xc7,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp eq i32 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i32 %a, 123456
+ %cond2 = icmp sgt i32 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest64ri_sf(i64 %a) {
+; CHECK-LABEL: ctest64ri_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: cmpq $122, %rdi # encoding: [0x48,0x83,0xff,0x7a]
+; CHECK-NEXT: ctestlel {dfv=sf} $123456, %edi # encoding: [0x62,0xf4,0x24,0x0e,0xf7,0xc7,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64ri_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: cmpq $122, %rdi # encoding: [0x48,0x83,0xff,0x7a]
+; NDD-NEXT: ctestlel {dfv=sf} $123456, %edi # encoding: [0x62,0xf4,0x24,0x0e,0xf7,0xc7,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %cond1 = icmp slt i64 %a, 123
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i64 %a, 123456
+ %cond2 = icmp sge i64 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest8mr_zf(i8 %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mr_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctesteb {dfv=zf} %dil, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0x84,0x3e]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8mr_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: je .LBB8_1 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB8_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+; NDD-NEXT: .LBB8_1: # %bb1
+; NDD-NEXT: andb (%rsi), %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x22,0x3e]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i8, ptr %ptr
+ %cond1 = icmp eq i8 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i8 %a, %b
+ %cond2 = icmp ugt i8 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest16mr_sf(i16 %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mr_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestew {dfv=sf} %di, (%rsi) # encoding: [0x62,0xf4,0x25,0x04,0x85,0x3e]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16mr_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: je .LBB9_1 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB9_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+; NDD-NEXT: .LBB9_1: # %bb1
+; NDD-NEXT: andw (%rsi), %di, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x23,0x3e]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i16, ptr %ptr
+ %cond1 = icmp eq i16 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i16 %a, %b
+ %cond2 = icmp sge i16 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest32mr_zf(i32 %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mr_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestel {dfv=zf} %edi, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0x85,0x3e]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32mr_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: je .LBB10_1 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB10_1-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+; NDD-NEXT: .LBB10_1: # %bb1
+; NDD-NEXT: andl (%rsi), %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x23,0x3e]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i32, ptr %ptr
+ %cond1 = icmp eq i32 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i32 %a, %b
+ %cond2 = icmp sgt i32 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest64mr_sf(i64 %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mr_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestnsq {dfv=sf} %rdi, (%rsi) # encoding: [0x62,0xf4,0xa4,0x09,0x85,0x3e]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64mr_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: js .LBB11_2 # encoding: [0x78,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB11_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %bb1
+; NDD-NEXT: andq (%rsi), %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x23,0x3e]
+; NDD-NEXT: .LBB11_2: # %bb3
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i64, ptr %ptr
+ %cond1 = icmp sge i64 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i64 %a, %b
+ %cond2 = icmp sge i64 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest8mi_zf(i8 %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mi_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctesteb {dfv=zf} $123, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0xf6,0x06,0x7b]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8mi_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctesteb {dfv=zf} $123, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0xf6,0x06,0x7b]
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i8, ptr %ptr
+ %cond1 = icmp eq i8 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i8 %b, 123
+ %cond2 = icmp sgt i8 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest16mi_sf(i16 %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mi_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movzwl (%rsi), %eax # encoding: [0x0f,0xb7,0x06]
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestgl {dfv=sf} $1234, %eax # encoding: [0x62,0xf4,0x24,0x0f,0xf7,0xc0,0xd2,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D2
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16mi_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: movzwl (%rsi), %eax # encoding: [0x0f,0xb7,0x06]
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestgl {dfv=sf} $1234, %eax # encoding: [0x62,0xf4,0x24,0x0f,0xf7,0xc0,0xd2,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D2
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i16, ptr %ptr
+ %cond1 = icmp sgt i16 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i16 %b, 1234
+ %cond2 = icmp sge i16 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest32mi_zf(i32 %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mi_zf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestel {dfv=zf} $123456, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0xf7,0x06,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32mi_zf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestel {dfv=zf} $123456, (%rsi) # encoding: [0x62,0xf4,0x14,0x04,0xf7,0x06,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i32, ptr %ptr
+ %cond1 = icmp eq i32 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i32 %b, 123456
+ %cond2 = icmp ugt i32 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ctest64mi_sf(i64 %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mi_sf:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: movq (%rsi), %rax # encoding: [0x48,0x8b,0x06]
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestlel {dfv=sf} $123456, %eax # encoding: [0x62,0xf4,0x24,0x0e,0xf7,0xc0,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64mi_sf:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: movq (%rsi), %rax # encoding: [0x48,0x8b,0x06]
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ctestlel {dfv=sf} $123456, %eax # encoding: [0x62,0xf4,0x24,0x0e,0xf7,0xc0,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: retq # encoding: [0xc3]
+bb:
+ %b = load i64, ptr %ptr
+ %cond1 = icmp sle i64 %a, 0
+ br i1 %cond1, label %bb1, label %bb3
+
+bb1: ; preds = %bb
+ %and = and i64 %b, 123456
+ %cond2 = icmp sge i64 %and, 0
+ br i1 %cond2, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define i8 @ctest8rr_zf_opt(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl %edi, %esi # encoding: [0x21,0xfe]
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctestneb {dfv=zf} %sil, %sil # encoding: [0x62,0xf4,0x14,0x05,0x84,0xf6]
+; CHECK-NEXT: jle .LBB16_2 # encoding: [0x7e,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB16_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; CHECK-NEXT: .LBB16_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8rr_zf_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x21,0xf7]
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctestneb {dfv=zf} %al, %al # encoding: [0x62,0xf4,0x14,0x05,0x84,0xc0]
+; NDD-NEXT: jle .LBB16_2 # encoding: [0x7e,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB16_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; NDD-NEXT: .LBB16_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %and = and i8 %a, %b
+ %cmp = icmp sgt i8 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i8 0
+}
+
+define i16 @ctest16rr_sf_opt(i16 %a, i16 %b, i16* nocapture %c) {
+; CHECK-LABEL: ctest16rr_sf_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl %edi, %esi # encoding: [0x21,0xfe]
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestsw {dfv=sf} %si, %si # encoding: [0x62,0xf4,0x25,0x08,0x85,0xf6]
+; CHECK-NEXT: js .LBB17_2 # encoding: [0x78,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB17_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; CHECK-NEXT: .LBB17_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16rr_sf_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x21,0xf7]
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestsw {dfv=sf} %ax, %ax # encoding: [0x62,0xf4,0x25,0x08,0x85,0xc0]
+; NDD-NEXT: js .LBB17_2 # encoding: [0x78,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB17_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; NDD-NEXT: .LBB17_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp slt i16 %a, 0
+ %and = and i16 %a, %b
+ %cmp = icmp sge i16 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i16 %a, i16* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i16 0
+}
+
+define i32 @ctest32rr_zf_opt(i32 %a, i32 %b, i32* nocapture %c) {
+; CHECK-LABEL: ctest32rr_zf_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl %edi, %esi # encoding: [0x21,0xfe]
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestnel {dfv=zf} %esi, %esi # encoding: [0x62,0xf4,0x14,0x05,0x85,0xf6]
+; CHECK-NEXT: je .LBB18_2 # encoding: [0x74,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB18_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; CHECK-NEXT: .LBB18_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32rr_zf_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl %esi, %edi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x21,0xf7]
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestnel {dfv=zf} %eax, %eax # encoding: [0x62,0xf4,0x14,0x05,0x85,0xc0]
+; NDD-NEXT: je .LBB18_2 # encoding: [0x74,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB18_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; NDD-NEXT: .LBB18_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i32 %a, 0
+ %and = and i32 %a, %b
+ %cmp = icmp ugt i32 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i32 %a, i32* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+define i64 @ctest64rr_sf_opt(i64 %a, i64 %b, i64* nocapture %c) {
+; CHECK-LABEL: ctest64rr_sf_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andq %rdi, %rsi # encoding: [0x48,0x21,0xfe]
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestneq {dfv=sf} %rsi, %rsi # encoding: [0x62,0xf4,0xa4,0x05,0x85,0xf6]
+; CHECK-NEXT: js .LBB19_2 # encoding: [0x78,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB19_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; CHECK-NEXT: .LBB19_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64rr_sf_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andq %rsi, %rdi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x21,0xf7]
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ctestneq {dfv=sf} %rax, %rax # encoding: [0x62,0xf4,0xa4,0x05,0x85,0xc0]
+; NDD-NEXT: js .LBB19_2 # encoding: [0x78,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB19_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; NDD-NEXT: .LBB19_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i64 %a, 0
+ %and = and i64 %a, %b
+ %cmp = icmp sge i64 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i64 %a, i64* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i64 0
+}
+
+define i8 @ctest8mr_opt(i8 %a, i8 %b, i8* nocapture %c, i8* %ptr) {
+; CHECK-LABEL: ctest8mr_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andb (%rcx), %sil # encoding: [0x40,0x22,0x31]
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctestneb {dfv=} %sil, %sil # encoding: [0x62,0xf4,0x04,0x05,0x84,0xf6]
+; CHECK-NEXT: jns .LBB20_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB20_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; CHECK-NEXT: .LBB20_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8mr_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andb (%rcx), %sil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x22,0x31]
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctestneb {dfv=} %al, %al # encoding: [0x62,0xf4,0x04,0x05,0x84,0xc0]
+; NDD-NEXT: jns .LBB20_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB20_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; NDD-NEXT: .LBB20_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i8, i8* %ptr
+ %tobool = icmp ne i8 %a, 0
+ %and = and i8 %b, %d
+ %cmp = icmp slt i8 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i8 0
+}
+
+define i16 @ctest16mr_opt(i16 %a, i16 %b, i16* nocapture %c, i16* %ptr) {
+; CHECK-LABEL: ctest16mr_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andw (%rcx), %si # encoding: [0x66,0x23,0x31]
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestnew {dfv=} %si, %si # encoding: [0x62,0xf4,0x05,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB21_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB21_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; CHECK-NEXT: .LBB21_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16mr_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andw (%rcx), %si, %ax # encoding: [0x62,0xf4,0x7d,0x18,0x23,0x31]
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestnew {dfv=} %ax, %ax # encoding: [0x62,0xf4,0x05,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB21_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB21_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; NDD-NEXT: .LBB21_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i16, i16* %ptr
+ %tobool = icmp ne i16 %a, 0
+ %and = and i16 %b, %d
+ %cmp = icmp slt i16 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i16 %a, i16* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i16 0
+}
+
+define i32 @ctest32mr_opt(i32 %a, i32 %b, i32* nocapture %c, i32* %ptr) {
+; CHECK-LABEL: ctest32mr_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl (%rcx), %esi # encoding: [0x23,0x31]
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestnel {dfv=} %esi, %esi # encoding: [0x62,0xf4,0x04,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB22_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB22_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; CHECK-NEXT: .LBB22_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32mr_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl (%rcx), %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x23,0x31]
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestnel {dfv=} %eax, %eax # encoding: [0x62,0xf4,0x04,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB22_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB22_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; NDD-NEXT: .LBB22_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i32, i32* %ptr
+ %tobool = icmp ne i32 %a, 0
+ %and = and i32 %b, %d
+ %cmp = icmp slt i32 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i32 %a, i32* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+define i64 @ctest64mr_opt(i64 %a, i64 %b, i64* nocapture %c, i64* %ptr) {
+; CHECK-LABEL: ctest64mr_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andq (%rcx), %rsi # encoding: [0x48,0x23,0x31]
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestneq {dfv=} %rsi, %rsi # encoding: [0x62,0xf4,0x84,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB23_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB23_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; CHECK-NEXT: .LBB23_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64mr_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andq (%rcx), %rsi, %rax # encoding: [0x62,0xf4,0xfc,0x18,0x23,0x31]
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ctestneq {dfv=} %rax, %rax # encoding: [0x62,0xf4,0x84,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB23_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB23_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; NDD-NEXT: .LBB23_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %d = load i64, i64* %ptr
+ %tobool = icmp ne i64 %a, 0
+ %and = and i64 %b, %d
+ %cmp = icmp slt i64 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i64 %a, i64* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i64 0
+}
+
+define i8 @ctest8ri_opt(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8ri_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andb $123, %sil # encoding: [0x40,0x80,0xe6,0x7b]
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctestneb {dfv=} %sil, %sil # encoding: [0x62,0xf4,0x04,0x05,0x84,0xf6]
+; CHECK-NEXT: jns .LBB24_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB24_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; CHECK-NEXT: .LBB24_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8ri_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andb $123, %sil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xe6,0x7b]
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctestneb {dfv=} %al, %al # encoding: [0x62,0xf4,0x04,0x05,0x84,0xc0]
+; NDD-NEXT: jns .LBB24_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB24_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; NDD-NEXT: .LBB24_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp = icmp slt i8 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i8 0
+}
+
+define i16 @ctest16ri_opt(i16 %a, i16 %b, i16* nocapture %c) {
+; CHECK-LABEL: ctest16ri_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $1234, %esi # encoding: [0x81,0xe6,0xd2,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestnew {dfv=} %si, %si # encoding: [0x62,0xf4,0x05,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB25_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB25_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; CHECK-NEXT: .LBB25_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16ri_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $1234, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xe6,0xd2,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D2
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestnew {dfv=} %ax, %ax # encoding: [0x62,0xf4,0x05,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB25_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB25_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; NDD-NEXT: .LBB25_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp = icmp slt i16 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i16 %a, i16* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i16 0
+}
+
+define i32 @ctest32ri_opt(i32 %a, i32 %b, i32* nocapture %c) {
+; CHECK-LABEL: ctest32ri_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $123456, %esi # encoding: [0x81,0xe6,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestnel {dfv=} %esi, %esi # encoding: [0x62,0xf4,0x04,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB26_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB26_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; CHECK-NEXT: .LBB26_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32ri_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $123456, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xe6,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestnel {dfv=} %eax, %eax # encoding: [0x62,0xf4,0x04,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB26_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB26_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; NDD-NEXT: .LBB26_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i32 %a, 0
+ %and = and i32 %b, 123456
+ %cmp = icmp slt i32 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i32 %a, i32* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+define i64 @ctest64ri_opt(i64 %a, i64 %b, i64* nocapture %c) {
+; CHECK-LABEL: ctest64ri_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $123456, %esi # encoding: [0x81,0xe6,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestneq {dfv=} %rsi, %rsi # encoding: [0x62,0xf4,0x84,0x05,0x85,0xf6]
+; CHECK-NEXT: jns .LBB27_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB27_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; CHECK-NEXT: .LBB27_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64ri_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $123456, %esi, %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0xe6,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ctestneq {dfv=} %rax, %rax # encoding: [0x62,0xf4,0x84,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB27_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB27_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; NDD-NEXT: .LBB27_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %tobool = icmp ne i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp = icmp slt i64 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i64 %a, i64* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i64 0
+}
+
+define i8 @ctest8mi_opt(i8 %a, i8* %ptr, i8* nocapture %c) {
+; CHECK-LABEL: ctest8mi_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzbl (%rsi), %eax # encoding: [0x0f,0xb6,0x06]
+; CHECK-NEXT: andb $123, %al # encoding: [0x24,0x7b]
+; CHECK-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; CHECK-NEXT: ctestneb {dfv=} %al, %al # encoding: [0x62,0xf4,0x04,0x05,0x84,0xc0]
+; CHECK-NEXT: jns .LBB28_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB28_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; CHECK-NEXT: .LBB28_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest8mi_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andb $123, (%rsi), %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0x26,0x7b]
+; NDD-NEXT: testb %dil, %dil # encoding: [0x40,0x84,0xff]
+; NDD-NEXT: ctestneb {dfv=} %al, %al # encoding: [0x62,0xf4,0x04,0x05,0x84,0xc0]
+; NDD-NEXT: jns .LBB28_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB28_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx) # encoding: [0x40,0x88,0x3a]
+; NDD-NEXT: .LBB28_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i8, i8* %ptr
+ %tobool = icmp ne i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp = icmp slt i8 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i8 0
+}
+
+define i16 @ctest16mi_opt(i16 %a, i16* %ptr, i16* nocapture %c) {
+; CHECK-LABEL: ctest16mi_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax # encoding: [0x0f,0xb7,0x06]
+; CHECK-NEXT: andl $1234, %eax # encoding: [0x25,0xd2,0x04,0x00,0x00]
+; CHECK-NEXT: # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; CHECK-NEXT: ctestnew {dfv=} %ax, %ax # encoding: [0x62,0xf4,0x05,0x05,0x85,0xc0]
+; CHECK-NEXT: jns .LBB29_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB29_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; CHECK-NEXT: .LBB29_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest16mi_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax # encoding: [0x0f,0xb7,0x06]
+; NDD-NEXT: andl $1234, %eax # EVEX TO LEGACY Compression encoding: [0x25,0xd2,0x04,0x00,0x00]
+; NDD-NEXT: # imm = 0x4D2
+; NDD-NEXT: testw %di, %di # encoding: [0x66,0x85,0xff]
+; NDD-NEXT: ctestnew {dfv=} %ax, %ax # encoding: [0x62,0xf4,0x05,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB29_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB29_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movw %di, (%rdx) # encoding: [0x66,0x89,0x3a]
+; NDD-NEXT: .LBB29_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i16, i16* %ptr
+ %tobool = icmp ne i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp = icmp slt i16 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i16 %a, i16* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i16 0
+}
+
+define i32 @ctest32mi_opt(i32 %a, i32* %ptr, i32* nocapture %c) {
+; CHECK-LABEL: ctest32mi_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl $123456, %eax # encoding: [0xb8,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: andl (%rsi), %eax # encoding: [0x23,0x06]
+; CHECK-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; CHECK-NEXT: ctestnel {dfv=} %eax, %eax # encoding: [0x62,0xf4,0x04,0x05,0x85,0xc0]
+; CHECK-NEXT: jns .LBB30_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB30_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; CHECK-NEXT: .LBB30_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest32mi_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $123456, (%rsi), %eax # encoding: [0x62,0xf4,0x7c,0x18,0x81,0x26,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: testl %edi, %edi # encoding: [0x85,0xff]
+; NDD-NEXT: ctestnel {dfv=} %eax, %eax # encoding: [0x62,0xf4,0x04,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB30_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB30_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movl %edi, (%rdx) # encoding: [0x89,0x3a]
+; NDD-NEXT: .LBB30_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i32, i32* %ptr
+ %tobool = icmp ne i32 %a, 0
+ %and = and i32 %b, 123456
+ %cmp = icmp slt i32 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i32 %a, i32* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i32 0
+}
+
+define i64 @ctest64mi_opt(i64 %a, i64* %ptr, i64* nocapture %c) {
+; CHECK-LABEL: ctest64mi_opt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq (%rsi), %rax # encoding: [0x48,0x8b,0x06]
+; CHECK-NEXT: andl $123456, %eax # encoding: [0x25,0x40,0xe2,0x01,0x00]
+; CHECK-NEXT: # imm = 0x1E240
+; CHECK-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; CHECK-NEXT: ctestneq {dfv=} %rax, %rax # encoding: [0x62,0xf4,0x84,0x05,0x85,0xc0]
+; CHECK-NEXT: jns .LBB31_2 # encoding: [0x79,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: .LBB31_2-1, kind: FK_PCRel_1
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; CHECK-NEXT: .LBB31_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; CHECK-NEXT: retq # encoding: [0xc3]
+;
+; NDD-LABEL: ctest64mi_opt:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movq (%rsi), %rax # encoding: [0x48,0x8b,0x06]
+; NDD-NEXT: andl $123456, %eax # EVEX TO LEGACY Compression encoding: [0x25,0x40,0xe2,0x01,0x00]
+; NDD-NEXT: # imm = 0x1E240
+; NDD-NEXT: testq %rdi, %rdi # encoding: [0x48,0x85,0xff]
+; NDD-NEXT: ctestneq {dfv=} %rax, %rax # encoding: [0x62,0xf4,0x84,0x05,0x85,0xc0]
+; NDD-NEXT: jns .LBB31_2 # encoding: [0x79,A]
+; NDD-NEXT: # fixup A - offset: 1, value: .LBB31_2-1, kind: FK_PCRel_1
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movq %rdi, (%rdx) # encoding: [0x48,0x89,0x3a]
+; NDD-NEXT: .LBB31_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0]
+; NDD-NEXT: retq # encoding: [0xc3]
+entry:
+ %b = load i64, i64* %ptr
+ %tobool = icmp ne i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp = icmp slt i64 %and, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i64 %a, i64* %c, align 4
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret i64 0
+}
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 43589dc993dabb..a12779115db5be 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -101,6 +101,7 @@
; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: Machine Natural Loop Construction
; CHECK-NEXT: Machine Trace Metrics
+; CHECK-NEXT: X86 Conditional Compares
; CHECK-NEXT: Early If-Conversion
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Machine InstCombiner
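
A note for readers of the tests above (illustrative only, not part of the patch): each `ctest*_opt` case encodes a short-circuit `&&` condition as the `and`/`icmp slt`/`select i1 ..., i1 ..., i1 false` idiom, and the expected output shows the pass emitting a plain `test` followed by a `ctest` with a single branch, rather than branching on each condition separately. A rough, hypothetical C equivalent of the `ctest64mi_opt` test would be:

    /* Hypothetical C sketch of the shape encoded by ctest64mi_opt.
       The short-circuit && is what X86ConditionalCompares lowers to
       test %rdi,%rdi + ctestneq instead of a second conditional branch.
       (The actual test is written directly in LLVM IR; this is only an
       illustration of the source-level pattern.) */
    long long ctest64mi_opt(long long a, long long *ptr, long long *c) {
      long long b = *ptr;
      if (a != 0 && (b & 123456) < 0)
        *c = a;
      return 0;
    }
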