[llvm] 331eb8a - [X86][CodeGen] Support lowering for CCMP/CTEST (#91747)
via llvm-commits
llvm-commits at lists.llvm.org
Sun May 26 03:32:27 PDT 2024
Author: Shengchen Kan
Date: 2024-05-26T18:32:23+08:00
New Revision: 331eb8a0047504f3ae2cdf2d6c60b93e5d0543f1
URL: https://github.com/llvm/llvm-project/commit/331eb8a0047504f3ae2cdf2d6c60b93e5d0543f1
DIFF: https://github.com/llvm/llvm-project/commit/331eb8a0047504f3ae2cdf2d6c60b93e5d0543f1.diff
LOG: [X86][CodeGen] Support lowering for CCMP/CTEST (#91747)
DAG combine for `CCMP` and `CTESTrr`:
```
and/or(setcc(cc0, flag0), setcc(cc1, sub (X, Y)))
->
setcc(cc1, ccmp(X, Y, ~cflags/cflags, cc0/~cc0, flag0))
and/or(setcc(cc0, flag0), setcc(cc1, cmp (X, 0)))
->
setcc(cc1, ctest(X, X, ~cflags/cflags, cc0/~cc0, flag0))
```
where `cflags` is determined by `cc1`.
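The `cc0/~cc0` and `~cflags/cflags` choices fall out of the CCMP semantics: the compare only executes when the source condition (SCC) holds; otherwise EFLAGS is overwritten with the immediate `dcf` value. A minimal sketch of the equivalence (plain standalone C++, no LLVM APIs; all names illustrative):
```
#include <cassert>
#include <initializer_list>

// ccmp reduced to the single flag bit that setcc(cc1) reads afterwards:
// if SrcCC holds, the real compare runs; otherwise the flags are forced
// to the dcf value derived from cc1.
static bool ccmpThenSetcc1(bool SrcCC, bool ForcedCC1, bool RealCmpCC1) {
  return SrcCC ? RealCmpCC1 : ForcedCC1;
}

int main() {
  for (bool CC0 : {false, true})
    for (bool CC1 : {false, true}) {
      // and: SrcCC = cc0; dcf makes cc1 read false when the cmp is skipped.
      assert(ccmpThenSetcc1(CC0, false, CC1) == (CC0 && CC1));
      // or: SrcCC = ~cc0; dcf makes cc1 read true when the cmp is skipped.
      assert(ccmpThenSetcc1(!CC0, true, CC1) == (CC0 || CC1));
    }
  return 0;
}
```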
Generic DAG combine:
```
cmp(setcc(cc, X), 0)
brcond ne
->
X
brcond cc
sub(setcc(cc, X), 1)
brcond ne
->
X
brcond ~cc
```
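Both rewrites are boolean identities on the 0/1 value produced by `setcc`: `setcc(cc, X) != 0` holds exactly when `cc` does, and `setcc(cc, X) - 1 != 0` exactly when `~cc` does, so the intermediate `cmp`/`sub` and `setcc` can be dropped when only the flag result is used. A standalone check of the identities (illustrative C++, not LLVM code):
```
#include <cassert>

int main() {
  for (int CC = 0; CC <= 1; ++CC) { // setcc materializes cc as 0 or 1
    // cmp(setcc(cc, X), 0); brcond ne  ->  taken iff cc holds.
    assert((CC != 0) == (CC == 1) /* brcond cc */);
    // sub(setcc(cc, X), 1); brcond ne  ->  taken iff cc does not hold.
    assert(((CC - 1) != 0) == (CC == 0) /* brcond ~cc */);
  }
  return 0;
}
```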
Post DAG transform: `ANDrr/rm + CTESTrr -> CTESTrr/CTESTmr`
Pattern match for `CTESTri`:
```
X = and A, B
ctest(X, X, cflags, cc0, flag0)
->
ctest(A, B, cflags, cc0, flag0)
```
`CTESTmi` is already handled by the memory folding mechanism in MIR.
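The `CTESTri` fold is only legal when the `and` dies with the fold, which the new `and_du` PatFrag in X86InstrFragments.td enforces by requiring exactly two uses of the `and` result (its two appearances as `ctest` operands). A condensed sketch of that predicate, assuming LLVM's SelectionDAG headers (`isDoubleUseAnd` is an illustrative name, not from the patch):
```
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Mirrors the and_du PatFrag predicate: both ctest operands reference the
// same AND node, so exactly two uses of its value mean the AND has no other
// consumers and dies once folded into CTESTri.
static bool isDoubleUseAnd(const SDNode *N) {
  return N->getOpcode() == ISD::AND && N->hasNUsesOfValue(2, 0);
}
```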
Added:
llvm/test/CodeGen/X86/apx/ccmp.ll
llvm/test/CodeGen/X86/apx/ctest.ll
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86ISelLowering.h
llvm/lib/Target/X86/X86InstrConditionalCompare.td
llvm/lib/Target/X86/X86InstrFragments.td
llvm/lib/Target/X86/X86InstrInfo.cpp
llvm/lib/Target/X86/X86InstrInfo.h
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 93d866384b482..c7aeb0633e4ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1801,11 +1801,8 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
if (N->getNumValues() == RV->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode());
- else {
- assert(N->getValueType(0) == RV.getValueType() &&
- N->getNumValues() == 1 && "Type mismatch");
+ else
DAG.ReplaceAllUsesWith(N, &RV);
- }
// Push the new node and any users onto the worklist. Omit this if the
// new node is the EntryToken (e.g. if a store managed to get optimized
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 7f76324fa5705..3227bf75a43fb 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1553,11 +1553,16 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
switch (Opc) {
default:
continue;
- // TESTrr+ANDrr/rm -> TESTrr/TESTmr
+ // ANDrr/rm + TESTrr -> TESTrr/TESTmr
case X86::TEST8rr:
case X86::TEST16rr:
case X86::TEST32rr:
- case X86::TEST64rr: {
+ case X86::TEST64rr:
+ // ANDrr/rm + CTESTrr -> CTESTrr/CTESTmr
+ case X86::CTEST8rr:
+ case X86::CTEST16rr:
+ case X86::CTEST32rr:
+ case X86::CTEST64rr: {
auto &Op0 = N->getOperand(0);
if (Op0 != N->getOperand(1) || !Op0->hasNUsesOfValue(2, Op0.getResNo()) ||
!Op0.isMachineOpcode())
@@ -1575,8 +1580,11 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
CASE_ND(AND64rr) {
if (And->hasAnyUseOfValue(1))
continue;
- MachineSDNode *Test = CurDAG->getMachineNode(
- Opc, SDLoc(N), MVT::i32, And.getOperand(0), And.getOperand(1));
+ SmallVector<SDValue> Ops(N->op_values());
+ Ops[0] = And.getOperand(0);
+ Ops[1] = And.getOperand(1);
+ MachineSDNode *Test =
+ CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i32, Ops);
ReplaceUses(N, Test);
MadeChange = true;
continue;
@@ -1588,8 +1596,9 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
if (And->hasAnyUseOfValue(1))
continue;
unsigned NewOpc;
+ bool IsCTESTCC = X86::isCTESTCC(Opc);
#define FROM_TO(A, B) \
- CASE_ND(A) NewOpc = X86::B; \
+ CASE_ND(A) NewOpc = IsCTESTCC ? X86::C##B : X86::B; \
break;
switch (And.getMachineOpcode()) {
FROM_TO(AND8rm, TEST8mr);
@@ -1600,10 +1609,20 @@ void X86DAGToDAGISel::PostprocessISelDAG() {
#undef FROM_TO
#undef CASE_ND
// Need to swap the memory and register operand.
- SDValue Ops[] = {And.getOperand(1), And.getOperand(2),
- And.getOperand(3), And.getOperand(4),
- And.getOperand(5), And.getOperand(0),
- And.getOperand(6) /* Chain */};
+ SmallVector<SDValue> Ops = {And.getOperand(1), And.getOperand(2),
+ And.getOperand(3), And.getOperand(4),
+ And.getOperand(5), And.getOperand(0)};
+ // CC, Cflags.
+ if (IsCTESTCC) {
+ Ops.push_back(N->getOperand(2));
+ Ops.push_back(N->getOperand(3));
+ }
+ // Chain of memory load
+ Ops.push_back(And.getOperand(6));
+ // Glue
+ if (IsCTESTCC)
+ Ops.push_back(N->getOperand(4));
+
MachineSDNode *Test = CurDAG->getMachineNode(
NewOpc, SDLoc(N), MVT::i32, MVT::Other, Ops);
CurDAG->setNodeMemRefs(
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ca32cfe542330..7d90296a3eea6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -88,6 +88,12 @@ static cl::opt<int> BrMergingBaseCostThresh(
"to never merge branches."),
cl::Hidden);
+static cl::opt<int> BrMergingCcmpBias(
+ "x86-br-merging-ccmp-bias", cl::init(6),
+ cl::desc("Increases 'x86-br-merging-base-cost' in cases that the target "
+ "supports conditional compare instructions."),
+ cl::Hidden);
+
static cl::opt<int> BrMergingLikelyBias(
"x86-br-merging-likely-bias", cl::init(0),
cl::desc("Increases 'x86-br-merging-base-cost' in cases that it is likely "
@@ -3403,6 +3409,9 @@ X86TargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
const Value *Rhs) const {
using namespace llvm::PatternMatch;
int BaseCost = BrMergingBaseCostThresh.getValue();
+ // With CCMP, branches can be merged in a more efficient way.
+ if (BaseCost >= 0 && Subtarget.hasCCMP())
+ BaseCost += BrMergingCcmpBias;
// a == b && a == c is a fast pattern on x86.
ICmpInst::Predicate Pred;
if (BaseCost >= 0 && Opc == Instruction::And &&
@@ -33937,6 +33946,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TESTUI)
NODE_NAME_CASE(FP80_ADD)
NODE_NAME_CASE(STRICT_FP80_ADD)
+ NODE_NAME_CASE(CCMP)
+ NODE_NAME_CASE(CTEST)
}
return nullptr;
#undef NODE_NAME_CASE
@@ -49208,6 +49219,147 @@ static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag,
+ SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
+ // cmp(setcc(cc, X), 0)
+ // brcond ne
+ // ->
+ // X
+ // brcond cc
+
+ // sub(setcc(cc, X), 1)
+ // brcond ne
+ // ->
+ // X
+ // brcond ~cc
+ //
+ // if only the flag result has users
+
+ SDValue SetCC = N->getOperand(0);
+
+ // TODO: Remove the hasCCMP() check and update the non-APX tests.
+ if (!ST.hasCCMP() || SetCC.getOpcode() != X86ISD::SETCC || !Flag.hasOneUse())
+ return SDValue();
+
+ // Check the only user of flag is `brcond ne`.
+ SDNode *BrCond = *Flag->uses().begin();
+ if (BrCond->getOpcode() != X86ISD::BRCOND)
+ return SDValue();
+ unsigned CondNo = 2;
+ if (static_cast<X86::CondCode>(BrCond->getConstantOperandVal(CondNo)) !=
+ X86::COND_NE)
+ return SDValue();
+
+ SDValue X = SetCC.getOperand(1);
+ // Replace API is called manually here b/c the number of results may change.
+ DAG.ReplaceAllUsesOfValueWith(Flag, X);
+
+ SDValue CCN = SetCC.getOperand(0);
+ X86::CondCode CC =
+ static_cast<X86::CondCode>(CCN->getAsAPIntVal().getSExtValue());
+ X86::CondCode OppositeCC = X86::GetOppositeBranchCondition(CC);
+ // Update CC for the consumer of the flag.
+ // The old CC is `ne`. Hence, when comparing the result with 0, we are
+ // checking if the second condition evaluates to true. When comparing the
+ // result with 1, we are checking if the second condition evaluates to false.
+ SmallVector<SDValue> Ops(BrCond->op_values());
+ if (isNullConstant(N->getOperand(1)))
+ Ops[CondNo] = CCN;
+ else if (isOneConstant(N->getOperand(1)))
+ Ops[CondNo] = DAG.getTargetConstant(OppositeCC, SDLoc(BrCond), MVT::i8);
+ else
+ llvm_unreachable("expect constant 0 or 1");
+
+ SDValue NewBrCond =
+ DAG.getNode(X86ISD::BRCOND, SDLoc(BrCond), BrCond->getValueType(0), Ops);
+ // Avoid self-assign error b/c CC1 can be `e/ne`.
+ if (BrCond != NewBrCond.getNode())
+ DCI.CombineTo(BrCond, NewBrCond);
+ return X;
+}
+
+static SDValue combineAndOrForCcmpCtest(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
+ // and/or(setcc(cc0, flag0), setcc(cc1, sub (X, Y)))
+ // ->
+ // setcc(cc1, ccmp(X, Y, ~cflags/cflags, cc0/~cc0, flag0))
+
+ // and/or(setcc(cc0, flag0), setcc(cc1, cmp (X, 0)))
+ // ->
+ // setcc(cc1, ctest(X, X, ~cflags/cflags, cc0/~cc0, flag0))
+ //
+ // where cflags is determined by cc1.
+
+ if (!ST.hasCCMP())
+ return SDValue();
+
+ SDValue SetCC0 = N->getOperand(0);
+ SDValue SetCC1 = N->getOperand(1);
+ if (SetCC0.getOpcode() != X86ISD::SETCC ||
+ SetCC1.getOpcode() != X86ISD::SETCC)
+ return SDValue();
+
+ auto GetCombineToOpc = [&](SDValue V) -> unsigned {
+ SDValue Op = V.getOperand(1);
+ unsigned Opc = Op.getOpcode();
+ if (Opc == X86ISD::SUB)
+ return X86ISD::CCMP;
+ if (Opc == X86ISD::CMP && isNullConstant(Op.getOperand(1)))
+ return X86ISD::CTEST;
+ return 0U;
+ };
+
+ unsigned NewOpc = 0;
+
+ // AND/OR is commutable. Canonicalize the operands to make SETCC with SUB/CMP
+ // appear on the right.
+ if (!(NewOpc = GetCombineToOpc(SetCC1))) {
+ std::swap(SetCC0, SetCC1);
+ if (!(NewOpc = GetCombineToOpc(SetCC1)))
+ return SDValue();
+ }
+
+ X86::CondCode CC0 =
+ static_cast<X86::CondCode>(SetCC0.getConstantOperandVal(0));
+ // CCMP/CTEST is not conditional when the source condition is COND_P/COND_NP.
+ if (CC0 == X86::COND_P || CC0 == X86::COND_NP)
+ return SDValue();
+
+ bool IsOR = N->getOpcode() == ISD::OR;
+
+ // CMP/TEST is executed and updates the EFLAGS normally only when SrcCC
+ // evaluates to true. So we need to use the inverse of CC0 as SrcCC when the
+ // logic operator is OR. Similarly for CC1.
+ SDValue SrcCC =
+ IsOR ? DAG.getTargetConstant(X86::GetOppositeBranchCondition(CC0),
+ SDLoc(SetCC0.getOperand(0)), MVT::i8)
+ : SetCC0.getOperand(0);
+ SDValue CC1N = SetCC1.getOperand(0);
+ X86::CondCode CC1 =
+ static_cast<X86::CondCode>(CC1N->getAsAPIntVal().getSExtValue());
+ X86::CondCode OppositeCC1 = X86::GetOppositeBranchCondition(CC1);
+ X86::CondCode CFlagsCC = IsOR ? CC1 : OppositeCC1;
+ SDLoc DL(N);
+ SDValue CFlags = DAG.getTargetConstant(
+ X86::getCCMPCondFlagsFromCondCode(CFlagsCC), DL, MVT::i8);
+ SDValue Sub = SetCC1.getOperand(1);
+
+ // Replace any uses of the old flag produced by SUB/CMP with the new one
+ // produced by CCMP/CTEST.
+ SDValue CCMP = (NewOpc == X86ISD::CCMP)
+ ? DAG.getNode(X86ISD::CCMP, DL, MVT::i32,
+ {Sub.getOperand(0), Sub.getOperand(1),
+ CFlags, SrcCC, SetCC0.getOperand(1)})
+ : DAG.getNode(X86ISD::CTEST, DL, MVT::i32,
+ {Sub.getOperand(0), Sub.getOperand(0),
+ CFlags, SrcCC, SetCC0.getOperand(1)});
+
+ return DAG.getNode(X86ISD::SETCC, DL, MVT::i8, {CC1N, CCMP});
+}
+
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -49291,6 +49443,9 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue SetCC = combineAndOrForCcmpCtest(N, DAG, DCI, Subtarget))
+ return SetCC;
+
if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
return V;
@@ -50076,6 +50231,9 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue SetCC = combineAndOrForCcmpCtest(N, DAG, DCI, Subtarget))
+ return SetCC;
+
if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
return R;
@@ -54597,6 +54755,7 @@ static bool onlyZeroFlagUsed(SDValue Flags) {
}
static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// Only handle test patterns.
if (!isNullConstant(N->getOperand(1)))
@@ -54611,6 +54770,10 @@ static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
EVT VT = Op.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (SDValue CMP =
+ combineX86SubCmpForFlags(N, SDValue(N, 0), DAG, DCI, Subtarget))
+ return CMP;
+
// If we have a constant logical shift that's only used in a comparison
// against zero turn it into an equivalent AND. This allows turning it into
// a TEST instruction later.
@@ -54739,7 +54902,8 @@ static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
}
static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &ST) {
assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
"Expected X86ISD::ADD or X86ISD::SUB");
@@ -54750,6 +54914,10 @@ static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
bool IsSub = X86ISD::SUB == N->getOpcode();
unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
+ if (IsSub && isOneConstant(N->getOperand(1)) && !N->hasAnyUseOfValue(0))
+ if (SDValue CMP = combineX86SubCmpForFlags(N, SDValue(N, 1), DAG, DCI, ST))
+ return CMP;
+
// If we don't use the flag result, simplify back to a generic ADD/SUB.
if (!N->hasAnyUseOfValue(1)) {
SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
@@ -57049,11 +57217,11 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
- case X86ISD::CMP: return combineCMP(N, DAG, Subtarget);
+ case X86ISD::CMP: return combineCMP(N, DAG, DCI, Subtarget);
case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
case X86ISD::ADD:
- case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
+ case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI, Subtarget);
case X86ISD::SBB: return combineSBB(N, DAG);
case X86ISD::ADC: return combineADC(N, DAG, DCI);
case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 14b9eb7329432..1facd1dff9f14 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -735,6 +735,10 @@ namespace llvm {
// Perform an FP80 add after changing precision control in FPCW.
FP80_ADD,
+ // Conditional compare instructions
+ CCMP,
+ CTEST,
+
/// X86 strict FP compare instructions.
STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
STRICT_FCMPS,
diff --git a/llvm/lib/Target/X86/X86InstrConditionalCompare.td b/llvm/lib/Target/X86/X86InstrConditionalCompare.td
index e5c1143eba87f..3d296773103b5 100644
--- a/llvm/lib/Target/X86/X86InstrConditionalCompare.td
+++ b/llvm/lib/Target/X86/X86InstrConditionalCompare.td
@@ -78,6 +78,34 @@ let mayLoad = 1 in {
}
}
+def : Pat<(X86ccmp GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8rr GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16rr GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32rr GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64rr GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ccmp GR8:$src1, (i8 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8ri GR8:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, (i16 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16ri GR16:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, (i32 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32ri GR32:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, (i64 imm:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64ri32 GR64:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ccmp GR8:$src1, (loadi8 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP8rm GR8:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR16:$src1, (loadi16 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP16rm GR16:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR32:$src1, (loadi32 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP32rm GR32:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ccmp GR64:$src1, (loadi64 addr:$src2), timm:$dcf, timm:$cond, EFLAGS),
+ (CCMP64rm GR64:$src1, addr:$src2, timm:$dcf, timm:$cond)>;
+
+
//===----------------------------------------------------------------------===//
// CTEST Instructions
//
@@ -108,3 +136,21 @@ let mayLoad = 1 in {
def CTEST64mr: Ctest<0x85, MRMDestMem, Xi64, i64mem, GR64>;
}
}
+
+def : Pat<(X86ctest GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST8rr GR8:$src1, GR8:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST16rr GR16:$src1, GR16:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST32rr GR32:$src1, GR32:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctest GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond, EFLAGS),
+ (CTEST64rr GR64:$src1, GR64:$src2, timm:$dcf, timm:$cond)>;
+
+def : Pat<(X86ctestpat GR8:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST8ri GR8:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR16:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST16ri GR16:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR32:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST32ri GR32:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
+def : Pat<(X86ctestpat GR64:$src1, imm:$src2, timm:$dcf, timm:$cond),
+ (CTEST64ri32 GR64:$src1, imm:$src2, timm:$dcf, timm:$cond)>;
diff --git a/llvm/lib/Target/X86/X86InstrFragments.td b/llvm/lib/Target/X86/X86InstrFragments.td
index 142e1867e6160..162e322712a6d 100644
--- a/llvm/lib/Target/X86/X86InstrFragments.td
+++ b/llvm/lib/Target/X86/X86InstrFragments.td
@@ -12,6 +12,9 @@ def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
SDTCisSameAs<1, 2>]>;
+def SDTX86Ccmp : SDTypeProfile<1, 5,
+ [SDTCisVT<3, i8>, SDTCisVT<4, i8>, SDTCisVT<5, i32>]>;
+
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
@@ -138,6 +141,9 @@ def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
+def X86ccmp : SDNode<"X86ISD::CCMP", SDTX86Ccmp>;
+def X86ctest : SDNode<"X86ISD::CTEST", SDTX86Ccmp>;
+
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
[SDNPHasChain]>;
@@ -577,6 +583,14 @@ def add_su : binop_oneuse<add>;
def and_su : binop_oneuse<and>;
def srl_su : binop_oneuse<srl>;
+class binop_twouses<SDPatternOperator operator>
+ : PatFrag<(ops node:$A, node:$B),
+ (operator node:$A, node:$B), [{
+ return N->hasNUsesOfValue(2, 0);
+}]>;
+
+def and_du : binop_twouses<and>;
+
// unary op with only one user
class unop_oneuse<SDPatternOperator operator>
: PatFrag<(ops node:$A),
@@ -601,7 +615,10 @@ def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
(X86cmp (and_su node:$lhs, node:$rhs), 0)>;
-
+def X86ctestpat : PatFrag<(ops node:$lhs, node:$rhs, node:$dcf, node:$cond),
+ (X86ctest (and_du node:$lhs, node:$rhs),
+ (and_du node:$lhs, node:$rhs), node:$dcf,
+ node:$cond, EFLAGS)>;
def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
[(X86strict_fcmp node:$lhs, node:$rhs),
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 26c68ce3c1a2d..7d05f950b6fe9 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3164,6 +3164,63 @@ X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) {
: X86::COND_INVALID;
}
+int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) {
+ // CCMP/CTEST has two conditional operands:
+ // - SCC: source conditional code (same as CMOV)
+ // - DCF: destination conditional flags, which has 4 valid bits
+ //
+ // +----+----+----+----+
+ // | OF | SF | ZF | CF |
+ // +----+----+----+----+
+ //
+ // If SCC (source conditional code) evaluates to false, CCMP/CTEST will
+ // update the conditional flags as follows:
+ //
+ // OF = DCF.OF
+ // SF = DCF.SF
+ // ZF = DCF.ZF
+ // CF = DCF.CF
+ // PF = DCF.CF
+ // AF = 0 (Auxiliary Carry Flag)
+ //
+ // Otherwise, the CMP or TEST is executed and it updates the
+ // CSPAZO flags normally.
+ //
+ // NOTE:
+ // If SCC = P, then SCC evaluates to true regardless of the CSPAZO value.
+ // If SCC = NP, then SCC evaluates to false regardless of the CSPAZO value.
+
+ enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
+
+ switch (CC) {
+ default:
+ llvm_unreachable("Illegal condition code!");
+ case X86::COND_NO:
+ case X86::COND_NE:
+ case X86::COND_GE:
+ case X86::COND_G:
+ case X86::COND_AE:
+ case X86::COND_A:
+ case X86::COND_NS:
+ case X86::COND_NP:
+ return 0;
+ case X86::COND_O:
+ return OF;
+ case X86::COND_B:
+ case X86::COND_BE:
+ return CF;
+ case X86::COND_E:
+ case X86::COND_LE:
+ return ZF;
+ case X86::COND_S:
+ case X86::COND_L:
+ return SF;
+ case X86::COND_P:
+ return PF;
+ }
+}
+
/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 55deca73b1f3a..295fac60c6e40 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -74,6 +74,9 @@ CondCode getCondFromCFCMov(const MachineInstr &MI);
// Turn CCMP instruction into condition code.
CondCode getCondFromCCMP(const MachineInstr &MI);
+// Turn condition code into condition flags for CCMP/CTEST.
+int getCCMPCondFlagsFromCondCode(CondCode CC);
+
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);
diff --git a/llvm/test/CodeGen/X86/apx/ccmp.ll b/llvm/test/CodeGen/X86/apx/ccmp.ll
new file mode 100644
index 0000000000000..e081024b86989
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ccmp.ll
@@ -0,0 +1,1102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs | FileCheck %s --check-prefix=NDD
+
+define void @ccmp8rr_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} %dl, %sil
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} %dl, %sil
+; NDD-NEXT: jne .LBB0_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB0_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8rr_cf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ccmp8rr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $2, %dil
+; CHECK-NEXT: ccmpgeb {dfv=cf} $2, %sil
+; CHECK-NEXT: jb .LBB1_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB1_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $2, %dil
+; NDD-NEXT: ccmpgeb {dfv=cf} $2, %sil
+; NDD-NEXT: jb .LBB1_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB1_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i8 %a, 1
+ %tobool = icmp ugt i8 %b, 1
+ %or.cond = and i1 %cmp, %tobool
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i8 @ccmp8rr_sf(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ccmpneb {dfv=sf} $2, %sil
+; CHECK-NEXT: jl .LBB2_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB2_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ccmpneb {dfv=sf} $2, %sil
+; NDD-NEXT: jl .LBB2_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB2_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp sgt i8 %b, 1
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8rr_none(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8rr_none:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ccmpeb {dfv=} $2, %sil
+; CHECK-NEXT: jl .LBB3_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB3_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rr_none:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ccmpeb {dfv=} $2, %sil
+; NDD-NEXT: jl .LBB3_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB3_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp sgt i8 %b, 1
+ %or.cond = select i1 %tobool, i1 true, i1 %cmp
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ccmp16rr_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: ccmplew {dfv=sf} %dx, %si
+; CHECK-NEXT: jge .LBB4_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB4_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: ccmplew {dfv=sf} %dx, %si
+; NDD-NEXT: jge .LBB4_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB4_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rr_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32rr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpbl {dfv=cf} %edx, %esi
+; CHECK-NEXT: ja .LBB5_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB5_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32rr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpbl {dfv=cf} %edx, %esi
+; NDD-NEXT: ja .LBB5_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB5_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rr_of(i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: ccmp64rr_of:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpbq {dfv=of} %rsi, %rdi
+; CHECK-NEXT: jno .LBB6_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB6_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rr_of:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpbq {dfv=of} %rsi, %rdi
+; NDD-NEXT: jno .LBB6_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB6_1: # %if.end
+; NDD-NEXT: retq
+bb:
+ %cmp = icmp uge i64 %a, %c
+ %smul = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ %obit = extractvalue {i64, i1} %smul, 1
+ %or.cond = or i1 %cmp, %obit
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rr_of_crossbb(i64 %a, i64 %b) {
+; CHECK-LABEL: ccmp64rr_of_crossbb:
+; CHECK: # %bb.0: # %bb
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: je .LBB7_2
+; CHECK-NEXT: # %bb.1: # %bb1
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: .LBB7_2: # %bb3
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rr_of_crossbb:
+; NDD: # %bb.0: # %bb
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: je .LBB7_2
+; NDD-NEXT: # %bb.1: # %bb1
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: .LBB7_2: # %bb3
+; NDD-NEXT: retq
+bb:
+ %cond1 = icmp eq i64 %a, 0
+ br i1 %cond1, label %bb3, label %bb1
+
+bb1: ; preds = %bb
+ %smul = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ %obit = extractvalue {i64, i1} %smul, 1
+ br i1 %obit, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ %tmp = ptrtoint ptr null to i64
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1, %bb
+ ret void
+}
+
+define void @ccmp8ri_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c) {
+; CHECK-LABEL: ccmp8ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpleb {dfv=zf} $123, %sil
+; CHECK-NEXT: jne .LBB8_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB8_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpleb {dfv=zf} $123, %sil
+; NDD-NEXT: jne .LBB8_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB8_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define i8 @ccmp8ri_zf_double(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: ccmpeb {dfv=zf} $123, %dil
+; CHECK-NEXT: je .LBB9_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB9_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorpd %xmm1, %xmm1
+; NDD-NEXT: ucomisd %xmm1, %xmm0
+; NDD-NEXT: ccmpeb {dfv=zf} $123, %dil
+; NDD-NEXT: je .LBB9_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB9_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp ueq double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8ri_zf_double_p(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double_p:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $123, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB10_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB10_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double_p:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $123, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB10_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB10_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp uno double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ccmp8ri_zf_double_np(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ccmp8ri_zf_double_np:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb $123, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB11_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB11_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8ri_zf_double_np:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb $123, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setnp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB11_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB11_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 123
+ %cmp = fcmp ord double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define void @ccmp16ri_zf(i16 noundef %a, i16 noundef %b, i16 noundef %c) {
+; CHECK-LABEL: ccmp16ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: movswl %si, %eax
+; CHECK-NEXT: ccmpael {dfv=sf} $1234, %eax # imm = 0x4D2
+; CHECK-NEXT: jge .LBB12_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB12_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: movswl %si, %eax
+; NDD-NEXT: ccmpael {dfv=sf} $1234, %eax # imm = 0x4D2
+; NDD-NEXT: jge .LBB12_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB12_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ult i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32ri_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp32ri_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpbl {dfv=cf} $123457, %esi # imm = 0x1E241
+; CHECK-NEXT: jae .LBB13_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB13_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32ri_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpbl {dfv=cf} $123457, %esi # imm = 0x1E241
+; NDD-NEXT: jae .LBB13_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB13_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp uge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64ri32_zf(i64 noundef %a, i64 noundef %b, i64 noundef %c) {
+; CHECK-LABEL: ccmp64ri32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpbeq {dfv=sf} $123456, %rsi # imm = 0x1E240
+; CHECK-NEXT: jge .LBB14_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB14_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64ri32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpbeq {dfv=sf} $123456, %rsi # imm = 0x1E240
+; NDD-NEXT: jge .LBB14_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB14_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ugt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8rm_zf(i8 noundef %a, i8 noundef %b, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8rm_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %dl, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} (%rcx), %sil
+; CHECK-NEXT: jne .LBB15_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB15_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8rm_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %dl, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} (%rcx), %sil
+; NDD-NEXT: jne .LBB15_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB15_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16rm_sf(i16 noundef %a, i16 noundef %b, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %dx, %di
+; CHECK-NEXT: ccmplew {dfv=sf} (%rcx), %si
+; CHECK-NEXT: jge .LBB16_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB16_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %dx, %di
+; NDD-NEXT: ccmplew {dfv=sf} (%rcx), %si
+; NDD-NEXT: jge .LBB16_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB16_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32rm_cf(i32 noundef %a, i32 noundef %b, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32rm_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %edx, %edi
+; CHECK-NEXT: ccmpgl {dfv=cf} (%rcx), %esi
+; CHECK-NEXT: ja .LBB17_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB17_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32rm_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %edx, %edi
+; NDD-NEXT: ccmpgl {dfv=cf} (%rcx), %esi
+; NDD-NEXT: ja .LBB17_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB17_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i32, ptr %ptr
+ %cmp = icmp sle i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64rm_sf(i64 noundef %a, i64 noundef %b, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64rm_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rdx, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi
+; CHECK-NEXT: jge .LBB18_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB18_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64rm_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rdx, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} (%rcx), %rsi
+; NDD-NEXT: jge .LBB18_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB18_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %d = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %d
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mr_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx)
+; CHECK-NEXT: jne .LBB19_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB19_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil
+; NDD-NEXT: ccmpgeb {dfv=zf} %sil, (%rdx)
+; NDD-NEXT: jne .LBB19_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB19_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp slt i8 %a, %c
+ %cmp1 = icmp eq i8 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mr_sf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di
+; CHECK-NEXT: ccmplew {dfv=sf} %si, (%rdx)
+; CHECK-NEXT: jge .LBB20_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB20_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: ccmplew {dfv=sf} %si, (%rdx)
+; NDD-NEXT: jge .LBB20_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB20_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mr_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mr_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ccmpll {dfv=cf} %esi, (%rdx)
+; CHECK-NEXT: ja .LBB21_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB21_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32mr_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ccmpll {dfv=cf} %esi, (%rdx)
+; NDD-NEXT: ja .LBB21_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB21_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp sge i32 %a, %c
+ %cmp1 = icmp ule i32 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mr_sf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx)
+; CHECK-NEXT: jge .LBB22_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB22_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64mr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} %rsi, (%rdx)
+; NDD-NEXT: jge .LBB22_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB22_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, %c
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp8mi_zf(i8 noundef %a, i8 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp8mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpb %sil, %dil
+; CHECK-NEXT: ccmpneb {dfv=zf} $123, (%rdx)
+; CHECK-NEXT: jne .LBB23_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB23_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp8mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpb %sil, %dil
+; NDD-NEXT: ccmpneb {dfv=zf} $123, (%rdx)
+; NDD-NEXT: jne .LBB23_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB23_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, %c
+ %cmp1 = icmp eq i8 %b, 123
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp16mi_zf(i16 noundef %a, i16 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp16mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpw %si, %di
+; CHECK-NEXT: ccmplew {dfv=sf} $1234, (%rdx) # imm = 0x4D2
+; CHECK-NEXT: jge .LBB24_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB24_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp16mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpw %si, %di
+; NDD-NEXT: ccmplew {dfv=sf} $1234, (%rdx) # imm = 0x4D2
+; NDD-NEXT: jge .LBB24_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB24_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp sgt i16 %a, %c
+ %cmp1 = icmp slt i16 %b, 1234
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp32mi_cf(i32 noundef %a, i32 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp32mi_cf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ccmpnel {dfv=cf} $123457, (%rdx) # imm = 0x1E241
+; CHECK-NEXT: jae .LBB25_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB25_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp32mi_cf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ccmpnel {dfv=cf} $123457, (%rdx) # imm = 0x1E241
+; NDD-NEXT: jae .LBB25_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB25_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, %c
+ %cmp1 = icmp ule i32 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp64mi32_zf(i64 noundef %a, i64 noundef %c, ptr %ptr) {
+; CHECK-LABEL: ccmp64mi32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpq %rsi, %rdi
+; CHECK-NEXT: ccmpleq {dfv=sf} $123456, (%rdx) # imm = 0x1E240
+; CHECK-NEXT: jge .LBB26_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB26_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp64mi32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpq %rsi, %rdi
+; NDD-NEXT: ccmpleq {dfv=sf} $123456, (%rdx) # imm = 0x1E240
+; NDD-NEXT: jge .LBB26_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB26_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp sgt i64 %a, %c
+ %cmp1 = icmp slt i64 %b, 123456
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ccmp_continous(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp_continous:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ccmplel {dfv=} $2, %esi
+; CHECK-NEXT: ccmpll {dfv=} $3, %edx
+; CHECK-NEXT: jge .LBB27_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB27_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_continous:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ccmplel {dfv=} $2, %esi
+; NDD-NEXT: ccmpll {dfv=} $3, %edx
+; NDD-NEXT: jge .LBB27_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB27_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp slt i32 %a, 1
+ %cmp1 = icmp slt i32 %b, 2
+ %or.cond = and i1 %cmp, %cmp1
+ %cmp3 = icmp slt i32 %c, 3
+ %or.cond4 = and i1 %or.cond, %cmp3
+ br i1 %or.cond4, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
+define i32 @ccmp_nobranch(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ccmp_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ccmplel {dfv=} $2, %esi
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ccmplel {dfv=} $2, %esi
+; NDD-NEXT: setge %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp sgt i32 %b, 1
+ %or.cond.not = or i1 %cmp, %cmp1
+ %. = zext i1 %or.cond.not to i32
+ ret i32 %.
+}
+
+define i32 @ccmp_continous_nobranch(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ccmp_continous_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl $2, %edi
+; CHECK-NEXT: ccmpll {dfv=sf} $2, %esi
+; CHECK-NEXT: ccmpll {dfv=sf} $4, %edx
+; CHECK-NEXT: setge %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ccmp_continous_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl $2, %edi
+; NDD-NEXT: ccmpll {dfv=sf} $2, %esi
+; NDD-NEXT: ccmpll {dfv=sf} $4, %edx
+; NDD-NEXT: setge %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 1
+ %cmp1 = icmp slt i32 %b, 2
+ %cmp2 = icmp sgt i32 %c, 3
+ %or1 = or i1 %cmp, %cmp1
+ %or2 = and i1 %or1, %cmp2
+ %. = zext i1 %or2 to i32
+ ret i32 %.
+}
+
+declare dso_local void @foo(...)
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/apx/ctest.ll b/llvm/test/CodeGen/X86/apx/ctest.ll
new file mode 100644
index 0000000000000..22afc39fd40c9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/ctest.ll
@@ -0,0 +1,905 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ccmp,+ndd -verify-machineinstrs | FileCheck %s --check-prefix=NDD
+
+define void @ctest8rr_zf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} %sil, %sil
+; CHECK-NEXT: jne .LBB0_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB0_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} %sil, %sil
+; NDD-NEXT: jne .LBB0_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB0_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %cmp1 = icmp eq i8 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define i8 @ctest8rr_zf_double(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorpd %xmm1, %xmm1
+; CHECK-NEXT: ucomisd %xmm1, %xmm0
+; CHECK-NEXT: ctesteb {dfv=zf} %dil, %dil
+; CHECK-NEXT: je .LBB1_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB1_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: xorpd %xmm1, %xmm1
+; NDD-NEXT: ucomisd %xmm1, %xmm0
+; NDD-NEXT: ctesteb {dfv=zf} %dil, %dil
+; NDD-NEXT: je .LBB1_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB1_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp ueq double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ctest8rr_zf_double_p(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double_p:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB2_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB2_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double_p:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB2_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB2_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp uno double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
+define i8 @ctest8rr_zf_double_np(i8 %a, double %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_zf_double_np:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: ucomisd %xmm0, %xmm0
+; CHECK-NEXT: setnp %cl
+; CHECK-NEXT: andb %al, %cl
+; CHECK-NEXT: cmpb $1, %cl
+; CHECK-NEXT: jne .LBB3_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rsi)
+; CHECK-NEXT: .LBB3_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_zf_double_np:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: setne %al
+; NDD-NEXT: ucomisd %xmm0, %xmm0
+; NDD-NEXT: setnp %cl
+; NDD-NEXT: andb %cl, %al
+; NDD-NEXT: cmpb $1, %al
+; NDD-NEXT: jne .LBB3_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rsi)
+; NDD-NEXT: .LBB3_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = fcmp ord double %b, 0.0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
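+; icmp ule X, 0 is the same as X == 0, giving the E source condition; only
+; SF is read by the following js, so dfv=sf suffices.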
+define void @ctest8rr_sf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctesteb {dfv=sf} %sil, %sil
+; CHECK-NEXT: js .LBB4_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB4_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctesteb {dfv=sf} %sil, %sil
+; NDD-NEXT: js .LBB4_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB4_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp ule i8 %a, 0
+ %tobool = icmp sge i8 %b, 0
+ %or.cond = and i1 %cmp, %tobool
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
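+; For an OR of conditions the CTEST runs under the inverse (le) of the first
+; compare (sgt), testing %b only when %a did not already decide the branch.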
+define i8 @ctest8rr_sf_2(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_sf_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestleb {dfv=sf} %sil, %sil
+; CHECK-NEXT: jns .LBB5_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB5_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_sf_2:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestleb {dfv=sf} %sil, %sil
+; NDD-NEXT: jns .LBB5_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB5_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp sgt i8 %a, 0
+ %cmp = icmp slt i8 %b, 0
+ %or.cond = select i1 %tobool, i1 true, i1 %cmp
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
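+; All required default flag values are zero here, so the dfv list is empty.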
+define i8 @ctest8rr_none(i8 %a, i8 %b, i8* nocapture %c) {
+; CHECK-LABEL: ctest8rr_none:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=} %sil, %sil
+; CHECK-NEXT: jne .LBB6_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: movb %dil, (%rdx)
+; CHECK-NEXT: .LBB6_2: # %if.end
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8rr_none:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=} %sil, %sil
+; NDD-NEXT: jne .LBB6_2
+; NDD-NEXT: # %bb.1: # %if.then
+; NDD-NEXT: movb %dil, (%rdx)
+; NDD-NEXT: .LBB6_2: # %if.end
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: retq
+entry:
+ %tobool = icmp ne i8 %a, 0
+ %cmp = icmp eq i8 %b, 0
+ %or.cond = select i1 %tobool, i1 %cmp, i1 false
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then:
+ store i8 %a, i8* %c, align 4
+ br label %if.end
+
+if.end:
+ ret i8 0
+}
+
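+; The combine is width-agnostic; the variants below exercise the 16-, 32-
+; and 64-bit register classes.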
+define void @ctest16rr_sf(i16 noundef %a, i16 noundef %b) {
+; CHECK-LABEL: ctest16rr_sf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestlew {dfv=sf} %si, %si
+; CHECK-NEXT: jns .LBB7_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB7_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16rr_sf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestlew {dfv=sf} %si, %si
+; NDD-NEXT: jns .LBB7_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB7_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i16 %a, 0
+ %cmp1 = icmp slt i16 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32rr_zf(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest32rr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestsl {dfv=zf} %esi, %esi
+; CHECK-NEXT: jne .LBB8_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB8_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32rr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestsl {dfv=zf} %esi, %esi
+; NDD-NEXT: jne .LBB8_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB8_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sge i32 %a, 0
+ %cmp1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
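+; An AND with an immediate feeding a zero test matches the immediate CTEST
+; form, testing the register directly against the mask.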
+define void @ctest8ri_zf(i8 noundef %a, i8 noundef %b) {
+; CHECK-LABEL: ctest8ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} $123, %sil
+; CHECK-NEXT: jne .LBB9_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB9_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} $123, %sil
+; NDD-NEXT: jne .LBB9_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB9_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp1 = icmp eq i8 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
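+; The i16 AND is promoted to a 32-bit AND, so the immediate pattern does not
+; match and the masked value is tested register-to-register instead.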
+define void @ctest16ri_zf(i16 noundef %a, i16 noundef %b) {
+; CHECK-LABEL: ctest16ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andl $1234, %esi # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %si, %si
+; CHECK-NEXT: jne .LBB10_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB10_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: andl $1234, %esi, %eax # imm = 0x4D2
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB10_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB10_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp1 = icmp eq i16 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
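+; In contrast to the i16 case, a 32-bit immediate folds directly into the
+; conditional test.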
+define void @ctest32ri_zf(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest32ri_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnel {dfv=zf} $12345, %esi # imm = 0x3039
+; CHECK-NEXT: jne .LBB11_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB11_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32ri_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnel {dfv=zf} $12345, %esi # imm = 0x3039
+; NDD-NEXT: jne .LBB11_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB11_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i32 %a, 0
+ %and = and i32 %b, 12345
+ %cmp1 = icmp eq i32 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
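+; 64-bit tests use the sign-extended 32-bit immediate (ri32) form.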
+define void @ctest64ri32_zf(i64 noundef %a, i64 noundef %b) {
+; CHECK-LABEL: ctest64ri32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestneq {dfv=zf} $123456, %rsi # imm = 0x1E240
+; CHECK-NEXT: jne .LBB12_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB12_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64ri32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestneq {dfv=zf} $123456, %rsi # imm = 0x1E240
+; NDD-NEXT: jne .LBB12_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB12_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp eq i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp1 = icmp eq i64 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
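+; In the mr tests below the load currently stays separate and the loaded
+; value is tested register-to-register.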
+define void @ctest8mr_zf(i8 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzbl (%rsi), %eax
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} %al, %al
+; CHECK-NEXT: jne .LBB13_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB13_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzbl (%rsi), %eax
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} %al, %al
+; NDD-NEXT: jne .LBB13_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB13_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, 0
+ %cmp1 = icmp eq i8 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest16mr_zf(i16 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB14_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB14_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB14_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB14_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp eq i16 %a, 0
+ %cmp1 = icmp eq i16 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32mr_zf(i32 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl (%rsi), %eax
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnel {dfv=zf} %eax, %eax
+; CHECK-NEXT: jne .LBB15_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB15_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movl (%rsi), %eax
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnel {dfv=zf} %eax, %eax
+; NDD-NEXT: jne .LBB15_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB15_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, 0
+ %cmp1 = icmp eq i32 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest64mr_zf(i64 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mr_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq (%rsi), %rax
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestneq {dfv=zf} %rax, %rax
+; CHECK-NEXT: jne .LBB16_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB16_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64mr_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movq (%rsi), %rax
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestneq {dfv=zf} %rax, %rax
+; NDD-NEXT: jne .LBB16_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB16_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp eq i64 %a, 0
+ %cmp1 = icmp eq i64 %b, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
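+; In the mi tests the 8- and 64-bit masks fold into a memory-immediate
+; conditional test, while the narrowed 16- and 32-bit cases keep a separate
+; AND.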
+define void @ctest8mi_zf(i8 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest8mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testb %dil, %dil
+; CHECK-NEXT: ctestneb {dfv=zf} $123, (%rsi)
+; CHECK-NEXT: jne .LBB17_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB17_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest8mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testb %dil, %dil
+; NDD-NEXT: ctestneb {dfv=zf} $123, (%rsi)
+; NDD-NEXT: jne .LBB17_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB17_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i8, ptr %ptr
+ %cmp = icmp eq i8 %a, 0
+ %and = and i8 %b, 123
+ %cmp1 = icmp eq i8 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest16mi_zf(i16 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest16mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: andl $1234, %eax # imm = 0x4D2
+; CHECK-NEXT: testw %di, %di
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB18_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB18_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest16mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: andl $1234, %eax # imm = 0x4D2
+; NDD-NEXT: testw %di, %di
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB18_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB18_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i16, ptr %ptr
+ %cmp = icmp eq i16 %a, 0
+ %and = and i16 %b, 1234
+ %cmp1 = icmp eq i16 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest32mi_zf(i32 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest32mi_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: andl $12345, %eax # imm = 0x3039
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestnew {dfv=zf} %ax, %ax
+; CHECK-NEXT: jne .LBB19_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB19_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest32mi_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: movzwl (%rsi), %eax
+; NDD-NEXT: andl $12345, %eax # imm = 0x3039
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestnew {dfv=zf} %ax, %ax
+; NDD-NEXT: jne .LBB19_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB19_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i32, ptr %ptr
+ %cmp = icmp eq i32 %a, 0
+ %and = and i32 %b, 12345
+ %cmp1 = icmp eq i32 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
+define void @ctest64mi32_zf(i64 noundef %a, ptr %ptr) {
+; CHECK-LABEL: ctest64mi32_zf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: ctestnel {dfv=zf} $123456, (%rsi) # imm = 0x1E240
+; CHECK-NEXT: jne .LBB20_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB20_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest64mi32_zf:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testq %rdi, %rdi
+; NDD-NEXT: ctestnel {dfv=zf} $123456, (%rsi) # imm = 0x1E240
+; NDD-NEXT: jne .LBB20_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB20_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %b = load i64, ptr %ptr
+ %cmp = icmp eq i64 %a, 0
+ %and = and i64 %b, 123456
+ %cmp1 = icmp eq i64 %and, 0
+ %or.cond = or i1 %cmp, %cmp1
+ br i1 %or.cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+}
+
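+; A chain of && / || conditions yields back-to-back CTESTs, each predicated
+; on the flags of the previous compare, with a single branch at the end.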
+define void @ctest_continuous(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ctest_continuous:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: ctestll {dfv=} %esi, %esi
+; CHECK-NEXT: ctestnsl {dfv=sf} %edx, %edx
+; CHECK-NEXT: jns .LBB21_1
+; CHECK-NEXT: # %bb.2: # %if.then
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: jmp foo # TAILCALL
+; CHECK-NEXT: .LBB21_1: # %if.end
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_continuous:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: cmpl %esi, %edi
+; NDD-NEXT: ctestll {dfv=} %esi, %esi
+; NDD-NEXT: ctestnsl {dfv=sf} %edx, %edx
+; NDD-NEXT: jns .LBB21_1
+; NDD-NEXT: # %bb.2: # %if.then
+; NDD-NEXT: xorl %eax, %eax
+; NDD-NEXT: jmp foo # TAILCALL
+; NDD-NEXT: .LBB21_1: # %if.end
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp slt i32 %a, %b
+ %cmp1 = icmp slt i32 %b, 0
+ %or.cond = and i1 %cmp, %cmp1
+ %cmp2 = icmp slt i32 %c, 0
+ %or.cond3 = or i1 %or.cond, %cmp2
+ br i1 %or.cond3, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void (...) @foo()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ ret void
+}
+
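+; CTEST also forms when the combined condition feeds a setcc rather than a
+; branch.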
+define i32 @ctest_nobranch(i32 noundef %a, i32 noundef %b) {
+; CHECK-LABEL: ctest_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestlel {dfv=} %esi, %esi
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestlel {dfv=} %esi, %esi
+; NDD-NEXT: setg %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp sgt i32 %b, 0
+ %or.cond.not = or i1 %cmp, %cmp1
+ %. = zext i1 %or.cond.not to i32
+ ret i32 %.
+}
+
+define i32 @ctest_continuous_nobranch(i32 noundef %a, i32 noundef %b, i32 noundef %c) {
+; CHECK-LABEL: ctest_continuous_nobranch:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: ctestlel {dfv=sf} %esi, %esi
+; CHECK-NEXT: ctestsl {dfv=zf} %edx, %edx
+; CHECK-NEXT: setg %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: retq
+;
+; NDD-LABEL: ctest_continuous_nobranch:
+; NDD: # %bb.0: # %entry
+; NDD-NEXT: testl %edi, %edi
+; NDD-NEXT: ctestlel {dfv=sf} %esi, %esi
+; NDD-NEXT: ctestsl {dfv=zf} %edx, %edx
+; NDD-NEXT: setg %al
+; NDD-NEXT: movzbl %al, %eax
+; NDD-NEXT: retq
+entry:
+ %cmp = icmp sgt i32 %a, 0
+ %cmp1 = icmp slt i32 %b, 0
+ %cmp2 = icmp sgt i32 %c, 0
+ %or1 = or i1 %cmp, %cmp1
+ %or2 = and i1 %or1, %cmp2
+ %. = zext i1 %or2 to i32
+ ret i32 %.
+}
+
+declare dso_local void @foo(...)