[llvm] [Xtensa] Implement lowering SELECT_CC, SETCC. (PR #97017)
Andrei Safronov via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 9 06:02:39 PDT 2024
https://github.com/andreisfr updated https://github.com/llvm/llvm-project/pull/97017
>From d37939680d08589fcdbbf9277a17ac16e0ff7dc2 Mon Sep 17 00:00:00 2001
From: Andrei Safronov <safronov at espressif.com>
Date: Fri, 28 Jun 2024 09:48:48 +0300
Subject: [PATCH 1/4] [Xtensa] Implement lowering SELECT_CC, SETCC.
---
llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 186 ++++++++++++++
llvm/lib/Target/Xtensa/XtensaISelLowering.h | 28 ++-
llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 32 +++
llvm/lib/Target/Xtensa/XtensaOperators.td | 9 +
llvm/test/CodeGen/Xtensa/select-cc.ll | 125 ++++++++++
llvm/test/CodeGen/Xtensa/setcc.ll | 232 ++++++++++++++++++
llvm/utils/UpdateTestChecks/asm.py | 8 +
7 files changed, 619 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/Xtensa/select-cc.ll
create mode 100644 llvm/test/CodeGen/Xtensa/setcc.ll
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index 6509793012504..8f19ae1b8e672 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -90,6 +90,26 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::i64, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ // Used by legalize types to correctly generate the setcc result.
+ // AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
+ setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
+ setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32);
+
+ setOperationAction(ISD::BR_CC, MVT::i32, Legal);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
+
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+
+ setOperationAction(ISD::SETCC, MVT::i32, Custom);
+ setOperationAction(ISD::SETCC, MVT::i64, Expand);
+
+ // make BRCOND legal, its actually only legal for a subset of conds
+ setOperationAction(ISD::BRCOND, MVT::Other, Legal);
+
// Implement custom stack allocations
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
// Implement custom stack save and restore
@@ -514,6 +534,38 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
}
+SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getOperand(0).getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue TrueValue = Op.getOperand(2);
+ SDValue FalseValue = Op.getOperand(3);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(4))->get();
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+
+ // Wrap select nodes
+ return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
+ FalseValue, TargetCC);
+}
+
+SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT Ty = Op.getOperand(0).getValueType();
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+
+ // Expand to target SELECT_CC
+ SDValue TrueValue = DAG.getConstant(1, DL, Op.getValueType());
+ SDValue FalseValue = DAG.getConstant(0, DL, Op.getValueType());
+
+ return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
+ FalseValue, TargetCC);
+}
+
SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
SelectionDAG &DAG) const {
const ConstantSDNode *CN = cast<ConstantSDNode>(Op);
@@ -676,6 +728,10 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
return LowerJumpTable(Op, DAG);
case ISD::ConstantPool:
return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
+ case ISD::SETCC:
+ return LowerSETCC(Op, DAG);
+ case ISD::SELECT_CC:
+ return LowerSELECT_CC(Op, DAG);
case ISD::STACKSAVE:
return LowerSTACKSAVE(Op, DAG);
case ISD::STACKRESTORE:
@@ -697,6 +753,136 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "XtensaISD::PCREL_WRAPPER";
case XtensaISD::RET:
return "XtensaISD::RET";
+ case XtensaISD::SELECT:
+ return "XtensaISD::SELECT";
+ case XtensaISD::SELECT_CC:
+ return "XtensaISD::SELECT_CC";
}
return nullptr;
}
+
+//===----------------------------------------------------------------------===//
+// Custom insertion
+//===----------------------------------------------------------------------===//
+
+static int GetBranchKind(int Cond, bool &BrInv) {
+ switch (Cond) {
+ case ISD::SETEQ:
+ case ISD::SETOEQ:
+ case ISD::SETUEQ:
+ return Xtensa::BEQ;
+ case ISD::SETNE:
+ case ISD::SETONE:
+ case ISD::SETUNE:
+ return Xtensa::BNE;
+ case ISD::SETLT:
+ case ISD::SETOLT:
+ return Xtensa::BLT;
+ case ISD::SETLE:
+ case ISD::SETOLE:
+ BrInv = true;
+ return Xtensa::BGE;
+ case ISD::SETGT:
+ case ISD::SETOGT:
+ BrInv = true;
+ return Xtensa::BLT;
+ case ISD::SETGE:
+ case ISD::SETOGE:
+ return Xtensa::BGE;
+ case ISD::SETULT:
+ return Xtensa::BLTU;
+ case ISD::SETULE:
+ BrInv = true;
+ return Xtensa::BGEU;
+ case ISD::SETUGT:
+ BrInv = true;
+ return Xtensa::BLTU;
+ case ISD::SETUGE:
+ return Xtensa::BGEU;
+ default:
+ return -1;
+ }
+}
+
+MachineBasicBlock *
+XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+
+ MachineOperand &LHS = MI.getOperand(1);
+ MachineOperand &RHS = MI.getOperand(2);
+ MachineOperand &TrueValue = MI.getOperand(3);
+ MachineOperand &FalseValue = MI.getOperand(4);
+ MachineOperand &Cond = MI.getOperand(5);
+
+ // To "insert" a SELECT_CC instruction, we actually have to insert
+ // CopyMBB and SinkMBB blocks and add branch to MBB. We build phi
+ // operation in SinkMBB like phi (TrueValue, FalseValue), where TrueValue
+ // is passed from MBB and FalseValue is passed from CopyMBB.
+ // MBB
+ // | \
+ // | CopyMBB
+ // | /
+ // SinkMBB
+ // The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
+ MachineFunction::iterator It = ++MBB->getIterator();
+
+ MachineFunction *F = MBB->getParent();
+ MachineBasicBlock *CopyMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ F->insert(It, CopyMBB);
+ F->insert(It, SinkMBB);
+
+ // Transfer the remainder of MBB and its successor edges to SinkMBB.
+ SinkMBB->splice(SinkMBB->begin(), MBB,
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
+ SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ MBB->addSuccessor(CopyMBB);
+ MBB->addSuccessor(SinkMBB);
+
+ bool BrInv = false;
+ int BrKind = GetBranchKind(Cond.getImm(), BrInv);
+ if (BrInv) {
+ BuildMI(MBB, DL, TII.get(BrKind))
+ .addReg(RHS.getReg())
+ .addReg(LHS.getReg())
+ .addMBB(SinkMBB);
+ } else {
+ BuildMI(MBB, DL, TII.get(BrKind))
+ .addReg(LHS.getReg())
+ .addReg(RHS.getReg())
+ .addMBB(SinkMBB);
+ }
+
+ CopyMBB->addSuccessor(SinkMBB);
+
+ // SinkMBB:
+ // %Result = phi [ %FalseValue, CopyMBB ], [ %TrueValue, MBB ]
+ // ...
+
+ BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(Xtensa::PHI),
+ MI.getOperand(0).getReg())
+ .addReg(FalseValue.getReg())
+ .addMBB(CopyMBB)
+ .addReg(TrueValue.getReg())
+ .addMBB(MBB);
+
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+ return SinkMBB;
+}
+
+MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *MBB) const {
+ switch (MI.getOpcode()) {
+ case Xtensa::SELECT:
+ return emitSelectCC(MI, MBB);
+ default:
+ report_fatal_error("Unexpected instr type to insert");
+ }
+}
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
index 23a0217daaa96..8a4491c38db5f 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
@@ -33,7 +33,14 @@ enum {
// Wraps a TargetGlobalAddress that should be loaded using PC-relative
// accesses. Operand 0 is the address.
PCREL_WRAPPER,
- RET
+ RET,
+
+ // Selects between operand 0 and operand 1. Operand 2 is the
+ // mask of condition-code values for which operand 0 should be
+ // chosen over operand 1; it has the same form as BR_CCMASK.
+ // Operand 3 is the flag operand.
+ SELECT,
+ SELECT_CC
};
}
@@ -44,6 +51,13 @@ class XtensaTargetLowering : public TargetLowering {
explicit XtensaTargetLowering(const TargetMachine &TM,
const XtensaSubtarget &STI);
+ EVT getSetCCResultType(const DataLayout &, LLVMContext &,
+ EVT VT) const override {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+ }
+
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -71,6 +85,10 @@ class XtensaTargetLowering : public TargetLowering {
const XtensaSubtarget &getSubtarget() const { return Subtarget; }
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
+
private:
const XtensaSubtarget &Subtarget;
@@ -86,6 +104,10 @@ class XtensaTargetLowering : public TargetLowering {
SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
@@ -95,6 +117,10 @@ class XtensaTargetLowering : public TargetLowering {
SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const;
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
+
+ // Implement EmitInstrWithCustomInserter for individual operation types.
+ MachineBasicBlock *emitSelectCC(MachineInstr &MI,
+ MachineBasicBlock *BB) const;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
index f68d20dcdd54a..9e3a35808d0b6 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
@@ -425,6 +425,29 @@ def : Pat<(brcc SETLE, AR:$s, AR:$t, bb:$target),
def : Pat<(brcc SETULE, AR:$s, AR:$t, bb:$target),
(BGEU AR:$t, AR:$s, bb:$target)>;
+def : Pat<(brcond (i32 (seteq AR:$s, AR:$t)), bb:$target),
+ (BEQ AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setne AR:$s, AR:$t)), bb:$target),
+ (BNE AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setge AR:$s, AR:$t)), bb:$target),
+ (BGE AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target),
+ (BLT AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setuge AR:$s, AR:$t)), bb:$target),
+ (BGEU AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setult AR:$s, AR:$t)), bb:$target),
+ (BLTU AR:$s, AR:$t, bb:$target)>;
+def : Pat<(brcond (i32 (setgt AR:$s, AR:$t)), bb:$target),
+ (BLT AR:$t, AR:$s, bb:$target)>;
+def : Pat<(brcond (i32 (setugt AR:$s, AR:$t)), bb:$target),
+ (BLTU AR:$t, AR:$s, bb:$target)>;
+def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target),
+ (BGE AR:$t, AR:$s, bb:$target)>;
+def : Pat<(brcond (i32 (setule AR:$s, AR:$t)), bb:$target),
+ (BGEU AR:$t, AR:$s, bb:$target)>;
+
+def : Pat<(brcond AR:$s, bb:$target), (BNEZ AR:$s, bb:$target)>;
+
//===----------------------------------------------------------------------===//
// Call and jump instructions
//===----------------------------------------------------------------------===//
@@ -574,3 +597,12 @@ let Defs = [SP], Uses = [SP] in {
"#ADJCALLSTACKUP",
[(Xtensa_callseq_end timm:$amt1, timm:$amt2)]>;
}
+
+//===----------------------------------------------------------------------===//
+// Generic select instruction
+//===----------------------------------------------------------------------===//
+let usesCustomInserter = 1 in {
+ def SELECT : Pseudo<(outs AR:$dst), (ins AR:$lhs, AR:$rhs, AR:$t, AR:$f, i32imm:$cond),
+ "!select $dst, $lhs, $rhs, $t, $f, $cond",
+ [(set AR:$dst, (Xtensa_select_cc AR:$lhs, AR:$rhs, AR:$t, AR:$f, imm:$cond))]>;
+}
diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td
index 88d3c9dfe7fd8..aab2d2d2bbe79 100644
--- a/llvm/lib/Target/Xtensa/XtensaOperators.td
+++ b/llvm/lib/Target/Xtensa/XtensaOperators.td
@@ -19,6 +19,11 @@ def SDT_XtensaWrapPtr : SDTypeProfile<1, 1,
def SDT_XtensaBrJT : SDTypeProfile<0, 2,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
+
+def SDT_XtensaSelectCC : SDTypeProfile<1, 5,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<2, 3>,
+ SDTCisVT<5, i32>]>;
//===----------------------------------------------------------------------===//
// Node definitions
//===----------------------------------------------------------------------===//
@@ -38,3 +43,7 @@ def Xtensa_callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XtensaCallSeqEnd,
SDNPOutGlue]>;
def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>;
+
+def Xtensa_select : SDNode<"XtensaISD::SELECT", SDTSelect>;
+def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC,
+ [SDNPInGlue]>;
diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll
new file mode 100644
index 0000000000000..6073ee7cc2558
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/select-cc.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \
+; RUN: | FileCheck %s
+
+define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: foo:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: beq a2, a8, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: bne a2, a8, .LBB0_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_4:
+; CHECK-NEXT: bltu a8, a2, .LBB0_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_6:
+; CHECK-NEXT: bgeu a2, a8, .LBB0_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_8:
+; CHECK-NEXT: bltu a2, a8, .LBB0_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_10:
+; CHECK-NEXT: bgeu a8, a2, .LBB0_12
+; CHECK-NEXT: # %bb.11:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_12:
+; CHECK-NEXT: blt a8, a2, .LBB0_14
+; CHECK-NEXT: # %bb.13:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_14:
+; CHECK-NEXT: bge a2, a8, .LBB0_16
+; CHECK-NEXT: # %bb.15:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_16:
+; CHECK-NEXT: blt a2, a8, .LBB0_18
+; CHECK-NEXT: # %bb.17:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_18:
+; CHECK-NEXT: bge a8, a2, .LBB0_20
+; CHECK-NEXT: # %bb.19:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_20:
+; CHECK-NEXT: movi a9, 1
+; CHECK-NEXT: blt a8, a9, .LBB0_22
+; CHECK-NEXT: # %bb.21:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_22:
+; CHECK-NEXT: movi a9, -1
+; CHECK-NEXT: blt a9, a8, .LBB0_24
+; CHECK-NEXT: # %bb.23:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_24:
+; CHECK-NEXT: movi a9, 1024
+; CHECK-NEXT: blt a9, a8, .LBB0_26
+; CHECK-NEXT: # %bb.25:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_26:
+; CHECK-NEXT: movi a9, 2046
+; CHECK-NEXT: bltu a9, a8, .LBB0_28
+; CHECK-NEXT: # %bb.27:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB0_28:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp eq i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+
+ %val3 = load i32, ptr %b
+ %tst2 = icmp ne i32 %val2, %val3
+ %val4 = select i1 %tst2, i32 %val2, i32 %val3
+
+ %val5 = load i32, ptr %b
+ %tst3 = icmp ugt i32 %val4, %val5
+ %val6 = select i1 %tst3, i32 %val4, i32 %val5
+
+ %val7 = load i32, ptr %b
+ %tst4 = icmp uge i32 %val6, %val7
+ %val8 = select i1 %tst4, i32 %val6, i32 %val7
+
+ %val9 = load i32, ptr %b
+ %tst5 = icmp ult i32 %val8, %val9
+ %val10 = select i1 %tst5, i32 %val8, i32 %val9
+
+ %val11 = load i32, ptr %b
+ %tst6 = icmp ule i32 %val10, %val11
+ %val12 = select i1 %tst6, i32 %val10, i32 %val11
+
+ %val13 = load i32, ptr %b
+ %tst7 = icmp sgt i32 %val12, %val13
+ %val14 = select i1 %tst7, i32 %val12, i32 %val13
+
+ %val15 = load i32, ptr %b
+ %tst8 = icmp sge i32 %val14, %val15
+ %val16 = select i1 %tst8, i32 %val14, i32 %val15
+
+ %val17 = load i32, ptr %b
+ %tst9 = icmp slt i32 %val16, %val17
+ %val18 = select i1 %tst9, i32 %val16, i32 %val17
+
+ %val19 = load i32, ptr %b
+ %tst10 = icmp sle i32 %val18, %val19
+ %val20 = select i1 %tst10, i32 %val18, i32 %val19
+
+ %val21 = load i32, ptr %b
+ %tst11 = icmp slt i32 %val21, 1
+ %val22 = select i1 %tst11, i32 %val20, i32 %val21
+
+ %val23 = load i32, ptr %b
+ %tst12 = icmp sgt i32 %val21, -1
+ %val24 = select i1 %tst12, i32 %val22, i32 %val23
+
+ %val25 = load i32, ptr %b
+ %tst13 = icmp sgt i32 %val25, 1024
+ %val26 = select i1 %tst13, i32 %val24, i32 %val25
+
+ %val27 = load i32, ptr %b
+ %tst14 = icmp ugt i32 %val21, 2046
+ %val28 = select i1 %tst14, i32 %val26, i32 %val27
+ ret i32 %val28
+}
diff --git a/llvm/test/CodeGen/Xtensa/setcc.ll b/llvm/test/CodeGen/Xtensa/setcc.ll
new file mode 100644
index 0000000000000..a8557b2365a31
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/setcc.ll
@@ -0,0 +1,232 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=xtensa-unknown-elf -O0 | FileCheck %s
+
+define i32 @f1(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f1:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: beq a2, a3, .LBB0_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp eq i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f2(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f2:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: blt a2, a3, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp slt i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f3(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f3:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bge a3, a2, .LBB2_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sle i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f4(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f4:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: blt a3, a2, .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sgt i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f5(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f5:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bge a2, a3, .LBB4_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB4_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sge i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f6(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f6:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bne a2, a3, .LBB5_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB5_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ne i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f7(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f7:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a2, a3, .LBB6_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB6_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ult i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f8:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a3, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ule i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f9(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f9:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a3, a2, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ugt i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
+
+define i32 @f10(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f10:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a2, a3, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp uge i32 %a, %b
+ %res = zext i1 %cond to i32
+ ret i32 %res
+}
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index f150098eaaeef..3fa67e34e21d7 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -109,6 +109,13 @@ class string:
flags=(re.M | re.S),
)
+ASM_FUNCTION_XTENSA_RE = re.compile(
+ r'^_?(?P<func>[^.:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n# %bb.0:\n'
+ r'(?P<body>.*?)\n'
+ r'^\.Lfunc_end\d+:\n', # Match the end label
+ flags=(re.M | re.S)
+)
+
ASM_FUNCTION_PPC_RE = re.compile(
r"#[ \-\t]*Begin function (?P<func>[^.:]+)\n"
r".*?"
@@ -579,6 +586,7 @@ def get_run_handler(triple):
"nvptx": (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
"loongarch32": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
"loongarch64": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
+ "xtensa": (scrub_asm_avr, ASM_FUNCTION_XTENSA_RE),
}
handler = None
best_prefix = ""
>From c42bf96eb6b22037c7d0391f18881962f3446ff9 Mon Sep 17 00:00:00 2001
From: Andrei Safronov <safronov at espressif.com>
Date: Wed, 3 Jul 2024 12:26:16 +0300
Subject: [PATCH 2/4] [Xtensa] Fix setcc test and remove redundant code.
---
llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 4 -
llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 22 --
llvm/test/CodeGen/Xtensa/select-cc.ll | 259 +++++++++++-------
llvm/test/CodeGen/Xtensa/setcc.ll | 1 -
llvm/utils/UpdateTestChecks/asm.py | 8 -
5 files changed, 159 insertions(+), 135 deletions(-)
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index 8f19ae1b8e672..c9db99ca54f6f 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -91,7 +91,6 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
// Used by legalize types to correctly generate the setcc result.
- // AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32);
@@ -107,9 +106,6 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SETCC, MVT::i32, Custom);
setOperationAction(ISD::SETCC, MVT::i64, Expand);
- // make BRCOND legal, its actually only legal for a subset of conds
- setOperationAction(ISD::BRCOND, MVT::Other, Legal);
-
// Implement custom stack allocations
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
// Implement custom stack save and restore
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
index 9e3a35808d0b6..2d8242ad2b5f2 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
@@ -425,28 +425,6 @@ def : Pat<(brcc SETLE, AR:$s, AR:$t, bb:$target),
def : Pat<(brcc SETULE, AR:$s, AR:$t, bb:$target),
(BGEU AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcond (i32 (seteq AR:$s, AR:$t)), bb:$target),
- (BEQ AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setne AR:$s, AR:$t)), bb:$target),
- (BNE AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setge AR:$s, AR:$t)), bb:$target),
- (BGE AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target),
- (BLT AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setuge AR:$s, AR:$t)), bb:$target),
- (BGEU AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setult AR:$s, AR:$t)), bb:$target),
- (BLTU AR:$s, AR:$t, bb:$target)>;
-def : Pat<(brcond (i32 (setgt AR:$s, AR:$t)), bb:$target),
- (BLT AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcond (i32 (setugt AR:$s, AR:$t)), bb:$target),
- (BLTU AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target),
- (BGE AR:$t, AR:$s, bb:$target)>;
-def : Pat<(brcond (i32 (setule AR:$s, AR:$t)), bb:$target),
- (BGEU AR:$t, AR:$s, bb:$target)>;
-
-def : Pat<(brcond AR:$s, bb:$target), (BNEZ AR:$s, bb:$target)>;
//===----------------------------------------------------------------------===//
// Call and jump instructions
diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll
index 6073ee7cc2558..3a020ec433233 100644
--- a/llvm/test/CodeGen/Xtensa/select-cc.ll
+++ b/llvm/test/CodeGen/Xtensa/select-cc.ll
@@ -1,125 +1,184 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck %s
-define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
-; CHECK-LABEL: foo:
+define signext i32 @f_eq(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_eq:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: beq a2, a8, .LBB0_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: or a2, a8, a8
; CHECK-NEXT: .LBB0_2:
-; CHECK-NEXT: bne a2, a8, .LBB0_4
-; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: bltu a8, a2, .LBB0_6
-; CHECK-NEXT: # %bb.5:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_6:
-; CHECK-NEXT: bgeu a2, a8, .LBB0_8
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_8:
-; CHECK-NEXT: bltu a2, a8, .LBB0_10
-; CHECK-NEXT: # %bb.9:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_10:
-; CHECK-NEXT: bgeu a8, a2, .LBB0_12
-; CHECK-NEXT: # %bb.11:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_12:
-; CHECK-NEXT: blt a8, a2, .LBB0_14
-; CHECK-NEXT: # %bb.13:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_14:
-; CHECK-NEXT: bge a2, a8, .LBB0_16
-; CHECK-NEXT: # %bb.15:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_16:
-; CHECK-NEXT: blt a2, a8, .LBB0_18
-; CHECK-NEXT: # %bb.17:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_18:
-; CHECK-NEXT: bge a8, a2, .LBB0_20
-; CHECK-NEXT: # %bb.19:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_20:
-; CHECK-NEXT: movi a9, 1
-; CHECK-NEXT: blt a8, a9, .LBB0_22
-; CHECK-NEXT: # %bb.21:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_22:
-; CHECK-NEXT: movi a9, -1
-; CHECK-NEXT: blt a9, a8, .LBB0_24
-; CHECK-NEXT: # %bb.23:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_24:
-; CHECK-NEXT: movi a9, 1024
-; CHECK-NEXT: blt a9, a8, .LBB0_26
-; CHECK-NEXT: # %bb.25:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_26:
-; CHECK-NEXT: movi a9, 2046
-; CHECK-NEXT: bltu a9, a8, .LBB0_28
-; CHECK-NEXT: # %bb.27:
-; CHECK-NEXT: or a2, a8, a8
-; CHECK-NEXT: .LBB0_28:
; CHECK-NEXT: ret
%val1 = load i32, ptr %b
%tst1 = icmp eq i32 %a, %val1
%val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val3 = load i32, ptr %b
- %tst2 = icmp ne i32 %val2, %val3
- %val4 = select i1 %tst2, i32 %val2, i32 %val3
-
- %val5 = load i32, ptr %b
- %tst3 = icmp ugt i32 %val4, %val5
- %val6 = select i1 %tst3, i32 %val4, i32 %val5
+define signext i32 @f_ne(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ne:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bne a2, a8, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp ne i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val7 = load i32, ptr %b
- %tst4 = icmp uge i32 %val6, %val7
- %val8 = select i1 %tst4, i32 %val6, i32 %val7
+define signext i32 @f_ugt(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ugt:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bltu a8, a2, .LBB2_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp ugt i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val9 = load i32, ptr %b
- %tst5 = icmp ult i32 %val8, %val9
- %val10 = select i1 %tst5, i32 %val8, i32 %val9
+define signext i32 @f_uge(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_uge:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bgeu a2, a8, .LBB3_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB3_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp uge i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val11 = load i32, ptr %b
- %tst6 = icmp ule i32 %val10, %val11
- %val12 = select i1 %tst6, i32 %val10, i32 %val11
+define signext i32 @f_ult(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bltu a2, a8, .LBB4_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB4_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp ult i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val13 = load i32, ptr %b
- %tst7 = icmp sgt i32 %val12, %val13
- %val14 = select i1 %tst7, i32 %val12, i32 %val13
+define signext i32 @f_ule(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ule:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bgeu a8, a2, .LBB5_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB5_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp ule i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val15 = load i32, ptr %b
- %tst8 = icmp sge i32 %val14, %val15
- %val16 = select i1 %tst8, i32 %val14, i32 %val15
+define signext i32 @f_sgt(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: blt a8, a2, .LBB6_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB6_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp sgt i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val17 = load i32, ptr %b
- %tst9 = icmp slt i32 %val16, %val17
- %val18 = select i1 %tst9, i32 %val16, i32 %val17
+define signext i32 @f_sge(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sge:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bge a2, a8, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp sge i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val19 = load i32, ptr %b
- %tst10 = icmp sle i32 %val18, %val19
- %val20 = select i1 %tst10, i32 %val18, i32 %val19
+define signext i32 @f_slt(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: blt a2, a8, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp slt i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val21 = load i32, ptr %b
- %tst11 = icmp slt i32 %val21, 1
- %val22 = select i1 %tst11, i32 %val20, i32 %val21
+define signext i32 @f_sle(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sle:
+; CHECK: l32i a8, a3, 0
+; CHECK-NEXT: bge a8, a2, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a8, a8
+; CHECK-NEXT: .LBB9_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp sle i32 %a, %val1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val23 = load i32, ptr %b
- %tst12 = icmp sgt i32 %val21, -1
- %val24 = select i1 %tst12, i32 %val22, i32 %val23
+define signext i32 @f_slt_imm(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt_imm:
+; CHECK: movi a8, 1
+; CHECK-NEXT: blt a2, a8, .LBB10_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a2, a3, 0
+; CHECK-NEXT: .LBB10_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp slt i32 %a, 1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val25 = load i32, ptr %b
- %tst13 = icmp sgt i32 %val25, 1024
- %val26 = select i1 %tst13, i32 %val24, i32 %val25
+define signext i32 @f_sgt_imm(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt_imm:
+; CHECK: movi a8, -1
+; CHECK-NEXT: blt a8, a2, .LBB11_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a2, a3, 0
+; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp sgt i32 %a, -1
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
+}
- %val27 = load i32, ptr %b
- %tst14 = icmp ugt i32 %val21, 2046
- %val28 = select i1 %tst14, i32 %val26, i32 %val27
- ret i32 %val28
+define signext i32 @f_ult_imm(i32 signext %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult_imm:
+; CHECK: movi a8, 1024
+; CHECK-NEXT: bltu a2, a8, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a2, a3, 0
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: ret
+ %val1 = load i32, ptr %b
+ %tst1 = icmp ult i32 %a, 1024
+ %val2 = select i1 %tst1, i32 %a, i32 %val1
+ ret i32 %val2
}
diff --git a/llvm/test/CodeGen/Xtensa/setcc.ll b/llvm/test/CodeGen/Xtensa/setcc.ll
index a8557b2365a31..f3dccf0d1bdcc 100644
--- a/llvm/test/CodeGen/Xtensa/setcc.ll
+++ b/llvm/test/CodeGen/Xtensa/setcc.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=xtensa-unkonwn-elf -O0 | FileCheck %s
define i32 @f1(i32 %a, i32 %b) nounwind {
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 3fa67e34e21d7..f150098eaaeef 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -109,13 +109,6 @@ class string:
flags=(re.M | re.S),
)
-ASM_FUNCTION_XTENSA_RE = re.compile(
- r'^_?(?P<func>[^.:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n# %bb.0:\n'
- r'(?P<body>.*?)\n'
- r'^\.Lfunc_end\d+:\n', # Match the end label
- flags=(re.M | re.S)
-)
-
ASM_FUNCTION_PPC_RE = re.compile(
r"#[ \-\t]*Begin function (?P<func>[^.:]+)\n"
r".*?"
@@ -586,7 +579,6 @@ def get_run_handler(triple):
"nvptx": (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
"loongarch32": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
"loongarch64": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
- "xtensa": (scrub_asm_avr, ASM_FUNCTION_XTENSA_RE),
}
handler = None
best_prefix = ""
>From c697e00816b978dbfac6ef679628f8d4b32c0b45 Mon Sep 17 00:00:00 2001
From: Andrei Safronov <safronov at espressif.com>
Date: Sun, 7 Jul 2024 02:54:18 +0300
Subject: [PATCH 3/4] [Xtensa] Minor fixes, remove redundant code.
Removed redundant operation promotions and actions. Also removed SETCC
lowering and redundant branch-kind cases from the GetBranchKind() function.
Implemented additional br_cc tests.
---
llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 43 +-----
llvm/lib/Target/Xtensa/XtensaISelLowering.h | 11 +-
llvm/lib/Target/Xtensa/XtensaOperators.td | 1 -
llvm/test/CodeGen/Xtensa/brcc.ll | 130 ++++++++++++++++--
4 files changed, 127 insertions(+), 58 deletions(-)
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index c9db99ca54f6f..901ec133d5922 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -85,26 +85,13 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
// indirect jump.
setOperationAction(ISD::BR_JT, MVT::Other, Custom);
- setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32);
setOperationAction(ISD::BR_CC, MVT::i32, Legal);
setOperationAction(ISD::BR_CC, MVT::i64, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- // Used by legalize types to correctly generate the setcc result.
- setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
- setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32);
-
- setOperationAction(ISD::BR_CC, MVT::i32, Legal);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
-
setOperationAction(ISD::SELECT, MVT::i32, Expand);
- setOperationAction(ISD::SELECT, MVT::i64, Expand);
-
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
-
- setOperationAction(ISD::SETCC, MVT::i32, Custom);
- setOperationAction(ISD::SETCC, MVT::i64, Expand);
+ setOperationAction(ISD::SETCC, MVT::i32, Expand);
// Implement custom stack allocations
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
@@ -546,22 +533,6 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
FalseValue, TargetCC);
}
-SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT Ty = Op.getOperand(0).getValueType();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
-
- // Expand to target SELECT_CC
- SDValue TrueValue = DAG.getConstant(1, DL, Op.getValueType());
- SDValue FalseValue = DAG.getConstant(0, DL, Op.getValueType());
-
- return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
- FalseValue, TargetCC);
-}
-
SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
SelectionDAG &DAG) const {
const ConstantSDNode *CN = cast<ConstantSDNode>(Op);
@@ -724,8 +695,6 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
return LowerJumpTable(Op, DAG);
case ISD::ConstantPool:
return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
- case ISD::SETCC:
- return LowerSETCC(Op, DAG);
case ISD::SELECT_CC:
return LowerSELECT_CC(Op, DAG);
case ISD::STACKSAVE:
@@ -749,8 +718,6 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "XtensaISD::PCREL_WRAPPER";
case XtensaISD::RET:
return "XtensaISD::RET";
- case XtensaISD::SELECT:
- return "XtensaISD::SELECT";
case XtensaISD::SELECT_CC:
return "XtensaISD::SELECT_CC";
}
@@ -764,26 +731,18 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
static int GetBranchKind(int Cond, bool &BrInv) {
switch (Cond) {
case ISD::SETEQ:
- case ISD::SETOEQ:
- case ISD::SETUEQ:
return Xtensa::BEQ;
case ISD::SETNE:
- case ISD::SETONE:
- case ISD::SETUNE:
return Xtensa::BNE;
case ISD::SETLT:
- case ISD::SETOLT:
return Xtensa::BLT;
case ISD::SETLE:
- case ISD::SETOLE:
BrInv = true;
return Xtensa::BGE;
case ISD::SETGT:
- case ISD::SETOGT:
BrInv = true;
return Xtensa::BLT;
case ISD::SETGE:
- case ISD::SETOGE:
return Xtensa::BGE;
case ISD::SETULT:
return Xtensa::BLTU;
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
index 8a4491c38db5f..657756dc30950 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h
@@ -35,11 +35,10 @@ enum {
PCREL_WRAPPER,
RET,
- // Selects between operand 0 and operand 1. Operand 2 is the
- // mask of condition-code values for which operand 0 should be
- // chosen over operand 1; it has the same form as BR_CCMASK.
- // Operand 3 is the flag operand.
- SELECT,
+ // Select with condition operator - This selects between a true value and
+ // a false value (ops #2 and #3) based on the boolean result of comparing
+ // the lhs and rhs (ops #0 and #1) of a conditional expression with the
+ // condition code in op #4
SELECT_CC
};
}
@@ -104,8 +103,6 @@ class XtensaTargetLowering : public TargetLowering {
SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
- SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
-
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td
index aab2d2d2bbe79..93cd1c933dbde 100644
--- a/llvm/lib/Target/Xtensa/XtensaOperators.td
+++ b/llvm/lib/Target/Xtensa/XtensaOperators.td
@@ -44,6 +44,5 @@ def Xtensa_callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XtensaCallSeqEnd,
def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>;
-def Xtensa_select : SDNode<"XtensaISD::SELECT", SDTSelect>;
def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC,
[SDNPInGlue]>;
diff --git a/llvm/test/CodeGen/Xtensa/brcc.ll b/llvm/test/CodeGen/Xtensa/brcc.ll
index 83f6dfd1aebc3..05e59d36993ed 100644
--- a/llvm/test/CodeGen/Xtensa/brcc.ll
+++ b/llvm/test/CodeGen/Xtensa/brcc.ll
@@ -1,10 +1,10 @@
; RUN: llc -march=xtensa < %s | FileCheck %s
-; CHECK-LABEL: brcc1:
+; CHECK-LABEL: brcc_sgt:
; CHECK: bge a3, a2, .LBB0_2
; CHECK: addi a2, a2, 4
; CHECK: .LBB0_2:
-define i32 @brcc1(i32 %a, i32 %b) nounwind {
+define i32 @brcc_sgt(i32 %a, i32 %b) nounwind {
entry:
%wb = icmp sgt i32 %a, %b
br i1 %wb, label %t1, label %t2
@@ -19,11 +19,11 @@ exit:
ret i32 %v
}
-; CHECK-LABEL: brcc2
+; CHECK-LABEL: brcc_ugt
; CHECK: bgeu a3, a2, .LBB1_2
; CHECK: addi a2, a2, 4
; CHECK: .LBB1_2:
-define i32 @brcc2(i32 %a, i32 %b) nounwind {
+define i32 @brcc_ugt(i32 %a, i32 %b) nounwind {
entry:
%wb = icmp ugt i32 %a, %b
br i1 %wb, label %t1, label %t2
@@ -38,11 +38,11 @@ exit:
ret i32 %v
}
-; CHECK-LABEL: brcc3:
+; CHECK-LABEL: brcc_sle:
; CHECK: blt a3, a2, .LBB2_2
; CHECK: addi a2, a2, 4
; CHECK: .LBB2_2:
-define i32 @brcc3(i32 %a, i32 %b) nounwind {
+define i32 @brcc_sle(i32 %a, i32 %b) nounwind {
entry:
%wb = icmp sle i32 %a, %b
br i1 %wb, label %t1, label %t2
@@ -57,11 +57,11 @@ exit:
ret i32 %v
}
-; CHECK-LABEL: brcc4
+; CHECK-LABEL: brcc_ule
; CHECK: bltu a3, a2, .LBB3_2
; CHECK: addi a2, a2, 4
; CHECK: .LBB3_2:
-define i32 @brcc4(i32 %a, i32 %b) nounwind {
+define i32 @brcc_ule(i32 %a, i32 %b) nounwind {
entry:
%wb = icmp ule i32 %a, %b
br i1 %wb, label %t1, label %t2
@@ -75,3 +75,117 @@ exit:
%v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
ret i32 %v
}
+
+; CHECK-LABEL: brcc_eq:
+; CHECK: bne a2, a3, .LBB4_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB4_2:
+define i32 @brcc_eq(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp eq i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
+
+; CHECK-LABEL: brcc_ne:
+; CHECK: beq a2, a3, .LBB5_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB5_2:
+define i32 @brcc_ne(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp ne i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
+
+; CHECK-LABEL: brcc_ge:
+; CHECK: blt a2, a3, .LBB6_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB6_2:
+define i32 @brcc_ge(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp sge i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
+
+; CHECK-LABEL: brcc_lt:
+; CHECK: bge a2, a3, .LBB7_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB7_2:
+define i32 @brcc_lt(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp slt i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
+
+; CHECK-LABEL: brcc_uge:
+; CHECK: bltu a2, a3, .LBB8_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB8_2:
+define i32 @brcc_uge(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp uge i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
+
+; CHECK-LABEL: brcc_ult:
+; CHECK: bgeu a2, a3, .LBB9_2
+; CHECK: addi a2, a2, 4
+; CHECK: .LBB9_2:
+define i32 @brcc_ult(i32 %a, i32 %b) nounwind {
+entry:
+ %wb = icmp ult i32 %a, %b
+ br i1 %wb, label %t1, label %t2
+t1:
+ %t1v = add i32 %a, 4
+ br label %exit
+t2:
+ %t2v = add i32 %b, 8
+ br label %exit
+exit:
+ %v = phi i32 [ %t1v, %t1 ], [ %t2v, %t2 ]
+ ret i32 %v
+}
>From ac634c8b08adef1da8114821fbb7ad8a70e994a2 Mon Sep 17 00:00:00 2001
From: Andrei Safronov <safronov at espressif.com>
Date: Tue, 9 Jul 2024 14:50:34 +0300
Subject: [PATCH 4/4] [Xtensa] Add tests for SELECT_CC/SETCC with i64 operands.
Implemented new tests for SELECT_CC/SETCC with i64 operands, and applied
minor code fixes.
---
llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 98 ++--
llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 +-
llvm/test/CodeGen/Xtensa/select-cc.ll | 346 +++++++++++-
llvm/test/CodeGen/Xtensa/setcc.ll | 514 +++++++++++++++++-
4 files changed, 877 insertions(+), 83 deletions(-)
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
index 901ec133d5922..8bf1e12614296 100644
--- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp
@@ -517,6 +517,38 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
}
+static unsigned getBranchOpcode(ISD::CondCode Cond, bool &BrInv) {
+ BrInv = false;
+ switch (Cond) {
+ case ISD::SETEQ:
+ return Xtensa::BEQ;
+ case ISD::SETNE:
+ return Xtensa::BNE;
+ case ISD::SETLT:
+ return Xtensa::BLT;
+ case ISD::SETLE:
+ BrInv = true;
+ return Xtensa::BGE;
+ case ISD::SETGT:
+ BrInv = true;
+ return Xtensa::BLT;
+ case ISD::SETGE:
+ return Xtensa::BGE;
+ case ISD::SETULT:
+ return Xtensa::BLTU;
+ case ISD::SETULE:
+ BrInv = true;
+ return Xtensa::BGEU;
+ case ISD::SETUGT:
+ BrInv = true;
+ return Xtensa::BLTU;
+ case ISD::SETUGE:
+ return Xtensa::BGEU;
+ default:
+ llvm_unreachable("Unknown branch kind");
+ }
+}
+
SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
@@ -526,11 +558,19 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
SDValue TrueValue = Op.getOperand(2);
SDValue FalseValue = Op.getOperand(3);
ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(4))->get();
- SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+
+ bool BrInv;
+ unsigned BrKind = getBranchOpcode(CC, BrInv);
+ SDValue TargetCC = DAG.getConstant(BrKind, DL, MVT::i32);
// Wrap select nodes
- return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
- FalseValue, TargetCC);
+ if (BrInv) {
+ return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, RHS, LHS, TrueValue,
+ FalseValue, TargetCC);
+ } else {
+ return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
+ FalseValue, TargetCC);
+ }
}
SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
@@ -728,37 +768,6 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
// Custom insertion
//===----------------------------------------------------------------------===//
-static int GetBranchKind(int Cond, bool &BrInv) {
- switch (Cond) {
- case ISD::SETEQ:
- return Xtensa::BEQ;
- case ISD::SETNE:
- return Xtensa::BNE;
- case ISD::SETLT:
- return Xtensa::BLT;
- case ISD::SETLE:
- BrInv = true;
- return Xtensa::BGE;
- case ISD::SETGT:
- BrInv = true;
- return Xtensa::BLT;
- case ISD::SETGE:
- return Xtensa::BGE;
- case ISD::SETULT:
- return Xtensa::BLTU;
- case ISD::SETULE:
- BrInv = true;
- return Xtensa::BGEU;
- case ISD::SETUGT:
- BrInv = true;
- return Xtensa::BLTU;
- case ISD::SETUGE:
- return Xtensa::BGEU;
- default:
- return -1;
- }
-}
-
MachineBasicBlock *
XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
MachineBasicBlock *MBB) const {
@@ -769,7 +778,7 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
MachineOperand &RHS = MI.getOperand(2);
MachineOperand &TrueValue = MI.getOperand(3);
MachineOperand &FalseValue = MI.getOperand(4);
- MachineOperand &Cond = MI.getOperand(5);
+ unsigned BrKind = MI.getOperand(5).getImm();
// To "insert" a SELECT_CC instruction, we actually have to insert
// CopyMBB and SinkMBB blocks and add branch to MBB. We build phi
@@ -801,19 +810,10 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
MBB->addSuccessor(CopyMBB);
MBB->addSuccessor(SinkMBB);
- bool BrInv = false;
- int BrKind = GetBranchKind(Cond.getImm(), BrInv);
- if (BrInv) {
- BuildMI(MBB, DL, TII.get(BrKind))
- .addReg(RHS.getReg())
- .addReg(LHS.getReg())
- .addMBB(SinkMBB);
- } else {
- BuildMI(MBB, DL, TII.get(BrKind))
- .addReg(LHS.getReg())
- .addReg(RHS.getReg())
- .addMBB(SinkMBB);
- }
+ BuildMI(MBB, DL, TII.get(BrKind))
+ .addReg(LHS.getReg())
+ .addReg(RHS.getReg())
+ .addMBB(SinkMBB);
CopyMBB->addSuccessor(SinkMBB);
@@ -838,6 +838,6 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
case Xtensa::SELECT:
return emitSelectCC(MI, MBB);
default:
- report_fatal_error("Unexpected instr type to insert");
+ llvm_unreachable("Unexpected instr type to insert");
}
}
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
index 2d8242ad2b5f2..704f30ceba75f 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td
@@ -582,5 +582,5 @@ let Defs = [SP], Uses = [SP] in {
let usesCustomInserter = 1 in {
def SELECT : Pseudo<(outs AR:$dst), (ins AR:$lhs, AR:$rhs, AR:$t, AR:$f, i32imm:$cond),
"!select $dst, $lhs, $rhs, $t, $f, $cond",
- [(set AR:$dst, (Xtensa_select_cc AR:$lhs, AR:$rhs, AR:$t, AR:$f, imm:$cond))]>;
+ [(set i32:$dst, (Xtensa_select_cc i32:$lhs, i32:$rhs, i32:$t, i32:$f, imm:$cond))]>;
}
diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll
index 3a020ec433233..82a7df9934653 100644
--- a/llvm/test/CodeGen/Xtensa/select-cc.ll
+++ b/llvm/test/CodeGen/Xtensa/select-cc.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck %s
-define signext i32 @f_eq(i32 signext %a, ptr %b) nounwind {
+define i32 @f_eq(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_eq:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: beq a2, a8, .LBB0_2
@@ -15,7 +15,7 @@ define signext i32 @f_eq(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_ne(i32 signext %a, ptr %b) nounwind {
+define i32 @f_ne(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_ne:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bne a2, a8, .LBB1_2
@@ -29,7 +29,7 @@ define signext i32 @f_ne(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_ugt(i32 signext %a, ptr %b) nounwind {
+define i32 @f_ugt(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_ugt:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bltu a8, a2, .LBB2_2
@@ -43,7 +43,7 @@ define signext i32 @f_ugt(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_uge(i32 signext %a, ptr %b) nounwind {
+define i32 @f_uge(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_uge:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bgeu a2, a8, .LBB3_2
@@ -57,7 +57,7 @@ define signext i32 @f_uge(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_ult(i32 signext %a, ptr %b) nounwind {
+define i32 @f_ult(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_ult:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bltu a2, a8, .LBB4_2
@@ -71,7 +71,7 @@ define signext i32 @f_ult(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_ule(i32 signext %a, ptr %b) nounwind {
+define i32 @f_ule(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_ule:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bgeu a8, a2, .LBB5_2
@@ -85,7 +85,7 @@ define signext i32 @f_ule(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_sgt(i32 signext %a, ptr %b) nounwind {
+define i32 @f_sgt(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_sgt:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: blt a8, a2, .LBB6_2
@@ -99,7 +99,7 @@ define signext i32 @f_sgt(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_sge(i32 signext %a, ptr %b) nounwind {
+define i32 @f_sge(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_sge:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bge a2, a8, .LBB7_2
@@ -113,7 +113,7 @@ define signext i32 @f_sge(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_slt(i32 signext %a, ptr %b) nounwind {
+define i32 @f_slt(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_slt:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: blt a2, a8, .LBB8_2
@@ -127,7 +127,7 @@ define signext i32 @f_slt(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_sle(i32 signext %a, ptr %b) nounwind {
+define i32 @f_sle(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_sle:
; CHECK: l32i a8, a3, 0
; CHECK-NEXT: bge a8, a2, .LBB9_2
@@ -141,7 +141,7 @@ define signext i32 @f_sle(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_slt_imm(i32 signext %a, ptr %b) nounwind {
+define i32 @f_slt_imm(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_slt_imm:
; CHECK: movi a8, 1
; CHECK-NEXT: blt a2, a8, .LBB10_2
@@ -155,7 +155,7 @@ define signext i32 @f_slt_imm(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_sgt_imm(i32 signext %a, ptr %b) nounwind {
+define i32 @f_sgt_imm(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_sgt_imm:
; CHECK: movi a8, -1
; CHECK-NEXT: blt a8, a2, .LBB11_2
@@ -169,7 +169,7 @@ define signext i32 @f_sgt_imm(i32 signext %a, ptr %b) nounwind {
ret i32 %val2
}
-define signext i32 @f_ult_imm(i32 signext %a, ptr %b) nounwind {
+define i32 @f_ult_imm(i32 %a, ptr %b) nounwind {
; CHECK-LABEL: f_ult_imm:
; CHECK: movi a8, 1024
; CHECK-NEXT: bltu a2, a8, .LBB12_2
@@ -182,3 +182,323 @@ define signext i32 @f_ult_imm(i32 signext %a, ptr %b) nounwind {
%val2 = select i1 %tst1, i32 %a, i32 %val1
ret i32 %val2
}
+
+; Tests for i64 operands
+
+define i64 @f_eq_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_eq_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: xor a9, a3, a8
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: xor a10, a2, a11
+; CHECK-NEXT: or a9, a10, a9
+; CHECK-NEXT: movi a10, 0
+; CHECK-NEXT: beq a9, a10, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: beq a9, a10, .LBB13_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp eq i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_ne_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ne_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: xor a9, a3, a8
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: xor a10, a2, a11
+; CHECK-NEXT: or a9, a10, a9
+; CHECK-NEXT: movi a10, 0
+; CHECK-NEXT: bne a9, a10, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: bne a9, a10, .LBB14_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB14_4:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp ne i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_ugt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ugt_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bltu a8, a3, .LBB15_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bltu a11, a2, .LBB15_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB15_4:
+; CHECK-NEXT: beq a3, a8, .LBB15_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB15_6:
+; CHECK-NEXT: bne a10, a9, .LBB15_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB15_8:
+; CHECK-NEXT: bne a10, a9, .LBB15_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB15_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp ugt i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_uge_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_uge_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bgeu a3, a8, .LBB16_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB16_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bgeu a2, a11, .LBB16_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB16_4:
+; CHECK-NEXT: beq a3, a8, .LBB16_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB16_6:
+; CHECK-NEXT: bne a10, a9, .LBB16_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB16_8:
+; CHECK-NEXT: bne a10, a9, .LBB16_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB16_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp uge i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_ult_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ult_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bltu a3, a8, .LBB17_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB17_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bltu a2, a11, .LBB17_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB17_4:
+; CHECK-NEXT: beq a3, a8, .LBB17_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB17_6:
+; CHECK-NEXT: bne a10, a9, .LBB17_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB17_8:
+; CHECK-NEXT: bne a10, a9, .LBB17_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB17_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp ult i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_ule_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_ule_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bgeu a8, a3, .LBB18_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB18_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bgeu a11, a2, .LBB18_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB18_4:
+; CHECK-NEXT: beq a3, a8, .LBB18_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB18_6:
+; CHECK-NEXT: bne a10, a9, .LBB18_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB18_8:
+; CHECK-NEXT: bne a10, a9, .LBB18_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB18_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp ule i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_sgt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sgt_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: blt a8, a3, .LBB19_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB19_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bltu a11, a2, .LBB19_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB19_4:
+; CHECK-NEXT: beq a3, a8, .LBB19_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB19_6:
+; CHECK-NEXT: bne a10, a9, .LBB19_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB19_8:
+; CHECK-NEXT: bne a10, a9, .LBB19_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB19_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp sgt i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_sge_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sge_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bge a3, a8, .LBB20_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB20_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bgeu a2, a11, .LBB20_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB20_4:
+; CHECK-NEXT: beq a3, a8, .LBB20_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB20_6:
+; CHECK-NEXT: bne a10, a9, .LBB20_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB20_8:
+; CHECK-NEXT: bne a10, a9, .LBB20_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB20_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp sge i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_slt_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_slt_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: blt a3, a8, .LBB21_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB21_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bltu a2, a11, .LBB21_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB21_4:
+; CHECK-NEXT: beq a3, a8, .LBB21_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB21_6:
+; CHECK-NEXT: bne a10, a9, .LBB21_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB21_8:
+; CHECK-NEXT: bne a10, a9, .LBB21_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB21_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp slt i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
+
+define i64 @f_sle_i64(i64 %a, ptr %b) nounwind {
+; CHECK-LABEL: f_sle_i64:
+; CHECK: l32i a8, a4, 4
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: or a7, a10, a10
+; CHECK-NEXT: bge a8, a3, .LBB22_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: or a7, a9, a9
+; CHECK-NEXT: .LBB22_2:
+; CHECK-NEXT: l32i a11, a4, 0
+; CHECK-NEXT: bgeu a11, a2, .LBB22_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: or a10, a9, a9
+; CHECK-NEXT: .LBB22_4:
+; CHECK-NEXT: beq a3, a8, .LBB22_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: or a10, a7, a7
+; CHECK-NEXT: .LBB22_6:
+; CHECK-NEXT: bne a10, a9, .LBB22_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: or a2, a11, a11
+; CHECK-NEXT: .LBB22_8:
+; CHECK-NEXT: bne a10, a9, .LBB22_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: or a3, a8, a8
+; CHECK-NEXT: .LBB22_10:
+; CHECK-NEXT: ret
+ %val1 = load i64, ptr %b
+ %tst1 = icmp sle i64 %a, %val1
+ %val2 = select i1 %tst1, i64 %a, i64 %val1
+ ret i64 %val2
+}
diff --git a/llvm/test/CodeGen/Xtensa/setcc.ll b/llvm/test/CodeGen/Xtensa/setcc.ll
index f3dccf0d1bdcc..50382356c50e5 100644
--- a/llvm/test/CodeGen/Xtensa/setcc.ll
+++ b/llvm/test/CodeGen/Xtensa/setcc.ll
@@ -1,7 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=xtensa-unkonwn-elf -O0 | FileCheck %s
+; RUN: llc < %s -mtriple=xtensa-unknown-elf -O0 | FileCheck %s
-define i32 @f1(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f1:
+define i32 @f_eq(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_eq:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -23,8 +24,8 @@ define i32 @f1(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f2(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f2:
+define i32 @f_slt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_slt:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -46,8 +47,8 @@ define i32 @f2(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f3(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f3:
+define i32 @f_sle(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sle:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -69,8 +70,8 @@ define i32 @f3(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f4(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f4:
+define i32 @f_sgt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sgt:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -92,8 +93,8 @@ define i32 @f4(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f5(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f5:
+define i32 @f_sge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_sge:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -115,8 +116,8 @@ define i32 @f5(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f6(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f6:
+define i32 @f_ne(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ne:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -138,8 +139,8 @@ define i32 @f6(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f7(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f7:
+define i32 @f_ult(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ult:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -161,8 +162,8 @@ define i32 @f7(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f8(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f8:
+define i32 @f_ule(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ule:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -184,8 +185,8 @@ define i32 @f8(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f9(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f9:
+define i32 @f_ugt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_ugt:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -207,8 +208,8 @@ define i32 @f9(i32 %a, i32 %b) nounwind {
ret i32 %res
}
-define i32 @f10(i32 %a, i32 %b) nounwind {
-; CHECK-LABEL: f10:
+define i32 @f_uge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: f_uge:
; CHECK: addi a8, a1, -16
; CHECK-NEXT: or a1, a8, a8
; CHECK-NEXT: movi a8, 0
@@ -229,3 +230,476 @@ define i32 @f10(i32 %a, i32 %b) nounwind {
%res = zext i1 %cond to i32
ret i32 %res
}
+
+
+; Tests for i64 operands
+
+define i64 @f_eq_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_eq_i64:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a4
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: # kill: def $a8 killed $a2
+; CHECK-NEXT: xor a9, a3, a5
+; CHECK-NEXT: xor a8, a2, a4
+; CHECK-NEXT: or a8, a8, a9
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: s32i a9, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB10_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB10_2:
+; CHECK-NEXT: l32i a3, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp eq i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_slt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_slt_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: blt a3, a5, .LBB11_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB11_2:
+; CHECK-NEXT: l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a8, a9, .LBB11_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB11_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB11_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB11_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp slt i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_sle_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sle_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bge a5, a3, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a8, a9, .LBB12_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB12_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB12_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB12_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sle i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_sgt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sgt_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: blt a5, a3, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB13_2:
+; CHECK-NEXT: l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a8, a9, .LBB13_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB13_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB13_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB13_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sgt i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_sge_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_sge_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bge a3, a5, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a8, a9, .LBB14_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB14_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB14_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB14_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp sge i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_ne_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ne_i64:
+; CHECK: addi a8, a1, -16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a4
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: # kill: def $a8 killed $a2
+; CHECK-NEXT: xor a9, a3, a5
+; CHECK-NEXT: xor a8, a2, a4
+; CHECK-NEXT: or a8, a8, a9
+; CHECK-NEXT: movi a10, 1
+; CHECK-NEXT: movi a9, 0
+; CHECK-NEXT: s32i a9, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: bne a8, a9, .LBB15_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: l32i a3, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 16
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ne i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_ult_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ult_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a3, a5, .LBB16_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB16_2:
+; CHECK-NEXT: l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a8, a9, .LBB16_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB16_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB16_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB16_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ult i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_ule_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ule_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a5, a3, .LBB17_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB17_2:
+; CHECK-NEXT: l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a8, a9, .LBB17_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB17_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB17_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB17_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ule i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_ugt_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_ugt_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a5, a3, .LBB18_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB18_2:
+; CHECK-NEXT: l32i a8, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bltu a8, a9, .LBB18_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB18_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB18_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB18_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp ugt i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
+
+define i64 @f_uge_i64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: f_uge_i64:
+; CHECK: addi a8, a1, -48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: s32i a5, a1, 12 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a4, a1, 16 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a3, a1, 20 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a2, a1, 24 # 4-byte Folded Spill
+; CHECK-NEXT: # kill: def $a8 killed $a5
+; CHECK-NEXT: # kill: def $a8 killed $a3
+; CHECK-NEXT: movi a8, 0
+; CHECK-NEXT: s32i a8, a1, 28 # 4-byte Folded Spill
+; CHECK-NEXT: movi a8, 1
+; CHECK-NEXT: s32i a8, a1, 32 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a3, a5, .LBB19_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 36 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB19_2:
+; CHECK-NEXT: l32i a8, a1, 24 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 16 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 32 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a11, a1, 36 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a11, a1, 4 # 4-byte Folded Spill
+; CHECK-NEXT: s32i a10, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: bgeu a8, a9, .LBB19_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: l32i a8, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 8 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB19_4:
+; CHECK-NEXT: l32i a8, a1, 20 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a9, a1, 12 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a10, a1, 8 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a10, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: beq a8, a9, .LBB19_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: l32i a8, a1, 4 # 4-byte Folded Reload
+; CHECK-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; CHECK-NEXT: .LBB19_6:
+; CHECK-NEXT: l32i a3, a1, 28 # 4-byte Folded Reload
+; CHECK-NEXT: l32i a2, a1, 0 # 4-byte Folded Reload
+; CHECK-NEXT: addi a8, a1, 48
+; CHECK-NEXT: or a1, a8, a8
+; CHECK-NEXT: ret
+
+ %cond = icmp uge i64 %a, %b
+ %res = zext i1 %cond to i64
+ ret i64 %res
+}
More information about the llvm-commits
mailing list