[llvm] [RISCV] Codegen support for XCVbi extension (PR #89719)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 23 01:42:20 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Liao Chunyu (ChunyuLiao)
Changes:
Spec: https://github.com/openhwgroup/cv32e40p/blob/master/docs/source/instruction_set_extensions.rst#immediate-branching-operations
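
For readers unfamiliar with the extension: XCVbi adds two conditional branches, cv.beqimm and cv.bneimm, which compare a register against a sign-extended 5-bit immediate, so the constant no longer has to be materialized with a separate li before a beq/bne. A minimal IR sketch of the intended lowering, mirroring the @beqimm case in the new test (the function name here is illustrative; exact labels and register allocation may differ):

```llvm
; With +xcvbi this branch should fold the constant into the compare:
;   cv.beqimm a0, 5, <target>
; rather than emitting  li a1, 5  followed by  beq a0, a1, <target>.
define i32 @beqimm_example(i32 %a) {
  %cmp = icmp eq i32 %a, 5
  br i1 %cmp, label %t, label %f
f:
  ret i32 0
t:
  ret i32 1
}
```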
---
Full diff: https://github.com/llvm/llvm-project/pull/89719.diff
7 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+26-6)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.h (+1)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.cpp (+16-7)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfo.h (+2-2)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td (+45)
- (modified) llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp (+4-2)
- (added) llvm/test/CodeGen/RISCV/xcvbi.ll (+248)
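
Mechanically, the patch matches riscv_brcc against a simm5 right-hand side to produce CV_BEQIMM/CV_BNEIMM directly, and introduces a RISCVISD::SELECTIMM_CC node plus a Select_IMM_Using_CC_GPR pseudo so that selects comparing against a small constant take the immediate branch form as well; RISCVCC::getBrCond gains a bool Imm parameter to choose between the register and immediate opcodes. For example, a select like the following should now lower through the new pseudo to cv.beqimm (a sketch based on the new select_beqimm_1 test; the function name is illustrative):

```llvm
; Expected -O3 codegen (per the test in the diff below):
;   cv.beqimm a0, -16, <taken>  ; then mv a2, a1 on fallthrough ; mv a0, a2
define i32 @select_example(i32 %a, i32 %x, i32 %y) {
  %cmp = icmp eq i32 %a, -16
  %cond = select i1 %cmp, i32 %y, i32 %x
  ret i32 %cond
}
```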
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 41483c49ae03cd..d9f3ed8833098f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7608,6 +7608,15 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
TargetCC = DAG.getCondCode(ISD::getSetCCInverse(CCVal, LHS.getValueType()));
}
+ if (Subtarget.hasVendorXCVbi() &&
+ (CCVal == ISD::SETEQ || CCVal == ISD::SETNE) &&
+ isa<ConstantSDNode>(RHS)) {
+ int32_t RHSImm = cast<ConstantSDNode>(RHS)->getSExtValue();
+ if (isInt<5>(RHSImm)) {
+ SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
+ return DAG.getNode(RISCVISD::SELECTIMM_CC, DL, VT, Ops);
+ }
+ }
SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
}
@@ -17656,7 +17665,9 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// is checked here and handled by a separate function -
// EmitLoweredCascadedSelect.
Register LHS = MI.getOperand(1).getReg();
- Register RHS = MI.getOperand(2).getReg();
+ Register RHS;
+ if (MI.getOpcode() != RISCV::Select_IMM_Using_CC_GPR)
+ RHS = MI.getOperand(2).getReg();
auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
SmallVector<MachineInstr *, 4> SelectDebugValues;
@@ -17727,10 +17738,16 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
HeadMBB->addSuccessor(TailMBB);
// Insert appropriate branch.
- BuildMI(HeadMBB, DL, TII.getBrCond(CC))
- .addReg(LHS)
- .addReg(RHS)
- .addMBB(TailMBB);
+ if (MI.getOperand(2).isImm())
+ BuildMI(HeadMBB, DL, TII.getBrCond(CC, MI.getOperand(2).isImm()))
+ .addReg(LHS)
+ .addImm(MI.getOperand(2).getImm())
+ .addMBB(TailMBB);
+ else
+ BuildMI(HeadMBB, DL, TII.getBrCond(CC))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(TailMBB);
// IfFalseMBB just falls through to TailMBB.
IfFalseMBB->addSuccessor(TailMBB);
@@ -17741,7 +17758,8 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
auto InsertionPoint = TailMBB->begin();
while (SelectMBBI != SelectEnd) {
auto Next = std::next(SelectMBBI);
- if (isSelectPseudo(*SelectMBBI)) {
+ if (isSelectPseudo(*SelectMBBI) ||
+ MI.getOpcode() == RISCV::Select_IMM_Using_CC_GPR) {
// %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
@@ -18035,6 +18053,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
"ReadCounterWide is only to be used on riscv32");
return emitReadCounterWidePseudo(MI, BB);
case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_IMM_Using_CC_GPR:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR16INX_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
@@ -19660,6 +19679,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(MRET_GLUE)
NODE_NAME_CASE(CALL)
NODE_NAME_CASE(SELECT_CC)
+ NODE_NAME_CASE(SELECTIMM_CC)
NODE_NAME_CASE(BR_CC)
NODE_NAME_CASE(BuildPairF64)
NODE_NAME_CASE(SplitF64)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index b10da3d40befb7..e0d3f9ee655faa 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -41,6 +41,7 @@ enum NodeType : unsigned {
/// The lhs and rhs are XLenVT integers. The true and false values can be
/// integer or floating point.
SELECT_CC,
+ SELECTIMM_CC,
BR_CC,
BuildPairF64,
SplitF64,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 8331fc0b8c3024..12d23bef0f602b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -826,6 +826,10 @@ static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
switch (Opc) {
default:
return RISCVCC::COND_INVALID;
+ case RISCV::CV_BEQIMM:
+ return RISCVCC::COND_EQ;
+ case RISCV::CV_BNEIMM:
+ return RISCVCC::COND_NE;
case RISCV::BEQ:
return RISCVCC::COND_EQ;
case RISCV::BNE:
@@ -856,14 +860,14 @@ static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
Cond.push_back(LastInst.getOperand(1));
}
-unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC) {
+unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, bool Imm) {
switch (CC) {
default:
llvm_unreachable("Unknown condition code!");
case RISCVCC::COND_EQ:
- return RISCV::BEQ;
+ return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
case RISCVCC::COND_NE:
- return RISCV::BNE;
+ return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
case RISCVCC::COND_LT:
return RISCV::BLT;
case RISCVCC::COND_GE:
@@ -875,8 +879,9 @@ unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC) {
}
}
-const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
- return get(RISCVCC::getBrCond(CC));
+const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC,
+ bool Imm) const {
+ return get(RISCVCC::getBrCond(CC, Imm));
}
RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
@@ -1025,8 +1030,10 @@ unsigned RISCVInstrInfo::insertBranch(
// Either a one or two-way conditional branch.
auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
- MachineInstr &CondMI =
- *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
+ MachineInstr &CondMI = *BuildMI(&MBB, DL, getBrCond(CC, Cond[2].isImm()))
+ .add(Cond[1])
+ .add(Cond[2])
+ .addMBB(TBB);
if (BytesAdded)
*BytesAdded += getInstSizeInBytes(CondMI);
@@ -1250,6 +1257,8 @@ bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
case RISCV::BGE:
case RISCV::BLTU:
case RISCV::BGEU:
+ case RISCV::CV_BEQIMM:
+ case RISCV::CV_BNEIMM:
return isIntN(13, BrOffset);
case RISCV::JAL:
case RISCV::PseudoBR:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 70fe7da85be0e7..38badb39deea43 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -45,7 +45,7 @@ enum CondCode {
};
CondCode getOppositeBranchCondition(CondCode);
-unsigned getBrCond(CondCode CC);
+unsigned getBrCond(CondCode CC, bool Imm = false);
} // end of namespace RISCVCC
@@ -65,7 +65,7 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
explicit RISCVInstrInfo(RISCVSubtarget &STI);
MCInst getNop() const override;
- const MCInstrDesc &getBrCond(RISCVCC::CondCode CC) const;
+ const MCInstrDesc &getBrCond(RISCVCC::CondCode CC, bool Imm = false) const;
Register isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
index 924e91e15c348f..36c9298c6f2f6b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
@@ -10,6 +10,14 @@
//
//===----------------------------------------------------------------------===//
+def SDT_RISCVSelectImmCC : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>,
+ SDTCisVT<3, OtherVT>,
+ SDTCisSameAs<0, 4>,
+ SDTCisSameAs<4, 5>]>;
+
+def riscv_selectimmcc : SDNode<"RISCVISD::SELECTIMM_CC", SDT_RISCVSelectImmCC>;
+
let DecoderNamespace = "XCVbitmanip" in {
class CVInstBitManipRII<bits<2> funct2, bits<3> funct3, dag outs, dag ins,
string opcodestr, string argstr>
@@ -704,3 +712,40 @@ let Predicates = [HasVendorXCVbitmanip, IsRV32] in {
(CV_BITREV GPR:$rs1, cv_tuimm2:$radix, cv_tuimm5:$pts)>;
def : Pat<(bitreverse (XLenVT GPR:$rs)), (CV_BITREV GPR:$rs, 0, 0)>;
}
+
+//===----------------------------------------------------------------------===//
+// Patterns for immediate branching operations
+//===----------------------------------------------------------------------===//
+
+def IMMCCtoRISCVCC : SDNodeXForm<riscv_selectimmcc, [{
+ ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ RISCVCC::CondCode BrCC = getRISCVCCForIntCC(CC);
+ return CurDAG->getTargetConstant(BrCC, SDLoc(N), Subtarget->getXLenVT());
+}]>;
+
+def riscv_selectimmcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc,
+ node:$truev, node:$falsev),
+ (riscv_selectimmcc node:$lhs, node:$rhs,
+ node:$cc, node:$truev,
+ node:$falsev), [{}],
+ IMMCCtoRISCVCC>;
+
+let Predicates = [HasVendorXCVbi, IsRV32] in {
+ def : Pat<(riscv_brcc GPR:$rs1, simm5:$imm5, SETEQ, bb:$imm12),
+ (CV_BEQIMM GPR:$rs1, simm5:$imm5, simm13_lsb0:$imm12)>;
+ def : Pat<(riscv_brcc GPR:$rs1, simm5:$imm5, SETNE, bb:$imm12),
+ (CV_BNEIMM GPR:$rs1, simm5:$imm5, simm13_lsb0:$imm12)>;
+
+ let usesCustomInserter = 1 in
+ def Select_IMM_Using_CC_GPR : Pseudo<(outs GPR:$dst),
+ (ins GPR:$lhs, simm5:$imm5, ixlenimm:$cc,
+ GPR:$truev, GPR:$falsev),
+ [(set GPR:$dst,
+ (riscv_selectimmcc_frag:$cc (i32 GPR:$lhs), simm5:$imm5, cond,
+ (i32 GPR:$truev), GPR:$falsev))]>;
+
+ def : Pat<(riscv_selectimmcc_frag:$cc (i32 GPR:$lhs), simm5:$Constant, SETEQ, (i32 GPR:$truev),
+ GPR:$falsev),
+ (Select_IMM_Using_CC_GPR GPR:$lhs, simm5:$Constant,
+ (IMMCCtoRISCVCC $cc), GPR:$truev, GPR:$falsev)>;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp b/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
index 61d605fda3f53a..65ff67b424796c 100644
--- a/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
@@ -77,9 +77,11 @@ guaranteesZeroRegInBlock(MachineBasicBlock &MBB,
assert(Cond.size() == 3 && "Unexpected number of operands");
assert(TBB != nullptr && "Expected branch target basic block");
auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
- if (CC == RISCVCC::COND_EQ && Cond[2].getReg() == RISCV::X0 && TBB == &MBB)
+ if (CC == RISCVCC::COND_EQ && Cond[2].isReg() &&
+ Cond[2].getReg() == RISCV::X0 && TBB == &MBB)
return true;
- if (CC == RISCVCC::COND_NE && Cond[2].getReg() == RISCV::X0 && TBB != &MBB)
+ if (CC == RISCVCC::COND_NE && Cond[2].isReg() &&
+ Cond[2].getReg() == RISCV::X0 && TBB != &MBB)
return true;
return false;
}
diff --git a/llvm/test/CodeGen/RISCV/xcvbi.ll b/llvm/test/CodeGen/RISCV/xcvbi.ll
new file mode 100644
index 00000000000000..afd30faa56f90b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xcvbi.ll
@@ -0,0 +1,248 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=riscv32 -mattr=+xcvbi -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK_NOPT
+; RUN: llc -O3 -mtriple=riscv32 -mattr=+xcvbi -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK_OPT
+
+define i32 @beqimm(i32 %a) {
+; CHECK_NOPT-LABEL: beqimm:
+; CHECK_NOPT: # %bb.0:
+; CHECK_NOPT-NEXT: cv.beqimm a0, 5, .LBB0_2
+; CHECK_NOPT-NEXT: j .LBB0_1
+; CHECK_NOPT-NEXT: .LBB0_1: # %f
+; CHECK_NOPT-NEXT: li a0, 0
+; CHECK_NOPT-NEXT: ret
+; CHECK_NOPT-NEXT: .LBB0_2: # %t
+; CHECK_NOPT-NEXT: li a0, 1
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: beqimm:
+; CHECK_OPT: # %bb.0:
+; CHECK_OPT-NEXT: cv.bneimm a0, 5, .LBB0_2
+; CHECK_OPT-NEXT: # %bb.1: # %t
+; CHECK_OPT-NEXT: li a0, 1
+; CHECK_OPT-NEXT: ret
+; CHECK_OPT-NEXT: .LBB0_2: # %f
+; CHECK_OPT-NEXT: li a0, 0
+; CHECK_OPT-NEXT: ret
+ %1 = icmp eq i32 %a, 5
+ br i1 %1, label %t, label %f
+f:
+ ret i32 0
+t:
+ ret i32 1
+}
+
+define i32 @bneimm(i32 %a) {
+; CHECK_NOPT-LABEL: bneimm:
+; CHECK_NOPT: # %bb.0:
+; CHECK_NOPT-NEXT: cv.bneimm a0, 5, .LBB1_2
+; CHECK_NOPT-NEXT: j .LBB1_1
+; CHECK_NOPT-NEXT: .LBB1_1: # %f
+; CHECK_NOPT-NEXT: li a0, 0
+; CHECK_NOPT-NEXT: ret
+; CHECK_NOPT-NEXT: .LBB1_2: # %t
+; CHECK_NOPT-NEXT: li a0, 1
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: bneimm:
+; CHECK_OPT: # %bb.0:
+; CHECK_OPT-NEXT: cv.beqimm a0, 5, .LBB1_2
+; CHECK_OPT-NEXT: # %bb.1: # %t
+; CHECK_OPT-NEXT: li a0, 1
+; CHECK_OPT-NEXT: ret
+; CHECK_OPT-NEXT: .LBB1_2: # %f
+; CHECK_OPT-NEXT: li a0, 0
+; CHECK_OPT-NEXT: ret
+ %1 = icmp ne i32 %a, 5
+ br i1 %1, label %t, label %f
+f:
+ ret i32 0
+t:
+ ret i32 1
+}
+
+define i32 @select_beqimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, -16, .LBB2_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB2_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, -16, .LBB2_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB2_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, -16
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_beqimm_2(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_2:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, 0, .LBB3_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB3_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_2:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, 0, .LBB3_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB3_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 0
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_beqimm_3(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_3:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, 15, .LBB4_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB4_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_3:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, 15, .LBB4_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB4_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 15
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_no_beqimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_no_beqimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: li a1, -17
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: beq a0, a1, .LBB5_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB5_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_no_beqimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: li a3, -17
+; CHECK_OPT-NEXT: beq a0, a3, .LBB5_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB5_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, -17
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_no_beqimm_2(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_no_beqimm_2:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: li a1, 16
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: beq a0, a1, .LBB6_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB6_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_no_beqimm_2:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: li a3, 16
+; CHECK_OPT-NEXT: beq a0, a3, .LBB6_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB6_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 16
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_bneimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_bneimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.bneimm a0, 0, .LBB7_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB7_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_bneimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.bneimm a0, 0, .LBB7_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB7_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp ne i32 %a, 0
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
``````````
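The new test covers both the branch and select paths at -O0 and -O3; its checks can be reproduced with the llc invocations in the RUN lines (llc -mtriple=riscv32 -mattr=+xcvbi -verify-machineinstrs). Note that constants outside the simm5 range, such as -17 and 16 in select_no_beqimm_1/2, deliberately keep the li + beq sequence.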
https://github.com/llvm/llvm-project/pull/89719