[llvm] [RISCV] Codegen support for XCVbi extension (PR #89719)
Liao Chunyu via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 25 05:15:52 PDT 2024
https://github.com/ChunyuLiao updated https://github.com/llvm/llvm-project/pull/89719
From 5d9350ca70aaf736a5213bf897d3da9e1c6baabe Mon Sep 17 00:00:00 2001
From: Liao Chunyu <chunyu at iscas.ac.cn>
Date: Thu, 18 Apr 2024 22:11:56 -0400
Subject: [PATCH] [RISCV] Codegen support for XCVbi extension

spec: https://github.com/openhwgroup/cv32e40p/blob/master/docs/source/instruction_set_extensions.rst#immediate-branching-operations

Contributors: @CharKeaney, @jeremybennett, @lewis-revill, @NandniJamnadas,
@PaoloS02, @simonpcook, @xingmingjie, @realqhc, @melonedo, @PhilippvK
---
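For context, the new patterns select cv.beqimm/cv.bneimm for equality and
inequality compares of a register against a signed 5-bit immediate (simm5).
A minimal example, taken from the test file added below, together with the
-O3 output:

  define i32 @beqimm(i32 %a) {
    %1 = icmp eq i32 %a, 5
    br i1 %1, label %t, label %f
  f:
    ret i32 0
  t:
    ret i32 1
  }

  ; llc -O3 -mtriple=riscv32 -mattr=+xcvbi emits:
  ;   cv.bneimm a0, 5, .LBB0_2    # condition inverted so %t falls through

Immediates outside the simm5 range (e.g. -17 or 16) keep the existing
li + beq lowering, as the select_no_beqimm_* tests check.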
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 28 +-
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 23 +-
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 4 +-
llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td | 26 ++
.../RISCV/RISCVRedundantCopyElimination.cpp | 6 +-
llvm/test/CodeGen/RISCV/xcvbi.ll | 248 ++++++++++++++++++
6 files changed, 316 insertions(+), 19 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/xcvbi.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 41483c49ae03cd..7743b102f09963 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17473,6 +17473,7 @@ static bool isSelectPseudo(MachineInstr &MI) {
default:
return false;
case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_GPR_Using_CC_Imm:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR16INX_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
@@ -17656,7 +17657,9 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// is checked here and handled by a separate function -
// EmitLoweredCascadedSelect.
Register LHS = MI.getOperand(1).getReg();
- Register RHS = MI.getOperand(2).getReg();
+ Register RHS;
+ if (MI.getOperand(2).isReg())
+ RHS = MI.getOperand(2).getReg();
auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
SmallVector<MachineInstr *, 4> SelectDebugValues;
@@ -17665,8 +17668,9 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
MachineInstr *LastSelectPseudo = &MI;
auto Next = next_nodbg(MI.getIterator(), BB->instr_end());
- if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
- Next->getOpcode() == MI.getOpcode() &&
+ if ((MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR &&
+ MI.getOpcode() != RISCV::Select_GPR_Using_CC_Imm) &&
+ Next != BB->end() && Next->getOpcode() == MI.getOpcode() &&
Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
Next->getOperand(5).isKill()) {
return EmitLoweredCascadedSelect(MI, *Next, BB, Subtarget);
@@ -17678,7 +17682,8 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
continue;
if (isSelectPseudo(*SequenceMBBI)) {
if (SequenceMBBI->getOperand(1).getReg() != LHS ||
- SequenceMBBI->getOperand(2).getReg() != RHS ||
+ (SequenceMBBI->getOperand(2).isReg() &&
+ SequenceMBBI->getOperand(2).getReg() != RHS) ||
SequenceMBBI->getOperand(3).getImm() != CC ||
SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
@@ -17727,10 +17732,16 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
HeadMBB->addSuccessor(TailMBB);
// Insert appropriate branch.
- BuildMI(HeadMBB, DL, TII.getBrCond(CC))
- .addReg(LHS)
- .addReg(RHS)
- .addMBB(TailMBB);
+ if (MI.getOperand(2).isImm())
+ BuildMI(HeadMBB, DL, TII.getBrCond(CC, MI.getOperand(2).isImm()))
+ .addReg(LHS)
+ .addImm(MI.getOperand(2).getImm())
+ .addMBB(TailMBB);
+ else
+ BuildMI(HeadMBB, DL, TII.getBrCond(CC))
+ .addReg(LHS)
+ .addReg(RHS)
+ .addMBB(TailMBB);
// IfFalseMBB just falls through to TailMBB.
IfFalseMBB->addSuccessor(TailMBB);
@@ -18035,6 +18046,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
"ReadCounterWide is only to be used on riscv32");
return emitReadCounterWidePseudo(MI, BB);
case RISCV::Select_GPR_Using_CC_GPR:
+ case RISCV::Select_GPR_Using_CC_Imm:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR16INX_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 8331fc0b8c3024..12d23bef0f602b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -826,6 +826,10 @@ static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
switch (Opc) {
default:
return RISCVCC::COND_INVALID;
+ case RISCV::CV_BEQIMM:
+ return RISCVCC::COND_EQ;
+ case RISCV::CV_BNEIMM:
+ return RISCVCC::COND_NE;
case RISCV::BEQ:
return RISCVCC::COND_EQ;
case RISCV::BNE:
@@ -856,14 +860,14 @@ static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
Cond.push_back(LastInst.getOperand(1));
}
-unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC) {
+unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, bool Imm) {
switch (CC) {
default:
llvm_unreachable("Unknown condition code!");
case RISCVCC::COND_EQ:
- return RISCV::BEQ;
+ return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
case RISCVCC::COND_NE:
- return RISCV::BNE;
+ return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
case RISCVCC::COND_LT:
return RISCV::BLT;
case RISCVCC::COND_GE:
@@ -875,8 +879,9 @@ unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC) {
}
}
-const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
- return get(RISCVCC::getBrCond(CC));
+const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC,
+ bool Imm) const {
+ return get(RISCVCC::getBrCond(CC, Imm));
}
RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
@@ -1025,8 +1030,10 @@ unsigned RISCVInstrInfo::insertBranch(
// Either a one or two-way conditional branch.
auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
- MachineInstr &CondMI =
- *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
+ MachineInstr &CondMI = *BuildMI(&MBB, DL, getBrCond(CC, Cond[2].isImm()))
+ .add(Cond[1])
+ .add(Cond[2])
+ .addMBB(TBB);
if (BytesAdded)
*BytesAdded += getInstSizeInBytes(CondMI);
@@ -1250,6 +1257,8 @@ bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
case RISCV::BGE:
case RISCV::BLTU:
case RISCV::BGEU:
+ case RISCV::CV_BEQIMM:
+ case RISCV::CV_BNEIMM:
return isIntN(13, BrOffset);
case RISCV::JAL:
case RISCV::PseudoBR:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 70fe7da85be0e7..38badb39deea43 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -45,7 +45,7 @@ enum CondCode {
};
CondCode getOppositeBranchCondition(CondCode);
-unsigned getBrCond(CondCode CC);
+unsigned getBrCond(CondCode CC, bool Imm = false);
} // end of namespace RISCVCC
@@ -65,7 +65,7 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
explicit RISCVInstrInfo(RISCVSubtarget &STI);
MCInst getNop() const override;
- const MCInstrDesc &getBrCond(RISCVCC::CondCode CC) const;
+ const MCInstrDesc &getBrCond(RISCVCC::CondCode CC, bool Imm = false) const;
Register isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
index 924e91e15c348f..ff4482f74b2979 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
@@ -704,3 +704,29 @@ let Predicates = [HasVendorXCVbitmanip, IsRV32] in {
(CV_BITREV GPR:$rs1, cv_tuimm2:$radix, cv_tuimm5:$pts)>;
def : Pat<(bitreverse (XLenVT GPR:$rs)), (CV_BITREV GPR:$rs, 0, 0)>;
}
+
+//===----------------------------------------------------------------------===//
+// Patterns for immediate branching operations
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasVendorXCVbi, IsRV32], AddedComplexity = 2 in {
+ def : Pat<(riscv_brcc GPR:$rs1, simm5:$imm5, SETEQ, bb:$imm12),
+ (CV_BEQIMM GPR:$rs1, simm5:$imm5, simm13_lsb0:$imm12)>;
+ def : Pat<(riscv_brcc GPR:$rs1, simm5:$imm5, SETNE, bb:$imm12),
+ (CV_BNEIMM GPR:$rs1, simm5:$imm5, simm13_lsb0:$imm12)>;
+
+ let usesCustomInserter = 1 in
+ def Select_GPR_Using_CC_Imm : Pseudo<(outs GPR:$dst),
+ (ins GPR:$lhs, simm5:$imm5, ixlenimm:$cc,
+ GPR:$truev, GPR:$falsev), []>;
+
+ class Selectbi<CondCode Cond>
+ : Pat<(riscv_selectcc_frag:$cc (i32 GPR:$lhs), simm5:$Constant, Cond, (i32 GPR:$truev),
+ GPR:$falsev),
+ (Select_GPR_Using_CC_Imm GPR:$lhs, simm5:$Constant,
+ (IntCCtoRISCVCC $cc), GPR:$truev, GPR:$falsev)>;
+
+ def : Selectbi<SETEQ>;
+ def : Selectbi<SETNE>;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp b/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
index 61d605fda3f53a..65ff67b424796c 100644
--- a/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRedundantCopyElimination.cpp
@@ -77,9 +77,11 @@ guaranteesZeroRegInBlock(MachineBasicBlock &MBB,
assert(Cond.size() == 3 && "Unexpected number of operands");
assert(TBB != nullptr && "Expected branch target basic block");
auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
- if (CC == RISCVCC::COND_EQ && Cond[2].getReg() == RISCV::X0 && TBB == &MBB)
+ if (CC == RISCVCC::COND_EQ && Cond[2].isReg() &&
+ Cond[2].getReg() == RISCV::X0 && TBB == &MBB)
return true;
- if (CC == RISCVCC::COND_NE && Cond[2].getReg() == RISCV::X0 && TBB != &MBB)
+ if (CC == RISCVCC::COND_NE && Cond[2].isReg() &&
+ Cond[2].getReg() == RISCV::X0 && TBB != &MBB)
return true;
return false;
}
diff --git a/llvm/test/CodeGen/RISCV/xcvbi.ll b/llvm/test/CodeGen/RISCV/xcvbi.ll
new file mode 100644
index 00000000000000..afd30faa56f90b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xcvbi.ll
@@ -0,0 +1,248 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=riscv32 -mattr=+xcvbi -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK_NOPT
+; RUN: llc -O3 -mtriple=riscv32 -mattr=+xcvbi -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK_OPT
+
+define i32 @beqimm(i32 %a) {
+; CHECK_NOPT-LABEL: beqimm:
+; CHECK_NOPT: # %bb.0:
+; CHECK_NOPT-NEXT: cv.beqimm a0, 5, .LBB0_2
+; CHECK_NOPT-NEXT: j .LBB0_1
+; CHECK_NOPT-NEXT: .LBB0_1: # %f
+; CHECK_NOPT-NEXT: li a0, 0
+; CHECK_NOPT-NEXT: ret
+; CHECK_NOPT-NEXT: .LBB0_2: # %t
+; CHECK_NOPT-NEXT: li a0, 1
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: beqimm:
+; CHECK_OPT: # %bb.0:
+; CHECK_OPT-NEXT: cv.bneimm a0, 5, .LBB0_2
+; CHECK_OPT-NEXT: # %bb.1: # %t
+; CHECK_OPT-NEXT: li a0, 1
+; CHECK_OPT-NEXT: ret
+; CHECK_OPT-NEXT: .LBB0_2: # %f
+; CHECK_OPT-NEXT: li a0, 0
+; CHECK_OPT-NEXT: ret
+ %1 = icmp eq i32 %a, 5
+ br i1 %1, label %t, label %f
+f:
+ ret i32 0
+t:
+ ret i32 1
+}
+
+define i32 @bneimm(i32 %a) {
+; CHECK_NOPT-LABEL: bneimm:
+; CHECK_NOPT: # %bb.0:
+; CHECK_NOPT-NEXT: cv.bneimm a0, 5, .LBB1_2
+; CHECK_NOPT-NEXT: j .LBB1_1
+; CHECK_NOPT-NEXT: .LBB1_1: # %f
+; CHECK_NOPT-NEXT: li a0, 0
+; CHECK_NOPT-NEXT: ret
+; CHECK_NOPT-NEXT: .LBB1_2: # %t
+; CHECK_NOPT-NEXT: li a0, 1
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: bneimm:
+; CHECK_OPT: # %bb.0:
+; CHECK_OPT-NEXT: cv.beqimm a0, 5, .LBB1_2
+; CHECK_OPT-NEXT: # %bb.1: # %t
+; CHECK_OPT-NEXT: li a0, 1
+; CHECK_OPT-NEXT: ret
+; CHECK_OPT-NEXT: .LBB1_2: # %f
+; CHECK_OPT-NEXT: li a0, 0
+; CHECK_OPT-NEXT: ret
+ %1 = icmp ne i32 %a, 5
+ br i1 %1, label %t, label %f
+f:
+ ret i32 0
+t:
+ ret i32 1
+}
+
+define i32 @select_beqimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, -16, .LBB2_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB2_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, -16, .LBB2_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB2_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, -16
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_beqimm_2(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_2:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, 0, .LBB3_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB3_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_2:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, 0, .LBB3_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB3_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 0
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_beqimm_3(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_beqimm_3:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.beqimm a0, 15, .LBB4_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB4_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_beqimm_3:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.beqimm a0, 15, .LBB4_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB4_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 15
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_no_beqimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_no_beqimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: li a1, -17
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: beq a0, a1, .LBB5_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB5_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_no_beqimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: li a3, -17
+; CHECK_OPT-NEXT: beq a0, a3, .LBB5_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB5_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, -17
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_no_beqimm_2(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_no_beqimm_2:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: li a1, 16
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: beq a0, a1, .LBB6_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB6_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_no_beqimm_2:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: li a3, 16
+; CHECK_OPT-NEXT: beq a0, a3, .LBB6_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB6_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp eq i32 %a, 16
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+
+define i32 @select_bneimm_1(i32 %a, i32 %x, i32 %y) {
+; CHECK_NOPT-LABEL: select_bneimm_1:
+; CHECK_NOPT: # %bb.0: # %entry
+; CHECK_NOPT-NEXT: addi sp, sp, -16
+; CHECK_NOPT-NEXT: .cfi_def_cfa_offset 16
+; CHECK_NOPT-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: sw a2, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: cv.bneimm a0, 0, .LBB7_2
+; CHECK_NOPT-NEXT: # %bb.1: # %entry
+; CHECK_NOPT-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
+; CHECK_NOPT-NEXT: .LBB7_2: # %entry
+; CHECK_NOPT-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; CHECK_NOPT-NEXT: addi sp, sp, 16
+; CHECK_NOPT-NEXT: ret
+;
+; CHECK_OPT-LABEL: select_bneimm_1:
+; CHECK_OPT: # %bb.0: # %entry
+; CHECK_OPT-NEXT: cv.bneimm a0, 0, .LBB7_2
+; CHECK_OPT-NEXT: # %bb.1: # %entry
+; CHECK_OPT-NEXT: mv a2, a1
+; CHECK_OPT-NEXT: .LBB7_2: # %entry
+; CHECK_OPT-NEXT: mv a0, a2
+; CHECK_OPT-NEXT: ret
+entry:
+ %cmp.not = icmp ne i32 %a, 0
+ %cond = select i1 %cmp.not, i32 %y, i32 %x
+ ret i32 %cond
+}
+