[llvm] 78ac964 - [RISCV][NFC] Prepare for Short Forward Branch of branches with immediates (#182456)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 2 23:08:04 PST 2026
Author: quic_hchandel
Date: 2026-03-03T12:38:00+05:30
New Revision: 78ac964c47cbdbc64e84c3230ba76fe774e8dd1b
URL: https://github.com/llvm/llvm-project/commit/78ac964c47cbdbc64e84c3230ba76fe774e8dd1b
DIFF: https://github.com/llvm/llvm-project/commit/78ac964c47cbdbc64e84c3230ba76fe774e8dd1b.diff
LOG: [RISCV][NFC] Prepare for Short Forward Branch of branches with immediates (#182456)
This NFC patch introduces two key updates:
- It replaces the `gpr` operand type with `sfb_rhs` for the `rhs`
operand in the short forward branch optimization pseudos. The `sfb_rhs`
type supports both register and immediate operands.
- It updates the pseudos to use branch opcodes instead of condition
codes, which were used prior to this change.
Together, these changes prepare the existing codebase to support short
forward branches that compare a register with an immediate value.
Currently, short forward branch support is limited to
register-to-register comparisons.
Added:
Modified:
llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
llvm/lib/Target/RISCV/RISCVInstrInfo.h
llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
llvm/test/CodeGen/RISCV/opt-w-instrs.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index f90c44d5f109f..a1266ff4fc3fe 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -128,6 +128,13 @@ enum OperandType : unsigned {
// or vsetivli.
OPERAND_AVL,
+ // Operand is either a register or imm, this is used by short forward branch
+ // (SFB) pseudos to enable SFB with branches on reg-reg and reg-imm compares.
+ OPERAND_SFB_RHS,
+
+ // Operand is a branch opcode, this too is used by SFB pseudos.
+ OPERAND_BCC_OPCODE,
+
OPERAND_VMASK,
};
} // namespace RISCVOp
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 8376a9c2e2236..ce6d2e2de9c96 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -214,26 +214,25 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
MF->insert(++MBB.getIterator(), TrueBB);
MF->insert(++TrueBB->getIterator(), MergeBB);
- // We want to copy the "true" value when the condition is true which means
- // we need to invert the branch condition to jump over TrueBB when the
- // condition is false.
- auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
- CC = RISCVCC::getInverseBranchCondition(CC);
+ // We want to copy the "true" value only when the branch is executed.
+ // The SDNodeXform is responsible for the inversion.
+ unsigned BranchOpCode =
+ MI.getOperand(MI.getNumExplicitOperands() - 3).getImm();
// Insert branch instruction.
- BuildMI(MBB, MBBI, DL, TII->get(RISCVCC::getBrCond(CC)))
- .addReg(MI.getOperand(1).getReg())
- .addReg(MI.getOperand(2).getReg())
+ BuildMI(MBB, MBBI, DL, TII->get(BranchOpCode))
+ .add(MI.getOperand(MI.getNumExplicitOperands() - 2))
+ .add(MI.getOperand(MI.getNumExplicitOperands() - 1))
.addMBB(MergeBB);
Register DestReg = MI.getOperand(0).getReg();
- assert(MI.getOperand(4).getReg() == DestReg);
+ assert(MI.getOperand(1).getReg() == DestReg);
if (MI.getOpcode() == RISCV::PseudoCCMOVGPR ||
MI.getOpcode() == RISCV::PseudoCCMOVGPRNoX0) {
// Add MV.
BuildMI(TrueBB, DL, TII->get(RISCV::ADDI), DestReg)
- .add(MI.getOperand(5))
+ .add(MI.getOperand(2))
.addImm(0);
} else {
unsigned NewOpc;
@@ -295,16 +294,16 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB,
if (NewOpc == RISCV::NDS_BFOZ || NewOpc == RISCV::NDS_BFOS) {
BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg)
- .add(MI.getOperand(5))
- .add(MI.getOperand(6))
- .add(MI.getOperand(7));
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4));
} else if (NewOpc == RISCV::LUI || NewOpc == RISCV::QC_LI ||
NewOpc == RISCV::QC_E_LI) {
- BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg).add(MI.getOperand(5));
+ BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg).add(MI.getOperand(2));
} else {
BuildMI(TrueBB, DL, TII->get(NewOpc), DestReg)
- .add(MI.getOperand(5))
- .add(MI.getOperand(6));
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3));
}
}
@@ -340,54 +339,54 @@ bool RISCVExpandPseudo::expandCCOpToCMov(MachineBasicBlock &MBB,
return false;
// FIXME: Would be wonderful to support LHS=X0, but not very easy.
- if (MI.getOperand(1).getReg() == RISCV::X0 ||
- MI.getOperand(4).getReg() == RISCV::X0 ||
- MI.getOperand(5).getReg() == RISCV::X0)
+ if (MI.getOperand(MI.getNumExplicitOperands() - 2).getReg() == RISCV::X0 ||
+ MI.getOperand(1).getReg() == RISCV::X0 ||
+ MI.getOperand(2).getReg() == RISCV::X0)
return false;
- auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
-
+ // Use branch opcode to select appropriate Xqcicm instruction
+ unsigned BCC = MI.getOperand(MI.getNumExplicitOperands() - 3).getImm();
unsigned CMovOpcode, CMovIOpcode;
- switch (CC) {
+ switch (BCC) {
default:
- llvm_unreachable("Unhandled CC");
- case RISCVCC::COND_EQ:
+ return false; // Unhandled branch opcodes
+ case RISCV::BNE:
CMovOpcode = RISCV::QC_MVEQ;
CMovIOpcode = RISCV::QC_MVEQI;
break;
- case RISCVCC::COND_NE:
+ case RISCV::BEQ:
CMovOpcode = RISCV::QC_MVNE;
CMovIOpcode = RISCV::QC_MVNEI;
break;
- case RISCVCC::COND_LT:
+ case RISCV::BGE:
CMovOpcode = RISCV::QC_MVLT;
CMovIOpcode = RISCV::QC_MVLTI;
break;
- case RISCVCC::COND_GE:
+ case RISCV::BLT:
CMovOpcode = RISCV::QC_MVGE;
CMovIOpcode = RISCV::QC_MVGEI;
break;
- case RISCVCC::COND_LTU:
+ case RISCV::BGEU:
CMovOpcode = RISCV::QC_MVLTU;
CMovIOpcode = RISCV::QC_MVLTUI;
break;
- case RISCVCC::COND_GEU:
+ case RISCV::BLTU:
CMovOpcode = RISCV::QC_MVGEU;
CMovIOpcode = RISCV::QC_MVGEUI;
break;
}
- if (MI.getOperand(2).getReg() == RISCV::X0) {
+ if (MI.getOperand(MI.getNumExplicitOperands() - 1).getReg() == RISCV::X0) {
// $dst = PseudoCCMOVGPR $lhs, X0, $cc, $falsev (=$dst), $truev
// $dst = PseudoCCMOVGPRNoX0 $lhs, X0, $cc, $falsev (=$dst), $truev
// =>
// $dst = QC_MVccI $falsev (=$dst), $lhs, 0, $truev
BuildMI(MBB, MBBI, DL, TII->get(CMovIOpcode))
.addDef(MI.getOperand(0).getReg())
- .addReg(MI.getOperand(4).getReg())
.addReg(MI.getOperand(1).getReg())
+ .addReg(MI.getOperand(MI.getNumExplicitOperands() - 2).getReg())
.addImm(0)
- .addReg(MI.getOperand(5).getReg());
+ .addReg(MI.getOperand(2).getReg());
MI.eraseFromParent();
return true;
@@ -399,10 +398,10 @@ bool RISCVExpandPseudo::expandCCOpToCMov(MachineBasicBlock &MBB,
// $dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev
BuildMI(MBB, MBBI, DL, TII->get(CMovOpcode))
.addDef(MI.getOperand(0).getReg())
- .addReg(MI.getOperand(4).getReg())
.addReg(MI.getOperand(1).getReg())
- .addReg(MI.getOperand(2).getReg())
- .addReg(MI.getOperand(5).getReg());
+ .addReg(MI.getOperand(MI.getNumExplicitOperands() - 2).getReg())
+ .addReg(MI.getOperand(MI.getNumExplicitOperands() - 1).getReg())
+ .addReg(MI.getOperand(2).getReg());
MI.eraseFromParent();
return true;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index fa388a3ec64d8..9c71a761c3bd1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -963,11 +963,11 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
return nullptr;
MachineRegisterInfo &MRI = MF.getRegInfo();
- if (Ops.size() != 1 || (Ops[0] != 4 && Ops[0] != 5))
+ if (Ops.size() != 1 || (Ops[0] != 1 && Ops[0] != 2))
return nullptr;
- bool Invert = Ops[0] == 5;
- const MachineOperand &FalseReg = MI.getOperand(!Invert ? 5 : 4);
+ bool Invert = Ops[0] == 2;
+ const MachineOperand &FalseReg = MI.getOperand(!Invert ? 2 : 1);
Register DestReg = MI.getOperand(0).getReg();
const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
if (!MRI.constrainRegClass(DestReg, PreviousClass))
@@ -975,14 +975,7 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
// Create a new predicated version of DefMI.
MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), InsertPt,
- MI.getDebugLoc(), get(PredOpc), DestReg)
- .add({MI.getOperand(1), MI.getOperand(2)});
-
- // Add condition code, inverting if necessary.
- auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
- if (!Invert)
- CC = RISCVCC::getInverseBranchCondition(CC);
- NewMI.addImm(CC);
+ MI.getDebugLoc(), get(PredOpc), DestReg);
// Copy the false register.
NewMI.add(FalseReg);
@@ -992,6 +985,15 @@ MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
NewMI.add(LoadMI.getOperand(i));
+ // Add branch opcode, inverting if necessary.
+ unsigned BCC = MI.getOperand(MI.getNumExplicitOperands() - 3).getImm();
+ if (!Invert)
+ BCC = RISCVCC::getInverseBranchOpcode(BCC);
+ NewMI.addImm(BCC);
+
+ // Copy condition portion
+ NewMI.add({MI.getOperand(MI.getNumExplicitOperands() - 2),
+ MI.getOperand(MI.getNumExplicitOperands() - 1)});
NewMI.cloneMemRefs(LoadMI);
return NewMI;
}
@@ -1295,6 +1297,50 @@ RISCVCC::CondCode RISCVCC::getInverseBranchCondition(RISCVCC::CondCode CC) {
}
}
+// Return inverse branch
+unsigned RISCVCC::getInverseBranchOpcode(unsigned BCC) {
+ switch (BCC) {
+ default:
+ llvm_unreachable("Unexpected branch opcode!");
+ case RISCV::BEQ:
+ return RISCV::BNE;
+ case RISCV::BNE:
+ return RISCV::BEQ;
+ case RISCV::BLT:
+ return RISCV::BGE;
+ case RISCV::BGE:
+ return RISCV::BLT;
+ case RISCV::BLTU:
+ return RISCV::BGEU;
+ case RISCV::BGEU:
+ return RISCV::BLTU;
+ case RISCV::QC_BEQI:
+ return RISCV::QC_BNEI;
+ case RISCV::QC_BNEI:
+ return RISCV::QC_BEQI;
+ case RISCV::QC_BLTI:
+ return RISCV::QC_BGEI;
+ case RISCV::QC_BGEI:
+ return RISCV::QC_BLTI;
+ case RISCV::QC_BLTUI:
+ return RISCV::QC_BGEUI;
+ case RISCV::QC_BGEUI:
+ return RISCV::QC_BLTUI;
+ case RISCV::QC_E_BEQI:
+ return RISCV::QC_E_BNEI;
+ case RISCV::QC_E_BNEI:
+ return RISCV::QC_E_BEQI;
+ case RISCV::QC_E_BLTI:
+ return RISCV::QC_E_BGEI;
+ case RISCV::QC_E_BGEI:
+ return RISCV::QC_E_BLTI;
+ case RISCV::QC_E_BLTUI:
+ return RISCV::QC_E_BGEUI;
+ case RISCV::QC_E_BGEUI:
+ return RISCV::QC_E_BLTUI;
+ }
+}
+
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -1926,15 +1972,15 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
MachineInstr *DefMI =
- canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this, STI);
+ canFoldAsPredicatedOp(MI.getOperand(2).getReg(), MRI, this, STI);
bool Invert = !DefMI;
if (!DefMI)
- DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this, STI);
+ DefMI = canFoldAsPredicatedOp(MI.getOperand(1).getReg(), MRI, this, STI);
if (!DefMI)
return nullptr;
// Find new register class to use.
- MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
+ MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
Register DestReg = MI.getOperand(0).getReg();
const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
if (!MRI.constrainRegClass(DestReg, PreviousClass))
@@ -1947,16 +1993,6 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
MachineInstrBuilder NewMI =
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
- // Copy the condition portion.
- NewMI.add(MI.getOperand(1));
- NewMI.add(MI.getOperand(2));
-
- // Add condition code, inverting if necessary.
- auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
- if (Invert)
- CC = RISCVCC::getInverseBranchCondition(CC);
- NewMI.addImm(CC);
-
// Copy the false register.
NewMI.add(FalseReg);
@@ -1965,6 +2001,16 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
NewMI.add(DefMI->getOperand(i));
+ // Add branch opcode, inverting if necessary.
+ unsigned BCCOpcode = MI.getOperand(MI.getNumExplicitOperands() - 3).getImm();
+ if (Invert)
+ BCCOpcode = RISCVCC::getInverseBranchOpcode(BCCOpcode);
+ NewMI.addImm(BCCOpcode);
+
+ // Copy the condition portion.
+ NewMI.add(MI.getOperand(MI.getNumExplicitOperands() - 2));
+ NewMI.add(MI.getOperand(MI.getNumExplicitOperands() - 1));
+
// Update SeenMIs set: register newly created MI and erase removed DefMI.
SeenMIs.insert(NewMI);
SeenMIs.erase(DefMI);
@@ -2016,6 +2062,65 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
case RISCV::PseudoMV_FPR32INX:
// MV is always compressible to either c.mv or c.li rd, 0.
return STI.hasStdExtZca() ? 2 : 4;
+ // Below cases are for short forward branch pseudos
+ case RISCV::PseudoCCMOVGPRNoX0:
+ return get(MI.getOperand(MI.getNumExplicitOperands() - 3).getImm())
+ .getSize() +
+ 2;
+ case RISCV::PseudoCCMOVGPR:
+ case RISCV::PseudoCCADD:
+ case RISCV::PseudoCCSUB:
+ case RISCV::PseudoCCSLL:
+ case RISCV::PseudoCCSRL:
+ case RISCV::PseudoCCSRA:
+ case RISCV::PseudoCCAND:
+ case RISCV::PseudoCCOR:
+ case RISCV::PseudoCCXOR:
+ case RISCV::PseudoCCADDI:
+ case RISCV::PseudoCCANDI:
+ case RISCV::PseudoCCORI:
+ case RISCV::PseudoCCXORI:
+ case RISCV::PseudoCCLUI:
+ case RISCV::PseudoCCSLLI:
+ case RISCV::PseudoCCSRLI:
+ case RISCV::PseudoCCSRAI:
+ case RISCV::PseudoCCADDW:
+ case RISCV::PseudoCCSUBW:
+ case RISCV::PseudoCCSLLW:
+ case RISCV::PseudoCCSRLW:
+ case RISCV::PseudoCCSRAW:
+ case RISCV::PseudoCCADDIW:
+ case RISCV::PseudoCCSLLIW:
+ case RISCV::PseudoCCSRLIW:
+ case RISCV::PseudoCCSRAIW:
+ case RISCV::PseudoCCANDN:
+ case RISCV::PseudoCCORN:
+ case RISCV::PseudoCCXNOR:
+ case RISCV::PseudoCCMAX:
+ case RISCV::PseudoCCMIN:
+ case RISCV::PseudoCCMAXU:
+ case RISCV::PseudoCCMINU:
+ case RISCV::PseudoCCMUL:
+ case RISCV::PseudoCCLB:
+ case RISCV::PseudoCCLH:
+ case RISCV::PseudoCCLW:
+ case RISCV::PseudoCCLHU:
+ case RISCV::PseudoCCLBU:
+ case RISCV::PseudoCCLWU:
+ case RISCV::PseudoCCLD:
+ case RISCV::PseudoCCQC_LI:
+ return get(MI.getOperand(MI.getNumExplicitOperands() - 3).getImm())
+ .getSize() +
+ 4;
+ case RISCV::PseudoCCQC_E_LI:
+ case RISCV::PseudoCCQC_E_LB:
+ case RISCV::PseudoCCQC_E_LH:
+ case RISCV::PseudoCCQC_E_LW:
+ case RISCV::PseudoCCQC_E_LHU:
+ case RISCV::PseudoCCQC_E_LBU:
+ return get(MI.getOperand(MI.getNumExplicitOperands() - 3).getImm())
+ .getSize() +
+ 6;
case TargetOpcode::STACKMAP:
// The upper bound for a stackmap intrinsic is the full length of its shadow
return StackMapOpers(&MI).getNumPatchBytes();
@@ -3180,6 +3285,12 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
return false;
}
break;
+ case RISCVOp::OPERAND_SFB_RHS:
+ if (!MO.isReg() && !MO.isImm()) {
+ ErrInfo = "Expected a register or immediate operand.";
+ return false;
+ }
+ break;
}
}
@@ -4040,8 +4151,8 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
case RISCV::PseudoCCMOVGPRNoX0:
case RISCV::PseudoCCMOVGPR:
- // Operands 4 and 5 are commutable.
- return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
+ // Operands 1 and 2 are commutable.
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
case CASE_RVV_OPCODE(VADD_VV):
case CASE_RVV_OPCODE(VAND_VV):
case CASE_RVV_OPCODE(VOR_VV):
@@ -4269,10 +4380,10 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
case RISCV::PseudoCCMOVGPRNoX0:
case RISCV::PseudoCCMOVGPR: {
// CCMOV can be commuted by inverting the condition.
- auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
- CC = RISCVCC::getInverseBranchCondition(CC);
+ unsigned BCC = MI.getOperand(MI.getNumExplicitOperands() - 3).getImm();
+ BCC = RISCVCC::getInverseBranchOpcode(BCC);
auto &WorkingMI = cloneIfNew(MI);
- WorkingMI.getOperand(3).setImm(CC);
+ WorkingMI.getOperand(MI.getNumExplicitOperands() - 3).setImm(BCC);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
OpIdx1, OpIdx2);
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 15d0e4d3e7d52..57a944d2977de 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -63,6 +63,7 @@ enum CondCode {
};
CondCode getInverseBranchCondition(CondCode);
+unsigned getInverseBranchOpcode(unsigned BCC);
unsigned getBrCond(CondCode CC, unsigned SelectOpc = 0);
} // end of namespace RISCVCC
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
index b06b9c2847168..c02aa06c66c64 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td
@@ -10,108 +10,123 @@
//
//===----------------------------------------------------------------------===//
+// Represents the Opcode for a branch instruction, used by SFB pseudos
+def bcc_opcode : RISCVOp<i32> {
+ let OperandType = "OPERAND_BCC_OPCODE";
+}
+
+def sfb_rhs : RegisterOperand<GPR> {
+ let OperandNamespace = "RISCVOp";
+ let OperandType = "OPERAND_SFB_RHS";
+}
+
+// cond -> bcc_opcode for reg, reg branches
+def CCtoRISCVBCC : SDNodeXForm<cond, [{
+ ISD::CondCode CC = N->get();
+ RISCVCC::CondCode RvCC = getRISCVCCForIntCC(CC);
+ RvCC = RISCVCC::getInverseBranchCondition(RvCC);
+ unsigned BccOpcode = RISCVCC::getBrCond(RvCC);
+ return CurDAG->getTargetConstant(BccOpcode, SDLoc(N), MVT::i32);
+}]>;
+
+// For each of the short forward branch pseudos, corresponding code for
+// getting the correct size of the pseudo is needed in getInstSizeInBytes.
let Predicates = [HasShortForwardBranchIALU], isSelect = 1,
- Constraints = "$dst = $falsev", isCommutable = 1, Size = 8 in {
+ Constraints = "$dst = $falsev", isCommutable = 1 in {
// This instruction moves $truev to $dst when the condition is true. It will
// be expanded to control flow in RISCVExpandPseudoInsts.
def PseudoCCMOVGPR : Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc,
- GPR:$falsev, GPR:$truev),
- [(set GPR:$dst,
- (riscv_selectcc (XLenVT GPR:$lhs), GPR:$rhs,
- riscv_cond:$cc,
- (XLenVT GPR:$truev),
- GPR:$falsev))]>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp,
- ReadSFBALU, ReadSFBALU]>;
+ (ins GPR:$falsev, GPR:$truev, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs),
+ []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU,
+ ReadSFBJmp, ReadSFBJmp]>;
}
+let Predicates = [HasShortForwardBranchIALU] in
+def : Pat<(riscv_selectcc (XLenVT GPR:$lhs), GPR:$rhs, cond:$cc, (XLenVT GPR:$truev), GPR:$falsev),
+ (PseudoCCMOVGPR GPR:$falsev, GPR:$truev, (CCtoRISCVBCC $cc), GPR:$lhs, GPR:$rhs)>;
+
// This should always expand to a branch+c.mv so the size is 6 or 4 if the
// branch is compressible.
let Predicates = [HasConditionalMoveFusion, NoShortForwardBranch],
- Constraints = "$dst = $falsev", isCommutable = 1, Size = 6 in {
+ Constraints = "$dst = $falsev", isCommutable = 1 in {
// This instruction moves $truev to $dst when the condition is true. It will
// be expanded to control flow in RISCVExpandPseudoInsts.
// We use GPRNoX0 because c.mv cannot encode X0.
def PseudoCCMOVGPRNoX0 : Pseudo<(outs GPRNoX0:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc,
- GPRNoX0:$falsev, GPRNoX0:$truev),
- [(set GPRNoX0:$dst,
- (riscv_selectcc (XLenVT GPR:$lhs), GPR:$rhs,
- riscv_cond:$cc,
- (XLenVT GPRNoX0:$truev),
- GPRNoX0:$falsev))]>,
+ (ins GPRNoX0:$falsev, GPRNoX0:$truev, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs),
+ []>,
Sched<[]>;
}
+let Predicates = [HasConditionalMoveFusion, NoShortForwardBranch] in
+def :Pat<(riscv_selectcc (XLenVT GPR:$lhs), GPR:$rhs, cond:$cc, (XLenVT GPRNoX0:$truev), GPRNoX0:$falsev),
+ (PseudoCCMOVGPRNoX0 GPRNoX0:$falsev, GPRNoX0:$truev, (CCtoRISCVBCC $cc), GPR:$lhs, GPR:$rhs )>;
+
class SFBALU_rr
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- GPR:$rs2), []>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU,
- ReadSFBALU]> {
+ (ins GPR:$falsev, GPR:$rs1, GPR:$rs2, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU,
+ ReadSFBALU, ReadSFBJmp, ReadSFBJmp]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBALU_ri
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- simm12_lo:$imm), []>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU]> {
+ (ins GPR:$falsev, GPR:$rs1, simm12_lo:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU, ReadSFBJmp, ReadSFBJmp]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBLUI
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev,
- uimm20_lui:$imm), []> {
+ (ins GPR:$falsev, uimm20_lui:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBLoad
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- simm12_lo:$imm), []> {
+ (ins GPR:$falsev, GPR:$rs1, simm12_lo:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []> {
let hasSideEffects = 0;
let mayLoad = 1;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBShift_ri
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- uimmlog2xlen:$imm), []>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU]> {
+ (ins GPR:$falsev, GPR:$rs1, uimmlog2xlen:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU, ReadSFBJmp, ReadSFBJmp]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBShiftW_ri
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- uimm5:$imm), []>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU]> {
+ (ins GPR:$falsev, GPR:$rs1, uimm5:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU, ReadSFBJmp, ReadSFBJmp]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
@@ -159,14 +174,23 @@ def PseudoCCORN : SFBALU_rr;
def PseudoCCXNOR : SFBALU_rr;
}
+// imm -> bcc_opcode for reg, reg branches
+def RISCVCCtoRISCVBCC : SDNodeXForm<imm, [{
+ auto CCImm = cast<ConstantSDNode>(N)->getZExtValue();
+ auto RvCC = static_cast<RISCVCC::CondCode>(CCImm);
+ RvCC = RISCVCC::getInverseBranchCondition(RvCC);
+ unsigned BccOpcode = RISCVCC::getBrCond(RvCC);
+ return CurDAG->getTargetConstant(BccOpcode, SDLoc(N), MVT::i32);
+}]>;
+
let Predicates = [HasShortForwardBranchIALU] in
def : Pat<(XLenVT (abs GPR:$rs1)),
- (PseudoCCSUB (XLenVT GPR:$rs1), (XLenVT X0), /* COND_LT */ 2,
- (XLenVT GPR:$rs1), (XLenVT X0), (XLenVT GPR:$rs1))>;
+ (PseudoCCSUB (XLenVT GPR:$rs1), (XLenVT X0), (XLenVT GPR:$rs1),
+ (RISCVCCtoRISCVBCC (i32 /* COND_LT */ 2)),(XLenVT GPR:$rs1), (XLenVT X0))>;
let Predicates = [HasShortForwardBranchIALU, IsRV64] in
def : Pat<(sext_inreg (abs 33signbits_node:$rs1), i32),
- (PseudoCCSUBW (i64 GPR:$rs1), (i64 X0), /* COND_LT */ 2,
- (i64 GPR:$rs1), (i64 X0), (i64 GPR:$rs1))>;
+ (PseudoCCSUBW (i64 GPR:$rs1), (i64 X0), (i64 GPR:$rs1), (RISCVCCtoRISCVBCC (i32 /* COND_LT */ 2)),
+ (i64 GPR:$rs1), (i64 X0))>;
let Predicates = [HasShortForwardBranchIMinMax] in {
def PseudoCCMAX : SFBALU_rr;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index b86db6efb555f..db11589cce1bc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -937,9 +937,10 @@ defm : VPatTernaryVD4DOT_VV<"int_riscv_nds_vd4dotsu", "PseudoNDS_VD4DOTSU",
class SFBNDS_BFO
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- uimmlog2xlen:$msb, uimmlog2xlen:$lsb), []>,
- Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU]> {
+ (ins GPR:$falsev, GPR:$rs1, uimmlog2xlen:$msb, uimmlog2xlen:$lsb,
+ bcc_opcode:$bcc, GPR:$lhs, sfb_rhs:$rhs), []>,
+ Sched<[WriteSFB, ReadSFBALU, ReadSFBALU,
+ ReadSFBALU, ReadSFBJmp, ReadSFBJmp]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index 434ecf81c2f3b..c2051973be186 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -838,34 +838,31 @@ class QCIRVInst48EJ<bits<2> func2, string opcodestr>
class SFBQC_LI
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev,
- simm20_li:$imm), []> {
+ (ins GPR:$falsev, simm20_li:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 8;
let Constraints = "$dst = $falsev";
}
class SFBQC_E_LI
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev,
- bare_simm32:$imm), []> {
+ (ins GPR:$falsev, bare_simm32:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
- let Size = 10;
let Constraints = "$dst = $falsev";
}
class SFBQCLoad
: Pseudo<(outs GPR:$dst),
- (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1,
- simm26:$imm), []> {
+ (ins GPR:$falsev, GPR:$rs1, simm26:$imm, bcc_opcode:$bcc,
+ GPR:$lhs, sfb_rhs:$rhs), []> {
let hasSideEffects = 0;
let mayLoad = 1;
let mayStore = 0;
- let Size = 10;
let Constraints = "$dst = $falsev";
}
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index bfa4d7d9de7fa..254a2e2b40b50 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -342,10 +342,10 @@ static bool hasAllNBitUsers(const MachineInstr &OrigMI,
case RISCV::PseudoCCMOVGPR:
case RISCV::PseudoCCMOVGPRNoX0:
- // Either operand 4 or operand 5 is returned by this instruction. If
+ // Either operand 1 or operand 2 is returned by this instruction. If
// only the lower word of the result is used, then only the lower word
- // of operand 4 and 5 is used.
- if (OpIdx != 4 && OpIdx != 5)
+ // of operand 1 and 2 is used.
+ if (OpIdx != 1 && OpIdx != 2)
return false;
Worklist.emplace_back(UserMI, Bits);
break;
@@ -557,9 +557,9 @@ static bool isSignExtendedW(Register SrcReg, const RISCVSubtarget &ST,
case RISCV::PseudoCCSLLIW:
case RISCV::PseudoCCSRLIW:
case RISCV::PseudoCCSRAIW:
- // Returns operand 4 or an ADDW/SUBW/etc. of operands 5 and 6. We only
- // need to check if operand 4 is sign extended.
- if (!AddRegToWorkList(MI->getOperand(4).getReg()))
+ // Returns operand 1 or an ADDW/SUBW/etc. of operands 2 and 3. We only
+ // need to check if operand 1 is sign extended.
+ if (!AddRegToWorkList(MI->getOperand(1).getReg()))
return false;
break;
case RISCV::REMU:
@@ -589,8 +589,8 @@ static bool isSignExtendedW(Register SrcReg, const RISCVSubtarget &ST,
// MIN, MAX, PHI, or bitwise merge instructions is also sign-extended.
// The input registers for PHI are operand 1, 3, ...
- // The input registers for PseudoCCMOVGPR(NoX0) are 4 and 5.
- // The input registers for PseudoCCAND/OR/XOR are 4, 5, and 6.
+ // The input registers for PseudoCCMOVGPR(NoX0) are 1 and 2.
+ // The input registers for PseudoCCAND/OR/XOR are 1, 2, and 3.
// The input registers for MERGE/MVM/MVMN are 1, 2, and 3.
// The input registers for others are operand 1 and 2.
unsigned B = 1, E = 3, D = 1;
@@ -601,8 +601,8 @@ static bool isSignExtendedW(Register SrcReg, const RISCVSubtarget &ST,
break;
case RISCV::PseudoCCMOVGPR:
case RISCV::PseudoCCMOVGPRNoX0:
- B = 4;
- E = 6;
+ B = 1;
+ E = 3;
break;
case RISCV::PseudoCCAND:
case RISCV::PseudoCCOR:
@@ -610,8 +610,8 @@ static bool isSignExtendedW(Register SrcReg, const RISCVSubtarget &ST,
case RISCV::PseudoCCANDN:
case RISCV::PseudoCCORN:
case RISCV::PseudoCCXNOR:
- B = 4;
- E = 7;
+ B = 1;
+ E = 4;
break;
case RISCV::MERGE:
case RISCV::MVM:
diff --git a/llvm/test/CodeGen/RISCV/opt-w-instrs.mir b/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
index 47407c2270ead..fa961ea1fce5a 100644
--- a/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
+++ b/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
@@ -123,7 +123,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x12
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gprnox0 = COPY $x13
- ; CHECK-NEXT: [[PseudoCCMOVGPRNoX0_:%[0-9]+]]:gprnox0 = PseudoCCMOVGPRNoX0 [[COPY]], [[COPY1]], 1, [[COPY2]], [[COPY3]]
+ ; CHECK-NEXT: [[PseudoCCMOVGPRNoX0_:%[0-9]+]]:gprnox0 = PseudoCCMOVGPRNoX0 [[COPY2]], [[COPY3]], 1, [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[PseudoCCMOVGPRNoX0_]], 0
; CHECK-NEXT: $x10 = COPY [[ADDIW]]
; CHECK-NEXT: PseudoRET implicit $x10
@@ -133,7 +133,7 @@ body: |
%3:gpr = COPY $x13
%4:gprnox0 = ADDIW %2, 0
%5:gprnox0 = ADDIW %3, 0
- %6:gprnox0 = PseudoCCMOVGPRNoX0 %0, %1, 1, %4, %5
+ %6:gprnox0 = PseudoCCMOVGPRNoX0 %4, %5, 1, %0, %1
%7:gpr = ADDIW %6, 0
$x10 = COPY %7
PseudoRET implicit $x10
@@ -154,7 +154,7 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x13
; CHECK-NEXT: [[SRAIW:%[0-9]+]]:gprnox0 = SRAIW [[COPY2]], 0
; CHECK-NEXT: [[SRAIW1:%[0-9]+]]:gprnox0 = SRAIW [[COPY3]], 0
- ; CHECK-NEXT: [[PseudoCCMOVGPRNoX0_:%[0-9]+]]:gprnox0 = PseudoCCMOVGPRNoX0 [[COPY]], [[COPY1]], 1, [[SRAIW]], [[SRAIW1]]
+ ; CHECK-NEXT: [[PseudoCCMOVGPRNoX0_:%[0-9]+]]:gprnox0 = PseudoCCMOVGPRNoX0 [[SRAIW]], [[SRAIW1]], 1, [[COPY]], [[COPY1]]
; CHECK-NEXT: $x10 = COPY [[PseudoCCMOVGPRNoX0_]]
; CHECK-NEXT: PseudoRET implicit $x10
%0:gpr = COPY $x10
@@ -163,7 +163,7 @@ body: |
%3:gpr = COPY $x13
%4:gprnox0 = SRAIW %2, 0
%5:gprnox0 = SRAIW %3, 0
- %6:gprnox0 = PseudoCCMOVGPRNoX0 %0, %1, 1, %4, %5
+ %6:gprnox0 = PseudoCCMOVGPRNoX0 %4, %5, 1, %0, %1
%7:gpr = ADDIW %6, 0
$x10 = COPY %7
PseudoRET implicit $x10
More information about the llvm-commits
mailing list