[PATCH 1/2] TableGen: Generate a function for getting operand indices based on their defined names
Tom Stellard
tom at stellard.net
Thu Jun 13 07:18:00 PDT 2013
Ping.
On Fri, Jun 07, 2013 at 04:30:18PM -0700, Tom Stellard wrote:
> On Fri, Jun 07, 2013 at 09:48:55AM -0700, Jakob Stoklund Olesen wrote:
> >
> > On Jun 3, 2013, at 2:23 PM, Tom Stellard <tom at stellard.net> wrote:
> >
> > > From: Tom Stellard <thomas.stellard at amd.com>
> > >
> > > This patch modifies TableGen to generate a function in
> > > ${TARGET}GenInstrInfo.inc called getNamedOperandIdx(), which can be used
> > > to look up indices for operands based on their names.
> > >
> > > For example, if you have an instruction like:
> > >
> > > def ADD : TargetInstr <(outs GPR:$dst), (ins GPR:$src0, GPR:$src1)>;
> > >
> > > You can look up the operand indices using the new function, like this:
> > >
> > > Target::getNamedOperandIdx(Target::ADD, Target::OpName::DST) => 0
> > > Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC0) => 1
> > > Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC1) => 2
> > >
> > > The operand names are case insensitive, so $dst is equivalent to $DST.
> >
> > TableGen is not case insensitive, so won’t this create opportunities for collisions? It doesn’t seem to be necessary.
> >
> > The function you’re generating looks like it could get really big. Please use some form of table compression to reduce the size.
> >
>
> Hi Jakob,
>
> Here are updated patches that incorporate your comments.
>
> -Tom
> From ec847d22996221bf9a68c9dda285b239f3a458a9 Mon Sep 17 00:00:00 2001
> From: Tom Stellard <thomas.stellard at amd.com>
> Date: Thu, 30 May 2013 10:10:50 -0700
> Subject: [PATCH 1/2] TableGen: Generate a function for getting operand
> indices based on their defined names v2
>
> This patch modifies TableGen to generate a function in
> ${TARGET}GenInstrInfo.inc called getNamedOperandIdx(), which can be used
> to look up indices for operands based on their names.
>
> For example, if you have an instruction like:
>
> def ADD : TargetInstr <(outs GPR:$dst), (ins GPR:$src0, GPR:$src1)>;
>
> You can look up the operand indices using the new function, like this:
>
> Target::getNamedOperandIdx(Target::ADD, Target::OpName::DST) => 0
> Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC0) => 1
> Target::getNamedOperandIdx(Target::ADD, Target::OpName::SRC1) => 2
>
> The operand names are case insensitive, so $dst is equivalent to $DST.
>
> This change is useful for R600 which has instructions with a large number
> of operands, many of which model single bit instruction configuration
> values. These configuration bits are common across most instructions,
> but may have a different operand index depending on the instruction type.
> It is useful to have a convenient way to look up the operand indices,
> so these bits can be generically set on any instruction.
>
> v2:
> - Don't uppercase enum values
> - Use table compression to reduce function size
> ---
> utils/TableGen/InstrInfoEmitter.cpp | 87 +++++++++++++++++++++++++++++++++++++
> 1 file changed, 87 insertions(+)
>
> diff --git a/utils/TableGen/InstrInfoEmitter.cpp b/utils/TableGen/InstrInfoEmitter.cpp
> index d6020a8..edb66c2 100644
> --- a/utils/TableGen/InstrInfoEmitter.cpp
> +++ b/utils/TableGen/InstrInfoEmitter.cpp
> @@ -45,6 +45,8 @@ private:
> void emitEnums(raw_ostream &OS);
>
> typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
> + typedef std::map<std::map<unsigned, unsigned>,
> + std::vector<std::string> > OpNameMapTy;
> void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
> Record *InstrInfo,
> std::map<std::vector<Record*>, unsigned> &EL,
> @@ -293,6 +295,91 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
> OS << "} // End llvm namespace \n";
>
> OS << "#endif // GET_INSTRINFO_CTOR\n\n";
> +
> + // Operand name -> index mapping
> +
> + std::string Namespace = Target.getInstNamespace();
> + std::string OpNameNS = "OpName";
> + std::map<std::string, unsigned> Operands;
> + OpNameMapTy OperandMap;
> + for (unsigned i = 0, e = NumberedInstructions.size(), NumOperands = 0;
> + i != e; ++i) {
> + const CodeGenInstruction *Inst = NumberedInstructions[i];
> + std::map<unsigned, unsigned> OpList;
> + for (unsigned j = 0, je = Inst->Operands.size(); j != je; ++j) {
> + CGIOperandList::OperandInfo Info = Inst->Operands[j];
> + std::string Name = Info.Name;
> + if (Operands.count(Name) == 0) {
> + Operands[Name] = NumOperands++;
> + }
> + unsigned OperandId = Operands[Name];
> + OpList[OperandId] = Info.MIOperandNo;
> + }
> + OperandMap[OpList].push_back(Namespace + "::" + Inst->TheDef->getName());
> + }
> +
> + OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n";
> + OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n";
> + OS << "namespace llvm {";
> + OS << "namespace " << Namespace << " {\n";
> + OS << "namespace " << OpNameNS << " { \n";
> + OS << "enum {\n";
> + for (std::map<std::string, unsigned>::iterator i = Operands.begin(),
> + e = Operands.end();
> + i != e; ++i) {
> + OS << " " << i->first << " = " << i->second << ",\n";
> + }
> + OS << "OPERAND_LAST";
> + OS << "\n};\n";
> + OS << "} // End namespace OpName\n";
> + OS << "} // End namespace " << Namespace << "\n";
> + OS << "} // End namespace llvm\n";
> + OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n";
> +
> + OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n";
> + OS << "#undef GET_INSTRINFO_NAMED_OPS\n";
> + OS << "namespace llvm {";
> + OS << "namespace " << Namespace << " {\n";
> + OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n";
> + OS << " static const int16_t OperandMap []["<< Operands.size() << "] = {\n";
> + for (OpNameMapTy::iterator i = OperandMap.begin(), e = OperandMap.end();
> + i != e; ++i) {
> + std::map<unsigned, unsigned> OpList = i->first;
> + OS << "{";
> + for (unsigned Idx = 0; Idx < Operands.size(); ++Idx) {
> + if (OpList.count(Idx) == 0) {
> + OS << "-1";
> + } else {
> + OS << OpList[Idx];
> + }
> + OS << ", ";
> + }
> + OS << "},\n";
> + }
> + OS << "};\n";
> +
> + OS << " switch(Opcode) {\n";
> + unsigned TableIndex = 0;
> + for (OpNameMapTy::iterator i = OperandMap.begin(), e = OperandMap.end();
> + i != e; ++i, ++TableIndex) {
> + std::map<unsigned, unsigned> OpList = i->first;
> + std::vector<std::string> OpcodeList = i->second;
> +
> + for (std::vector<std::string>::iterator ii = OpcodeList.begin(),
> + ie = OpcodeList.end();
> + ii != ie; ++ii) {
> + std::string OpName = *ii;
> + OS << " case " << OpName << ":\n";
> + }
> +
> + OS << " return OperandMap[" << TableIndex << "][NamedIdx];\n";
> + }
> + OS << " default: return -1;\n";
> + OS << " }\n";
> + OS << "}\n";
> + OS << "} // End namespace " << Namespace << "\n";
> + OS << "} // End namespace llvm\n";
> + OS << "#endif //GET_INSTRINFO_NAMED_OPS\n";
> }
>
> void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
> --
> 1.7.11.4
>
> From 8d31f78c212a2c4e8e8363f8f4c1895a758a6a7b Mon Sep 17 00:00:00 2001
> From: Tom Stellard <thomas.stellard at amd.com>
> Date: Thu, 30 May 2013 13:03:20 -0700
> Subject: [PATCH 2/2] R600: Use new getNamedOperandIdx function generated by
> TableGen v2
>
> v2:
> - Use the correct case for operand names
> ---
> lib/Target/R600/AMDGPUInstrInfo.cpp | 1 +
> lib/Target/R600/AMDGPUInstrInfo.h | 5 +
> lib/Target/R600/AMDILISelDAGToDAG.cpp | 91 ++++++-------
> lib/Target/R600/R600Defines.h | 41 +-----
> lib/Target/R600/R600ExpandSpecialInstrs.cpp | 16 +--
> lib/Target/R600/R600ISelLowering.cpp | 2 +-
> lib/Target/R600/R600InstrInfo.cpp | 190 +++++++++++-----------------
> lib/Target/R600/R600InstrInfo.h | 8 +-
> lib/Target/R600/R600Packetizer.cpp | 18 +--
> 9 files changed, 148 insertions(+), 224 deletions(-)
>
> diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
> index 31b3002..61437e9 100644
> --- a/lib/Target/R600/AMDGPUInstrInfo.cpp
> +++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
> @@ -21,6 +21,7 @@
> #include "llvm/CodeGen/MachineRegisterInfo.h"
>
> #define GET_INSTRINFO_CTOR
> +#define GET_INSTRINFO_NAMED_OPS
> #define GET_INSTRMAP_INFO
> #include "AMDGPUGenInstrInfo.inc"
>
> diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h
> index 3909e4e..306f467 100644
> --- a/lib/Target/R600/AMDGPUInstrInfo.h
> +++ b/lib/Target/R600/AMDGPUInstrInfo.h
> @@ -23,6 +23,7 @@
>
> #define GET_INSTRINFO_HEADER
> #define GET_INSTRINFO_ENUM
> +#define GET_INSTRINFO_OPERAND_ENUM
> #include "AMDGPUGenInstrInfo.inc"
>
> #define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
> @@ -198,6 +199,10 @@ public:
>
> };
>
> +namespace AMDGPU {
> + int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
> +} // End namespace AMDGPU
> +
> } // End llvm namespace
>
> #define AMDGPU_FLAG_REGISTER_LOAD (UINT64_C(1) << 63)
> diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> index 93432a2..a01879e 100644
> --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
> +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> @@ -281,7 +281,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
> continue;
> }
>
> - int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
> + int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
> + AMDGPU::OpName::literal);
> assert(ImmIdx != -1);
>
> // subtract one from ImmIdx, because the DST operand is usually index
> @@ -358,7 +359,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
> if (PotentialClamp->isMachineOpcode() &&
> PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
> unsigned ClampIdx =
> - TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP);
> + TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
> std::vector<SDValue> Ops;
> unsigned NumOp = Result->getNumOperands();
> for (unsigned i = 0; i < NumOp; ++i) {
> @@ -416,23 +417,23 @@ bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
> bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
> const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
> int OperandIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1),
> - TII->getOperandIdx(Opcode, R600Operands::SRC2)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
> };
> int SelIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
> - TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
> };
> int NegIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
> - TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
> };
> int AbsIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
> -1
> };
>
> @@ -467,44 +468,44 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
> bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
> const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
> int OperandIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_W),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_W)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
> };
> int SelIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL_W),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL_W)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
> };
> int NegIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG_W),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG_W)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
> };
> int AbsIdx[] = {
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS_W),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_X),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Y),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_Z),
> - TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS_W)
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
> };
>
> // Gather constants values
> diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
> index aebe581..e30ea27 100644
> --- a/lib/Target/R600/R600Defines.h
> +++ b/lib/Target/R600/R600Defines.h
> @@ -57,46 +57,7 @@ namespace R600_InstFlag {
> #define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
> #define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
>
> -namespace R600Operands {
> - enum Ops {
> - DST,
> - UPDATE_EXEC_MASK,
> - UPDATE_PREDICATE,
> - WRITE,
> - OMOD,
> - DST_REL,
> - CLAMP,
> - SRC0,
> - SRC0_NEG,
> - SRC0_REL,
> - SRC0_ABS,
> - SRC0_SEL,
> - SRC1,
> - SRC1_NEG,
> - SRC1_REL,
> - SRC1_ABS,
> - SRC1_SEL,
> - SRC2,
> - SRC2_NEG,
> - SRC2_REL,
> - SRC2_SEL,
> - LAST,
> - PRED_SEL,
> - IMM,
> - BANK_SWIZZLE,
> - COUNT
> - };
> -
> - const static int ALUOpTable[3][R600Operands::COUNT] = {
> -// W C S S S S S S S S S S S
> -// R O D L S R R R R S R R R R S R R R L P
> -// D U I M R A R C C C C R C C C C R C C C A R I
> -// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M B
> -// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M S
> - {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13},
> - {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20},
> - {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18}
> - };
> +namespace OpName {
>
> enum VecOps {
> UPDATE_EXEC_MASK_X,
> diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
> index 40c058f..efc9523 100644
> --- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp
> +++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
> @@ -82,9 +82,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
> AMDGPU::ZERO); // src1
> TII->addFlag(PredSet, 0, MO_FLAG_MASK);
> if (Flags & MO_FLAG_PUSH) {
> - TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
> + TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
> } else {
> - TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1);
> + TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
> }
> MI.eraseFromParent();
> continue;
> @@ -96,7 +96,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
> AMDGPU::ZERO,
> AMDGPU::ZERO);
> TII->addFlag(PredSet, 0, MO_FLAG_MASK);
> - TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
> + TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
>
> BuildMI(MBB, I, MBB.findDebugLoc(I),
> TII->get(AMDGPU::PREDICATED_BREAK))
> @@ -208,10 +208,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
> // While not strictly necessary from hw point of view, we force
> // all src operands of a dot4 inst to belong to the same slot.
> unsigned Src0 = BMI->getOperand(
> - TII->getOperandIdx(Opcode, R600Operands::SRC0))
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
> .getReg();
> unsigned Src1 = BMI->getOperand(
> - TII->getOperandIdx(Opcode, R600Operands::SRC1))
> + TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
> .getReg();
> (void) Src0;
> (void) Src1;
> @@ -258,14 +258,14 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
> // T0_W = CUBE T1_Y, T1_Z
> for (unsigned Chan = 0; Chan < 4; Chan++) {
> unsigned DstReg = MI.getOperand(
> - TII->getOperandIdx(MI, R600Operands::DST)).getReg();
> + TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
> unsigned Src0 = MI.getOperand(
> - TII->getOperandIdx(MI, R600Operands::SRC0)).getReg();
> + TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
> unsigned Src1 = 0;
>
> // Determine the correct source registers
> if (!IsCube) {
> - int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1);
> + int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
> if (Src1Idx != -1) {
> Src1 = MI.getOperand(Src1Idx).getReg();
> }
> diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
> index 06c2100..ee13320 100644
> --- a/lib/Target/R600/R600ISelLowering.cpp
> +++ b/lib/Target/R600/R600ISelLowering.cpp
> @@ -168,7 +168,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
> case AMDGPU::CONST_COPY: {
> MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
> MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
> - TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
> + TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
> MI->getOperand(1).getImm());
> break;
> }
> diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
> index 4f5cfcd..b9da74c 100644
> --- a/lib/Target/R600/R600InstrInfo.cpp
> +++ b/lib/Target/R600/R600InstrInfo.cpp
> @@ -69,7 +69,7 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
>
> MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
> DestReg, SrcReg);
> - NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
> + NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
> .setIsKill(KillSrc);
> }
> }
> @@ -170,22 +170,24 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
> SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
>
> if (MI->getOpcode() == AMDGPU::DOT_4) {
> - static const R600Operands::VecOps OpTable[8][2] = {
> - {R600Operands::SRC0_X, R600Operands::SRC0_SEL_X},
> - {R600Operands::SRC0_Y, R600Operands::SRC0_SEL_Y},
> - {R600Operands::SRC0_Z, R600Operands::SRC0_SEL_Z},
> - {R600Operands::SRC0_W, R600Operands::SRC0_SEL_W},
> - {R600Operands::SRC1_X, R600Operands::SRC1_SEL_X},
> - {R600Operands::SRC1_Y, R600Operands::SRC1_SEL_Y},
> - {R600Operands::SRC1_Z, R600Operands::SRC1_SEL_Z},
> - {R600Operands::SRC1_W, R600Operands::SRC1_SEL_W},
> + static const unsigned OpTable[8][2] = {
> + {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
> + {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
> + {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
> + {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
> + {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
> + {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
> + {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
> + {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
> };
>
> for (unsigned j = 0; j < 8; j++) {
> - MachineOperand &MO = MI->getOperand(OpTable[j][0] + 1);
> + MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
> + OpTable[j][0]));
> unsigned Reg = MO.getReg();
> if (Reg == AMDGPU::ALU_CONST) {
> - unsigned Sel = MI->getOperand(OpTable[j][1] + 1).getImm();
> + unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
> + OpTable[j][1])).getImm();
> Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
> continue;
> }
> @@ -194,10 +196,10 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
> return Result;
> }
>
> - static const R600Operands::Ops OpTable[3][2] = {
> - {R600Operands::SRC0, R600Operands::SRC0_SEL},
> - {R600Operands::SRC1, R600Operands::SRC1_SEL},
> - {R600Operands::SRC2, R600Operands::SRC2_SEL},
> + static const unsigned OpTable[3][2] = {
> + {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
> + {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
> + {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
> };
>
> for (unsigned j = 0; j < 3; j++) {
> @@ -214,7 +216,7 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
> }
> if (Reg == AMDGPU::ALU_LITERAL_X) {
> unsigned Imm = MI->getOperand(
> - getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm();
> + getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
> Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
> continue;
> }
> @@ -329,7 +331,7 @@ R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
> for (unsigned i = 0, e = IG.size(); i < e; ++i) {
> IGSrcs.push_back(ExtractSrcs(IG[i], PV));
> unsigned Op = getOperandIdx(IG[i]->getOpcode(),
> - R600Operands::BANK_SWIZZLE);
> + AMDGPU::OpName::bank_swizzle);
> ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
> IG[i]->getOperand(Op).getImm());
> }
> @@ -812,13 +814,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
> unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
> MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
> AMDGPU::AR_X, OffsetReg);
> - setImmOperand(MOVA, R600Operands::WRITE, 0);
> + setImmOperand(MOVA, AMDGPU::OpName::write, 0);
>
> MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
> AddrReg, ValueReg)
> .addReg(AMDGPU::AR_X,
> RegState::Implicit | RegState::Kill);
> - setImmOperand(Mov, R600Operands::DST_REL, 1);
> + setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
> return Mov;
> }
>
> @@ -830,13 +832,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
> MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
> AMDGPU::AR_X,
> OffsetReg);
> - setImmOperand(MOVA, R600Operands::WRITE, 0);
> + setImmOperand(MOVA, AMDGPU::OpName::write, 0);
> MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
> ValueReg,
> AddrReg)
> .addReg(AMDGPU::AR_X,
> RegState::Implicit | RegState::Kill);
> - setImmOperand(Mov, R600Operands::SRC0_REL, 1);
> + setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
>
> return Mov;
> }
> @@ -892,7 +894,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
>
> #define OPERAND_CASE(Label) \
> case Label: { \
> - static const R600Operands::VecOps Ops[] = \
> + static const unsigned Ops[] = \
> { \
> Label##_X, \
> Label##_Y, \
> @@ -902,26 +904,25 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
> return Ops[Slot]; \
> }
>
> -static R600Operands::VecOps
> -getSlotedOps(R600Operands::Ops Op, unsigned Slot) {
> +static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
> switch (Op) {
> - OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK)
> - OPERAND_CASE(R600Operands::UPDATE_PREDICATE)
> - OPERAND_CASE(R600Operands::WRITE)
> - OPERAND_CASE(R600Operands::OMOD)
> - OPERAND_CASE(R600Operands::DST_REL)
> - OPERAND_CASE(R600Operands::CLAMP)
> - OPERAND_CASE(R600Operands::SRC0)
> - OPERAND_CASE(R600Operands::SRC0_NEG)
> - OPERAND_CASE(R600Operands::SRC0_REL)
> - OPERAND_CASE(R600Operands::SRC0_ABS)
> - OPERAND_CASE(R600Operands::SRC0_SEL)
> - OPERAND_CASE(R600Operands::SRC1)
> - OPERAND_CASE(R600Operands::SRC1_NEG)
> - OPERAND_CASE(R600Operands::SRC1_REL)
> - OPERAND_CASE(R600Operands::SRC1_ABS)
> - OPERAND_CASE(R600Operands::SRC1_SEL)
> - OPERAND_CASE(R600Operands::PRED_SEL)
> + OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
> + OPERAND_CASE(AMDGPU::OpName::update_pred)
> + OPERAND_CASE(AMDGPU::OpName::write)
> + OPERAND_CASE(AMDGPU::OpName::omod)
> + OPERAND_CASE(AMDGPU::OpName::dst_rel)
> + OPERAND_CASE(AMDGPU::OpName::clamp)
> + OPERAND_CASE(AMDGPU::OpName::src0)
> + OPERAND_CASE(AMDGPU::OpName::src0_neg)
> + OPERAND_CASE(AMDGPU::OpName::src0_rel)
> + OPERAND_CASE(AMDGPU::OpName::src0_abs)
> + OPERAND_CASE(AMDGPU::OpName::src0_sel)
> + OPERAND_CASE(AMDGPU::OpName::src1)
> + OPERAND_CASE(AMDGPU::OpName::src1_neg)
> + OPERAND_CASE(AMDGPU::OpName::src1_rel)
> + OPERAND_CASE(AMDGPU::OpName::src1_abs)
> + OPERAND_CASE(AMDGPU::OpName::src1_sel)
> + OPERAND_CASE(AMDGPU::OpName::pred_sel)
> default:
> llvm_unreachable("Wrong Operand");
> }
> @@ -929,12 +930,6 @@ getSlotedOps(R600Operands::Ops Op, unsigned Slot) {
>
> #undef OPERAND_CASE
>
> -static int
> -getVecOperandIdx(R600Operands::VecOps Op) {
> - return 1 + Op;
> -}
> -
> -
> MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
> MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
> const {
> @@ -947,31 +942,31 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
> Opcode = AMDGPU::DOT4_eg;
> MachineBasicBlock::iterator I = MI;
> MachineOperand &Src0 = MI->getOperand(
> - getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot)));
> + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
> MachineOperand &Src1 = MI->getOperand(
> - getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot)));
> + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
> MachineInstr *MIB = buildDefaultInstruction(
> MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
> - static const R600Operands::Ops Operands[14] = {
> - R600Operands::UPDATE_EXEC_MASK,
> - R600Operands::UPDATE_PREDICATE,
> - R600Operands::WRITE,
> - R600Operands::OMOD,
> - R600Operands::DST_REL,
> - R600Operands::CLAMP,
> - R600Operands::SRC0_NEG,
> - R600Operands::SRC0_REL,
> - R600Operands::SRC0_ABS,
> - R600Operands::SRC0_SEL,
> - R600Operands::SRC1_NEG,
> - R600Operands::SRC1_REL,
> - R600Operands::SRC1_ABS,
> - R600Operands::SRC1_SEL,
> + static const unsigned Operands[14] = {
> + AMDGPU::OpName::update_exec_mask,
> + AMDGPU::OpName::update_pred,
> + AMDGPU::OpName::write,
> + AMDGPU::OpName::omod,
> + AMDGPU::OpName::dst_rel,
> + AMDGPU::OpName::clamp,
> + AMDGPU::OpName::src0_neg,
> + AMDGPU::OpName::src0_rel,
> + AMDGPU::OpName::src0_abs,
> + AMDGPU::OpName::src0_sel,
> + AMDGPU::OpName::src1_neg,
> + AMDGPU::OpName::src1_rel,
> + AMDGPU::OpName::src1_abs,
> + AMDGPU::OpName::src1_sel,
> };
>
> for (unsigned i = 0; i < 14; i++) {
> MachineOperand &MO = MI->getOperand(
> - getVecOperandIdx(getSlotedOps(Operands[i], Slot)));
> + getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
> assert (MO.isImm());
> setImmOperand(MIB, Operands[i], MO.getImm());
> }
> @@ -985,56 +980,19 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
> uint64_t Imm) const {
> MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
> AMDGPU::ALU_LITERAL_X);
> - setImmOperand(MovImm, R600Operands::IMM, Imm);
> + setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
> return MovImm;
> }
>
> -int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
> - R600Operands::Ops Op) const {
> - return getOperandIdx(MI.getOpcode(), Op);
> -}
> -
> -int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
> - R600Operands::VecOps Op) const {
> +int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
> return getOperandIdx(MI.getOpcode(), Op);
> }
>
> -int R600InstrInfo::getOperandIdx(unsigned Opcode,
> - R600Operands::Ops Op) const {
> - unsigned TargetFlags = get(Opcode).TSFlags;
> - unsigned OpTableIdx;
> -
> - if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
> - switch (Op) {
> - case R600Operands::DST: return 0;
> - case R600Operands::SRC0: return 1;
> - case R600Operands::SRC1: return 2;
> - case R600Operands::SRC2: return 3;
> - default:
> - assert(!"Unknown operand type for instruction");
> - return -1;
> - }
> - }
> -
> - if (TargetFlags & R600_InstFlag::OP1) {
> - OpTableIdx = 0;
> - } else if (TargetFlags & R600_InstFlag::OP2) {
> - OpTableIdx = 1;
> - } else {
> - assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
> - "for this instruction");
> - OpTableIdx = 2;
> - }
> -
> - return R600Operands::ALUOpTable[OpTableIdx][Op];
> -}
> -
> -int R600InstrInfo::getOperandIdx(unsigned Opcode,
> - R600Operands::VecOps Op) const {
> - return Op + 1;
> +int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
> + return AMDGPU::getNamedOperandIdx(Opcode, Op);
> }
>
> -void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
> +void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
> int64_t Imm) const {
> int Idx = getOperandIdx(*MI, Op);
> assert(Idx != -1 && "Operand not supported for this instruction.");
> @@ -1062,20 +1020,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
> bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
> switch (Flag) {
> case MO_FLAG_CLAMP:
> - FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
> + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
> break;
> case MO_FLAG_MASK:
> - FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
> + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
> break;
> case MO_FLAG_NOT_LAST:
> case MO_FLAG_LAST:
> - FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
> + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
> break;
> case MO_FLAG_NEG:
> switch (SrcIdx) {
> - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
> - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
> - case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
> + case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
> + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
> + case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
> }
> break;
>
> @@ -1084,8 +1042,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
> "instructions.");
> (void)IsOP3;
> switch (SrcIdx) {
> - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
> - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
> + case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
> + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
> }
> break;
>
> diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
> index 6a11c63..f06abf6 100644
> --- a/lib/Target/R600/R600InstrInfo.h
> +++ b/lib/Target/R600/R600InstrInfo.h
> @@ -210,17 +210,15 @@ namespace llvm {
> /// \brief Get the index of Op in the MachineInstr.
> ///
> /// \returns -1 if the Instruction does not contain the specified \p Op.
> - int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const;
> - int getOperandIdx(const MachineInstr &MI, R600Operands::VecOps Op) const;
> + int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
>
> /// \brief Get the index of \p Op for the given Opcode.
> ///
> /// \returns -1 if the Instruction does not contain the specified \p Op.
> - int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const;
> - int getOperandIdx(unsigned Opcode, R600Operands::VecOps Op) const;
> + int getOperandIdx(unsigned Opcode, unsigned Op) const;
>
> /// \brief Helper function for setting instruction flag values.
> - void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const;
> + void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
>
> /// \returns true if this instruction has an operand for storing target flags.
> bool hasFlagOperand(const MachineInstr &MI) const;
> diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
> index da614c7..6024fd5 100644
> --- a/lib/Target/R600/R600Packetizer.cpp
> +++ b/lib/Target/R600/R600Packetizer.cpp
> @@ -79,7 +79,7 @@ private:
> continue;
> if (TII->isTransOnly(BI))
> continue;
> - int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE);
> + int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
> if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
> continue;
> unsigned Dst = BI->getOperand(0).getReg();
> @@ -112,10 +112,10 @@ private:
>
> void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
> const {
> - R600Operands::Ops Ops[] = {
> - R600Operands::SRC0,
> - R600Operands::SRC1,
> - R600Operands::SRC2
> + unsigned Ops[] = {
> + AMDGPU::OpName::src0,
> + AMDGPU::OpName::src1,
> + AMDGPU::OpName::src2
> };
> for (unsigned i = 0; i < 3; i++) {
> int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
> @@ -164,8 +164,8 @@ public:
> if (getSlot(MII) <= getSlot(MIJ))
> return false;
> // Does MII and MIJ share the same pred_sel ?
> - int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL),
> - OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL);
> + int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
> + OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel);
> unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
> PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
> if (PredI != PredJ)
> @@ -191,7 +191,7 @@ public:
> bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;}
>
> void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
> - unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST);
> + unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
> MI->getOperand(LastOp).setImm(Bit);
> }
>
> @@ -230,7 +230,7 @@ public:
> for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) {
> MachineInstr *MI = CurrentPacketMIs[i];
> unsigned Op = TII->getOperandIdx(MI->getOpcode(),
> - R600Operands::BANK_SWIZZLE);
> + AMDGPU::OpName::bank_swizzle);
> MI->getOperand(Op).setImm(BS[i]);
> }
> }
> --
> 1.7.11.4
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
More information about the llvm-commits
mailing list