[PATCH 5/9] R600/SI: add post ISel folding for SI

Tom Stellard tom at stellard.net
Mon Feb 25 18:04:41 PST 2013


On Mon, Feb 25, 2013 at 03:49:00PM +0100, Christian König wrote:
> From: Christian König <christian.koenig at amd.com>
> 
> Include immediate folding and SGPR limit handling for VOP3 instructions.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>  lib/Target/R600/AMDILISelDAGToDAG.cpp |    1 +
>  lib/Target/R600/SIISelLowering.cpp    |  205 ++++++++++++++++++++++++++++++++-
>  lib/Target/R600/SIISelLowering.h      |    9 ++
>  lib/Target/R600/SIInstrFormats.td     |    2 +
>  lib/Target/R600/SIInstrInfo.td        |   17 ++-
>  lib/Target/R600/SIInstructions.td     |   22 ++--
>  6 files changed, 233 insertions(+), 23 deletions(-)
> 
> diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> index 2f70fa9..e77b9dc 100644
> --- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
> +++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
> @@ -16,6 +16,7 @@
>  #include "AMDGPURegisterInfo.h"
>  #include "AMDILDevices.h"
>  #include "R600InstrInfo.h"
> +#include "SIISelLowering.h"
>  #include "llvm/ADT/ValueMap.h"
>  #include "llvm/CodeGen/PseudoSourceValue.h"
>  #include "llvm/CodeGen/SelectionDAGISel.h"
> diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
> index 13173e8..bf1f3bf 100644
> --- a/lib/Target/R600/SIISelLowering.cpp
> +++ b/lib/Target/R600/SIISelLowering.cpp
> @@ -26,7 +26,8 @@ using namespace llvm;
>  
>  SITargetLowering::SITargetLowering(TargetMachine &TM) :
>      AMDGPUTargetLowering(TM),
> -    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())) {
> +    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())),
> +    TRI(TM.getRegisterInfo()) {
>    addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
>    addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
>    addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
> @@ -358,8 +359,206 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
>    return SDValue();
>  }
>  
> +/// \brief Test if RegClass is one of the VSrc classes 
> +static bool isVSrc(unsigned RegClass) {
> +  return AMDGPU::VSrc_32RegClassID == RegClass ||
> +         AMDGPU::VSrc_64RegClassID == RegClass;
> +}
> +
> +/// \brief Test if RegClass is one of the SSrc classes 
> +static bool isSSrc(unsigned RegClass) {
> +  return AMDGPU::SSrc_32RegClassID == RegClass ||
> +         AMDGPU::SSrc_64RegClassID == RegClass;
> +}
> +
> +/// \brief Analyze the possible immediate value N
> +///
> +/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate
> +/// and the immediate value if it's a literal immediate
> +int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
> +
> +  union {
> +    int32_t I;
> +    float F;
> +  } Imm;
> +
> +  if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N))
> +    Imm.I = Node->getSExtValue();
> +  else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N))
> +    Imm.F = Node->getValueAPF().convertToFloat();
> +  else
> +    return -1; // It isn't an immediate
> +
> +  if ((Imm.I >= -16 && Imm.I <= 64) ||
> +      Imm.F == 0.5f || Imm.F == -0.5f ||
> +      Imm.F == 1.0f || Imm.F == -1.0f ||
> +      Imm.F == 2.0f || Imm.F == -2.0f ||
> +      Imm.F == 4.0f || Imm.F == -4.0f)
> +    return 0; // It's an inline immediate
> +
> +  return Imm.I; // It's a literal immediate
> +}
> +
> +/// \brief Try to fold an immediate directly into an instruction
> +bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
> +                               bool &ScalarSlotUsed) const {
> +
> +  MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
> +  if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
> +    return false;
> +
> +  const SDValue &Op = Mov->getOperand(0);
> +  int32_t Value = analyzeImmediate(Op.getNode());
> +  if (Value == -1) {
> +    // Not an immediate at all
> +    return false;
> +
> +  } else if (Value == 0) {
> +    // Inline immediates can always be folded
> +    Operand = Op;
> +    return true;
> +
> +  } else if (Value == Immediate) {
> +    // The same literal immediate was already folded, reuse it
> +    Operand = Op;
> +    return true;
> +
> +  } else if (!ScalarSlotUsed && !Immediate) {
> +    // Fold this literal immediate
> +    ScalarSlotUsed = true;
> +    Immediate = Value;
> +    Operand = Op;
> +    return true;
> +
> +  }
> +
> +  return false;
> +}
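
To make sure I follow the single-literal accounting, here is the
per-operand decision reduced to a standalone mock (simplified types,
illustration only -- the real foldImm additionally requires the operand
to be a materialized MOV of the constant):

  #include <cstdint>
  #include <cstdio>

  // Mirrors the branch structure of foldImm for a single operand whose
  // analyzed value is 'Value' (-1: no immediate, 0: inline, else literal).
  // 'Immediate' and 'ScalarSlotUsed' are the running per-instruction state.
  static bool foldDecision(int32_t Value, int32_t &Immediate,
                           bool &ScalarSlotUsed) {
    if (Value == -1)
      return false;                // not an immediate at all
    if (Value == 0)
      return true;                 // inline immediates always fold
    if (Value == Immediate)
      return true;                 // same literal as already chosen
    if (!ScalarSlotUsed && !Immediate) {
      ScalarSlotUsed = true;       // claim the single literal/scalar slot
      Immediate = Value;
      return true;
    }
    return false;                  // a second distinct literal can't fold
  }

  int main() {
    int32_t Immediate = 0;         // e32: literal slot starts out free
    bool ScalarSlotUsed = false;
    std::printf("%d\n", foldDecision(0x41200000, Immediate, ScalarSlotUsed)); // 1
    std::printf("%d\n", foldDecision(0x41200000, Immediate, ScalarSlotUsed)); // 1
    std::printf("%d\n", foldDecision(0x42280000, Immediate, ScalarSlotUsed)); // 0
    return 0;
  }

One more observation: PostISelFolding below seeds Immediate with -1 for
the 8-byte encodings, and since the last branch above tests !Immediate,
the literal-claiming path can never trigger for VOP3 -- only inline
immediates get folded there, which matches my understanding that the
64-bit encodings can't carry a literal.
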
> +
> +/// \brief Does "Op" fit into register class "RegClass"?
> +bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op,
> +                                    unsigned RegClass) const {
> +
> +  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 
> +  SDNode *Node = Op.getNode();
> +
> +  int OpClass;
> +  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) {
> +    const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode());
> +    OpClass = Desc.OpInfo[Op.getResNo()].RegClass;
> +
> +  } else if (Node->getOpcode() == ISD::CopyFromReg) {
> +    RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode());
> +    OpClass = MRI.getRegClass(Reg->getReg())->getID();
> +
> +  } else
> +    return false;
> +
> +  if (OpClass == -1)
> +    return false;
> +
> +  return TRI->getRegClass(RegClass)->hasSubClassEq(TRI->getRegClass(OpClass));
> +}
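
One detail that took me a second here: the containment test runs in the
direction required-class >= operand-class, i.e. hasSubClassEq() on the
required class returns true when the operand's class is equal to or a
subclass of it. So a VReg_32 value fits a VSrc_32 requirement, but an
SReg_32 value won't fit a VReg_32 one, assuming I read the register
definitions right.
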
> +
> +/// \brief Make sure that we don't exceed the number of allowed scalars
> +void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
> +                                       unsigned RegClass,
> +                                       bool &ScalarSlotUsed) const {
> +
> +  // First map the operand's register class to a destination class
> +  if (RegClass == AMDGPU::VSrc_32RegClassID)
> +    RegClass = AMDGPU::VReg_32RegClassID;
> +  else if (RegClass == AMDGPU::VSrc_64RegClassID)
> +    RegClass = AMDGPU::VReg_64RegClassID;
> +  else
> +    return;
> +
> +  // Nothing to do if the operand already fits
> +  if (fitsRegClass(DAG, Operand, RegClass))
> +    return;
> +
> +  // If the scalar slot isn't used yet, use it now
> +  if (!ScalarSlotUsed) {
> +    ScalarSlotUsed = true;
> +    return;
> +  }
> +
> +  // This is a conservative approach; we may fail to determine the correct
> +  // register class and copy more often than necessary, but better safe than sorry.
> +  SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
> +  SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(),
> +                                    Operand.getValueType(), Operand, RC);
> +  Operand = SDValue(Node, 0);
> +}
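
Piecing foldImm and ensureSRegLimit together, the per-instruction
accounting comes out like this if I read it right (my summary; the
hardware motivation, as far as I know, is that a VALU instruction can
read at most one SGPR or one literal per instruction):

  operand kind           scalar slot free?   result
  ---------------------  ------------------  ------------------------------
  inline immediate       either              folded, slot untouched
  first literal          yes                 folded, slot claimed
  different literal      no                  stays in its MOV's register
  first SGPR operand     yes                 used directly, slot claimed
  further SGPR operands  no                  COPY_TO_REGCLASS into a VGPR

The COPY_TO_REGCLASS fallback at the end is what keeps the instruction
encodable once the slot is taken; the copy later becomes a plain move
into a vector register.
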
> +
>  SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
>                                            SelectionDAG &DAG) const {
> -  // TODO: Implement immediate folding
> -  return Node;
> +
> +  // Original encoding (either e32 or e64)
> +  int Opcode = Node->getMachineOpcode();
> +  const MCInstrDesc *Desc = &TII->get(Opcode);
> +
> +  unsigned NumDefs = Desc->getNumDefs();
> +  unsigned NumOps = Desc->getNumOperands();
> +
> +  int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
> +  bool HaveVSrc = false, HaveSSrc = false;
> +
> +  // First figure out what we already have in this instruction
> +  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
> +       i != e && Op < NumOps; ++i, ++Op) {
> +
> +    unsigned RegClass = Desc->OpInfo[Op].RegClass;
> +    if (isVSrc(RegClass))
> +      HaveVSrc = true;
> +    else if (isSSrc(RegClass))
> +      HaveSSrc = true;
> +    else
> +      continue;
> +
> +    int32_t Imm = analyzeImmediate(Node->getOperand(i).getNode());
> +    if (Imm != -1 && Imm != 0) {
> +      // Literal immediate
> +      Immediate = Imm;
> +    }
> +  }
> +
> +  // If we have neither a VSrc nor an SSrc operand, it makes no sense to continue
> +  if (!HaveVSrc && !HaveSSrc)
> +    return Node;
> +
> +  // With both VSrc and SSrc operands the scalar slot is already in use
> +  bool ScalarSlotUsed = HaveVSrc && HaveSSrc;
> +
> +  // Second go over the operands and try to fold them
> +  std::vector<SDValue> Ops;
> +  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
> +       i != e && Op < NumOps; ++i, ++Op) {
> +
> +    const SDValue &Operand = Node->getOperand(i);
> +    Ops.push_back(Operand);
> +
> +    // Already folded immediate?
> +    if (isa<ConstantSDNode>(Operand.getNode()) ||
> +        isa<ConstantFPSDNode>(Operand.getNode()))
> +      continue;
> +
> +    // Is this a VSrc or SSrc operand?
> +    unsigned RegClass = Desc->OpInfo[Op].RegClass;
> +    if (!isVSrc(RegClass) && !isSSrc(RegClass))
> +      continue;
> +
> +    // Try to fold the immediates
> +    if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
> +      // Folding didn't work, so make sure we don't hit the SReg limit
> +      ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed);
> +    }
> +  }
> +
> +  // Add optional chain and glue
> +  for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
> +    Ops.push_back(Node->getOperand(i));
> +
> +  // Update the instruction parameters
> +  return DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
>  }
> diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
> index 71f99ac..737162f 100644
> --- a/lib/Target/R600/SIISelLowering.h
> +++ b/lib/Target/R600/SIISelLowering.h
> @@ -22,6 +22,7 @@ namespace llvm {
>  
>  class SITargetLowering : public AMDGPUTargetLowering {
>    const SIInstrInfo * TII;
> +  const TargetRegisterInfo * TRI;
>  
>    void LowerMOV_IMM(MachineInstr *MI, MachineBasicBlock &BB,
>                MachineBasicBlock::iterator I, unsigned Opocde) const;
> @@ -34,6 +35,12 @@ class SITargetLowering : public AMDGPUTargetLowering {
>    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
>    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
>  
> +  bool foldImm(SDValue &Operand, int32_t &Immediate,
> +               bool &ScalarSlotUsed) const;
> +  bool fitsRegClass(SelectionDAG &DAG, SDValue &Op, unsigned RegClass) const;
> +  void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, 
> +                       unsigned RegClass, bool &ScalarSlotUsed) const;
> +
>  public:
>    SITargetLowering(TargetMachine &tm);
>    virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
> @@ -42,6 +49,8 @@ public:
>    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
>    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
>    virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
> +
> +  int32_t analyzeImmediate(const SDNode *N) const;
>  };
>  
>  } // End namespace llvm
> diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td
> index fe417d6..2689166 100644
> --- a/lib/Target/R600/SIInstrFormats.td
> +++ b/lib/Target/R600/SIInstrFormats.td
> @@ -208,6 +208,7 @@ class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
>    let mayLoad = 0;
>    let mayStore = 0;
>    let hasSideEffects = 0;
> +  let hasExtraSrcRegAllocReq = 1;
>  }
>  
>  class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
> @@ -234,6 +235,7 @@ class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
>    let mayLoad = 0;
>    let mayStore = 0;
>    let hasSideEffects = 0;
> +  let hasExtraSrcRegAllocReq = 1;

I'm curious, what does the hasExtraSrcRegAllocReq flag do for the VOP3
instructions?

-Tom

>  }
>  
>  class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
> diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
> index d379185..075c013 100644
> --- a/lib/Target/R600/SIInstrInfo.td
> +++ b/lib/Target/R600/SIInstrInfo.td
> @@ -40,11 +40,10 @@ def IMM12bit : ImmLeaf <
>    [{return isUInt<12>(Imm);}]
>  >;
>  
> -class InlineImm <ValueType vt> : ImmLeaf <vt, [{
> -  return -16 <= Imm && Imm <= 64;
> +class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
> +  return ((const SITargetLowering &)TLI).analyzeImmediate(N) == 0;
>  }]>;
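
Nice side effect of this change: the pattern predicate now shares a
single definition of "inline" with the folding code above, and it also
matches the floating-point inline constants (+/-0.5, +/-1.0, +/-2.0,
+/-4.0) that the old integer-only range check couldn't see.
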
>  
> -
>  //===----------------------------------------------------------------------===//
>  // SI assembler operands
>  //===----------------------------------------------------------------------===//
> @@ -181,7 +180,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
>    def _e64 : VOP3 <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs vrc:$dst),
> -    (ins arc:$src0, vrc:$src1,
> +    (ins arc:$src0, arc:$src1,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> @@ -206,7 +205,7 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern> {
>    def _e64 : VOP3b <
>      {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs VReg_32:$dst),
> -    (ins VSrc_32:$src0, VReg_32:$src1,
> +    (ins VSrc_32:$src0, VSrc_32:$src1,
>           i32imm:$abs, i32imm:$clamp,
>           i32imm:$omod, i32imm:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
> @@ -228,12 +227,12 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
>    def _e64 : VOP3 <
>      {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
>      (outs SReg_64:$dst),
> -    (ins arc:$src0, vrc:$src1,
> +    (ins arc:$src0, arc:$src1,
>           InstFlag:$abs, InstFlag:$clamp,
>           InstFlag:$omod, InstFlag:$neg),
>      opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg",
>      !if(!eq(!cast<string>(cond), "COND_NULL"), []<dag>,
> -      [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), vrc:$src1, cond)))]
> +      [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
>      )
>    > {
>      let SRC2 = SIOperand.ZERO;
> @@ -250,14 +249,14 @@ multiclass VOPC_64 <bits<8> op, string opName,
>  
>  class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    op, (outs VReg_32:$dst),
> -  (ins VSrc_32:$src0, VReg_32:$src1, VReg_32:$src2,
> +  (ins VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>  >;
>  
>  class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
>    op, (outs VReg_64:$dst),
> -  (ins VSrc_64:$src0, VReg_64:$src1, VReg_64:$src2,
> +  (ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
>     i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg),
>    opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>  >;
> diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
> index f999025..822be18 100644
> --- a/lib/Target/R600/SIInstructions.td
> +++ b/lib/Target/R600/SIInstructions.td
> @@ -732,17 +732,17 @@ def V_CNDMASK_B32_e32 : VOP2 <0x00000000, (outs VReg_32:$dst),
>  }
>  
>  def V_CNDMASK_B32_e64 : VOP3 <0x00000100, (outs VReg_32:$dst),
> -  (ins VReg_32:$src0, VReg_32:$src1, SReg_64:$src2,
> +  (ins VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2,
>     InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg),
>    "V_CNDMASK_B32_e64 $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg",
> -  [(set (i32 VReg_32:$dst), (select (i1 SReg_64:$src2),
> -   VReg_32:$src1, VReg_32:$src0))]
> +  [(set (i32 VReg_32:$dst), (select (i1 SSrc_64:$src2),
> +   VSrc_32:$src1, VSrc_32:$src0))]
>  >;
>  
>  //f32 pattern for V_CNDMASK_B32_e64
>  def : Pat <
> -  (f32 (select (i1 SReg_64:$src2), VReg_32:$src1, VReg_32:$src0)),
> -  (V_CNDMASK_B32_e64 VReg_32:$src0, VReg_32:$src1, SReg_64:$src2)
> +  (f32 (select (i1 SSrc_64:$src2), VSrc_32:$src1, VSrc_32:$src0)),
> +  (V_CNDMASK_B32_e64 VSrc_32:$src0, VSrc_32:$src1, SSrc_64:$src2)
>  >;
>  
>  defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
> @@ -895,7 +895,7 @@ def V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
>  def V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
>  def : Pat <
>    (mul VSrc_32:$src0, VReg_32:$src1),
> -  (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 SIOperand.ZERO), 0, 0, 0, 0)
> +  (V_MUL_LO_I32 VSrc_32:$src0, VReg_32:$src1, (i32 0), 0, 0, 0, 0)
>  >;
>  def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
>  def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
> @@ -1219,19 +1219,19 @@ def : BitConvert <f32, i32, VReg_32>;
>  
>  def : Pat <
>    (int_AMDIL_clamp VReg_32:$src, (f32 FP_ZERO), (f32 FP_ONE)),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     0 /* ABS */, 1 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */)
>  >;
>  
>  def : Pat <
>    (fabs VReg_32:$src),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     1 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 0 /* NEG */)
>  >;
>  
>  def : Pat <
>    (fneg VReg_32:$src),
> -  (V_ADD_F32_e64 VReg_32:$src, (i32 0x80 /* SRC1 */),
> +  (V_ADD_F32_e64 VReg_32:$src, (i32 0 /* SRC1 */),
>     0 /* ABS */, 0 /* CLAMP */, 0 /* OMOD */, 1 /* NEG */)
>  >;
>  
> @@ -1394,8 +1394,8 @@ def : Pat <
>  /**********   VOP3 Patterns    **********/
>  /********** ================== **********/
>  
> -def : Pat <(f32 (fadd (fmul VSrc_32:$src0, VReg_32:$src1), VReg_32:$src2)),
> -           (V_MAD_F32 VSrc_32:$src0, VReg_32:$src1, VReg_32:$src2,
> +def : Pat <(f32 (fadd (fmul VSrc_32:$src0, VSrc_32:$src1), VSrc_32:$src2)),
> +           (V_MAD_F32 VSrc_32:$src0, VSrc_32:$src1, VSrc_32:$src2,
>              0, 0, 0, 0)>;
>  
>  /********** ================== **********/
> -- 
> 1.7.10.4
> 
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits



