[llvm] r239657 - R600 -> AMDGPU rename

Tom Stellard thomas.stellard at amd.com
Fri Jun 12 20:28:16 PDT 2015


Removed: llvm/trunk/lib/Target/R600/R600InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600InstrInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600InstrInfo.cpp (removed)
@@ -1,1435 +0,0 @@
-//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief R600 Implementation of TargetInstrInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "R600InstrInfo.h"
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDGPUTargetMachine.h"
-#include "R600Defines.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-#define GET_INSTRINFO_CTOR_DTOR
-#include "AMDGPUGenDFAPacketizer.inc"
-
-R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
-    : AMDGPUInstrInfo(st), RI() {}
-
-const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
-  return RI;
-}
-
-bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
-}
-
-bool R600InstrInfo::isVector(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
-}
-
-void
-R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MI, DebugLoc DL,
-                           unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const {
-  unsigned VectorComponents = 0;
-  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
-      AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
-      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
-       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
-    VectorComponents = 4;
-  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
-            AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
-            (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
-             AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
-    VectorComponents = 2;
-  }
-
-  if (VectorComponents > 0) {
-    for (unsigned I = 0; I < VectorComponents; I++) {
-      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
-      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
-                              RI.getSubReg(DestReg, SubRegIndex),
-                              RI.getSubReg(SrcReg, SubRegIndex))
-                              .addReg(DestReg,
-                                      RegState::Define | RegState::Implicit);
-    }
-  } else {
-    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
-                                                  DestReg, SrcReg);
-    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
-                                    .setIsKill(KillSrc);
-  }
-}
-
-/// \returns true if \p MBBI can be moved into a new basic block.
-bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
-                                       MachineBasicBlock::iterator MBBI) const {
-  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
-                                        E = MBBI->operands_end(); I != E; ++I) {
-    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
-        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
-      return false;
-  }
-  return true;
-}
-
-bool R600InstrInfo::isMov(unsigned Opcode) const {
-  switch (Opcode) {
-  default: return false;
-  case AMDGPU::MOV:
-  case AMDGPU::MOV_IMM_F32:
-  case AMDGPU::MOV_IMM_I32:
-    return true;
-  }
-}
-
-// Some instructions act as placeholders to emulate operations that the GPU
-// hardware does automatically. This function can be used to check if
-// an opcode falls into this category.
-bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
-  switch (Opcode) {
-  default: return false;
-  case AMDGPU::RETURN:
-    return true;
-  }
-}
-
-bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
-  return false;
-}
-
-bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
-  switch (Opcode) {
-    default: return false;
-    case AMDGPU::CUBE_r600_pseudo:
-    case AMDGPU::CUBE_r600_real:
-    case AMDGPU::CUBE_eg_pseudo:
-    case AMDGPU::CUBE_eg_real:
-      return true;
-  }
-}
-
-bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
-  unsigned TargetFlags = get(Opcode).TSFlags;
-
-  return (TargetFlags & R600_InstFlag::ALU_INST);
-}
-
-bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
-  unsigned TargetFlags = get(Opcode).TSFlags;
-
-  return ((TargetFlags & R600_InstFlag::OP1) |
-          (TargetFlags & R600_InstFlag::OP2) |
-          (TargetFlags & R600_InstFlag::OP3));
-}
-
-bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
-  unsigned TargetFlags = get(Opcode).TSFlags;
-
-  return ((TargetFlags & R600_InstFlag::LDS_1A) |
-          (TargetFlags & R600_InstFlag::LDS_1A1D) |
-          (TargetFlags & R600_InstFlag::LDS_1A2D));
-}
-
-bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
-  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
-}
-
-bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
-  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
-}
-
-bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
-  if (isALUInstr(MI->getOpcode()))
-    return true;
-  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
-    return true;
-  switch (MI->getOpcode()) {
-  case AMDGPU::PRED_X:
-  case AMDGPU::INTERP_PAIR_XY:
-  case AMDGPU::INTERP_PAIR_ZW:
-  case AMDGPU::INTERP_VEC_LOAD:
-  case AMDGPU::COPY:
-  case AMDGPU::DOT_4:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
-  if (ST.hasCaymanISA())
-    return false;
-  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
-}
-
-bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
-  return isTransOnly(MI->getOpcode());
-}
-
-bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
-  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
-}
-
-bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
-  return isVectorOnly(MI->getOpcode());
-}
-
-bool R600InstrInfo::isExport(unsigned Opcode) const {
-  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
-}
-
-bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
-  return ST.hasVertexCache() && IS_VTX(get(Opcode));
-}
-
-bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
-  const MachineFunction *MF = MI->getParent()->getParent();
-  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
-  return MFI->getShaderType() != ShaderType::COMPUTE &&
-    usesVertexCache(MI->getOpcode());
-}
-
-bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
-  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
-}
-
-bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
-  const MachineFunction *MF = MI->getParent()->getParent();
-  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
-  return (MFI->getShaderType() == ShaderType::COMPUTE &&
-          usesVertexCache(MI->getOpcode())) ||
-    usesTextureCache(MI->getOpcode());
-}
-
-bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
-  switch (Opcode) {
-  case AMDGPU::KILLGT:
-  case AMDGPU::GROUP_BARRIER:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
-  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
-}
-
-bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
-  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
-}
-
-bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
-  if (!isALUInstr(MI->getOpcode())) {
-    return false;
-  }
-  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
-                                        E = MI->operands_end(); I != E; ++I) {
-    if (!I->isReg() || !I->isUse() ||
-        TargetRegisterInfo::isVirtualRegister(I->getReg()))
-      continue;
-
-    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
-      return true;
-  }
-  return false;
-}
-
-int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
-  static const unsigned OpTable[] = {
-    AMDGPU::OpName::src0,
-    AMDGPU::OpName::src1,
-    AMDGPU::OpName::src2
-  };
-
-  assert (SrcNum < 3);
-  return getOperandIdx(Opcode, OpTable[SrcNum]);
-}
-
-int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
-  static const unsigned SrcSelTable[][2] = {
-    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
-    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
-    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
-    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
-    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
-    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
-    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
-    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
-    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
-    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
-    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
-  };
-
-  for (const auto &Row : SrcSelTable) {
-    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
-      return getOperandIdx(Opcode, Row[1]);
-    }
-  }
-  return -1;
-}
-
-SmallVector<std::pair<MachineOperand *, int64_t>, 3>
-R600InstrInfo::getSrcs(MachineInstr *MI) const {
-  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
-
-  if (MI->getOpcode() == AMDGPU::DOT_4) {
-    static const unsigned OpTable[8][2] = {
-      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
-      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
-      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
-      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
-      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
-      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
-      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
-      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
-    };
-
-    for (unsigned j = 0; j < 8; j++) {
-      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
-                                                        OpTable[j][0]));
-      unsigned Reg = MO.getReg();
-      if (Reg == AMDGPU::ALU_CONST) {
-        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
-                                                    OpTable[j][1])).getImm();
-        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
-        continue;
-      }
-
-    }
-    return Result;
-  }
-
-  static const unsigned OpTable[3][2] = {
-    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
-    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
-    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
-  };
-
-  for (unsigned j = 0; j < 3; j++) {
-    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
-    if (SrcIdx < 0)
-      break;
-    MachineOperand &MO = MI->getOperand(SrcIdx);
-    unsigned Reg = MI->getOperand(SrcIdx).getReg();
-    if (Reg == AMDGPU::ALU_CONST) {
-      unsigned Sel = MI->getOperand(
-          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
-      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
-      continue;
-    }
-    if (Reg == AMDGPU::ALU_LITERAL_X) {
-      unsigned Imm = MI->getOperand(
-          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
-      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
-      continue;
-    }
-    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
-  }
-  return Result;
-}
-
-std::vector<std::pair<int, unsigned> >
-R600InstrInfo::ExtractSrcs(MachineInstr *MI,
-                           const DenseMap<unsigned, unsigned> &PV,
-                           unsigned &ConstCount) const {
-  ConstCount = 0;
-  ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
-  const std::pair<int, unsigned> DummyPair(-1, 0);
-  std::vector<std::pair<int, unsigned> > Result;
-  unsigned i = 0;
-  for (unsigned n = Srcs.size(); i < n; ++i) {
-    unsigned Reg = Srcs[i].first->getReg();
-    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
-    if (Reg == AMDGPU::OQAP) {
-      Result.push_back(std::pair<int, unsigned>(Index, 0));
-    }
-    if (PV.find(Reg) != PV.end()) {
-      // 255 is used to tell it's a PS/PV reg
-      Result.push_back(std::pair<int, unsigned>(255, 0));
-      continue;
-    }
-    if (Index > 127) {
-      ConstCount++;
-      Result.push_back(DummyPair);
-      continue;
-    }
-    unsigned Chan = RI.getHWRegChan(Reg);
-    Result.push_back(std::pair<int, unsigned>(Index, Chan));
-  }
-  for (; i < 3; ++i)
-    Result.push_back(DummyPair);
-  return Result;
-}
-
-static std::vector<std::pair<int, unsigned> >
-Swizzle(std::vector<std::pair<int, unsigned> > Src,
-        R600InstrInfo::BankSwizzle Swz) {
-  if (Src[0] == Src[1])
-    Src[1].first = -1;
-  switch (Swz) {
-  case R600InstrInfo::ALU_VEC_012_SCL_210:
-    break;
-  case R600InstrInfo::ALU_VEC_021_SCL_122:
-    std::swap(Src[1], Src[2]);
-    break;
-  case R600InstrInfo::ALU_VEC_102_SCL_221:
-    std::swap(Src[0], Src[1]);
-    break;
-  case R600InstrInfo::ALU_VEC_120_SCL_212:
-    std::swap(Src[0], Src[1]);
-    std::swap(Src[0], Src[2]);
-    break;
-  case R600InstrInfo::ALU_VEC_201:
-    std::swap(Src[0], Src[2]);
-    std::swap(Src[0], Src[1]);
-    break;
-  case R600InstrInfo::ALU_VEC_210:
-    std::swap(Src[0], Src[2]);
-    break;
-  }
-  return Src;
-}
-
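As a worked example of the permutations above: under ALU_VEC_210 the
vector-slot sources (a, b, c) are read as (c, b, a), under
ALU_VEC_021_SCL_122 they are read as (a, c, b), and the identity swizzle
ALU_VEC_012_SCL_210 leaves them untouched.
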
-static unsigned
-getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
-  switch (Swz) {
-  case R600InstrInfo::ALU_VEC_012_SCL_210: {
-    unsigned Cycles[3] = { 2, 1, 0};
-    return Cycles[Op];
-  }
-  case R600InstrInfo::ALU_VEC_021_SCL_122: {
-    unsigned Cycles[3] = { 1, 2, 2};
-    return Cycles[Op];
-  }
-  case R600InstrInfo::ALU_VEC_120_SCL_212: {
-    unsigned Cycles[3] = { 2, 1, 2};
-    return Cycles[Op];
-  }
-  case R600InstrInfo::ALU_VEC_102_SCL_221: {
-    unsigned Cycles[3] = { 2, 2, 1};
-    return Cycles[Op];
-  }
-  default:
-    llvm_unreachable("Wrong Swizzle for Trans Slot");
-    return 0;
-  }
-}
-
-/// \returns how many MIs (whose inputs are represented by IGSrcs) can be
-/// packed in the same Instruction Group while meeting read port limitations
-/// given the swizzle sequence Swz.
-unsigned R600InstrInfo::isLegalUpTo(
-    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
-    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
-    const std::vector<std::pair<int, unsigned> > &TransSrcs,
-    R600InstrInfo::BankSwizzle TransSwz) const {
-  int Vector[4][3];
-  memset(Vector, -1, sizeof(Vector));
-  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
-    const std::vector<std::pair<int, unsigned> > &Srcs =
-        Swizzle(IGSrcs[i], Swz[i]);
-    for (unsigned j = 0; j < 3; j++) {
-      const std::pair<int, unsigned> &Src = Srcs[j];
-      if (Src.first < 0 || Src.first == 255)
-        continue;
-      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
-        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
-            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
-            // The value from output queue A (denoted by register OQAP) can
-            // only be fetched during the first cycle.
-            return false;
-        }
-        // OQAP does not count towards the normal read port restrictions
-        continue;
-      }
-      if (Vector[Src.second][j] < 0)
-        Vector[Src.second][j] = Src.first;
-      if (Vector[Src.second][j] != Src.first)
-        return i;
-    }
-  }
-  // Now check Trans Alu
-  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
-    const std::pair<int, unsigned> &Src = TransSrcs[i];
-    unsigned Cycle = getTransSwizzle(TransSwz, i);
-    if (Src.first < 0)
-      continue;
-    if (Src.first == 255)
-      continue;
-    if (Vector[Src.second][Cycle] < 0)
-      Vector[Src.second][Cycle] = Src.first;
-    if (Vector[Src.second][Cycle] != Src.first)
-      return IGSrcs.size() - 1;
-  }
-  return IGSrcs.size();
-}
-
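The check above is easier to see in isolation: each register channel can feed
a given operand cycle with only one register index per instruction group,
tracked in the Vector[4][3] table indexed by channel and operand cycle. A
minimal standalone sketch of that bookkeeping, with illustrative names
(recordRead, Table) that are not part of the removed file:

  #include <cstring>

  // Table[Chan][Cycle] holds the register index claimed by that read port,
  // or -1 while the port is still free (mirrors Vector[4][3] above).
  static bool recordRead(int Table[4][3], int RegIndex,
                         unsigned Chan, unsigned Cycle) {
    if (Table[Chan][Cycle] < 0)
      Table[Chan][Cycle] = RegIndex;        // first claim on this port
    return Table[Chan][Cycle] == RegIndex;  // re-reading same index is fine
  }

  // Usage sketch: int Table[4][3]; memset(Table, -1, sizeof(Table));
  // then call recordRead for every (index, channel) source in the group.
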
-/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
-/// (in lexicographic order) swizzle sequence, assuming that all swizzles after
-/// Idx can be skipped.
-static bool
-NextPossibleSolution(
-    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
-    unsigned Idx) {
-  assert(Idx < SwzCandidate.size());
-  int ResetIdx = Idx;
-  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
-    ResetIdx--;
-  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
-    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
-  }
-  if (ResetIdx == -1)
-    return false;
-  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
-  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
-  return true;
-}
-
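NextPossibleSolution is an odometer-style increment: the candidate vector is
read as a base-6 number whose digits are ordered ALU_VEC_012_SCL_210 through
ALU_VEC_210, the last non-maximal digit at or before Idx is bumped, and every
later digit resets to the smallest value. A standalone sketch over plain
integers (nextCandidate and MaxDigit are illustrative names, not part of the
removed file):

  #include <vector>

  // Returns false once every digit up to Idx is already at MaxDigit,
  // i.e. the search space rooted at Idx is exhausted.
  static bool nextCandidate(std::vector<int> &Digits, unsigned Idx,
                            int MaxDigit = 5) {
    int Reset = static_cast<int>(Idx);
    while (Reset >= 0 && Digits[Reset] == MaxDigit)
      --Reset;
    for (unsigned i = Reset + 1, e = Digits.size(); i < e; ++i)
      Digits[i] = 0;              // everything after the bump point restarts
    if (Reset < 0)
      return false;
    ++Digits[Reset];
    return true;
  }
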
-/// Enumerate all possible swizzle sequences to find one that meets all
-/// read port requirements.
-bool R600InstrInfo::FindSwizzleForVectorSlot(
-    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
-    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
-    const std::vector<std::pair<int, unsigned> > &TransSrcs,
-    R600InstrInfo::BankSwizzle TransSwz) const {
-  unsigned ValidUpTo = 0;
-  do {
-    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
-    if (ValidUpTo == IGSrcs.size())
-      return true;
-  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
-  return false;
-}
-
-/// Instructions in the Trans slot can't read a GPR at cycle 0 if they also
-/// read a constant, and can't read a GPR at cycle 1 if they read two
-/// constants.
-static bool
-isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
-                  const std::vector<std::pair<int, unsigned> > &TransOps,
-                  unsigned ConstCount) {
-  // TransALU can't read 3 constants
-  if (ConstCount > 2)
-    return false;
-  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
-    const std::pair<int, unsigned> &Src = TransOps[i];
-    unsigned Cycle = getTransSwizzle(TransSwz, i);
-    if (Src.first < 0)
-      continue;
-    if (ConstCount > 0 && Cycle == 0)
-      return false;
-    if (ConstCount > 1 && Cycle == 1)
-      return false;
-  }
-  return true;
-}
-
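Restated as a single predicate: with ConstCount constants in the instruction
group, a trans-slot GPR read is legal only at a cycle not consumed by a
constant fetch. A standalone sketch of the rule enforced above (gprReadAllowed
is an illustrative name):

  // Constants occupy the earliest fetch cycles: the first constant takes
  // cycle 0, the second takes cycle 1, and three constants never fit.
  static bool gprReadAllowed(unsigned Cycle, unsigned ConstCount) {
    if (ConstCount > 2)
      return false;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
    return true;
  }
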
-bool
-R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
-                                       const DenseMap<unsigned, unsigned> &PV,
-                                       std::vector<BankSwizzle> &ValidSwizzle,
-                                       bool isLastAluTrans)
-    const {
-  // TODO: support shared src0 - src1 operand
-
-  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
-  ValidSwizzle.clear();
-  unsigned ConstCount;
-  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
-  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
-    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
-    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
-        AMDGPU::OpName::bank_swizzle);
-    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
-        IG[i]->getOperand(Op).getImm());
-  }
-  std::vector<std::pair<int, unsigned> > TransOps;
-  if (!isLastAluTrans)
-    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);
-
-  TransOps = std::move(IGSrcs.back());
-  IGSrcs.pop_back();
-  ValidSwizzle.pop_back();
-
-  static const R600InstrInfo::BankSwizzle TransSwz[] = {
-    ALU_VEC_012_SCL_210,
-    ALU_VEC_021_SCL_122,
-    ALU_VEC_120_SCL_212,
-    ALU_VEC_102_SCL_221
-  };
-  for (unsigned i = 0; i < 4; i++) {
-    TransBS = TransSwz[i];
-    if (!isConstCompatible(TransBS, TransOps, ConstCount))
-      continue;
-    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
-        TransBS);
-    if (Result) {
-      ValidSwizzle.push_back(TransBS);
-      return true;
-    }
-  }
-
-  return false;
-}
-
-
-bool
-R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
-    const {
-  assert(Consts.size() <= 12 && "Too many operands in instruction group");
-  unsigned Pair1 = 0, Pair2 = 0;
-  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
-    unsigned ReadConstHalf = Consts[i] & 2;
-    unsigned ReadConstIndex = Consts[i] & (~3);
-    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
-    if (!Pair1) {
-      Pair1 = ReadHalfConst;
-      continue;
-    }
-    if (Pair1 == ReadHalfConst)
-      continue;
-    if (!Pair2) {
-      Pair2 = ReadHalfConst;
-      continue;
-    }
-    if (Pair2 != ReadHalfConst)
-      return false;
-  }
-  return true;
-}
-
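The Pair1/Pair2 bookkeeping above reduces every constant address to a
half-line key, i.e. its line index (Consts[i] & ~3) combined with the
half-line bit (Consts[i] & 2), and accepts the group when at most two
distinct keys occur. A simplified standalone sketch of the same idea using a
set (it ignores the quirk that a key of 0 never occupies a slot in the code
above; fitsTwoHalfLines is an illustrative name):

  #include <set>
  #include <vector>

  static bool fitsTwoHalfLines(const std::vector<unsigned> &Consts) {
    std::set<unsigned> HalfLines;
    for (unsigned C : Consts)
      HalfLines.insert((C & ~3u) | (C & 2u)); // line index | half-line bit
    return HalfLines.size() <= 2;             // at most two distinct halves
  }
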
-bool
-R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
-    const {
-  std::vector<unsigned> Consts;
-  SmallSet<int64_t, 4> Literals;
-  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
-    MachineInstr *MI = MIs[i];
-    if (!isALUInstr(MI->getOpcode()))
-      continue;
-
-    ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
-
-    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
-      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
-      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
-        Literals.insert(Src.second);
-      if (Literals.size() > 4)
-        return false;
-      if (Src.first->getReg() == AMDGPU::ALU_CONST)
-        Consts.push_back(Src.second);
-      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
-          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
-        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
-        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
-        Consts.push_back((Index << 2) | Chan);
-      }
-    }
-  }
-  return fitsConstReadLimitations(Consts);
-}
-
-DFAPacketizer *
-R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
-  const InstrItineraryData *II = STI.getInstrItineraryData();
-  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
-}
-
-static bool
-isPredicateSetter(unsigned Opcode) {
-  switch (Opcode) {
-  case AMDGPU::PRED_X:
-    return true;
-  default:
-    return false;
-  }
-}
-
-static MachineInstr *
-findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
-                             MachineBasicBlock::iterator I) {
-  while (I != MBB.begin()) {
-    --I;
-    MachineInstr *MI = I;
-    if (isPredicateSetter(MI->getOpcode()))
-      return MI;
-  }
-
-  return nullptr;
-}
-
-static
-bool isJump(unsigned Opcode) {
-  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
-}
-
-static bool isBranch(unsigned Opcode) {
-  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
-      Opcode == AMDGPU::BRANCH_COND_f32;
-}
-
-bool
-R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
-                             MachineBasicBlock *&TBB,
-                             MachineBasicBlock *&FBB,
-                             SmallVectorImpl<MachineOperand> &Cond,
-                             bool AllowModify) const {
-  // Most of the following comes from the ARM implementation of AnalyzeBranch
-
-  // If the block has no terminators, it just falls into the block after it.
-  MachineBasicBlock::iterator I = MBB.end();
-  if (I == MBB.begin())
-    return false;
-  --I;
-  while (I->isDebugValue()) {
-    if (I == MBB.begin())
-      return false;
-    --I;
-  }
-  // AMDGPU::BRANCH* instructions are only available after isel and are not
-  // handled
-  if (isBranch(I->getOpcode()))
-    return true;
-  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
-    return false;
-  }
-
-  // Remove successive JUMPs.
-  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
-      MachineBasicBlock::iterator PriorI = std::prev(I);
-      if (AllowModify)
-        I->removeFromParent();
-      I = PriorI;
-  }
-  MachineInstr *LastInst = I;
-
-  // If there is only one terminator instruction, process it.
-  unsigned LastOpc = LastInst->getOpcode();
-  if (I == MBB.begin() ||
-          !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
-    if (LastOpc == AMDGPU::JUMP) {
-      TBB = LastInst->getOperand(0).getMBB();
-      return false;
-    } else if (LastOpc == AMDGPU::JUMP_COND) {
-      MachineInstr *predSet = I;
-      while (!isPredicateSetter(predSet->getOpcode())) {
-        predSet = --I;
-      }
-      TBB = LastInst->getOperand(0).getMBB();
-      Cond.push_back(predSet->getOperand(1));
-      Cond.push_back(predSet->getOperand(2));
-      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
-      return false;
-    }
-    return true;  // Can't handle indirect branch.
-  }
-
-  // Get the instruction before it if it is a terminator.
-  MachineInstr *SecondLastInst = I;
-  unsigned SecondLastOpc = SecondLastInst->getOpcode();
-
-  // If the block ends with a JUMP_COND followed by a JUMP, handle it.
-  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
-    MachineInstr *predSet = --I;
-    while (!isPredicateSetter(predSet->getOpcode())) {
-      predSet = --I;
-    }
-    TBB = SecondLastInst->getOperand(0).getMBB();
-    FBB = LastInst->getOperand(0).getMBB();
-    Cond.push_back(predSet->getOperand(1));
-    Cond.push_back(predSet->getOperand(2));
-    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
-    return false;
-  }
-
-  // Otherwise, can't handle this.
-  return true;
-}
-
-static
-MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
-  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
-      It != E; ++It) {
-    if (It->getOpcode() == AMDGPU::CF_ALU ||
-        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
-      return std::prev(It.base());
-  }
-  return MBB.end();
-}
-
-unsigned
-R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
-                            MachineBasicBlock *TBB,
-                            MachineBasicBlock *FBB,
-                            ArrayRef<MachineOperand> Cond,
-                            DebugLoc DL) const {
-  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
-
-  if (!FBB) {
-    if (Cond.empty()) {
-      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
-      return 1;
-    } else {
-      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
-      assert(PredSet && "No previous predicate !");
-      addFlag(PredSet, 0, MO_FLAG_PUSH);
-      PredSet->getOperand(2).setImm(Cond[1].getImm());
-
-      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
-             .addMBB(TBB)
-             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
-      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
-      if (CfAlu == MBB.end())
-        return 1;
-      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
-      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
-      return 1;
-    }
-  } else {
-    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
-    assert(PredSet && "No previous predicate !");
-    addFlag(PredSet, 0, MO_FLAG_PUSH);
-    PredSet->getOperand(2).setImm(Cond[1].getImm());
-    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
-            .addMBB(TBB)
-            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
-    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
-    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
-    if (CfAlu == MBB.end())
-      return 2;
-    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
-    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
-    return 2;
-  }
-}
-
-unsigned
-R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
-
-  // Note: we leave PRED* instructions there.
-  // They may be needed when predicating instructions.
-
-  MachineBasicBlock::iterator I = MBB.end();
-
-  if (I == MBB.begin()) {
-    return 0;
-  }
-  --I;
-  switch (I->getOpcode()) {
-  default:
-    return 0;
-  case AMDGPU::JUMP_COND: {
-    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
-    clearFlag(predSet, 0, MO_FLAG_PUSH);
-    I->eraseFromParent();
-    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
-    if (CfAlu == MBB.end())
-      break;
-    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
-    CfAlu->setDesc(get(AMDGPU::CF_ALU));
-    break;
-  }
-  case AMDGPU::JUMP:
-    I->eraseFromParent();
-    break;
-  }
-  I = MBB.end();
-
-  if (I == MBB.begin()) {
-    return 1;
-  }
-  --I;
-  switch (I->getOpcode()) {
-    // FIXME: only one case??
-  default:
-    return 1;
-  case AMDGPU::JUMP_COND: {
-    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
-    clearFlag(predSet, 0, MO_FLAG_PUSH);
-    I->eraseFromParent();
-    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
-    if (CfAlu == MBB.end())
-      break;
-    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
-    CfAlu->setDesc(get(AMDGPU::CF_ALU));
-    break;
-  }
-  case AMDGPU::JUMP:
-    I->eraseFromParent();
-    break;
-  }
-  return 2;
-}
-
-bool
-R600InstrInfo::isPredicated(const MachineInstr *MI) const {
-  int idx = MI->findFirstPredOperandIdx();
-  if (idx < 0)
-    return false;
-
-  unsigned Reg = MI->getOperand(idx).getReg();
-  switch (Reg) {
-  default: return false;
-  case AMDGPU::PRED_SEL_ONE:
-  case AMDGPU::PRED_SEL_ZERO:
-  case AMDGPU::PREDICATE_BIT:
-    return true;
-  }
-}
-
-bool
-R600InstrInfo::isPredicable(MachineInstr *MI) const {
-  // XXX: KILL* instructions can be predicated, but they must be the last
-  // instruction in a clause, so this means any instructions after them cannot
-  // be predicated.  Until we have proper support for instruction clauses in the
-  // backend, we will mark KILL* instructions as unpredicable.
-
-  if (MI->getOpcode() == AMDGPU::KILLGT) {
-    return false;
-  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
-    // If the clause starts in the middle of the MBB, then the MBB has more
-    // than a single clause and we can't predicate several clauses.
-    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
-      return false;
-    // TODO: We don't support KC merging at the moment.
-    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
-      return false;
-    return true;
-  } else if (isVector(*MI)) {
-    return false;
-  } else {
-    return AMDGPUInstrInfo::isPredicable(MI);
-  }
-}
-
-
-bool
-R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
-                                   unsigned NumCycles,
-                                   unsigned ExtraPredCycles,
-                                   const BranchProbability &Probability) const{
-  return true;
-}
-
-bool
-R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
-                                   unsigned NumTCycles,
-                                   unsigned ExtraTCycles,
-                                   MachineBasicBlock &FMBB,
-                                   unsigned NumFCycles,
-                                   unsigned ExtraFCycles,
-                                   const BranchProbability &Probability) const {
-  return true;
-}
-
-bool
-R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
-                                         unsigned NumCycles,
-                                         const BranchProbability &Probability)
-                                         const {
-  return true;
-}
-
-bool
-R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
-                                         MachineBasicBlock &FMBB) const {
-  return false;
-}
-
-
-bool
-R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
-  MachineOperand &MO = Cond[1];
-  switch (MO.getImm()) {
-  case OPCODE_IS_ZERO_INT:
-    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
-    break;
-  case OPCODE_IS_NOT_ZERO_INT:
-    MO.setImm(OPCODE_IS_ZERO_INT);
-    break;
-  case OPCODE_IS_ZERO:
-    MO.setImm(OPCODE_IS_NOT_ZERO);
-    break;
-  case OPCODE_IS_NOT_ZERO:
-    MO.setImm(OPCODE_IS_ZERO);
-    break;
-  default:
-    return true;
-  }
-
-  MachineOperand &MO2 = Cond[2];
-  switch (MO2.getReg()) {
-  case AMDGPU::PRED_SEL_ZERO:
-    MO2.setReg(AMDGPU::PRED_SEL_ONE);
-    break;
-  case AMDGPU::PRED_SEL_ONE:
-    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
-    break;
-  default:
-    return true;
-  }
-  return false;
-}
-
-bool
-R600InstrInfo::DefinesPredicate(MachineInstr *MI,
-                                std::vector<MachineOperand> &Pred) const {
-  return isPredicateSetter(MI->getOpcode());
-}
-
-
-bool
-R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                                 ArrayRef<MachineOperand> Pred2) const {
-  return false;
-}
-
-
-bool
-R600InstrInfo::PredicateInstruction(MachineInstr *MI,
-                                    ArrayRef<MachineOperand> Pred) const {
-  int PIdx = MI->findFirstPredOperandIdx();
-
-  if (MI->getOpcode() == AMDGPU::CF_ALU) {
-    MI->getOperand(8).setImm(0);
-    return true;
-  }
-
-  if (MI->getOpcode() == AMDGPU::DOT_4) {
-    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
-        .setReg(Pred[2].getReg());
-    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
-        .setReg(Pred[2].getReg());
-    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
-        .setReg(Pred[2].getReg());
-    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
-        .setReg(Pred[2].getReg());
-    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
-    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
-    return true;
-  }
-
-  if (PIdx != -1) {
-    MachineOperand &PMO = MI->getOperand(PIdx);
-    PMO.setReg(Pred[2].getReg());
-    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
-    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
-    return true;
-  }
-
-  return false;
-}
-
-unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
-  return 2;
-}
-
-unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
-                                            const MachineInstr *MI,
-                                            unsigned *PredCost) const {
-  if (PredCost)
-    *PredCost = 2;
-  return 2;
-}
-
-bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
-  switch (MI->getOpcode()) {
-  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
-  case AMDGPU::R600_EXTRACT_ELT_V2:
-  case AMDGPU::R600_EXTRACT_ELT_V4:
-    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
-                      RI.getHWRegIndex(MI->getOperand(1).getReg()), //  Address
-                      MI->getOperand(2).getReg(),
-                      RI.getHWRegChan(MI->getOperand(1).getReg()));
-    break;
-  case AMDGPU::R600_INSERT_ELT_V2:
-  case AMDGPU::R600_INSERT_ELT_V4:
-    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
-                       RI.getHWRegIndex(MI->getOperand(1).getReg()),  // Address
-                       MI->getOperand(3).getReg(),                    // Offset
-                       RI.getHWRegChan(MI->getOperand(1).getReg()));  // Channel
-    break;
-  }
-  MI->eraseFromParent();
-  return true;
-}
-
-void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
-                                             const MachineFunction &MF) const {
-  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
-      MF.getSubtarget().getFrameLowering());
-
-  unsigned StackWidth = TFL->getStackWidth(MF);
-  int End = getIndirectIndexEnd(MF);
-
-  if (End == -1)
-    return;
-
-  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
-    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
-    Reserved.set(SuperReg);
-    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
-      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
-      Reserved.set(Reg);
-    }
-  }
-}
-
-unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
-                                                 unsigned Channel) const {
-  // XXX: Remove when we support a stack width > 2
-  assert(Channel == 0);
-  return RegIndex;
-}
-
-const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
-  return &AMDGPU::R600_TReg32_XRegClass;
-}
-
-MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
-                                       MachineBasicBlock::iterator I,
-                                       unsigned ValueReg, unsigned Address,
-                                       unsigned OffsetReg) const {
-  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
-}
-
-MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
-                                       MachineBasicBlock::iterator I,
-                                       unsigned ValueReg, unsigned Address,
-                                       unsigned OffsetReg,
-                                       unsigned AddrChan) const {
-  unsigned AddrReg;
-  switch (AddrChan) {
-    default: llvm_unreachable("Invalid Channel");
-    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
-    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
-    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
-    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
-  }
-  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
-                                               AMDGPU::AR_X, OffsetReg);
-  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
-
-  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
-                                      AddrReg, ValueReg)
-                                      .addReg(AMDGPU::AR_X,
-                                           RegState::Implicit | RegState::Kill);
-  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
-  return Mov;
-}
-
-MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
-                                       MachineBasicBlock::iterator I,
-                                       unsigned ValueReg, unsigned Address,
-                                       unsigned OffsetReg) const {
-  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
-}
-
-MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
-                                       MachineBasicBlock::iterator I,
-                                       unsigned ValueReg, unsigned Address,
-                                       unsigned OffsetReg,
-                                       unsigned AddrChan) const {
-  unsigned AddrReg;
-  switch (AddrChan) {
-    default: llvm_unreachable("Invalid Channel");
-    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
-    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
-    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
-    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
-  }
-  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
-                                                       AMDGPU::AR_X,
-                                                       OffsetReg);
-  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
-  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
-                                      ValueReg,
-                                      AddrReg)
-                                      .addReg(AMDGPU::AR_X,
-                                           RegState::Implicit | RegState::Kill);
-  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
-
-  return Mov;
-}
-
-unsigned R600InstrInfo::getMaxAlusPerClause() const {
-  return 115;
-}
-
-MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
-                                                  MachineBasicBlock::iterator I,
-                                                  unsigned Opcode,
-                                                  unsigned DstReg,
-                                                  unsigned Src0Reg,
-                                                  unsigned Src1Reg) const {
-  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
-    DstReg);           // $dst
-
-  if (Src1Reg) {
-    MIB.addImm(0)     // $update_exec_mask
-       .addImm(0);    // $update_predicate
-  }
-  MIB.addImm(1)        // $write
-     .addImm(0)        // $omod
-     .addImm(0)        // $dst_rel
-     .addImm(0)        // $dst_clamp
-     .addReg(Src0Reg)  // $src0
-     .addImm(0)        // $src0_neg
-     .addImm(0)        // $src0_rel
-     .addImm(0)        // $src0_abs
-     .addImm(-1);       // $src0_sel
-
-  if (Src1Reg) {
-    MIB.addReg(Src1Reg) // $src1
-       .addImm(0)       // $src1_neg
-       .addImm(0)       // $src1_rel
-       .addImm(0)       // $src1_abs
-       .addImm(-1);      // $src1_sel
-  }
-
-  // XXX: The r600g finalizer expects this to be 1; once we've moved the
-  // scheduling to the backend, we can change the default to 0.
-  MIB.addImm(1)        // $last
-      .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
-      .addImm(0)         // $literal
-      .addImm(0);        // $bank_swizzle
-
-  return MIB;
-}
-
-#define OPERAND_CASE(Label) \
-  case Label: { \
-    static const unsigned Ops[] = \
-    { \
-      Label##_X, \
-      Label##_Y, \
-      Label##_Z, \
-      Label##_W \
-    }; \
-    return Ops[Slot]; \
-  }
-
-static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
-  switch (Op) {
-  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
-  OPERAND_CASE(AMDGPU::OpName::update_pred)
-  OPERAND_CASE(AMDGPU::OpName::write)
-  OPERAND_CASE(AMDGPU::OpName::omod)
-  OPERAND_CASE(AMDGPU::OpName::dst_rel)
-  OPERAND_CASE(AMDGPU::OpName::clamp)
-  OPERAND_CASE(AMDGPU::OpName::src0)
-  OPERAND_CASE(AMDGPU::OpName::src0_neg)
-  OPERAND_CASE(AMDGPU::OpName::src0_rel)
-  OPERAND_CASE(AMDGPU::OpName::src0_abs)
-  OPERAND_CASE(AMDGPU::OpName::src0_sel)
-  OPERAND_CASE(AMDGPU::OpName::src1)
-  OPERAND_CASE(AMDGPU::OpName::src1_neg)
-  OPERAND_CASE(AMDGPU::OpName::src1_rel)
-  OPERAND_CASE(AMDGPU::OpName::src1_abs)
-  OPERAND_CASE(AMDGPU::OpName::src1_sel)
-  OPERAND_CASE(AMDGPU::OpName::pred_sel)
-  default:
-    llvm_unreachable("Wrong Operand");
-  }
-}
-
-#undef OPERAND_CASE
-
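For example, getSlotedOps(AMDGPU::OpName::src0, 2) selects the third entry of
the table generated by OPERAND_CASE and yields AMDGPU::OpName::src0_Z.
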
-MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
-    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
-    const {
-  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
-  unsigned Opcode;
-  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
-    Opcode = AMDGPU::DOT4_r600;
-  else
-    Opcode = AMDGPU::DOT4_eg;
-  MachineBasicBlock::iterator I = MI;
-  MachineOperand &Src0 = MI->getOperand(
-      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
-  MachineOperand &Src1 = MI->getOperand(
-      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
-  MachineInstr *MIB = buildDefaultInstruction(
-      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
-  static const unsigned Operands[14] = {
-    AMDGPU::OpName::update_exec_mask,
-    AMDGPU::OpName::update_pred,
-    AMDGPU::OpName::write,
-    AMDGPU::OpName::omod,
-    AMDGPU::OpName::dst_rel,
-    AMDGPU::OpName::clamp,
-    AMDGPU::OpName::src0_neg,
-    AMDGPU::OpName::src0_rel,
-    AMDGPU::OpName::src0_abs,
-    AMDGPU::OpName::src0_sel,
-    AMDGPU::OpName::src1_neg,
-    AMDGPU::OpName::src1_rel,
-    AMDGPU::OpName::src1_abs,
-    AMDGPU::OpName::src1_sel,
-  };
-
-  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
-      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
-  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
-      .setReg(MO.getReg());
-
-  for (unsigned i = 0; i < 14; i++) {
-    MachineOperand &MO = MI->getOperand(
-        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
-    assert (MO.isImm());
-    setImmOperand(MIB, Operands[i], MO.getImm());
-  }
-  MIB->getOperand(20).setImm(0);
-  return MIB;
-}
-
-MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
-                                         MachineBasicBlock::iterator I,
-                                         unsigned DstReg,
-                                         uint64_t Imm) const {
-  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
-                                                  AMDGPU::ALU_LITERAL_X);
-  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
-  return MovImm;
-}
-
-MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
-                                       MachineBasicBlock::iterator I,
-                                       unsigned DstReg, unsigned SrcReg) const {
-  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
-}
-
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
-  return getOperandIdx(MI.getOpcode(), Op);
-}
-
-int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
-  return AMDGPU::getNamedOperandIdx(Opcode, Op);
-}
-
-void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
-                                  int64_t Imm) const {
-  int Idx = getOperandIdx(*MI, Op);
-  assert(Idx != -1 && "Operand not supported for this instruction.");
-  assert(MI->getOperand(Idx).isImm());
-  MI->getOperand(Idx).setImm(Imm);
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction flag getters/setters
-//===----------------------------------------------------------------------===//
-
-bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
-  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
-}
-
-MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
-                                         unsigned Flag) const {
-  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
-  int FlagIndex = 0;
-  if (Flag != 0) {
-    // If we pass something other than the default value of Flag to this
-    // function, it means we want to set a flag on an instruction
-    // that uses native encoding.
-    assert(HAS_NATIVE_OPERANDS(TargetFlags));
-    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
-    switch (Flag) {
-    case MO_FLAG_CLAMP:
-      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
-      break;
-    case MO_FLAG_MASK:
-      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
-      break;
-    case MO_FLAG_NOT_LAST:
-    case MO_FLAG_LAST:
-      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
-      break;
-    case MO_FLAG_NEG:
-      switch (SrcIdx) {
-      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
-      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
-      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
-      }
-      break;
-
-    case MO_FLAG_ABS:
-      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
-                       "instructions.");
-      (void)IsOP3;
-      switch (SrcIdx) {
-      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
-      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
-      }
-      break;
-
-    default:
-      FlagIndex = -1;
-      break;
-    }
-    assert(FlagIndex != -1 && "Flag not supported for this instruction");
-  } else {
-    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
-    assert(FlagIndex != 0 &&
-           "Instruction flags not supported for this instruction");
-  }
-
-  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
-  assert(FlagOp.isImm());
-  return FlagOp;
-}
-
-void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
-                            unsigned Flag) const {
-  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
-  if (Flag == 0) {
-    return;
-  }
-  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
-    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
-    if (Flag == MO_FLAG_NOT_LAST) {
-      clearFlag(MI, Operand, MO_FLAG_LAST);
-    } else if (Flag == MO_FLAG_MASK) {
-      clearFlag(MI, Operand, Flag);
-    } else {
-      FlagOp.setImm(1);
-    }
-  } else {
-    MachineOperand &FlagOp = getFlagOp(MI, Operand);
-    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
-  }
-}
-
-void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
-                              unsigned Flag) const {
-  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
-  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
-    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
-    FlagOp.setImm(0);
-  } else {
-    MachineOperand &FlagOp = getFlagOp(MI);
-    unsigned InstFlags = FlagOp.getImm();
-    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
-    FlagOp.setImm(InstFlags);
-  }
-}
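
For instructions without native flag operands, addFlag and clearFlag above
pack the per-operand flags into a single immediate, giving operand N the bit
window that starts at N * NUM_MO_FLAGS. A standalone sketch of that packing
(Width stands in for the NUM_MO_FLAGS constant from R600Defines.h; the helper
names are illustrative):

  #include <cstdint>

  constexpr unsigned Width = 3; // stand-in for NUM_MO_FLAGS

  // Set the given flag bits inside operand N's window of the packed word.
  static uint64_t setFlagBits(uint64_t Packed, unsigned N, uint64_t Flag) {
    return Packed | (Flag << (Width * N));
  }

  // Clear the given flag bits inside operand N's window.
  static uint64_t clearFlagBits(uint64_t Packed, unsigned N, uint64_t Flag) {
    return Packed & ~(Flag << (Width * N));
  }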

Removed: llvm/trunk/lib/Target/R600/R600InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600InstrInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600InstrInfo.h (original)
+++ llvm/trunk/lib/Target/R600/R600InstrInfo.h (removed)
@@ -1,303 +0,0 @@
-//===-- R600InstrInfo.h - R600 Instruction Info Interface -------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface definition for R600InstrInfo
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600INSTRINFO_H
-#define LLVM_LIB_TARGET_R600_R600INSTRINFO_H
-
-#include "AMDGPUInstrInfo.h"
-#include "R600Defines.h"
-#include "R600RegisterInfo.h"
-#include <map>
-
-namespace llvm {
-
-  class AMDGPUTargetMachine;
-  class DFAPacketizer;
-  class ScheduleDAG;
-  class MachineFunction;
-  class MachineInstr;
-  class MachineInstrBuilder;
-
-  class R600InstrInfo : public AMDGPUInstrInfo {
-  private:
-  const R600RegisterInfo RI;
-
-  std::vector<std::pair<int, unsigned> >
-  ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV,
-              unsigned &ConstCount) const;
-
-  MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
-                                        MachineBasicBlock::iterator I,
-                                        unsigned ValueReg, unsigned Address,
-                                        unsigned OffsetReg,
-                                        unsigned AddrChan) const;
-
-  MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
-                                        MachineBasicBlock::iterator I,
-                                        unsigned ValueReg, unsigned Address,
-                                        unsigned OffsetReg,
-                                        unsigned AddrChan) const;
-  public:
-  enum BankSwizzle {
-    ALU_VEC_012_SCL_210 = 0,
-    ALU_VEC_021_SCL_122,
-    ALU_VEC_120_SCL_212,
-    ALU_VEC_102_SCL_221,
-    ALU_VEC_201,
-    ALU_VEC_210
-  };
-
-  explicit R600InstrInfo(const AMDGPUSubtarget &st);
-
-  const R600RegisterInfo &getRegisterInfo() const override;
-  void copyPhysReg(MachineBasicBlock &MBB,
-                   MachineBasicBlock::iterator MI, DebugLoc DL,
-                   unsigned DestReg, unsigned SrcReg,
-                   bool KillSrc) const override;
-  bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MBBI) const override;
-
-  bool isTrig(const MachineInstr &MI) const;
-  bool isPlaceHolderOpcode(unsigned opcode) const;
-  bool isReductionOp(unsigned opcode) const;
-  bool isCubeOp(unsigned opcode) const;
-
-  /// \returns true if this \p Opcode represents an ALU instruction.
-  bool isALUInstr(unsigned Opcode) const;
-  bool hasInstrModifiers(unsigned Opcode) const;
-  bool isLDSInstr(unsigned Opcode) const;
-  bool isLDSNoRetInstr(unsigned Opcode) const;
-  bool isLDSRetInstr(unsigned Opcode) const;
-
-  /// \returns true if this \p Opcode represents an ALU instruction or an
-  /// instruction that will be lowered in ExpandSpecialInstrs Pass.
-  bool canBeConsideredALU(const MachineInstr *MI) const;
-
-  bool isTransOnly(unsigned Opcode) const;
-  bool isTransOnly(const MachineInstr *MI) const;
-  bool isVectorOnly(unsigned Opcode) const;
-  bool isVectorOnly(const MachineInstr *MI) const;
-  bool isExport(unsigned Opcode) const;
-
-  bool usesVertexCache(unsigned Opcode) const;
-  bool usesVertexCache(const MachineInstr *MI) const;
-  bool usesTextureCache(unsigned Opcode) const;
-  bool usesTextureCache(const MachineInstr *MI) const;
-
-  bool mustBeLastInClause(unsigned Opcode) const;
-  bool usesAddressRegister(MachineInstr *MI) const;
-  bool definesAddressRegister(MachineInstr *MI) const;
-  bool readsLDSSrcReg(const MachineInstr *MI) const;
-
-  /// \returns The operand index for the given source number.  Legal values
-  /// for SrcNum are 0, 1, and 2.
-  int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
-  /// \returns The operand index for the Sel operand given an index to one
-  /// of the instruction's src operands.
-  int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
-
-  /// \returns a pair for each src of an ALU instruction.
-  /// The first member of a pair is the register id.
-  /// If register is ALU_CONST, second member is SEL.
-  /// If register is ALU_LITERAL, second member is IMM.
-  /// Otherwise, second member value is undefined.
-  SmallVector<std::pair<MachineOperand *, int64_t>, 3>
-      getSrcs(MachineInstr *MI) const;
-
-  unsigned isLegalUpTo(
-    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
-    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
-    const std::vector<std::pair<int, unsigned> > &TransSrcs,
-    R600InstrInfo::BankSwizzle TransSwz) const;
-
-  bool FindSwizzleForVectorSlot(
-    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
-    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
-    const std::vector<std::pair<int, unsigned> > &TransSrcs,
-    R600InstrInfo::BankSwizzle TransSwz) const;
-
-  /// Given the order VEC_012 < VEC_021 < VEC_120 < VEC_102 < VEC_201 < VEC_210,
-  /// returns true and stores in BS the first (in lexical order) BankSwizzle
-  /// assignment, starting from the one already provided in the Instruction
-  /// Group MIs, that fits the read port limitations, if one exists. Otherwise
-  /// returns false and leaves BS with undefined content.
-  /// isLastAluTrans should be set if the last Alu of MIs will be executed on
-  /// Trans ALU. In this case, ValidTSwizzle returns the BankSwizzle value to
-  /// apply to the last instruction.
-  /// PV holds GPR to PV registers in the Instruction Group MIs.
-  bool fitsReadPortLimitations(const std::vector<MachineInstr *> &MIs,
-                               const DenseMap<unsigned, unsigned> &PV,
-                               std::vector<BankSwizzle> &BS,
-                               bool isLastAluTrans) const;
-
-  /// An instruction group can only access two channel pairs (either [XY] or
-  /// [ZW]) from the KCache bank on R700+. This function checks whether the MI
-  /// set given as input meets this limitation.
-  bool fitsConstReadLimitations(const std::vector<MachineInstr *> &) const;
-  /// Same, but using a const index set instead of an MI set.
-  bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
-
-  /// \brief Vector instructions are instructions that must fill all
-  /// instruction slots within an instruction group.
-  bool isVector(const MachineInstr &MI) const;
-
-  bool isMov(unsigned Opcode) const override;
-
-  DFAPacketizer *
-  CreateTargetScheduleState(const TargetSubtargetInfo &) const override;
-
-  bool ReverseBranchCondition(
-      SmallVectorImpl<MachineOperand> &Cond) const override;
-
-  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
-                     MachineBasicBlock *&FBB,
-                     SmallVectorImpl<MachineOperand> &Cond,
-                     bool AllowModify) const override;
-
-  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
-                        DebugLoc DL) const override;
-
-  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
-
-  bool isPredicated(const MachineInstr *MI) const override;
-
-  bool isPredicable(MachineInstr *MI) const override;
-
-  bool isProfitableToDupForIfCvt(
-      MachineBasicBlock &MBB, unsigned NumCycles,
-      const BranchProbability &Probability) const override;
-
-  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
-                           unsigned ExtraPredCycles,
-                           const BranchProbability &Probability) const override;
-
-  bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
-                           unsigned NumTCycles, unsigned ExtraTCycles,
-                           MachineBasicBlock &FMBB,
-                           unsigned NumFCycles, unsigned ExtraFCycles,
-                           const BranchProbability &Probability) const override;
-
-  bool DefinesPredicate(MachineInstr *MI,
-                        std::vector<MachineOperand> &Pred) const override;
-
-  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                         ArrayRef<MachineOperand> Pred2) const override;
-
-  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
-                                 MachineBasicBlock &FMBB) const override;
-
-  bool PredicateInstruction(MachineInstr *MI,
-                            ArrayRef<MachineOperand> Pred) const override;
-
-  unsigned int getPredicationCost(const MachineInstr *) const override;
-
-  unsigned int getInstrLatency(const InstrItineraryData *ItinData,
-                               const MachineInstr *MI,
-                               unsigned *PredCost = nullptr) const override;
-
-  int getInstrLatency(const InstrItineraryData *ItinData,
-                      SDNode *Node) const override { return 1; }
-
-  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
-
-  /// \brief Reserve the registers that may be accessed using indirect
-  /// addressing.
-  void reserveIndirectRegisters(BitVector &Reserved,
-                                const MachineFunction &MF) const;
-
-  unsigned calculateIndirectAddress(unsigned RegIndex,
-                                    unsigned Channel) const override;
-
-  const TargetRegisterClass *getIndirectAddrRegClass() const override;
-
-  MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
-                          MachineBasicBlock::iterator I,
-                          unsigned ValueReg, unsigned Address,
-                          unsigned OffsetReg) const override;
-
-  MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
-                                        MachineBasicBlock::iterator I,
-                                        unsigned ValueReg, unsigned Address,
-                                        unsigned OffsetReg) const override;
-
-  unsigned getMaxAlusPerClause() const;
-
-  /// buildDefaultInstruction - \returns a MachineInstr with all the
-  /// instruction modifiers initialized to their default values.  You can use
-  /// this function to avoid manually specifying each instruction modifier
-  /// operand when building a new instruction.
-  MachineInstrBuilder buildDefaultInstruction(MachineBasicBlock &MBB,
-                                              MachineBasicBlock::iterator I,
-                                              unsigned Opcode,
-                                              unsigned DstReg,
-                                              unsigned Src0Reg,
-                                              unsigned Src1Reg = 0) const;
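-
-  // A typical call, for illustration (MBB, I, and the registers are assumed
-  // to be in scope; Src1Reg defaults to 0 for single-source opcodes):
-  //
-  //   MachineInstrBuilder Mov =
-  //       buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg, SrcReg);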
-
-  MachineInstr *buildSlotOfVectorInstruction(MachineBasicBlock &MBB,
-                                             MachineInstr *MI,
-                                             unsigned Slot,
-                                             unsigned DstReg) const;
-
-  MachineInstr *buildMovImm(MachineBasicBlock &BB,
-                                  MachineBasicBlock::iterator I,
-                                  unsigned DstReg,
-                                  uint64_t Imm) const;
-
-  MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
-                              MachineBasicBlock::iterator I,
-                              unsigned DstReg, unsigned SrcReg) const override;
-
-  /// \brief Get the index of \p Op in the MachineInstr.
-  ///
-  /// \returns -1 if the instruction does not contain the specified \p Op.
-  int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
-
-  /// \brief Get the index of \p Op for the given \p Opcode.
-  ///
-  /// \returns -1 if the instruction does not contain the specified \p Op.
-  int getOperandIdx(unsigned Opcode, unsigned Op) const;
-
-  /// \brief Helper function for setting instruction flag values.
-  void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
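-
-  // Sketch: setting a named immediate operand (the caller context is
-  // assumed; the operand name comes from the named-operand table, cf.
-  // AMDGPU::OpName):
-  //
-  //   if (TII->getOperandIdx(*MI, AMDGPU::OpName::src0_neg) != -1)
-  //     TII->setImmOperand(MI, AMDGPU::OpName::src0_neg, 1);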
-
-  /// \returns true if this instruction has an operand for storing target flags.
-  bool hasFlagOperand(const MachineInstr &MI) const;
-
-  /// \brief Add one of the MO_FLAG* flags to the specified \p Operand.
-  void addFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
-
-  /// \brief Determine if the specified \p Flag is set on this \p Operand.
-  bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
-
-  /// \param SrcIdx The register source to set the flag on (e.g. src0, src1,
-  /// src2)
-  /// \param Flag The flag being set.
-  ///
-  /// \returns the operand containing the flags for this instruction.
-  MachineOperand &getFlagOp(MachineInstr *MI, unsigned SrcIdx = 0,
-                            unsigned Flag = 0) const;
-
-  /// \brief Clear the specified flag on the instruction.
-  void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
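-
-  // The flag helpers used together, as a sketch (caller context assumed;
-  // MO_FLAG_CLAMP is one of the MO_FLAG* values from R600Defines.h):
-  //
-  //   if (!TII->isFlagSet(*MI, OpIdx, MO_FLAG_CLAMP))
-  //     TII->addFlag(MI, OpIdx, MO_FLAG_CLAMP);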
-};
-
-namespace AMDGPU {
-
-int getLDSNoRetOp(uint16_t Opcode);
-
-} // End namespace AMDGPU
-
-} // End llvm namespace
-
-#endif

Removed: llvm/trunk/lib/Target/R600/R600Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600Instructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600Instructions.td (original)
+++ llvm/trunk/lib/Target/R600/R600Instructions.td (removed)
@@ -1,1744 +0,0 @@
-//===-- R600Instructions.td - R600 Instruction defs  -------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// TableGen definitions for instructions which are available on R600 family
-// GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-include "R600Intrinsics.td"
-include "R600InstrFormats.td"
-
-class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstR600 <outs, ins, asm, pattern, NullALU> {
-
-  let Namespace = "AMDGPU";
-}
-
-def MEMxi : Operand<iPTR> {
-  let MIOperandInfo = (ops R600_TReg32_X:$ptr, i32imm:$index);
-  let PrintMethod = "printMemOperand";
-}
-
-def MEMrr : Operand<iPTR> {
-  let MIOperandInfo = (ops R600_Reg32:$ptr, R600_Reg32:$index);
-}
-
-// Operands for non-registers
-
-class InstFlag<string PM = "printOperand", int Default = 0>
-    : OperandWithDefaultOps <i32, (ops (i32 Default))> {
-  let PrintMethod = PM;
-}
-
-// src_sel for ALU src operands, see also ALU_CONST, ALU_PARAM registers
-def SEL : OperandWithDefaultOps <i32, (ops (i32 -1))> {
-  let PrintMethod = "printSel";
-}
-def BANK_SWIZZLE : OperandWithDefaultOps <i32, (ops (i32 0))> {
-  let PrintMethod = "printBankSwizzle";
-}
-
-def LITERAL : InstFlag<"printLiteral">;
-
-def WRITE : InstFlag <"printWrite", 1>;
-def OMOD : InstFlag <"printOMOD">;
-def REL : InstFlag <"printRel">;
-def CLAMP : InstFlag <"printClamp">;
-def NEG : InstFlag <"printNeg">;
-def ABS : InstFlag <"printAbs">;
-def UEM : InstFlag <"printUpdateExecMask">;
-def UP : InstFlag <"printUpdatePred">;
-
-// XXX: The r600g finalizer in Mesa expects last to be one in most cases.
-// Once we start using the packetizer in this backend we should have this
-// default to 0.
-def LAST : InstFlag<"printLast", 1>;
-def RSel : Operand<i32> {
-  let PrintMethod = "printRSel";
-}
-def CT: Operand<i32> {
-  let PrintMethod = "printCT";
-}
-
-def FRAMEri : Operand<iPTR> {
-  let MIOperandInfo = (ops R600_Reg32:$ptr, i32imm:$index);
-}
-
-def ADDRParam : ComplexPattern<i32, 2, "SelectADDRParam", [], []>;
-def ADDRDWord : ComplexPattern<i32, 1, "SelectADDRDWord", [], []>;
-def ADDRVTX_READ : ComplexPattern<i32, 2, "SelectADDRVTX_READ", [], []>;
-def ADDRGA_CONST_OFFSET : ComplexPattern<i32, 1, "SelectGlobalValueConstantOffset", [], []>;
-def ADDRGA_VAR_OFFSET : ComplexPattern<i32, 2, "SelectGlobalValueVariableOffset", [], []>;
-
-
-def R600_Pred : PredicateOperand<i32, (ops R600_Predicate),
-                                     (ops PRED_SEL_OFF)>;
-
-
-let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
-
-// Class for instructions with only one source register.
-// If you add new ins to this instruction, make sure they are listed before
-// $literal, because the backend currently assumes that the last operand is
-// a literal.  Also be sure to update the enum R600Op1OperandIndex::ROI in
-// R600Defines.h, R600InstrInfo::buildDefaultInstruction(),
-// and R600InstrInfo::getOperandIdx().
-class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
-                InstrItinClass itin = AnyALU> :
-    InstR600 <(outs R600_Reg32:$dst),
-              (ins WRITE:$write, OMOD:$omod, REL:$dst_rel, CLAMP:$clamp,
-                   R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, ABS:$src0_abs, SEL:$src0_sel,
-                   LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
-                   BANK_SWIZZLE:$bank_swizzle),
-              !strconcat("  ", opName,
-                   "$clamp $last $dst$write$dst_rel$omod, "
-                   "$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
-                   "$pred_sel $bank_swizzle"),
-              pattern,
-              itin>,
-    R600ALU_Word0,
-    R600ALU_Word1_OP2 <inst> {
-
-  let src1 = 0;
-  let src1_rel = 0;
-  let src1_neg = 0;
-  let src1_abs = 0;
-  let update_exec_mask = 0;
-  let update_pred = 0;
-  let HasNativeOperands = 1;
-  let Op1 = 1;
-  let ALUInst = 1;
-  let DisableEncoding = "$literal";
-  let UseNamedOperandTable = 1;
-
-  let Inst{31-0}  = Word0;
-  let Inst{63-32} = Word1;
-}
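-
-// A minimal sketch of the constraint noted above: a new input operand
-// (NEWFLAG:$new_flag below is a placeholder, not a real operand class) must
-// be listed before $literal, e.g.
-//
-//   (ins ..., NEWFLAG:$new_flag, LAST:$last, R600_Pred:$pred_sel,
-//        LITERAL:$literal, BANK_SWIZZLE:$bank_swizzle),
-//
-// because parts of the backend assume the literal's position in the list.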
-
-class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
-                    InstrItinClass itin = AnyALU> :
-    R600_1OP <inst, opName,
-              [(set R600_Reg32:$dst, (node R600_Reg32:$src0))], itin
->;
-
-// If you add or change the operands for R600_2OP instructions, you must
-// also update the R600Op2OperandIndex::ROI enum in R600Defines.h,
-// R600InstrInfo::buildDefaultInstruction(), and R600InstrInfo::getOperandIdx().
-class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
-                InstrItinClass itin = AnyALU> :
-  InstR600 <(outs R600_Reg32:$dst),
-          (ins UEM:$update_exec_mask, UP:$update_pred, WRITE:$write,
-               OMOD:$omod, REL:$dst_rel, CLAMP:$clamp,
-               R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, ABS:$src0_abs, SEL:$src0_sel,
-               R600_Reg32:$src1, NEG:$src1_neg, REL:$src1_rel, ABS:$src1_abs, SEL:$src1_sel,
-               LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
-               BANK_SWIZZLE:$bank_swizzle),
-          !strconcat("  ", opName,
-                "$clamp $last $update_exec_mask$update_pred$dst$write$dst_rel$omod, "
-                "$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
-                "$src1_neg$src1_abs$src1$src1_abs$src1_rel, "
-                "$pred_sel $bank_swizzle"),
-          pattern,
-          itin>,
-    R600ALU_Word0,
-    R600ALU_Word1_OP2 <inst> {
-
-  let HasNativeOperands = 1;
-  let Op2 = 1;
-  let ALUInst = 1;
-  let DisableEncoding = "$literal";
-  let UseNamedOperandTable = 1;
-
-  let Inst{31-0}  = Word0;
-  let Inst{63-32} = Word1;
-}
-
-class R600_2OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
-                       InstrItinClass itin = AnyALU> :
-    R600_2OP <inst, opName,
-              [(set R600_Reg32:$dst, (node R600_Reg32:$src0,
-                                           R600_Reg32:$src1))], itin
->;
-
-// If you add or change the operands for R600_3OP instructions, you must
-// also update the R600Op3OperandIndex::ROI enum in R600Defines.h,
-// R600InstrInfo::buildDefaultInstruction(), and
-// R600InstrInfo::getOperandIdx().
-class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
-                InstrItinClass itin = AnyALU> :
-  InstR600 <(outs R600_Reg32:$dst),
-          (ins REL:$dst_rel, CLAMP:$clamp,
-               R600_Reg32:$src0, NEG:$src0_neg, REL:$src0_rel, SEL:$src0_sel,
-               R600_Reg32:$src1, NEG:$src1_neg, REL:$src1_rel, SEL:$src1_sel,
-               R600_Reg32:$src2, NEG:$src2_neg, REL:$src2_rel, SEL:$src2_sel,
-               LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
-               BANK_SWIZZLE:$bank_swizzle),
-          !strconcat("  ", opName, "$clamp $last $dst$dst_rel, "
-                             "$src0_neg$src0$src0_rel, "
-                             "$src1_neg$src1$src1_rel, "
-                             "$src2_neg$src2$src2_rel, "
-                             "$pred_sel"
-                             "$bank_swizzle"),
-          pattern,
-          itin>,
-    R600ALU_Word0,
-    R600ALU_Word1_OP3 <inst> {
-
-  let HasNativeOperands = 1;
-  let DisableEncoding = "$literal";
-  let Op3 = 1;
-  let UseNamedOperandTable = 1;
-  let ALUInst = 1;
-
-  let Inst{31-0}  = Word0;
-  let Inst{63-32} = Word1;
-}
-
-class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern,
-                      InstrItinClass itin = VecALU> :
-  InstR600 <(outs R600_Reg32:$dst),
-          ins,
-          asm,
-          pattern,
-          itin>;
-
-
-
-} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0
-
-def TEX_SHADOW : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return (TType >= 6 && TType <= 8) || TType == 13;
-  }]
->;
-
-def TEX_RECT : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return TType == 5;
-  }]
->;
-
-def TEX_ARRAY : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return TType == 9 || TType == 10 || TType == 16;
-  }]
->;
-
-def TEX_SHADOW_ARRAY : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return TType == 11 || TType == 12 || TType == 17;
-  }]
->;
-
-def TEX_MSAA : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return TType == 14;
-  }]
->;
-
-def TEX_ARRAY_MSAA : PatLeaf<
-  (imm),
-  [{uint32_t TType = (uint32_t)N->getZExtValue();
-    return TType == 15;
-  }]
->;
-
-class EG_CF_RAT <bits <8> cfinst, bits <6> ratinst, bits<4> ratid, bits<4> mask,
-                 dag outs, dag ins, string asm, list<dag> pattern> :
-    InstR600ISA <outs, ins, asm, pattern>,
-    CF_ALLOC_EXPORT_WORD0_RAT, CF_ALLOC_EXPORT_WORD1_BUF  {
-
-  let rat_id      = ratid;
-  let rat_inst    = ratinst;
-  let rim         = 0;
-  // XXX: Have a separate instruction for non-indexed writes.
-  let type        = 1;
-  let rw_rel      = 0;
-  let elem_size   = 0;
-
-  let array_size  = 0;
-  let comp_mask   = mask;
-  let burst_count = 0;
-  let vpm         = 0;
-  let cf_inst     = cfinst;
-  let mark        = 0;
-  let barrier     = 1;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-  let IsExport = 1;
-
-}
-
-class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
-    : InstR600ISA <outs, (ins MEMxi:$src_gpr), name, pattern>,
-      VTX_WORD1_GPR {
-
-  // Static fields
-  let DST_REL = 0;
-  // The docs say that if this bit is set, then the DATA_FORMAT,
-  // NUM_FORMAT_ALL, FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields
-  // will be ignored.  However, based on my testing, if USE_CONST_FIELDS is
-  // set, then all of these fields need to be set to 0.
-  let USE_CONST_FIELDS = 0;
-  let NUM_FORMAT_ALL = 1;
-  let FORMAT_COMP_ALL = 0;
-  let SRF_MODE_ALL = 0;
-
-  let Inst{63-32} = Word1;
-  // LLVM can only encode 64-bit instructions, so these fields are manually
-  // encoded in R600CodeEmitter
-  //
-  // bits<16> OFFSET;
-  // bits<2>  ENDIAN_SWAP = 0;
-  // bits<1>  CONST_BUF_NO_STRIDE = 0;
-  // bits<1>  MEGA_FETCH = 0;
-  // bits<1>  ALT_CONST = 0;
-  // bits<2>  BUFFER_INDEX_MODE = 0;
-
-  // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
-  // is done in R600CodeEmitter)
-  //
-  // Inst{79-64} = OFFSET;
-  // Inst{81-80} = ENDIAN_SWAP;
-  // Inst{82}    = CONST_BUF_NO_STRIDE;
-  // Inst{83}    = MEGA_FETCH;
-  // Inst{84}    = ALT_CONST;
-  // Inst{86-85} = BUFFER_INDEX_MODE;
-  // Inst{95-87} = 0; Reserved
-
-  // VTX_WORD3 (Padding)
-  //
-  // Inst{127-96} = 0;
-
-  let VTXInst = 1;
-}
-
-class LoadParamFrag <PatFrag load_type> : PatFrag <
-  (ops node:$ptr), (load_type node:$ptr),
-  [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }]
->;
-
-def load_param : LoadParamFrag<load>;
-def load_param_exti8 : LoadParamFrag<az_extloadi8>;
-def load_param_exti16 : LoadParamFrag<az_extloadi16>;
-
-def isR600 : Predicate<"Subtarget->getGeneration() <= AMDGPUSubtarget::R700">;
-
-def isR600toCayman
-    : Predicate<
-          "Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
-
-//===----------------------------------------------------------------------===//
-// R600 SDNodes
-//===----------------------------------------------------------------------===//
-
-def INTERP_PAIR_XY :  AMDGPUShaderInst <
-  (outs R600_TReg32_X:$dst0, R600_TReg32_Y:$dst1),
-  (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
-  "INTERP_PAIR_XY $src0 $src1 $src2 : $dst0 dst1",
-  []>;
-
-def INTERP_PAIR_ZW :  AMDGPUShaderInst <
-  (outs R600_TReg32_Z:$dst0, R600_TReg32_W:$dst1),
-  (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
-  "INTERP_PAIR_ZW $src0 $src1 $src2 : $dst0 dst1",
-  []>;
-
-def CONST_ADDRESS: SDNode<"AMDGPUISD::CONST_ADDRESS",
-  SDTypeProfile<1, -1, [SDTCisInt<0>, SDTCisPtrTy<1>]>,
-  [SDNPVariadic]
->;
-
-def DOT4 : SDNode<"AMDGPUISD::DOT4",
-  SDTypeProfile<1, 8, [SDTCisFP<0>, SDTCisVT<1, f32>, SDTCisVT<2, f32>,
-      SDTCisVT<3, f32>, SDTCisVT<4, f32>, SDTCisVT<5, f32>,
-      SDTCisVT<6, f32>, SDTCisVT<7, f32>, SDTCisVT<8, f32>]>,
-  []
->;
-
-def COS_HW : SDNode<"AMDGPUISD::COS_HW",
-  SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
->;
-
-def SIN_HW : SDNode<"AMDGPUISD::SIN_HW",
-  SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
->;
-
-def TEXTURE_FETCH_Type : SDTypeProfile<1, 19, [SDTCisFP<0>]>;
-
-def TEXTURE_FETCH: SDNode<"AMDGPUISD::TEXTURE_FETCH", TEXTURE_FETCH_Type, []>;
-
-multiclass TexPattern<bits<32> TextureOp, Instruction inst, ValueType vt = v4f32> {
-def : Pat<(TEXTURE_FETCH (i32 TextureOp), vt:$SRC_GPR,
-          (i32 imm:$srcx), (i32 imm:$srcy), (i32 imm:$srcz), (i32 imm:$srcw),
-          (i32 imm:$offsetx), (i32 imm:$offsety), (i32 imm:$offsetz),
-          (i32 imm:$DST_SEL_X), (i32 imm:$DST_SEL_Y), (i32 imm:$DST_SEL_Z),
-          (i32 imm:$DST_SEL_W),
-          (i32 imm:$RESOURCE_ID), (i32 imm:$SAMPLER_ID),
-          (i32 imm:$COORD_TYPE_X), (i32 imm:$COORD_TYPE_Y), (i32 imm:$COORD_TYPE_Z),
-          (i32 imm:$COORD_TYPE_W)),
-          (inst R600_Reg128:$SRC_GPR,
-          imm:$srcx, imm:$srcy, imm:$srcz, imm:$srcw,
-          imm:$offsetx, imm:$offsety, imm:$offsetz,
-          imm:$DST_SEL_X, imm:$DST_SEL_Y, imm:$DST_SEL_Z,
-          imm:$DST_SEL_W,
-          imm:$RESOURCE_ID, imm:$SAMPLER_ID,
-          imm:$COORD_TYPE_X, imm:$COORD_TYPE_Y, imm:$COORD_TYPE_Z,
-          imm:$COORD_TYPE_W)>;
-}
-
-//===----------------------------------------------------------------------===//
-// Interpolation Instructions
-//===----------------------------------------------------------------------===//
-
-def INTERP_VEC_LOAD :  AMDGPUShaderInst <
-  (outs R600_Reg128:$dst),
-  (ins i32imm:$src0),
-  "INTERP_LOAD $src0 : $dst",
-  [(set R600_Reg128:$dst, (int_R600_interp_const imm:$src0))]>;
-
-def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []> {
-  let bank_swizzle = 5;
-}
-
-def INTERP_ZW : R600_2OP <0xD7, "INTERP_ZW", []> {
-  let bank_swizzle = 5;
-}
-
-def INTERP_LOAD_P0 : R600_1OP <0xE0, "INTERP_LOAD_P0", []>;
-
-//===----------------------------------------------------------------------===//
-// Export Instructions
-//===----------------------------------------------------------------------===//
-
-def ExportType : SDTypeProfile<0, 7, [SDTCisFP<0>, SDTCisInt<1>]>;
-
-def EXPORT: SDNode<"AMDGPUISD::EXPORT", ExportType,
-  [SDNPHasChain, SDNPSideEffect]>;
-
-class ExportWord0 {
-  field bits<32> Word0;
-
-  bits<13> arraybase;
-  bits<2> type;
-  bits<7> gpr;
-  bits<2> elem_size;
-
-  let Word0{12-0} = arraybase;
-  let Word0{14-13} = type;
-  let Word0{21-15} = gpr;
-  let Word0{22} = 0; // RW_REL
-  let Word0{29-23} = 0; // INDEX_GPR
-  let Word0{31-30} = elem_size;
-}
-
-class ExportSwzWord1 {
-  field bits<32> Word1;
-
-  bits<3> sw_x;
-  bits<3> sw_y;
-  bits<3> sw_z;
-  bits<3> sw_w;
-  bits<1> eop;
-  bits<8> inst;
-
-  let Word1{2-0} = sw_x;
-  let Word1{5-3} = sw_y;
-  let Word1{8-6} = sw_z;
-  let Word1{11-9} = sw_w;
-}
-
-class ExportBufWord1 {
-  field bits<32> Word1;
-
-  bits<12> arraySize;
-  bits<4> compMask;
-  bits<1> eop;
-  bits<8> inst;
-
-  let Word1{11-0} = arraySize;
-  let Word1{15-12} = compMask;
-}
-
-multiclass ExportPattern<Instruction ExportInst, bits<8> cf_inst> {
-  def : Pat<(int_R600_store_pixel_depth R600_Reg32:$reg),
-    (ExportInst
-        (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), $reg, sub0),
-        0, 61, 0, 7, 7, 7, cf_inst, 0)
-  >;
-
-  def : Pat<(int_R600_store_pixel_stencil R600_Reg32:$reg),
-    (ExportInst
-        (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), $reg, sub0),
-        0, 61, 7, 0, 7, 7, cf_inst, 0)
-  >;
-
-  def : Pat<(int_R600_store_dummy (i32 imm:$type)),
-    (ExportInst
-        (v4f32 (IMPLICIT_DEF)), imm:$type, 0, 7, 7, 7, 7, cf_inst, 0)
-  >;
-
-  def : Pat<(int_R600_store_dummy 1),
-    (ExportInst
-        (v4f32 (IMPLICIT_DEF)), 1, 60, 7, 7, 7, 7, cf_inst, 0)
-  >;
-
-  def : Pat<(EXPORT (v4f32 R600_Reg128:$src), (i32 imm:$base), (i32 imm:$type),
-    (i32 imm:$swz_x), (i32 imm:$swz_y), (i32 imm:$swz_z), (i32 imm:$swz_w)),
-        (ExportInst R600_Reg128:$src, imm:$type, imm:$base,
-        imm:$swz_x, imm:$swz_y, imm:$swz_z, imm:$swz_w, cf_inst, 0)
-  >;
-
-}
-
-multiclass StreamOutputExportPattern<Instruction ExportInst,
-    bits<8> buf0inst, bits<8> buf1inst, bits<8> buf2inst, bits<8> buf3inst> {
-// Stream0
-  def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
-      (i32 imm:$arraybase), (i32 0), (i32 imm:$mask)),
-      (ExportInst R600_Reg128:$src, 0, imm:$arraybase,
-      4095, imm:$mask, buf0inst, 0)>;
-// Stream1
-  def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
-      (i32 imm:$arraybase), (i32 1), (i32 imm:$mask)),
-      (ExportInst $src, 0, imm:$arraybase,
-      4095, imm:$mask, buf1inst, 0)>;
-// Stream2
-  def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
-      (i32 imm:$arraybase), (i32 2), (i32 imm:$mask)),
-      (ExportInst $src, 0, imm:$arraybase,
-      4095, imm:$mask, buf2inst, 0)>;
-// Stream3
-  def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
-      (i32 imm:$arraybase), (i32 3), (i32 imm:$mask)),
-      (ExportInst $src, 0, imm:$arraybase,
-      4095, imm:$mask, buf3inst, 0)>;
-}
-
-// Export instructions should not be duplicated by the TailDuplication pass
-// (which assumes that duplicable instructions are affected by the exec mask).
-let usesCustomInserter = 1, isNotDuplicable = 1 in {
-
-class ExportSwzInst : InstR600ISA<
-    (outs),
-    (ins R600_Reg128:$gpr, i32imm:$type, i32imm:$arraybase,
-         RSel:$sw_x, RSel:$sw_y, RSel:$sw_z, RSel:$sw_w, i32imm:$inst,
-         i32imm:$eop),
-    !strconcat("EXPORT", " $gpr.$sw_x$sw_y$sw_z$sw_w"),
-    []>, ExportWord0, ExportSwzWord1 {
-  let elem_size = 3;
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-  let IsExport = 1;
-}
-
-} // End usesCustomInserter = 1, isNotDuplicable = 1
-
-class ExportBufInst : InstR600ISA<
-    (outs),
-    (ins R600_Reg128:$gpr, i32imm:$type, i32imm:$arraybase,
-         i32imm:$arraySize, i32imm:$compMask, i32imm:$inst, i32imm:$eop),
-    !strconcat("EXPORT", " $gpr"),
-    []>, ExportWord0, ExportBufWord1 {
-  let elem_size = 0;
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-  let IsExport = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions
-//===----------------------------------------------------------------------===//
-
-
-def KCACHE : InstFlag<"printKCache">;
-
-class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs),
-    (ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1,
-         KCACHE:$KCACHE_MODE0, KCACHE:$KCACHE_MODE1,
-         i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1,
-         i32imm:$COUNT, i32imm:$Enabled),
-    !strconcat(OpName, " $COUNT, @$ADDR, "
-               "KC0[$KCACHE_MODE0], KC1[$KCACHE_MODE1]"),
-    []>, CF_ALU_WORD0, CF_ALU_WORD1 {
-  field bits<64> Inst;
-
-  let CF_INST = inst;
-  let ALT_CONST = 0;
-  let WHOLE_QUAD_MODE = 0;
-  let BARRIER = 1;
-  let isCodeGenOnly = 1;
-  let UseNamedOperandTable = 1;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-}
-
-class CF_WORD0_R600 {
-  field bits<32> Word0;
-
-  bits<32> ADDR;
-
-  let Word0 = ADDR;
-}
-
-class CF_CLAUSE_R600 <bits<7> inst, dag ins, string AsmPrint> :
-    AMDGPUInst <(outs), ins, AsmPrint, []>, CF_WORD0_R600, CF_WORD1_R600 {
-  field bits<64> Inst;
-  bits<4> CNT;
-
-  let CF_INST = inst;
-  let BARRIER = 1;
-  let CF_CONST = 0;
-  let VALID_PIXEL_MODE = 0;
-  let COND = 0;
-  let COUNT = CNT{2-0};
-  let CALL_COUNT = 0;
-  let COUNT_3 = CNT{3};
-  let END_OF_PROGRAM = 0;
-  let WHOLE_QUAD_MODE = 0;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-}
-
-class CF_CLAUSE_EG <bits<8> inst, dag ins, string AsmPrint> :
-    AMDGPUInst <(outs), ins, AsmPrint, []>, CF_WORD0_EG, CF_WORD1_EG {
-  field bits<64> Inst;
-
-  let CF_INST = inst;
-  let BARRIER = 1;
-  let JUMPTABLE_SEL = 0;
-  let CF_CONST = 0;
-  let VALID_PIXEL_MODE = 0;
-  let COND = 0;
-  let END_OF_PROGRAM = 0;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-}
-
-def CF_ALU : ALU_CLAUSE<8, "ALU">;
-def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">;
-def CF_ALU_POP_AFTER : ALU_CLAUSE<10, "ALU_POP_AFTER">;
-def CF_ALU_CONTINUE : ALU_CLAUSE<13, "ALU_CONTINUE">;
-def CF_ALU_BREAK : ALU_CLAUSE<14, "ALU_BREAK">;
-def CF_ALU_ELSE_AFTER : ALU_CLAUSE<15, "ALU_ELSE_AFTER">;
-
-def FETCH_CLAUSE : AMDGPUInst <(outs),
-(ins i32imm:$addr), "Fetch clause starting at $addr:", [] > {
-  field bits<8> Inst;
-  bits<8> num;
-  let Inst = num;
-  let isCodeGenOnly = 1;
-}
-
-def ALU_CLAUSE : AMDGPUInst <(outs),
-(ins i32imm:$addr), "ALU clause starting at $addr:", [] > {
-  field bits<8> Inst;
-  bits<8> num;
-  let Inst = num;
-  let isCodeGenOnly = 1;
-}
-
-def LITERALS : AMDGPUInst <(outs),
-(ins LITERAL:$literal1, LITERAL:$literal2), "$literal1, $literal2", [] > {
-  let isCodeGenOnly = 1;
-
-  field bits<64> Inst;
-  bits<32> literal1;
-  bits<32> literal2;
-
-  let Inst{31-0} = literal1;
-  let Inst{63-32} = literal2;
-}
-
-def PAD : AMDGPUInst <(outs), (ins), "PAD", [] > {
-  field bits<64> Inst;
-}
-
-let Predicates = [isR600toCayman] in {
-
-//===----------------------------------------------------------------------===//
-// Common Instructions R600, R700, Evergreen, Cayman
-//===----------------------------------------------------------------------===//
-
-def ADD : R600_2OP_Helper <0x0, "ADD", fadd>;
-// Non-IEEE MUL: 0 * anything = 0
-def MUL : R600_2OP_Helper <0x1, "MUL NON-IEEE", int_AMDGPU_mul>;
-def MUL_IEEE : R600_2OP_Helper <0x2, "MUL_IEEE", fmul>;
-// TODO: Do these actually match the regular fmin/fmax behavior?
-def MAX : R600_2OP_Helper <0x3, "MAX", AMDGPUfmax_legacy>;
-def MIN : R600_2OP_Helper <0x4, "MIN", AMDGPUfmin_legacy>;
-// According to https://msdn.microsoft.com/en-us/library/windows/desktop/cc308050%28v=vs.85%29.aspx,
-// DX10 min/max returns the other operand if one is NaN.
-// This matches http://llvm.org/docs/LangRef.html#llvm-minnum-intrinsic.
-def MAX_DX10 : R600_2OP_Helper <0x5, "MAX_DX10", fmaxnum>;
-def MIN_DX10 : R600_2OP_Helper <0x6, "MIN_DX10", fminnum>;
-
-// For the SET* instructions there is a naming conflict in TargetSelectionDAG.td,
-// so some of the instruction names don't match the asm string.
-// XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics.
-def SETE : R600_2OP <
-  0x08, "SETE",
-  [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OEQ))]
->;
-
-def SGT : R600_2OP <
-  0x09, "SETGT",
-  [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGT))]
->;
-
-def SGE : R600_2OP <
-  0xA, "SETGE",
-  [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGE))]
->;
-
-def SNE : R600_2OP <
-  0xB, "SETNE",
-  [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_UNE_NE))]
->;
-
-def SETE_DX10 : R600_2OP <
-  0xC, "SETE_DX10",
-  [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OEQ))]
->;
-
-def SETGT_DX10 : R600_2OP <
-  0xD, "SETGT_DX10",
-  [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGT))]
->;
-
-def SETGE_DX10 : R600_2OP <
-  0xE, "SETGE_DX10",
-  [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGE))]
->;
-
-// FIXME: This should probably be COND_ONE
-def SETNE_DX10 : R600_2OP <
-  0xF, "SETNE_DX10",
-  [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE_NE))]
->;
-
-def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
-def TRUNC : R600_1OP_Helper <0x11, "TRUNC", ftrunc>;
-def CEIL : R600_1OP_Helper <0x12, "CEIL", fceil>;
-def RNDNE : R600_1OP_Helper <0x13, "RNDNE", frint>;
-def FLOOR : R600_1OP_Helper <0x14, "FLOOR", ffloor>;
-
-def MOV : R600_1OP <0x19, "MOV", []>;
-
-let isPseudo = 1, isCodeGenOnly = 1, usesCustomInserter = 1 in {
-
-class MOV_IMM <ValueType vt, Operand immType> : AMDGPUInst <
-  (outs R600_Reg32:$dst),
-  (ins immType:$imm),
-  "",
-  []
->;
-
-} // end let isPseudo = 1, isCodeGenOnly = 1, usesCustomInserter = 1
-
-def MOV_IMM_I32 : MOV_IMM<i32, i32imm>;
-def : Pat <
-  (imm:$val),
-  (MOV_IMM_I32 imm:$val)
->;
-
-def MOV_IMM_F32 : MOV_IMM<f32, f32imm>;
-def : Pat <
-  (fpimm:$val),
-  (MOV_IMM_F32 fpimm:$val)
->;
-
-def PRED_SETE : R600_2OP <0x20, "PRED_SETE", []>;
-def PRED_SETGT : R600_2OP <0x21, "PRED_SETGT", []>;
-def PRED_SETGE : R600_2OP <0x22, "PRED_SETGE", []>;
-def PRED_SETNE : R600_2OP <0x23, "PRED_SETNE", []>;
-
-let hasSideEffects = 1 in {
-
-def KILLGT : R600_2OP <0x2D, "KILLGT", []>;
-
-} // end hasSideEffects
-
-def AND_INT : R600_2OP_Helper <0x30, "AND_INT", and>;
-def OR_INT : R600_2OP_Helper <0x31, "OR_INT", or>;
-def XOR_INT : R600_2OP_Helper <0x32, "XOR_INT", xor>;
-def NOT_INT : R600_1OP_Helper <0x33, "NOT_INT", not>;
-def ADD_INT : R600_2OP_Helper <0x34, "ADD_INT", add>;
-def SUB_INT : R600_2OP_Helper <0x35, "SUB_INT", sub>;
-def MAX_INT : R600_2OP_Helper <0x36, "MAX_INT", smax>;
-def MIN_INT : R600_2OP_Helper <0x37, "MIN_INT", smin>;
-def MAX_UINT : R600_2OP_Helper <0x38, "MAX_UINT", umax>;
-def MIN_UINT : R600_2OP_Helper <0x39, "MIN_UINT", umin>;
-
-def SETE_INT : R600_2OP <
-  0x3A, "SETE_INT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETEQ))]
->;
-
-def SETGT_INT : R600_2OP <
-  0x3B, "SETGT_INT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETGT))]
->;
-
-def SETGE_INT : R600_2OP <
-  0x3C, "SETGE_INT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETGE))]
->;
-
-def SETNE_INT : R600_2OP <
-  0x3D, "SETNE_INT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETNE))]
->;
-
-def SETGT_UINT : R600_2OP <
-  0x3E, "SETGT_UINT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETUGT))]
->;
-
-def SETGE_UINT : R600_2OP <
-  0x3F, "SETGE_UINT",
-  [(set i32:$dst, (selectcc i32:$src0, i32:$src1, -1, 0, SETUGE))]
->;
-
-def PRED_SETE_INT : R600_2OP <0x42, "PRED_SETE_INT", []>;
-def PRED_SETGT_INT : R600_2OP <0x43, "PRED_SETGE_INT", []>;
-def PRED_SETGE_INT : R600_2OP <0x44, "PRED_SETGE_INT", []>;
-def PRED_SETNE_INT : R600_2OP <0x45, "PRED_SETNE_INT", []>;
-
-def CNDE_INT : R600_3OP <
-  0x1C, "CNDE_INT",
-  [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_EQ))]
->;
-
-def CNDGE_INT : R600_3OP <
-  0x1E, "CNDGE_INT",
-  [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGE))]
->;
-
-def CNDGT_INT : R600_3OP <
-  0x1D, "CNDGT_INT",
-  [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGT))]
->;
-
-//===----------------------------------------------------------------------===//
-// Texture instructions
-//===----------------------------------------------------------------------===//
-
-let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
-
-class R600_TEX <bits<11> inst, string opName> :
-  InstR600 <(outs R600_Reg128:$DST_GPR),
-          (ins R600_Reg128:$SRC_GPR,
-          RSel:$srcx, RSel:$srcy, RSel:$srcz, RSel:$srcw,
-          i32imm:$offsetx, i32imm:$offsety, i32imm:$offsetz,
-          RSel:$DST_SEL_X, RSel:$DST_SEL_Y, RSel:$DST_SEL_Z, RSel:$DST_SEL_W,
-          i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
-          CT:$COORD_TYPE_X, CT:$COORD_TYPE_Y, CT:$COORD_TYPE_Z,
-          CT:$COORD_TYPE_W),
-          !strconcat(opName,
-          " $DST_GPR.$DST_SEL_X$DST_SEL_Y$DST_SEL_Z$DST_SEL_W, "
-          "$SRC_GPR.$srcx$srcy$srcz$srcw "
-          "RID:$RESOURCE_ID SID:$SAMPLER_ID "
-          "CT:$COORD_TYPE_X$COORD_TYPE_Y$COORD_TYPE_Z$COORD_TYPE_W"),
-          [],
-          NullALU>, TEX_WORD0, TEX_WORD1, TEX_WORD2 {
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-
-  let TEX_INST = inst{4-0};
-  let SRC_REL = 0;
-  let DST_REL = 0;
-  let LOD_BIAS = 0;
-
-  let INST_MOD = 0;
-  let FETCH_WHOLE_QUAD = 0;
-  let ALT_CONST = 0;
-  let SAMPLER_INDEX_MODE = 0;
-  let RESOURCE_INDEX_MODE = 0;
-
-  let TEXInst = 1;
-}
-
-} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0
-
-
-
-def TEX_SAMPLE : R600_TEX <0x10, "TEX_SAMPLE">;
-def TEX_SAMPLE_C : R600_TEX <0x18, "TEX_SAMPLE_C">;
-def TEX_SAMPLE_L : R600_TEX <0x11, "TEX_SAMPLE_L">;
-def TEX_SAMPLE_C_L : R600_TEX <0x19, "TEX_SAMPLE_C_L">;
-def TEX_SAMPLE_LB : R600_TEX <0x12, "TEX_SAMPLE_LB">;
-def TEX_SAMPLE_C_LB : R600_TEX <0x1A, "TEX_SAMPLE_C_LB">;
-def TEX_LD : R600_TEX <0x03, "TEX_LD">;
-def TEX_LDPTR : R600_TEX <0x03, "TEX_LDPTR"> {
-  let INST_MOD = 1;
-}
-def TEX_GET_TEXTURE_RESINFO : R600_TEX <0x04, "TEX_GET_TEXTURE_RESINFO">;
-def TEX_GET_GRADIENTS_H : R600_TEX <0x07, "TEX_GET_GRADIENTS_H">;
-def TEX_GET_GRADIENTS_V : R600_TEX <0x08, "TEX_GET_GRADIENTS_V">;
-def TEX_SET_GRADIENTS_H : R600_TEX <0x0B, "TEX_SET_GRADIENTS_H">;
-def TEX_SET_GRADIENTS_V : R600_TEX <0x0C, "TEX_SET_GRADIENTS_V">;
-def TEX_SAMPLE_G : R600_TEX <0x14, "TEX_SAMPLE_G">;
-def TEX_SAMPLE_C_G : R600_TEX <0x1C, "TEX_SAMPLE_C_G">;
-
-defm : TexPattern<0, TEX_SAMPLE>;
-defm : TexPattern<1, TEX_SAMPLE_C>;
-defm : TexPattern<2, TEX_SAMPLE_L>;
-defm : TexPattern<3, TEX_SAMPLE_C_L>;
-defm : TexPattern<4, TEX_SAMPLE_LB>;
-defm : TexPattern<5, TEX_SAMPLE_C_LB>;
-defm : TexPattern<6, TEX_LD, v4i32>;
-defm : TexPattern<7, TEX_GET_TEXTURE_RESINFO, v4i32>;
-defm : TexPattern<8, TEX_GET_GRADIENTS_H>;
-defm : TexPattern<9, TEX_GET_GRADIENTS_V>;
-defm : TexPattern<10, TEX_LDPTR, v4i32>;
-
-//===----------------------------------------------------------------------===//
-// Helper classes for common instructions
-//===----------------------------------------------------------------------===//
-
-class MUL_LIT_Common <bits<5> inst> : R600_3OP <
-  inst, "MUL_LIT",
-  []
->;
-
-class MULADD_Common <bits<5> inst> : R600_3OP <
-  inst, "MULADD",
-  []
->;
-
-class MULADD_IEEE_Common <bits<5> inst> : R600_3OP <
-  inst, "MULADD_IEEE",
-  [(set f32:$dst, (fmad f32:$src0, f32:$src1, f32:$src2))]
->;
-
-class FMA_Common <bits<5> inst> : R600_3OP <
-  inst, "FMA",
-  [(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))], VecALU
->;
-
-class CNDE_Common <bits<5> inst> : R600_3OP <
-  inst, "CNDE",
-  [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OEQ))]
->;
-
-class CNDGT_Common <bits<5> inst> : R600_3OP <
-  inst, "CNDGT",
-  [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGT))]
-> {
-  let Itinerary = VecALU;
-}
-
-class CNDGE_Common <bits<5> inst> : R600_3OP <
-  inst, "CNDGE",
-  [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGE))]
-> {
-  let Itinerary = VecALU;
-}
-
-
-let isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU"  in {
-class R600_VEC2OP<list<dag> pattern> : InstR600 <(outs R600_Reg32:$dst), (ins
-// Slot X
-   UEM:$update_exec_mask_X, UP:$update_pred_X, WRITE:$write_X,
-   OMOD:$omod_X, REL:$dst_rel_X, CLAMP:$clamp_X,
-   R600_TReg32_X:$src0_X, NEG:$src0_neg_X, REL:$src0_rel_X, ABS:$src0_abs_X, SEL:$src0_sel_X,
-   R600_TReg32_X:$src1_X, NEG:$src1_neg_X, REL:$src1_rel_X, ABS:$src1_abs_X, SEL:$src1_sel_X,
-   R600_Pred:$pred_sel_X,
-// Slot Y
-   UEM:$update_exec_mask_Y, UP:$update_pred_Y, WRITE:$write_Y,
-   OMOD:$omod_Y, REL:$dst_rel_Y, CLAMP:$clamp_Y,
-   R600_TReg32_Y:$src0_Y, NEG:$src0_neg_Y, REL:$src0_rel_Y, ABS:$src0_abs_Y, SEL:$src0_sel_Y,
-   R600_TReg32_Y:$src1_Y, NEG:$src1_neg_Y, REL:$src1_rel_Y, ABS:$src1_abs_Y, SEL:$src1_sel_Y,
-   R600_Pred:$pred_sel_Y,
-// Slot Z
-   UEM:$update_exec_mask_Z, UP:$update_pred_Z, WRITE:$write_Z,
-   OMOD:$omod_Z, REL:$dst_rel_Z, CLAMP:$clamp_Z,
-   R600_TReg32_Z:$src0_Z, NEG:$src0_neg_Z, REL:$src0_rel_Z, ABS:$src0_abs_Z, SEL:$src0_sel_Z,
-   R600_TReg32_Z:$src1_Z, NEG:$src1_neg_Z, REL:$src1_rel_Z, ABS:$src1_abs_Z, SEL:$src1_sel_Z,
-   R600_Pred:$pred_sel_Z,
-// Slot W
-   UEM:$update_exec_mask_W, UP:$update_pred_W, WRITE:$write_W,
-   OMOD:$omod_W, REL:$dst_rel_W, CLAMP:$clamp_W,
-   R600_TReg32_W:$src0_W, NEG:$src0_neg_W, REL:$src0_rel_W, ABS:$src0_abs_W, SEL:$src0_sel_W,
-   R600_TReg32_W:$src1_W, NEG:$src1_neg_W, REL:$src1_rel_W, ABS:$src1_abs_W, SEL:$src1_sel_W,
-   R600_Pred:$pred_sel_W,
-   LITERAL:$literal0, LITERAL:$literal1),
-  "",
-  pattern,
-  AnyALU> {
-
-  let UseNamedOperandTable = 1;
-
-}
-}
-
-def DOT_4 : R600_VEC2OP<[(set R600_Reg32:$dst, (DOT4
-  R600_TReg32_X:$src0_X, R600_TReg32_X:$src1_X,
-  R600_TReg32_Y:$src0_Y, R600_TReg32_Y:$src1_Y,
-  R600_TReg32_Z:$src0_Z, R600_TReg32_Z:$src1_Z,
-  R600_TReg32_W:$src0_W, R600_TReg32_W:$src1_W))]>;
-
-
-class DOT4_Common <bits<11> inst> : R600_2OP <inst, "DOT4", []>;
-
-
-let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
-multiclass CUBE_Common <bits<11> inst> {
-
-  def _pseudo : InstR600 <
-    (outs R600_Reg128:$dst),
-    (ins R600_Reg128:$src0),
-    "CUBE $dst $src0",
-    [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src0))],
-    VecALU
-  > {
-    let isPseudo = 1;
-    let UseNamedOperandTable = 1;
-  }
-
-  def _real : R600_2OP <inst, "CUBE", []>;
-}
-} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0
-
-class EXP_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "EXP_IEEE", fexp2
-> {
-  let Itinerary = TransALU;
-}
-
-class FLT_TO_INT_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "FLT_TO_INT", fp_to_sint
-> {
-  let Itinerary = TransALU;
-}
-
-class INT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "INT_TO_FLT", sint_to_fp
-> {
-  let Itinerary = TransALU;
-}
-
-class FLT_TO_UINT_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "FLT_TO_UINT", fp_to_uint
-> {
-  let Itinerary = TransALU;
-}
-
-class UINT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "UINT_TO_FLT", uint_to_fp
-> {
-  let Itinerary = TransALU;
-}
-
-class LOG_CLAMPED_Common <bits<11> inst> : R600_1OP <
-  inst, "LOG_CLAMPED", []
->;
-
-class LOG_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "LOG_IEEE", flog2
-> {
-  let Itinerary = TransALU;
-}
-
-class LSHL_Common <bits<11> inst> : R600_2OP_Helper <inst, "LSHL", shl>;
-class LSHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "LSHR", srl>;
-class ASHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "ASHR", sra>;
-class MULHI_INT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULHI_INT", mulhs
-> {
-  let Itinerary = TransALU;
-}
-class MULHI_UINT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULHI", mulhu
-> {
-  let Itinerary = TransALU;
-}
-class MULLO_INT_Common <bits<11> inst> : R600_2OP_Helper <
-  inst, "MULLO_INT", mul
-> {
-  let Itinerary = TransALU;
-}
-class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []> {
-  let Itinerary = TransALU;
-}
-
-class RECIP_CLAMPED_Common <bits<11> inst> : R600_1OP <
-  inst, "RECIP_CLAMPED", []
-> {
-  let Itinerary = TransALU;
-}
-
-class RECIP_IEEE_Common <bits<11> inst> : R600_1OP <
-  inst, "RECIP_IEEE", [(set f32:$dst, (AMDGPUrcp f32:$src0))]
-> {
-  let Itinerary = TransALU;
-}
-
-class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "RECIP_UINT", AMDGPUurecip
-> {
-  let Itinerary = TransALU;
-}
-
-// Clamped to maximum.
-class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "RECIPSQRT_CLAMPED", AMDGPUrsq_clamped
-> {
-  let Itinerary = TransALU;
-}
-
-class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
-  inst, "RECIPSQRT_IEEE", AMDGPUrsq_legacy
-> {
-  let Itinerary = TransALU;
-}
-
-// TODO: There is also RECIPSQRT_FF which clamps to zero.
-
-class SIN_Common <bits<11> inst> : R600_1OP <
-  inst, "SIN", [(set f32:$dst, (SIN_HW f32:$src0))]> {
-  let Trig = 1;
-  let Itinerary = TransALU;
-}
-
-class COS_Common <bits<11> inst> : R600_1OP <
-  inst, "COS", [(set f32:$dst, (COS_HW f32:$src0))]> {
-  let Trig = 1;
-  let Itinerary = TransALU;
-}
-
-def CLAMP_R600 :  CLAMP <R600_Reg32>;
-def FABS_R600 : FABS<R600_Reg32>;
-def FNEG_R600 : FNEG<R600_Reg32>;
-
-//===----------------------------------------------------------------------===//
-// Helper patterns for complex intrinsics
-//===----------------------------------------------------------------------===//
-
-// FIXME: Should be predicated on unsafe fp math.
-multiclass DIV_Common <InstR600 recip_ieee> {
-def : Pat<
-  (int_AMDGPU_div f32:$src0, f32:$src1),
-  (MUL_IEEE $src0, (recip_ieee $src1))
->;
-
-def : Pat<
-  (fdiv f32:$src0, f32:$src1),
-  (MUL_IEEE $src0, (recip_ieee $src1))
->;
-
-def : RcpPat<recip_ieee, f32>;
-}
-
-class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee>
-  : Pat <
-  (int_TGSI_lit_z f32:$src_x, f32:$src_y, f32:$src_w),
-  (exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x))
->;
-
-//===----------------------------------------------------------------------===//
-// R600 / R700 Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isR600] in {
-
-  def MUL_LIT_r600 : MUL_LIT_Common<0x0C>;
-  def MULADD_r600 : MULADD_Common<0x10>;
-  def MULADD_IEEE_r600 : MULADD_IEEE_Common<0x14>;
-  def CNDE_r600 : CNDE_Common<0x18>;
-  def CNDGT_r600 : CNDGT_Common<0x19>;
-  def CNDGE_r600 : CNDGE_Common<0x1A>;
-  def DOT4_r600 : DOT4_Common<0x50>;
-  defm CUBE_r600 : CUBE_Common<0x52>;
-  def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>;
-  def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>;
-  def LOG_IEEE_r600 : LOG_IEEE_Common<0x63>;
-  def RECIP_CLAMPED_r600 : RECIP_CLAMPED_Common<0x64>;
-  def RECIP_IEEE_r600 : RECIP_IEEE_Common<0x66>;
-  def RECIPSQRT_CLAMPED_r600 : RECIPSQRT_CLAMPED_Common<0x67>;
-  def RECIPSQRT_IEEE_r600 : RECIPSQRT_IEEE_Common<0x69>;
-  def FLT_TO_INT_r600 : FLT_TO_INT_Common<0x6b>;
-  def INT_TO_FLT_r600 : INT_TO_FLT_Common<0x6c>;
-  def FLT_TO_UINT_r600 : FLT_TO_UINT_Common<0x79>;
-  def UINT_TO_FLT_r600 : UINT_TO_FLT_Common<0x6d>;
-  def SIN_r600 : SIN_Common<0x6E>;
-  def COS_r600 : COS_Common<0x6F>;
-  def ASHR_r600 : ASHR_Common<0x70>;
-  def LSHR_r600 : LSHR_Common<0x71>;
-  def LSHL_r600 : LSHL_Common<0x72>;
-  def MULLO_INT_r600 : MULLO_INT_Common<0x73>;
-  def MULHI_INT_r600 : MULHI_INT_Common<0x74>;
-  def MULLO_UINT_r600 : MULLO_UINT_Common<0x75>;
-  def MULHI_UINT_r600 : MULHI_UINT_Common<0x76>;
-  def RECIP_UINT_r600 : RECIP_UINT_Common <0x78>;
-
-  defm DIV_r600 : DIV_Common<RECIP_IEEE_r600>;
-  def : POW_Common <LOG_IEEE_r600, EXP_IEEE_r600, MUL>;
-  def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
-
-  def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>;
-  def : RsqPat<RECIPSQRT_IEEE_r600, f32>;
-
-  def R600_ExportSwz : ExportSwzInst {
-    let Word1{20-17} = 0; // BURST_COUNT
-    let Word1{21} = eop;
-    let Word1{22} = 0; // VALID_PIXEL_MODE
-    let Word1{30-23} = inst;
-    let Word1{31} = 1; // BARRIER
-  }
-  defm : ExportPattern<R600_ExportSwz, 39>;
-
-  def R600_ExportBuf : ExportBufInst {
-    let Word1{20-17} = 0; // BURST_COUNT
-    let Word1{21} = eop;
-    let Word1{22} = 0; // VALID_PIXEL_MODE
-    let Word1{30-23} = inst;
-    let Word1{31} = 1; // BARRIER
-  }
-  defm : StreamOutputExportPattern<R600_ExportBuf, 0x20, 0x21, 0x22, 0x23>;
-
-  def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$CNT),
-  "TEX $CNT @$ADDR"> {
-    let POP_COUNT = 0;
-  }
-  def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$CNT),
-  "VTX $CNT @$ADDR"> {
-    let POP_COUNT = 0;
-  }
-  def WHILE_LOOP_R600 : CF_CLAUSE_R600<6, (ins i32imm:$ADDR),
-  "LOOP_START_DX10 @$ADDR"> {
-    let POP_COUNT = 0;
-    let CNT = 0;
-  }
-  def END_LOOP_R600 : CF_CLAUSE_R600<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
-    let POP_COUNT = 0;
-    let CNT = 0;
-  }
-  def LOOP_BREAK_R600 : CF_CLAUSE_R600<9, (ins i32imm:$ADDR),
-  "LOOP_BREAK @$ADDR"> {
-    let POP_COUNT = 0;
-    let CNT = 0;
-  }
-  def CF_CONTINUE_R600 : CF_CLAUSE_R600<8, (ins i32imm:$ADDR),
-  "CONTINUE @$ADDR"> {
-    let POP_COUNT = 0;
-    let CNT = 0;
-  }
-  def CF_JUMP_R600 : CF_CLAUSE_R600<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "JUMP @$ADDR POP:$POP_COUNT"> {
-    let CNT = 0;
-  }
-  def CF_PUSH_ELSE_R600 : CF_CLAUSE_R600<12, (ins i32imm:$ADDR),
-  "PUSH_ELSE @$ADDR"> {
-    let CNT = 0;
-    let POP_COUNT = 0; // FIXME?
-  }
-  def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "ELSE @$ADDR POP:$POP_COUNT"> {
-    let CNT = 0;
-  }
-  def CF_CALL_FS_R600 : CF_CLAUSE_R600<19, (ins), "CALL_FS"> {
-    let ADDR = 0;
-    let CNT = 0;
-    let POP_COUNT = 0;
-  }
-  def POP_R600 : CF_CLAUSE_R600<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "POP @$ADDR POP:$POP_COUNT"> {
-    let CNT = 0;
-  }
-  def CF_END_R600 : CF_CLAUSE_R600<0, (ins), "CF_END"> {
-    let CNT = 0;
-    let POP_COUNT = 0;
-    let ADDR = 0;
-    let END_OF_PROGRAM = 1;
-  }
-
-}
-
-
-//===----------------------------------------------------------------------===//
-// Register loads and stores - for indirect addressing
-//===----------------------------------------------------------------------===//
-
-defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
-
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-let isPseudo = 1 in {
-
-def PRED_X : InstR600 <
-  (outs R600_Predicate_Bit:$dst),
-  (ins R600_Reg32:$src0, i32imm:$src1, i32imm:$flags),
-  "", [], NullALU> {
-  let FlagOperandIdx = 3;
-}
-
-let isTerminator = 1, isBranch = 1 in {
-def JUMP_COND : InstR600 <
-          (outs),
-          (ins brtarget:$target, R600_Predicate_Bit:$p),
-          "JUMP $target ($p)",
-          [], AnyALU
-  >;
-
-def JUMP : InstR600 <
-          (outs),
-          (ins brtarget:$target),
-          "JUMP $target",
-          [], AnyALU
-  >
-{
-  let isPredicable = 1;
-  let isBarrier = 1;
-}
-
-}  // End isTerminator = 1, isBranch = 1
-
-let usesCustomInserter = 1 in {
-
-let mayLoad = 0, mayStore = 0, hasSideEffects = 1 in {
-
-def MASK_WRITE : AMDGPUShaderInst <
-    (outs),
-    (ins R600_Reg32:$src),
-    "MASK_WRITE $src",
-    []
->;
-
-} // End mayLoad = 0, mayStore = 0, hasSideEffects = 1
-
-
-def TXD: InstR600 <
-  (outs R600_Reg128:$dst),
-  (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2,
-       i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget),
-  "TXD $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget",
-  [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2,
-                     imm:$resourceId, imm:$samplerId, imm:$textureTarget))],
-  NullALU > {
-  let TEXInst = 1;
-}
-
-def TXD_SHADOW: InstR600 <
-  (outs R600_Reg128:$dst),
-  (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2,
-       i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget),
-  "TXD_SHADOW $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget",
-  [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2,
-        imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))],
-   NullALU
-> {
-  let TEXInst = 1;
-}
-} // End usesCustomInserter = 1
-} // End isPseudo = 1
-
-
-//===----------------------------------------------------------------------===//
-// Constant Buffer Addressing Support
-//===----------------------------------------------------------------------===//
-
-let usesCustomInserter = 1, isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU"  in {
-def CONST_COPY : Instruction {
-  let OutOperandList = (outs R600_Reg32:$dst);
-  let InOperandList = (ins i32imm:$src);
-  let Pattern =
-      [(set R600_Reg32:$dst, (CONST_ADDRESS ADDRGA_CONST_OFFSET:$src))];
-  let AsmString = "CONST_COPY";
-  let hasSideEffects = 0;
-  let isAsCheapAsAMove = 1;
-  let Itinerary = NullALU;
-}
-} // end usesCustomInserter = 1, isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU"
-
-def TEX_VTX_CONSTBUF :
-  InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID),
-      "VTX_READ_eg $dst, $ptr",
-      [(set v4i32:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr,
-                                       (i32 imm:$BUFFER_ID)))]>,
-  VTX_WORD1_GPR, VTX_WORD0_eg {
-
-  let VC_INST = 0;
-  let FETCH_TYPE = 2;
-  let FETCH_WHOLE_QUAD = 0;
-  let SRC_REL = 0;
-  let SRC_SEL_X = 0;
-  let DST_REL = 0;
-  let USE_CONST_FIELDS = 0;
-  let NUM_FORMAT_ALL = 2;
-  let FORMAT_COMP_ALL = 1;
-  let SRF_MODE_ALL = 1;
-  let MEGA_FETCH_COUNT = 16;
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 1;
-  let DST_SEL_Z        = 2;
-  let DST_SEL_W        = 3;
-  let DATA_FORMAT      = 35;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-
-// LLVM can only encode 64-bit instructions, so these fields are manually
-// encoded in R600CodeEmitter
-//
-// bits<16> OFFSET;
-// bits<2>  ENDIAN_SWAP = 0;
-// bits<1>  CONST_BUF_NO_STRIDE = 0;
-// bits<1>  MEGA_FETCH = 0;
-// bits<1>  ALT_CONST = 0;
-// bits<2>  BUFFER_INDEX_MODE = 0;
-
-
-
-// VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
-// is done in R600CodeEmitter)
-//
-// Inst{79-64} = OFFSET;
-// Inst{81-80} = ENDIAN_SWAP;
-// Inst{82}    = CONST_BUF_NO_STRIDE;
-// Inst{83}    = MEGA_FETCH;
-// Inst{84}    = ALT_CONST;
-// Inst{86-85} = BUFFER_INDEX_MODE;
-// Inst{95-87} = 0; Reserved
-
-// VTX_WORD3 (Padding)
-//
-// Inst{127-96} = 0;
-  let VTXInst = 1;
-}
-
-def TEX_VTX_TEXBUF :
-  InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID),
-      "TEX_VTX_EXPLICIT_READ $dst, $ptr",
-      [(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr,
-                                              imm:$BUFFER_ID))]>,
-  VTX_WORD1_GPR, VTX_WORD0_eg {
-
-  let VC_INST = 0;
-  let FETCH_TYPE = 2;
-  let FETCH_WHOLE_QUAD = 0;
-  let SRC_REL = 0;
-  let SRC_SEL_X = 0;
-  let DST_REL = 0;
-  let USE_CONST_FIELDS = 1;
-  let NUM_FORMAT_ALL = 0;
-  let FORMAT_COMP_ALL = 0;
-  let SRF_MODE_ALL = 1;
-  let MEGA_FETCH_COUNT = 16;
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 1;
-  let DST_SEL_Z        = 2;
-  let DST_SEL_W        = 3;
-  let DATA_FORMAT      = 0;
-
-  let Inst{31-0} = Word0;
-  let Inst{63-32} = Word1;
-
-// LLVM can only encode 64-bit instructions, so these fields are manually
-// encoded in R600CodeEmitter
-//
-// bits<16> OFFSET;
-// bits<2>  ENDIAN_SWAP = 0;
-// bits<1>  CONST_BUF_NO_STRIDE = 0;
-// bits<1>  MEGA_FETCH = 0;
-// bits<1>  ALT_CONST = 0;
-// bits<2>  BUFFER_INDEX_MODE = 0;
-
-
-
-// VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
-// is done in R600CodeEmitter)
-//
-// Inst{79-64} = OFFSET;
-// Inst{81-80} = ENDIAN_SWAP;
-// Inst{82}    = CONST_BUF_NO_STRIDE;
-// Inst{83}    = MEGA_FETCH;
-// Inst{84}    = ALT_CONST;
-// Inst{86-85} = BUFFER_INDEX_MODE;
-// Inst{95-87} = 0; Reserved
-
-// VTX_WORD3 (Padding)
-//
-// Inst{127-96} = 0;
-  let VTXInst = 1;
-}
-
-//===---------------------------------------------------------------------===//
-// Flow and Program control Instructions
-//===---------------------------------------------------------------------===//
-class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
-    : Instruction {
-
-  let Namespace = "AMDGPU";
-  dag OutOperandList = outs;
-  dag InOperandList = ins;
-  let Pattern = pattern;
-  let AsmString = !strconcat(asmstr, "\n");
-  let isPseudo = 1;
-  let Itinerary = NullALU;
-  bit hasIEEEFlag = 0;
-  bit hasZeroOpFlag = 0;
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let isCodeGenOnly = 1;
-}
-
-multiclass BranchConditional<SDNode Op, RegisterClass rci, RegisterClass rcf> {
-  def _i32 : ILFormat<(outs),
-      (ins brtarget:$target, rci:$src0),
-      "; i32 Pseudo branch instruction",
-      [(Op bb:$target, (i32 rci:$src0))]>;
-  def _f32 : ILFormat<(outs),
-      (ins brtarget:$target, rcf:$src0),
-      "; f32 Pseudo branch instruction",
-      [(Op bb:$target, (f32 rcf:$src0))]>;
-}
-
-// Only scalar types should generate flow control
-multiclass BranchInstr<string name> {
-  def _i32 : ILFormat<(outs), (ins R600_Reg32:$src),
-      !strconcat(name, " $src"), []>;
-  def _f32 : ILFormat<(outs), (ins R600_Reg32:$src),
-      !strconcat(name, " $src"), []>;
-}
-// Only scalar types should generate flow control
-multiclass BranchInstr2<string name> {
-  def _i32 : ILFormat<(outs), (ins R600_Reg32:$src0, R600_Reg32:$src1),
-      !strconcat(name, " $src0, $src1"), []>;
-  def _f32 : ILFormat<(outs), (ins R600_Reg32:$src0, R600_Reg32:$src1),
-      !strconcat(name, " $src0, $src1"), []>;
-}
-
-//===---------------------------------------------------------------------===//
-// Custom inserter for branches and returns; this will eventually be a
-// separate pass.
-//===---------------------------------------------------------------------===//
-let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
-  def BRANCH : ILFormat<(outs), (ins brtarget:$target),
-      "; Pseudo unconditional branch instruction",
-      [(br bb:$target)]>;
-  defm BRANCH_COND : BranchConditional<IL_brcond, R600_Reg32, R600_Reg32>;
-}
-
-//===---------------------------------------------------------------------===//
-// Return instruction
-//===---------------------------------------------------------------------===//
-let isTerminator = 1, isReturn = 1, hasCtrlDep = 1,
-    usesCustomInserter = 1 in {
-  def RETURN          : ILFormat<(outs), (ins variable_ops),
-      "RETURN", [(IL_retflag)]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Branch Instructions
-//===----------------------------------------------------------------------===//
-
-def IF_PREDICATE_SET  : ILFormat<(outs), (ins R600_Reg32:$src),
-  "IF_PREDICATE_SET $src", []>;
-
-let isTerminator=1 in {
-  def BREAK       : ILFormat< (outs), (ins),
-      "BREAK", []>;
-  def CONTINUE    : ILFormat< (outs), (ins),
-      "CONTINUE", []>;
-  def DEFAULT     : ILFormat< (outs), (ins),
-      "DEFAULT", []>;
-  def ELSE        : ILFormat< (outs), (ins),
-      "ELSE", []>;
-  def ENDSWITCH   : ILFormat< (outs), (ins),
-      "ENDSWITCH", []>;
-  def ENDMAIN     : ILFormat< (outs), (ins),
-      "ENDMAIN", []>;
-  def END         : ILFormat< (outs), (ins),
-      "END", []>;
-  def ENDFUNC     : ILFormat< (outs), (ins),
-      "ENDFUNC", []>;
-  def ENDIF       : ILFormat< (outs), (ins),
-      "ENDIF", []>;
-  def WHILELOOP   : ILFormat< (outs), (ins),
-      "WHILE", []>;
-  def ENDLOOP     : ILFormat< (outs), (ins),
-      "ENDLOOP", []>;
-  def FUNC        : ILFormat< (outs), (ins),
-      "FUNC", []>;
-  def RETDYN      : ILFormat< (outs), (ins),
-      "RET_DYN", []>;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm IF_LOGICALNZ  : BranchInstr<"IF_LOGICALNZ">;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm IF_LOGICALZ   : BranchInstr<"IF_LOGICALZ">;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm BREAK_LOGICALNZ : BranchInstr<"BREAK_LOGICALNZ">;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm BREAK_LOGICALZ : BranchInstr<"BREAK_LOGICALZ">;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm CONTINUE_LOGICALNZ : BranchInstr<"CONTINUE_LOGICALNZ">;
-  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
-  defm CONTINUE_LOGICALZ : BranchInstr<"CONTINUE_LOGICALZ">;
-  defm IFC         : BranchInstr2<"IFC">;
-  defm BREAKC      : BranchInstr2<"BREAKC">;
-  defm CONTINUEC   : BranchInstr2<"CONTINUEC">;
-}
-
-//===----------------------------------------------------------------------===//
-// Indirect addressing pseudo instructions
-//===----------------------------------------------------------------------===//
-
-let isPseudo = 1 in {
-
-class ExtractVertical <RegisterClass vec_rc> : InstR600 <
-  (outs R600_Reg32:$dst),
-  (ins vec_rc:$vec, R600_Reg32:$index), "",
-  [],
-  AnyALU
->;
-
-let Constraints = "$dst = $vec" in {
-
-class InsertVertical <RegisterClass vec_rc> : InstR600 <
-  (outs vec_rc:$dst),
-  (ins vec_rc:$vec, R600_Reg32:$value, R600_Reg32:$index), "",
-  [],
-  AnyALU
->;
-
-} // End Constraints = "$dst = $vec"
-
-} // End isPseudo = 1
-
-def R600_EXTRACT_ELT_V2 : ExtractVertical <R600_Reg64Vertical>;
-def R600_EXTRACT_ELT_V4 : ExtractVertical <R600_Reg128Vertical>;
-
-def R600_INSERT_ELT_V2 : InsertVertical <R600_Reg64Vertical>;
-def R600_INSERT_ELT_V4 : InsertVertical <R600_Reg128Vertical>;
-
-class ExtractVerticalPat <Instruction inst, ValueType vec_ty,
-                          ValueType scalar_ty> : Pat <
-  (scalar_ty (extractelt vec_ty:$vec, i32:$index)),
-  (inst $vec, $index)
->;
-
-def : ExtractVerticalPat <R600_EXTRACT_ELT_V2, v2i32, i32>;
-def : ExtractVerticalPat <R600_EXTRACT_ELT_V2, v2f32, f32>;
-def : ExtractVerticalPat <R600_EXTRACT_ELT_V4, v4i32, i32>;
-def : ExtractVerticalPat <R600_EXTRACT_ELT_V4, v4f32, f32>;
-
-class InsertVerticalPat <Instruction inst, ValueType vec_ty,
-                         ValueType scalar_ty> : Pat <
-  (vec_ty (insertelt vec_ty:$vec, scalar_ty:$value, i32:$index)),
-  (inst $vec, $value, $index)
->;
-
-def : InsertVerticalPat <R600_INSERT_ELT_V2, v2i32, i32>;
-def : InsertVerticalPat <R600_INSERT_ELT_V2, v2f32, f32>;
-def : InsertVerticalPat <R600_INSERT_ELT_V4, v4i32, i32>;
-def : InsertVerticalPat <R600_INSERT_ELT_V4, v4f32, f32>;
-
-//===----------------------------------------------------------------------===//
-// ISel Patterns
-//===----------------------------------------------------------------------===//
-
-// CND*_INT Patterns for f32 True / False values
-
-class CND_INT_f32 <InstR600 cnd, CondCode cc> : Pat <
-  (selectcc i32:$src0, 0, f32:$src1, f32:$src2, cc),
-  (cnd $src0, $src1, $src2)
->;
-
-def : CND_INT_f32 <CNDE_INT,  SETEQ>;
-def : CND_INT_f32 <CNDGT_INT, SETGT>;
-def : CND_INT_f32 <CNDGE_INT, SETGE>;
-
-// CNDGE_INT extra pattern
-def : Pat <
-  (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_SGT),
-  (CNDGE_INT $src0, $src1, $src2)
->;
-
-// KIL Patterns
-def KILP : Pat <
-  (int_AMDGPU_kilp),
-  (MASK_WRITE (KILLGT (f32 ONE), (f32 ZERO)))
->;
-
-def KIL : Pat <
-  (int_AMDGPU_kill f32:$src0),
-  (MASK_WRITE (KILLGT (f32 ZERO), $src0))
->;
-
-def : Extract_Element <f32, v4f32, 0, sub0>;
-def : Extract_Element <f32, v4f32, 1, sub1>;
-def : Extract_Element <f32, v4f32, 2, sub2>;
-def : Extract_Element <f32, v4f32, 3, sub3>;
-
-def : Insert_Element <f32, v4f32, 0, sub0>;
-def : Insert_Element <f32, v4f32, 1, sub1>;
-def : Insert_Element <f32, v4f32, 2, sub2>;
-def : Insert_Element <f32, v4f32, 3, sub3>;
-
-def : Extract_Element <i32, v4i32, 0, sub0>;
-def : Extract_Element <i32, v4i32, 1, sub1>;
-def : Extract_Element <i32, v4i32, 2, sub2>;
-def : Extract_Element <i32, v4i32, 3, sub3>;
-
-def : Insert_Element <i32, v4i32, 0, sub0>;
-def : Insert_Element <i32, v4i32, 1, sub1>;
-def : Insert_Element <i32, v4i32, 2, sub2>;
-def : Insert_Element <i32, v4i32, 3, sub3>;
-
-def : Extract_Element <f32, v2f32, 0, sub0>;
-def : Extract_Element <f32, v2f32, 1, sub1>;
-
-def : Insert_Element <f32, v2f32, 0, sub0>;
-def : Insert_Element <f32, v2f32, 1, sub1>;
-
-def : Extract_Element <i32, v2i32, 0, sub0>;
-def : Extract_Element <i32, v2i32, 1, sub1>;
-
-def : Insert_Element <i32, v2i32, 0, sub0>;
-def : Insert_Element <i32, v2i32, 1, sub1>;
-
-// bitconvert patterns
-
-def : BitConvert <i32, f32, R600_Reg32>;
-def : BitConvert <f32, i32, R600_Reg32>;
-def : BitConvert <v2f32, v2i32, R600_Reg64>;
-def : BitConvert <v2i32, v2f32, R600_Reg64>;
-def : BitConvert <v4f32, v4i32, R600_Reg128>;
-def : BitConvert <v4i32, v4f32, R600_Reg128>;
-
-// DWORDADDR pattern
-def : DwordAddrPat  <i32, R600_Reg32>;
-
-} // End isR600toCayman Predicate
-
-let Predicates = [isR600] in {
-// Intrinsic patterns
-defm : Expand24IBitOps<MULLO_INT_r600, ADD_INT>;
-defm : Expand24UBitOps<MULLO_UINT_r600, ADD_INT>;
-} // End isR600
-
-def getLDSNoRetOp : InstrMapping {
-  let FilterClass = "R600_LDS_1A1D";
-  let RowFields = ["BaseOp"];
-  let ColFields = ["DisableEncoding"];
-  let KeyCol = ["$dst"];
-  let ValueCols = [[""""]];
-}
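
The CND*_INT selectcc patterns above encode a compare-against-zero
conditional move. A minimal C++ sketch of the semantics they select for
(illustrative only, not part of the backend; the function names are mine):

    // Scalar semantics of the CND*_INT selectcc patterns (sketch).
    float cnde_int (int src0, float src1, float src2) { return src0 == 0 ? src1 : src2; }
    float cndgt_int(int src0, float src1, float src2) { return src0 >  0 ? src1 : src2; }
    float cndge_int(int src0, float src1, float src2) { return src0 >= 0 ? src1 : src2; }
    // The extra CNDGE_INT pattern is valid because (src0 > -1) == (src0 >= 0)
    // for any i32 value.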

Removed: llvm/trunk/lib/Target/R600/R600Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600Intrinsics.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600Intrinsics.td (original)
+++ llvm/trunk/lib/Target/R600/R600Intrinsics.td (removed)
@@ -1,75 +0,0 @@
-//===-- R600Intrinsics.td - R600 Intrinsic defs --------*- tablegen -*-----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// R600 Intrinsic Definitions
-//
-//===----------------------------------------------------------------------===//
-
-let TargetPrefix = "R600", isTarget = 1 in {
-  class TextureIntrinsicFloatInput :
-    Intrinsic<[llvm_v4f32_ty], [
-      llvm_v4f32_ty, // Coord
-      llvm_i32_ty, // offset_x
-      llvm_i32_ty, // offset_y,
-      llvm_i32_ty, // offset_z,
-      llvm_i32_ty, // resource_id
-      llvm_i32_ty, // samplerid
-      llvm_i32_ty, // coord_type_x
-      llvm_i32_ty, // coord_type_y
-      llvm_i32_ty, // coord_type_z
-      llvm_i32_ty // coord_type_w
-    ], [IntrNoMem]>;
-  class TextureIntrinsicInt32Input :
-    Intrinsic<[llvm_v4i32_ty], [
-      llvm_v4i32_ty, // Coord
-      llvm_i32_ty, // offset_x
-      llvm_i32_ty, // offset_y,
-      llvm_i32_ty, // offset_z,
-      llvm_i32_ty, // resource_id
-      llvm_i32_ty, // samplerid
-      llvm_i32_ty, // coord_type_x
-      llvm_i32_ty, // coord_type_y
-      llvm_i32_ty, // coord_type_z
-      llvm_i32_ty // coord_type_w
-    ], [IntrNoMem]>;
-
-  def int_R600_load_input :
-    Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_R600_interp_input :
-    Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_R600_interp_const :
-    Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_R600_interp_xy :
-    Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_R600_interp_zw :
-    Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_R600_load_texbuf :
-    Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_R600_tex : TextureIntrinsicFloatInput;
-  def int_R600_texc : TextureIntrinsicFloatInput;
-  def int_R600_txl : TextureIntrinsicFloatInput;
-  def int_R600_txlc : TextureIntrinsicFloatInput;
-  def int_R600_txb : TextureIntrinsicFloatInput;
-  def int_R600_txbc : TextureIntrinsicFloatInput;
-  def int_R600_txf : TextureIntrinsicInt32Input;
-  def int_R600_ldptr : TextureIntrinsicInt32Input;
-  def int_R600_txq : TextureIntrinsicInt32Input;
-  def int_R600_ddx : TextureIntrinsicFloatInput;
-  def int_R600_ddy : TextureIntrinsicFloatInput;
-  def int_R600_store_swizzle :
-    Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-  def int_R600_store_stream_output :
-    Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-  def int_R600_store_pixel_depth :
-      Intrinsic<[], [llvm_float_ty], []>;
-  def int_R600_store_pixel_stencil :
-      Intrinsic<[], [llvm_float_ty], []>;
-  def int_R600_store_dummy :
-      Intrinsic<[], [llvm_i32_ty], []>;
-}
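
The two texture intrinsic classes above share one positional operand layout.
The following C++ enum is only a reading aid for those i32 operands (the
names are mine, not LLVM API):

    // Positional operands of TextureIntrinsicFloatInput/Int32Input (sketch).
    enum TexIntrinsicOperand {
      Coord = 0,                                      // v4f32 / v4i32 coordinate
      OffsetX, OffsetY, OffsetZ,                      // i32 texel offsets
      ResourceId, SamplerId,                          // i32 resource / sampler ids
      CoordTypeX, CoordTypeY, CoordTypeZ, CoordTypeW  // i32 per-channel coord types
    };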

Removed: llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp (removed)
@@ -1,20 +0,0 @@
-//===-- R600MachineFunctionInfo.cpp - R600 Machine Function Info-*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#include "R600MachineFunctionInfo.h"
-
-using namespace llvm;
-
-
-// Pin the vtable to this file.
-void R600MachineFunctionInfo::anchor() {}
-
-R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
-  : AMDGPUMachineFunction(MF) { }
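
The out-of-line anchor() above is the usual LLVM idiom for pinning a class's
vtable to a single object file rather than emitting it in every translation
unit that includes the header. In general, self-contained form:

    struct Base {
      virtual void anchor();    // declared in the header ...
      virtual ~Base() = default;
    };
    void Base::anchor() {}      // ... defined once, in exactly one .cpp file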

Removed: llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h (original)
+++ llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h (removed)
@@ -1,34 +0,0 @@
-//===-- R600MachineFunctionInfo.h - R600 Machine Function Info ----*- C++ -*-=//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600MACHINEFUNCTIONINFO_H
-#define LLVM_LIB_TARGET_R600_R600MACHINEFUNCTIONINFO_H
-
-#include "AMDGPUMachineFunction.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include <vector>
-
-namespace llvm {
-
-class R600MachineFunctionInfo : public AMDGPUMachineFunction {
-  void anchor() override;
-public:
-  R600MachineFunctionInfo(const MachineFunction &MF);
-  SmallVector<unsigned, 4> LiveOuts;
-  std::vector<unsigned> IndirectRegs;
-  unsigned StackSize;
-};
-
-} // End llvm namespace
-
-#endif
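
Passes reach this object through the standard MachineFunction::getInfo<>
accessor. A hedged sketch of typical use (the helper function itself is
hypothetical):

    // Hypothetical helper showing how a pass would record a live-out.
    void recordLiveOut(llvm::MachineFunction &MF, unsigned Reg) {
      llvm::R600MachineFunctionInfo *MFI =
          MF.getInfo<llvm::R600MachineFunctionInfo>();
      MFI->LiveOuts.push_back(Reg);
    }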

Removed: llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp (removed)
@@ -1,469 +0,0 @@
-//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief R600 Machine Scheduler interface
-//
-//===----------------------------------------------------------------------===//
-
-#include "R600MachineScheduler.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Pass.h"
-#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "misched"
-
-void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
-  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
-  DAG = static_cast<ScheduleDAGMILive*>(dag);
-  const AMDGPUSubtarget &ST = DAG->MF.getSubtarget<AMDGPUSubtarget>();
-  TII = static_cast<const R600InstrInfo*>(DAG->TII);
-  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
-  VLIW5 = !ST.hasCaymanISA();
-  MRI = &DAG->MRI;
-  CurInstKind = IDOther;
-  CurEmitted = 0;
-  OccupedSlotsMask = 31;
-  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
-  InstKindLimit[IDOther] = 32;
-  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
-  AluInstCount = 0;
-  FetchInstCount = 0;
-}
-
-void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
-                                  std::vector<SUnit *> &QDst)
-{
-  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
-  QSrc.clear();
-}
-
-static
-unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
-  assert (GPRCount && "GPRCount cannot be 0");
-  return 248 / GPRCount;
-}
-
-SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
-  SUnit *SU = nullptr;
-  NextInstKind = IDOther;
-
-  IsTopNode = false;
-
-  // check if we might want to switch current clause type
-  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
-      (Available[CurInstKind].empty());
-  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
-      (!Available[IDFetch].empty() || !Available[IDOther].empty());
-
-  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
-    // We use the heuristic provided by the AMD Accelerated Parallel Processing
-    // OpenCL Programming Guide: the approx. number of WF needed to hide TEX
-    // latency behind ALU work is
-    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
-    float ALUFetchRationEstimate =
-        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
-        (FetchInstCount + Available[IDFetch].size());
-    if (ALUFetchRationEstimate == 0) {
-      AllowSwitchFromAlu = true;
-    } else {
-      unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
-      DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
-      // We assume the local GPR requirements to be "dominated" by the
-      // requirement of the TEX clause (which consumes 128-bit regs); ALU
-      // instructions before and after TEX are indeed likely to consume or
-      // generate values from/for the TEX clause.
-      // Available[IDFetch].size() * 2: GPRs required by the Fetch clause.
-      // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need
-      // one GPR) or TmXYZW = TnXYZW (need 2 GPRs).
-      // (TODO: use RegisterPressure)
-      // If we are going to use too many GPRs, we flush Fetch instructions to
-      // lower register pressure on 128-bit regs.
-      unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
-      if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
-        AllowSwitchFromAlu = true;
-    }
-  }
-
-  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
-      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
-    // try to pick ALU
-    SU = pickAlu();
-    if (!SU && !PhysicalRegCopy.empty()) {
-      SU = PhysicalRegCopy.front();
-      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
-    }
-    if (SU) {
-      if (CurEmitted >= InstKindLimit[IDAlu])
-        CurEmitted = 0;
-      NextInstKind = IDAlu;
-    }
-  }
-
-  if (!SU) {
-    // try to pick FETCH
-    SU = pickOther(IDFetch);
-    if (SU)
-      NextInstKind = IDFetch;
-  }
-
-  // try to pick other
-  if (!SU) {
-    SU = pickOther(IDOther);
-    if (SU)
-      NextInstKind = IDOther;
-  }
-
-  DEBUG(
-      if (SU) {
-        dbgs() << " ** Pick node **\n";
-        SU->dump(DAG);
-      } else {
-        dbgs() << "NO NODE \n";
-        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
-          const SUnit &S = DAG->SUnits[i];
-          if (!S.isScheduled)
-            S.dump(DAG);
-        }
-      }
-  );
-
-  return SU;
-}
-
-void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
-  if (NextInstKind != CurInstKind) {
-    DEBUG(dbgs() << "Instruction Type Switch\n");
-    if (NextInstKind != IDAlu)
-      OccupedSlotsMask |= 31;
-    CurEmitted = 0;
-    CurInstKind = NextInstKind;
-  }
-
-  if (CurInstKind == IDAlu) {
-    AluInstCount ++;
-    switch (getAluKind(SU)) {
-    case AluT_XYZW:
-      CurEmitted += 4;
-      break;
-    case AluDiscarded:
-      break;
-    default: {
-      ++CurEmitted;
-      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
-          E = SU->getInstr()->operands_end(); It != E; ++It) {
-        MachineOperand &MO = *It;
-        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
-          ++CurEmitted;
-      }
-    }
-    }
-  } else {
-    ++CurEmitted;
-  }
-
-
-  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");
-
-  if (CurInstKind != IDFetch) {
-    MoveUnits(Pending[IDFetch], Available[IDFetch]);
-  } else
-    FetchInstCount++;
-}
-
-static bool
-isPhysicalRegCopy(MachineInstr *MI) {
-  if (MI->getOpcode() != AMDGPU::COPY)
-    return false;
-
-  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
-}
-
-void R600SchedStrategy::releaseTopNode(SUnit *SU) {
-  DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
-}
-
-void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
-  DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
-  if (isPhysicalRegCopy(SU->getInstr())) {
-    PhysicalRegCopy.push_back(SU);
-    return;
-  }
-
-  int IK = getInstKind(SU);
-
-  // There is no export clause, so we can schedule one as soon as it's ready
-  if (IK == IDOther)
-    Available[IDOther].push_back(SU);
-  else
-    Pending[IK].push_back(SU);
-
-}
-
-bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
-                                          const TargetRegisterClass *RC) const {
-  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
-    return RC->contains(Reg);
-  } else {
-    return MRI->getRegClass(Reg) == RC;
-  }
-}
-
-R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
-  MachineInstr *MI = SU->getInstr();
-
-  if (TII->isTransOnly(MI))
-    return AluTrans;
-
-    switch (MI->getOpcode()) {
-    case AMDGPU::PRED_X:
-      return AluPredX;
-    case AMDGPU::INTERP_PAIR_XY:
-    case AMDGPU::INTERP_PAIR_ZW:
-    case AMDGPU::INTERP_VEC_LOAD:
-    case AMDGPU::DOT_4:
-      return AluT_XYZW;
-    case AMDGPU::COPY:
-      if (MI->getOperand(1).isUndef()) {
-        // MI will become a KILL; don't consider it in scheduling
-        return AluDiscarded;
-      }
-    default:
-      break;
-    }
-
-    // Does the instruction take a whole IG?
-    // XXX: Is it possible to add a helper function in R600InstrInfo that can
-    // be used here and in R600PacketizerList::isSoloInstruction()?
-    if(TII->isVector(*MI) ||
-        TII->isCubeOp(MI->getOpcode()) ||
-        TII->isReductionOp(MI->getOpcode()) ||
-        MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
-      return AluT_XYZW;
-    }
-
-    if (TII->isLDSInstr(MI->getOpcode())) {
-      return AluT_X;
-    }
-
-    // Is the result already assigned to a channel?
-    unsigned DestSubReg = MI->getOperand(0).getSubReg();
-    switch (DestSubReg) {
-    case AMDGPU::sub0:
-      return AluT_X;
-    case AMDGPU::sub1:
-      return AluT_Y;
-    case AMDGPU::sub2:
-      return AluT_Z;
-    case AMDGPU::sub3:
-      return AluT_W;
-    default:
-      break;
-    }
-
-    // Is the result already a member of an X/Y/Z/W class?
-    unsigned DestReg = MI->getOperand(0).getReg();
-    if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
-        regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
-      return AluT_X;
-    if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
-      return AluT_Y;
-    if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
-      return AluT_Z;
-    if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
-      return AluT_W;
-    if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
-      return AluT_XYZW;
-
-    // LDS src registers cannot be used in the Trans slot.
-    if (TII->readsLDSSrcReg(MI))
-      return AluT_XYZW;
-
-    return AluAny;
-
-}
-
-int R600SchedStrategy::getInstKind(SUnit* SU) {
-  int Opcode = SU->getInstr()->getOpcode();
-
-  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
-    return IDFetch;
-
-  if (TII->isALUInstr(Opcode)) {
-    return IDAlu;
-  }
-
-  switch (Opcode) {
-  case AMDGPU::PRED_X:
-  case AMDGPU::COPY:
-  case AMDGPU::CONST_COPY:
-  case AMDGPU::INTERP_PAIR_XY:
-  case AMDGPU::INTERP_PAIR_ZW:
-  case AMDGPU::INTERP_VEC_LOAD:
-  case AMDGPU::DOT_4:
-    return IDAlu;
-  default:
-    return IDOther;
-  }
-}
-
-SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
-  if (Q.empty())
-    return nullptr;
-  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
-      It != E; ++It) {
-    SUnit *SU = *It;
-    InstructionsGroupCandidate.push_back(SU->getInstr());
-    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
-        && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
-    ) {
-      InstructionsGroupCandidate.pop_back();
-      Q.erase((It + 1).base());
-      return SU;
-    } else {
-      InstructionsGroupCandidate.pop_back();
-    }
-  }
-  return nullptr;
-}
-
-void R600SchedStrategy::LoadAlu() {
-  std::vector<SUnit *> &QSrc = Pending[IDAlu];
-  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
-    AluKind AK = getAluKind(QSrc[i]);
-    AvailableAlus[AK].push_back(QSrc[i]);
-  }
-  QSrc.clear();
-}
-
-void R600SchedStrategy::PrepareNextSlot() {
-  DEBUG(dbgs() << "New Slot\n");
-  assert (OccupedSlotsMask && "Slot wasn't filled");
-  OccupedSlotsMask = 0;
-//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
-//    OccupedSlotsMask |= 16;
-  InstructionsGroupCandidate.clear();
-  LoadAlu();
-}
-
-void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
-  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
-  if (DstIndex == -1) {
-    return;
-  }
-  unsigned DestReg = MI->getOperand(DstIndex).getReg();
-  // PressureRegister crashes if an operand is defined and used in the same
-  // instruction and we try to constrain its regclass.
-  for (MachineInstr::mop_iterator It = MI->operands_begin(),
-      E = MI->operands_end(); It != E; ++It) {
-    MachineOperand &MO = *It;
-    if (MO.isReg() && !MO.isDef() &&
-        MO.getReg() == DestReg)
-      return;
-  }
-  // Constrains the regclass of DestReg to assign it to Slot
-  switch (Slot) {
-  case 0:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
-    break;
-  case 1:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
-    break;
-  case 2:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
-    break;
-  case 3:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
-    break;
-  }
-}
-
-SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
-  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
-  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
-  if (SlotedSU)
-    return SlotedSU;
-  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
-  if (UnslotedSU)
-    AssignSlot(UnslotedSU->getInstr(), Slot);
-  return UnslotedSU;
-}
-
-unsigned R600SchedStrategy::AvailablesAluCount() const {
-  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
-      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
-      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
-      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
-      AvailableAlus[AluPredX].size();
-}
-
-SUnit* R600SchedStrategy::pickAlu() {
-  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
-    if (!OccupedSlotsMask) {
-      // Bottom-up scheduling: predX must come first.
-      if (!AvailableAlus[AluPredX].empty()) {
-        OccupedSlotsMask |= 31;
-        return PopInst(AvailableAlus[AluPredX], false);
-      }
-      // Flush physical reg copies (RA will discard them)
-      if (!AvailableAlus[AluDiscarded].empty()) {
-        OccupedSlotsMask |= 31;
-        return PopInst(AvailableAlus[AluDiscarded], false);
-      }
-      // If there is a T_XYZW alu available, use it
-      if (!AvailableAlus[AluT_XYZW].empty()) {
-        OccupedSlotsMask |= 15;
-        return PopInst(AvailableAlus[AluT_XYZW], false);
-      }
-    }
-    bool TransSlotOccuped = OccupedSlotsMask & 16;
-    if (!TransSlotOccuped && VLIW5) {
-      if (!AvailableAlus[AluTrans].empty()) {
-        OccupedSlotsMask |= 16;
-        return PopInst(AvailableAlus[AluTrans], false);
-      }
-      SUnit *SU = AttemptFillSlot(3, true);
-      if (SU) {
-        OccupedSlotsMask |= 16;
-        return SU;
-      }
-    }
-    for (int Chan = 3; Chan > -1; --Chan) {
-      bool isOccupied = OccupedSlotsMask & (1 << Chan);
-      if (!isOccupied) {
-        SUnit *SU = AttemptFillSlot(Chan, false);
-        if (SU) {
-          OccupedSlotsMask |= (1 << Chan);
-          InstructionsGroupCandidate.push_back(SU->getInstr());
-          return SU;
-        }
-      }
-    }
-    PrepareNextSlot();
-  }
-  return nullptr;
-}
-
-SUnit* R600SchedStrategy::pickOther(int QID) {
-  SUnit *SU = nullptr;
-  std::vector<SUnit *> &AQ = Available[QID];
-
-  if (AQ.empty()) {
-    MoveUnits(Pending[QID], AQ);
-  }
-  if (!AQ.empty()) {
-    SU = AQ.back();
-    AQ.resize(AQ.size() - 1);
-  }
-  return SU;
-}
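
The clause-switch heuristic in pickNode() reduces to two numbers: roughly
62.5 / AluFetchRatio wavefronts are needed to hide TEX latency, while the
GPR budget caps the machine at 248 / GPRCount wavefronts. A standalone
recomputation with made-up example inputs:

    #include <cstdio>

    static unsigned wfLimitedByGPR(unsigned GPRCount) { return 248 / GPRCount; }

    int main() {
      float AluFetchRatio = 4.0f;  // ALU insts per fetch inst (example value)
      unsigned NeededWF = 62.5f / AluFetchRatio;  // 500 / (ratio * 8) => 15
      unsigned FetchGPRs = 2 * 8;  // ~2 GPRs per pending fetch, 8 fetches
      bool FlushFetch = NeededWF > wfLimitedByGPR(FetchGPRs);
      std::printf("need %u WF, GPR cap %u WF, flush fetch clause: %d\n",
                  NeededWF, wfLimitedByGPR(FetchGPRs), FlushFetch);
      return 0;
    }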

Removed: llvm/trunk/lib/Target/R600/R600MachineScheduler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600MachineScheduler.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600MachineScheduler.h (original)
+++ llvm/trunk/lib/Target/R600/R600MachineScheduler.h (removed)
@@ -1,103 +0,0 @@
-//===-- R600MachineScheduler.h - R600 Scheduler Interface -*- C++ -*-------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief R600 Machine Scheduler interface
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600MACHINESCHEDULER_H
-#define LLVM_LIB_TARGET_R600_R600MACHINESCHEDULER_H
-
-#include "R600InstrInfo.h"
-#include "llvm/ADT/PriorityQueue.h"
-#include "llvm/CodeGen/MachineScheduler.h"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-
-namespace llvm {
-
-class R600SchedStrategy : public MachineSchedStrategy {
-
-  const ScheduleDAGMILive *DAG;
-  const R600InstrInfo *TII;
-  const R600RegisterInfo *TRI;
-  MachineRegisterInfo *MRI;
-
-  enum InstKind {
-    IDAlu,
-    IDFetch,
-    IDOther,
-    IDLast
-  };
-
-  enum AluKind {
-    AluAny,
-    AluT_X,
-    AluT_Y,
-    AluT_Z,
-    AluT_W,
-    AluT_XYZW,
-    AluPredX,
-    AluTrans,
-    AluDiscarded, // LLVM Instructions that are going to be eliminated
-    AluLast
-  };
-
-  std::vector<SUnit *> Available[IDLast], Pending[IDLast];
-  std::vector<SUnit *> AvailableAlus[AluLast];
-  std::vector<SUnit *> PhysicalRegCopy;
-
-  InstKind CurInstKind;
-  int CurEmitted;
-  InstKind NextInstKind;
-
-  unsigned AluInstCount;
-  unsigned FetchInstCount;
-
-  int InstKindLimit[IDLast];
-
-  int OccupedSlotsMask;
-
-public:
-  R600SchedStrategy() :
-    DAG(nullptr), TII(nullptr), TRI(nullptr), MRI(nullptr) {
-  }
-
-  virtual ~R600SchedStrategy() {}
-
-  void initialize(ScheduleDAGMI *dag) override;
-  SUnit *pickNode(bool &IsTopNode) override;
-  void schedNode(SUnit *SU, bool IsTopNode) override;
-  void releaseTopNode(SUnit *SU) override;
-  void releaseBottomNode(SUnit *SU) override;
-
-private:
-  std::vector<MachineInstr *> InstructionsGroupCandidate;
-  bool VLIW5;
-
-  int getInstKind(SUnit *SU);
-  bool regBelongsToClass(unsigned Reg, const TargetRegisterClass *RC) const;
-  AluKind getAluKind(SUnit *SU) const;
-  void LoadAlu();
-  unsigned AvailablesAluCount() const;
-  SUnit *AttemptFillSlot (unsigned Slot, bool AnyAlu);
-  void PrepareNextSlot();
-  SUnit *PopInst(std::vector<SUnit*> &Q, bool AnyALU);
-
-  void AssignSlot(MachineInstr *MI, unsigned Slot);
-  SUnit* pickAlu();
-  SUnit* pickOther(int QID);
-  void MoveUnits(std::vector<SUnit *> &QSrc, std::vector<SUnit *> &QDst);
-};
-
-} // namespace llvm
-
-#endif // LLVM_LIB_TARGET_R600_R600MACHINESCHEDULER_H
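
A strategy like this is normally wrapped in a ScheduleDAGMILive by a factory
handed to the MachineScheduler pass; a sketch modeled on the generic
createGenericSchedLive pattern (the function name and registration point are
assumptions, not quoted from this commit):

    // Sketch: wrapping the strategy for the MachineScheduler pass.
    static llvm::ScheduleDAGInstrs *
    createR600Sched(llvm::MachineSchedContext *C) {
      return new llvm::ScheduleDAGMILive(
          C, llvm::make_unique<R600SchedStrategy>());
    }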

Removed: llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp (removed)
@@ -1,382 +0,0 @@
-//===------------------ R600OptimizeVectorRegisters.cpp -------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass merges the inputs of swizzleable instructions into vectors that
-/// share common data and/or have enough undef subregs, using swizzle abilities.
-///
-/// For instance, consider the following pseudo code:
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
-/// ...
-/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub1, sub2, sub3
-///
-/// is turned into:
-/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
-/// ...
-/// vreg7<def> = INSERT_SUBREG vreg4, sub3
-/// (swizzable Inst) vreg7, SwizzleMask : sub0, sub2, sub1, sub3
-///
-/// This allows regalloc to reduce register pressure for vector registers and
-/// to reduce the MOV count.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "R600InstrInfo.h"
-#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "vec-merger"
-
-namespace {
-
-static bool
-isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
-  for (MachineRegisterInfo::def_instr_iterator It = MRI.def_instr_begin(Reg),
-      E = MRI.def_instr_end(); It != E; ++It) {
-    return (*It).isImplicitDef();
-  }
-  if (MRI.isReserved(Reg)) {
-    return false;
-  }
-  llvm_unreachable("Reg without a def");
-  return false;
-}
-
-class RegSeqInfo {
-public:
-  MachineInstr *Instr;
-  DenseMap<unsigned, unsigned> RegToChan;
-  std::vector<unsigned> UndefReg;
-  RegSeqInfo(MachineRegisterInfo &MRI, MachineInstr *MI) : Instr(MI) {
-    assert(MI->getOpcode() == AMDGPU::REG_SEQUENCE);
-    for (unsigned i = 1, e = Instr->getNumOperands(); i < e; i+=2) {
-      MachineOperand &MO = Instr->getOperand(i);
-      unsigned Chan = Instr->getOperand(i + 1).getImm();
-      if (isImplicitlyDef(MRI, MO.getReg()))
-        UndefReg.push_back(Chan);
-      else
-        RegToChan[MO.getReg()] = Chan;
-    }
-  }
-  RegSeqInfo() {}
-
-  bool operator==(const RegSeqInfo &RSI) const {
-    return RSI.Instr == Instr;
-  }
-};
-
-class R600VectorRegMerger : public MachineFunctionPass {
-private:
-  MachineRegisterInfo *MRI;
-  const R600InstrInfo *TII;
-  bool canSwizzle(const MachineInstr &) const;
-  bool areAllUsesSwizzeable(unsigned Reg) const;
-  void SwizzleInput(MachineInstr &,
-      const std::vector<std::pair<unsigned, unsigned> > &) const;
-  bool tryMergeVector(const RegSeqInfo *, RegSeqInfo *,
-      std::vector<std::pair<unsigned, unsigned> > &Remap) const;
-  bool tryMergeUsingCommonSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
-      std::vector<std::pair<unsigned, unsigned> > &RemapChan);
-  bool tryMergeUsingFreeSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
-      std::vector<std::pair<unsigned, unsigned> > &RemapChan);
-  MachineInstr *RebuildVector(RegSeqInfo *MI,
-      const RegSeqInfo *BaseVec,
-      const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const;
-  void RemoveMI(MachineInstr *);
-  void trackRSI(const RegSeqInfo &RSI);
-
-  typedef DenseMap<unsigned, std::vector<MachineInstr *> > InstructionSetMap;
-  DenseMap<MachineInstr *, RegSeqInfo> PreviousRegSeq;
-  InstructionSetMap PreviousRegSeqByReg;
-  InstructionSetMap PreviousRegSeqByUndefCount;
-public:
-  static char ID;
-  R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID),
-  TII(nullptr) { }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addPreserved<MachineDominatorTree>();
-    AU.addRequired<MachineLoopInfo>();
-    AU.addPreserved<MachineLoopInfo>();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  const char *getPassName() const override {
-    return "R600 Vector Registers Merge Pass";
-  }
-
-  bool runOnMachineFunction(MachineFunction &Fn) override;
-};
-
-char R600VectorRegMerger::ID = 0;
-
-bool R600VectorRegMerger::canSwizzle(const MachineInstr &MI)
-    const {
-  if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
-    return true;
-  switch (MI.getOpcode()) {
-  case AMDGPU::R600_ExportSwz:
-  case AMDGPU::EG_ExportSwz:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool R600VectorRegMerger::tryMergeVector(const RegSeqInfo *Untouched,
-    RegSeqInfo *ToMerge, std::vector< std::pair<unsigned, unsigned> > &Remap)
-    const {
-  unsigned CurrentUndexIdx = 0;
-  for (DenseMap<unsigned, unsigned>::iterator It = ToMerge->RegToChan.begin(),
-      E = ToMerge->RegToChan.end(); It != E; ++It) {
-    DenseMap<unsigned, unsigned>::const_iterator PosInUntouched =
-        Untouched->RegToChan.find((*It).first);
-    if (PosInUntouched != Untouched->RegToChan.end()) {
-      Remap.push_back(std::pair<unsigned, unsigned>
-          ((*It).second, (*PosInUntouched).second));
-      continue;
-    }
-    if (CurrentUndexIdx >= Untouched->UndefReg.size())
-      return false;
-    Remap.push_back(std::pair<unsigned, unsigned>
-        ((*It).second, Untouched->UndefReg[CurrentUndexIdx++]));
-  }
-
-  return true;
-}
-
-static
-unsigned getReassignedChan(
-    const std::vector<std::pair<unsigned, unsigned> > &RemapChan,
-    unsigned Chan) {
-  for (unsigned j = 0, je = RemapChan.size(); j < je; j++) {
-    if (RemapChan[j].first == Chan)
-      return RemapChan[j].second;
-  }
-  llvm_unreachable("Chan wasn't reassigned");
-}
-
-MachineInstr *R600VectorRegMerger::RebuildVector(
-    RegSeqInfo *RSI, const RegSeqInfo *BaseRSI,
-    const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
-  unsigned Reg = RSI->Instr->getOperand(0).getReg();
-  MachineBasicBlock::iterator Pos = RSI->Instr;
-  MachineBasicBlock &MBB = *Pos->getParent();
-  DebugLoc DL = Pos->getDebugLoc();
-
-  unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg();
-  DenseMap<unsigned, unsigned> UpdatedRegToChan = BaseRSI->RegToChan;
-  std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg;
-  for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(),
-      E = RSI->RegToChan.end(); It != E; ++It) {
-    unsigned DstReg = MRI->createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-    unsigned SubReg = (*It).first;
-    unsigned Swizzle = (*It).second;
-    unsigned Chan = getReassignedChan(RemapChan, Swizzle);
-
-    MachineInstr *Tmp = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::INSERT_SUBREG),
-        DstReg)
-        .addReg(SrcVec)
-        .addReg(SubReg)
-        .addImm(Chan);
-    UpdatedRegToChan[SubReg] = Chan;
-    std::vector<unsigned>::iterator ChanPos =
-        std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan);
-    if (ChanPos != UpdatedUndef.end())
-      UpdatedUndef.erase(ChanPos);
-    assert(std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan) ==
-               UpdatedUndef.end() &&
-           "UpdatedUndef shouldn't contain Chan more than once!");
-    DEBUG(dbgs() << "    ->"; Tmp->dump(););
-    (void)Tmp;
-    SrcVec = DstReg;
-  }
-  Pos = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg)
-      .addReg(SrcVec);
-  DEBUG(dbgs() << "    ->"; Pos->dump(););
-
-  DEBUG(dbgs() << "  Updating Swizzle:\n");
-  for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
-      E = MRI->use_instr_end(); It != E; ++It) {
-    DEBUG(dbgs() << "    ";(*It).dump(); dbgs() << "    ->");
-    SwizzleInput(*It, RemapChan);
-    DEBUG((*It).dump());
-  }
-  RSI->Instr->eraseFromParent();
-
-  // Update RSI
-  RSI->Instr = Pos;
-  RSI->RegToChan = UpdatedRegToChan;
-  RSI->UndefReg = UpdatedUndef;
-
-  return Pos;
-}
-
-void R600VectorRegMerger::RemoveMI(MachineInstr *MI) {
-  for (InstructionSetMap::iterator It = PreviousRegSeqByReg.begin(),
-      E = PreviousRegSeqByReg.end(); It != E; ++It) {
-    std::vector<MachineInstr *> &MIs = (*It).second;
-    MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end());
-  }
-  for (InstructionSetMap::iterator It = PreviousRegSeqByUndefCount.begin(),
-      E = PreviousRegSeqByUndefCount.end(); It != E; ++It) {
-    std::vector<MachineInstr *> &MIs = (*It).second;
-    MIs.erase(std::find(MIs.begin(), MIs.end(), MI), MIs.end());
-  }
-}
-
-void R600VectorRegMerger::SwizzleInput(MachineInstr &MI,
-    const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
-  unsigned Offset;
-  if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
-    Offset = 2;
-  else
-    Offset = 3;
-  for (unsigned i = 0; i < 4; i++) {
-    unsigned Swizzle = MI.getOperand(i + Offset).getImm() + 1;
-    for (unsigned j = 0, e = RemapChan.size(); j < e; j++) {
-      if (RemapChan[j].first == Swizzle) {
-        MI.getOperand(i + Offset).setImm(RemapChan[j].second - 1);
-        break;
-      }
-    }
-  }
-}
-
-bool R600VectorRegMerger::areAllUsesSwizzeable(unsigned Reg) const {
-  for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
-      E = MRI->use_instr_end(); It != E; ++It) {
-    if (!canSwizzle(*It))
-      return false;
-  }
-  return true;
-}
-
-bool R600VectorRegMerger::tryMergeUsingCommonSlot(RegSeqInfo &RSI,
-    RegSeqInfo &CompatibleRSI,
-    std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
-  for (MachineInstr::mop_iterator MOp = RSI.Instr->operands_begin(),
-      MOE = RSI.Instr->operands_end(); MOp != MOE; ++MOp) {
-    if (!MOp->isReg())
-      continue;
-    if (PreviousRegSeqByReg[MOp->getReg()].empty())
-      continue;
-    for (MachineInstr *MI : PreviousRegSeqByReg[MOp->getReg()]) {
-      CompatibleRSI = PreviousRegSeq[MI];
-      if (RSI == CompatibleRSI)
-        continue;
-      if (tryMergeVector(&CompatibleRSI, &RSI, RemapChan))
-        return true;
-    }
-  }
-  return false;
-}
-
-bool R600VectorRegMerger::tryMergeUsingFreeSlot(RegSeqInfo &RSI,
-    RegSeqInfo &CompatibleRSI,
-    std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
-  unsigned NeededUndefs = 4 - RSI.UndefReg.size();
-  if (PreviousRegSeqByUndefCount[NeededUndefs].empty())
-    return false;
-  std::vector<MachineInstr *> &MIs =
-      PreviousRegSeqByUndefCount[NeededUndefs];
-  CompatibleRSI = PreviousRegSeq[MIs.back()];
-  tryMergeVector(&CompatibleRSI, &RSI, RemapChan);
-  return true;
-}
-
-void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) {
-  for (DenseMap<unsigned, unsigned>::const_iterator
-  It = RSI.RegToChan.begin(), E = RSI.RegToChan.end(); It != E; ++It) {
-    PreviousRegSeqByReg[(*It).first].push_back(RSI.Instr);
-  }
-  PreviousRegSeqByUndefCount[RSI.UndefReg.size()].push_back(RSI.Instr);
-  PreviousRegSeq[RSI.Instr] = RSI;
-}
-
-bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
-  TII = static_cast<const R600InstrInfo *>(Fn.getSubtarget().getInstrInfo());
-  MRI = &(Fn.getRegInfo());
-  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
-       MBB != MBBe; ++MBB) {
-    MachineBasicBlock *MB = MBB;
-    PreviousRegSeq.clear();
-    PreviousRegSeqByReg.clear();
-    PreviousRegSeqByUndefCount.clear();
-
-    for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end();
-         MII != MIIE; ++MII) {
-      MachineInstr *MI = MII;
-      if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) {
-        if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
-          unsigned Reg = MI->getOperand(1).getReg();
-          for (MachineRegisterInfo::def_instr_iterator
-               It = MRI->def_instr_begin(Reg), E = MRI->def_instr_end();
-               It != E; ++It) {
-            RemoveMI(&(*It));
-          }
-        }
-        continue;
-      }
-
-
-      RegSeqInfo RSI(*MRI, MI);
-
-      // Are all uses of MI swizzleable?
-      unsigned Reg = MI->getOperand(0).getReg();
-      if (!areAllUsesSwizzeable(Reg))
-        continue;
-
-      DEBUG(dbgs() << "Trying to optimize "; MI->dump(););
-
-      RegSeqInfo CandidateRSI;
-      std::vector<std::pair<unsigned, unsigned> > RemapChan;
-      DEBUG(dbgs() << "Using common slots...\n";);
-      if (tryMergeUsingCommonSlot(RSI, CandidateRSI, RemapChan)) {
-        // Remove CandidateRSI mapping
-        RemoveMI(CandidateRSI.Instr);
-        MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
-        trackRSI(RSI);
-        continue;
-      }
-      DEBUG(dbgs() << "Using free slots...\n";);
-      RemapChan.clear();
-      if (tryMergeUsingFreeSlot(RSI, CandidateRSI, RemapChan)) {
-        RemoveMI(CandidateRSI.Instr);
-        MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
-        trackRSI(RSI);
-        continue;
-      }
-      // Failed to merge.
-      trackRSI(RSI);
-    }
-  }
-  return false;
-}
-
-} // end anonymous namespace
-
-llvm::FunctionPass *llvm::createR600VectorRegMerger(TargetMachine &tm) {
-  return new R600VectorRegMerger(tm);
-}
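
The RemapChan vector threaded through tryMergeVector(), RebuildVector() and
SwizzleInput() is just a list of (old lane -> lane in the merged vector)
pairs. A self-contained C++ sketch of the lookup getReassignedChan() performs:

    #include <cassert>
    #include <utility>
    #include <vector>

    using ChanRemap = std::vector<std::pair<unsigned, unsigned>>;

    static unsigned reassignedChan(const ChanRemap &RemapChan, unsigned Chan) {
      for (const auto &P : RemapChan)
        if (P.first == Chan)
          return P.second;  // old lane Chan now lives in lane P.second
      assert(false && "Chan wasn't reassigned");
      return ~0u;
    }

    int main() {
      // Example: lane 0 stays put, lane 1 moves into the base vector's lane 2.
      ChanRemap RemapChan = {{0, 0}, {1, 2}};
      assert(reassignedChan(RemapChan, 1) == 2);
      return 0;
    }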

Removed: llvm/trunk/lib/Target/R600/R600Packetizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600Packetizer.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600Packetizer.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600Packetizer.cpp (removed)
@@ -1,408 +0,0 @@
-//===----- R600Packetizer.cpp - VLIW packetizer ---------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass implements instruction packetization for R600. It unsets the
-/// isLast bit of instructions inside a bundle and substitutes src registers
-/// with PreviousVector when applicable.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/Debug.h"
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "R600InstrInfo.h"
-#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "packets"
-
-namespace {
-
-class R600Packetizer : public MachineFunctionPass {
-
-public:
-  static char ID;
-  R600Packetizer(const TargetMachine &TM) : MachineFunctionPass(ID) {}
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addPreserved<MachineDominatorTree>();
-    AU.addRequired<MachineLoopInfo>();
-    AU.addPreserved<MachineLoopInfo>();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  const char *getPassName() const override {
-    return "R600 Packetizer";
-  }
-
-  bool runOnMachineFunction(MachineFunction &Fn) override;
-};
-char R600Packetizer::ID = 0;
-
-class R600PacketizerList : public VLIWPacketizerList {
-
-private:
-  const R600InstrInfo *TII;
-  const R600RegisterInfo &TRI;
-  bool VLIW5;
-  bool ConsideredInstUsesAlreadyWrittenVectorElement;
-
-  unsigned getSlot(const MachineInstr *MI) const {
-    return TRI.getHWRegChan(MI->getOperand(0).getReg());
-  }
-
-  /// \returns the register-to-PV-channel mapping for the bundle or single
-  /// instruction that immediately precedes I.
-  DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I)
-      const {
-    DenseMap<unsigned, unsigned> Result;
-    I--;
-    if (!TII->isALUInstr(I->getOpcode()) && !I->isBundle())
-      return Result;
-    MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
-    if (I->isBundle())
-      BI++;
-    int LastDstChan = -1;
-    do {
-      bool isTrans = false;
-      int BISlot = getSlot(BI);
-      if (LastDstChan >= BISlot)
-        isTrans = true;
-      LastDstChan = BISlot;
-      if (TII->isPredicated(BI))
-        continue;
-      int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
-      if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
-        continue;
-      int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst);
-      if (DstIdx == -1) {
-        continue;
-      }
-      unsigned Dst = BI->getOperand(DstIdx).getReg();
-      if (isTrans || TII->isTransOnly(BI)) {
-        Result[Dst] = AMDGPU::PS;
-        continue;
-      }
-      if (BI->getOpcode() == AMDGPU::DOT4_r600 ||
-          BI->getOpcode() == AMDGPU::DOT4_eg) {
-        Result[Dst] = AMDGPU::PV_X;
-        continue;
-      }
-      if (Dst == AMDGPU::OQAP) {
-        continue;
-      }
-      unsigned PVReg = 0;
-      switch (TRI.getHWRegChan(Dst)) {
-      case 0:
-        PVReg = AMDGPU::PV_X;
-        break;
-      case 1:
-        PVReg = AMDGPU::PV_Y;
-        break;
-      case 2:
-        PVReg = AMDGPU::PV_Z;
-        break;
-      case 3:
-        PVReg = AMDGPU::PV_W;
-        break;
-      default:
-        llvm_unreachable("Invalid Chan");
-      }
-      Result[Dst] = PVReg;
-    } while ((++BI)->isBundledWithPred());
-    return Result;
-  }
-
-  void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
-      const {
-    unsigned Ops[] = {
-      AMDGPU::OpName::src0,
-      AMDGPU::OpName::src1,
-      AMDGPU::OpName::src2
-    };
-    for (unsigned i = 0; i < 3; i++) {
-      int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
-      if (OperandIdx < 0)
-        continue;
-      unsigned Src = MI->getOperand(OperandIdx).getReg();
-      const DenseMap<unsigned, unsigned>::const_iterator It = PVs.find(Src);
-      if (It != PVs.end())
-        MI->getOperand(OperandIdx).setReg(It->second);
-    }
-  }
-public:
-  // Ctor.
-  R600PacketizerList(MachineFunction &MF, MachineLoopInfo &MLI)
-      : VLIWPacketizerList(MF, MLI, true),
-        TII(static_cast<const R600InstrInfo *>(
-            MF.getSubtarget().getInstrInfo())),
-        TRI(TII->getRegisterInfo()) {
-    VLIW5 = !MF.getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
-  }
-
-  // initPacketizerState - initialize some internal flags.
-  void initPacketizerState() override {
-    ConsideredInstUsesAlreadyWrittenVectorElement = false;
-  }
-
-  // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
-  bool ignorePseudoInstruction(MachineInstr *MI,
-                               MachineBasicBlock *MBB) override {
-    return false;
-  }
-
-  // isSoloInstruction - return true if instruction MI cannot be packetized
-  // with any other instruction, which means that MI itself is a packet.
-  bool isSoloInstruction(MachineInstr *MI) override {
-    if (TII->isVector(*MI))
-      return true;
-    if (!TII->isALUInstr(MI->getOpcode()))
-      return true;
-    if (MI->getOpcode() == AMDGPU::GROUP_BARRIER)
-      return true;
-    // XXX: This can be removed once the packetizer properly handles all the
-    // LDS instruction group restrictions.
-    if (TII->isLDSInstr(MI->getOpcode()))
-      return true;
-    return false;
-  }
-
-  // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
-  // together.
-  bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
-    MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr();
-    if (getSlot(MII) == getSlot(MIJ))
-      ConsideredInstUsesAlreadyWrittenVectorElement = true;
-    // Do MII and MIJ share the same pred_sel?
-    int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
-        OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel);
-    unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
-        PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
-    if (PredI != PredJ)
-      return false;
-    if (SUJ->isSucc(SUI)) {
-      for (unsigned i = 0, e = SUJ->Succs.size(); i < e; ++i) {
-        const SDep &Dep = SUJ->Succs[i];
-        if (Dep.getSUnit() != SUI)
-          continue;
-        if (Dep.getKind() == SDep::Anti)
-          continue;
-        if (Dep.getKind() == SDep::Output)
-          if (MII->getOperand(0).getReg() != MIJ->getOperand(0).getReg())
-            continue;
-        return false;
-      }
-    }
-
-    bool ARDef = TII->definesAddressRegister(MII) ||
-                 TII->definesAddressRegister(MIJ);
-    bool ARUse = TII->usesAddressRegister(MII) ||
-                 TII->usesAddressRegister(MIJ);
-    if (ARDef && ARUse)
-      return false;
-
-    return true;
-  }
-
-  // isLegalToPruneDependencies - Is it legal to prune the dependence between
-  // SUI and SUJ?
-  bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override {
-    return false;
-  }
-
-  void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
-    unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
-    MI->getOperand(LastOp).setImm(Bit);
-  }
-
-  bool isBundlableWithCurrentPMI(MachineInstr *MI,
-                                 const DenseMap<unsigned, unsigned> &PV,
-                                 std::vector<R600InstrInfo::BankSwizzle> &BS,
-                                 bool &isTransSlot) {
-    isTransSlot = TII->isTransOnly(MI);
-    assert (!isTransSlot || VLIW5);
-
-    // Is the dst reg sequence legal?
-    if (!isTransSlot && !CurrentPacketMIs.empty()) {
-      if (getSlot(MI) <= getSlot(CurrentPacketMIs.back())) {
-        if (ConsideredInstUsesAlreadyWrittenVectorElement  &&
-            !TII->isVectorOnly(MI) && VLIW5) {
-          isTransSlot = true;
-          DEBUG(dbgs() << "Considering as Trans Inst :"; MI->dump(););
-        }
-        else
-          return false;
-      }
-    }
-
-    // Are the constant-read limitations met?
-    CurrentPacketMIs.push_back(MI);
-    if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) {
-      DEBUG(
-        dbgs() << "Couldn't pack :\n";
-        MI->dump();
-        dbgs() << "with the following packets :\n";
-        for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) {
-          CurrentPacketMIs[i]->dump();
-          dbgs() << "\n";
-        }
-        dbgs() << "because of Consts read limitations\n";
-      );
-      CurrentPacketMIs.pop_back();
-      return false;
-    }
-
-    // Is there a BankSwizzle set that meets the read port limitations?
-    if (!TII->fitsReadPortLimitations(CurrentPacketMIs,
-            PV, BS, isTransSlot)) {
-      DEBUG(
-        dbgs() << "Couldn't pack :\n";
-        MI->dump();
-        dbgs() << "with the following packets :\n";
-        for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) {
-          CurrentPacketMIs[i]->dump();
-          dbgs() << "\n";
-        }
-        dbgs() << "because of Read port limitations\n";
-      );
-      CurrentPacketMIs.pop_back();
-      return false;
-    }
-
-    // We cannot read LDS source registers from the Trans slot.
-    if (isTransSlot && TII->readsLDSSrcReg(MI))
-      return false;
-
-    CurrentPacketMIs.pop_back();
-    return true;
-  }
-
-  MachineBasicBlock::iterator addToPacket(MachineInstr *MI) override {
-    MachineBasicBlock::iterator FirstInBundle =
-        CurrentPacketMIs.empty() ? MI : CurrentPacketMIs.front();
-    const DenseMap<unsigned, unsigned> &PV =
-        getPreviousVector(FirstInBundle);
-    std::vector<R600InstrInfo::BankSwizzle> BS;
-    bool isTransSlot;
-
-    if (isBundlableWithCurrentPMI(MI, PV, BS, isTransSlot)) {
-      for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) {
-        MachineInstr *MI = CurrentPacketMIs[i];
-        unsigned Op = TII->getOperandIdx(MI->getOpcode(),
-            AMDGPU::OpName::bank_swizzle);
-        MI->getOperand(Op).setImm(BS[i]);
-      }
-      unsigned Op = TII->getOperandIdx(MI->getOpcode(),
-          AMDGPU::OpName::bank_swizzle);
-      MI->getOperand(Op).setImm(BS.back());
-      if (!CurrentPacketMIs.empty())
-        setIsLastBit(CurrentPacketMIs.back(), 0);
-      substitutePV(MI, PV);
-      MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI);
-      if (isTransSlot) {
-        endPacket(std::next(It)->getParent(), std::next(It));
-      }
-      return It;
-    }
-    endPacket(MI->getParent(), MI);
-    if (TII->isTransOnly(MI))
-      return MI;
-    return VLIWPacketizerList::addToPacket(MI);
-  }
-};
-
-bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
-  const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
-  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
-
-  // Instantiate the packetizer.
-  R600PacketizerList Packetizer(Fn, MLI);
-
-  // DFA state table should not be empty.
-  assert(Packetizer.getResourceTracker() && "Empty DFA table!");
-
-  //
-  // Loop over all basic blocks and remove KILL pseudo-instructions.
-  // These instructions confuse the dependence analysis. Consider:
-  // D0 = ...   (Insn 0)
-  // R0 = KILL R0, D0 (Insn 1)
-  // R0 = ... (Insn 2)
-  // Here, Insn 1 will result in the dependence graph not emitting an output
-  // dependence between Insn 0 and Insn 2. This can lead to incorrect
-  // packetization.
-  //
-  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
-       MBB != MBBe; ++MBB) {
-    MachineBasicBlock::iterator End = MBB->end();
-    MachineBasicBlock::iterator MI = MBB->begin();
-    while (MI != End) {
-      if (MI->isKill() || MI->getOpcode() == AMDGPU::IMPLICIT_DEF ||
-          (MI->getOpcode() == AMDGPU::CF_ALU && !MI->getOperand(8).getImm())) {
-        MachineBasicBlock::iterator DeleteMI = MI;
-        ++MI;
-        MBB->erase(DeleteMI);
-        End = MBB->end();
-        continue;
-      }
-      ++MI;
-    }
-  }
-
-  // Loop over all of the basic blocks.
-  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
-       MBB != MBBe; ++MBB) {
-    // Find scheduling regions and schedule / packetize each region.
-    unsigned RemainingCount = MBB->size();
-    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
-        RegionEnd != MBB->begin();) {
-      // The next region starts above the previous region. Look backward in the
-      // instruction stream until we find the nearest boundary.
-      MachineBasicBlock::iterator I = RegionEnd;
-      for(;I != MBB->begin(); --I, --RemainingCount) {
-        if (TII->isSchedulingBoundary(std::prev(I), MBB, Fn))
-          break;
-      }
-      I = MBB->begin();
-
-      // Skip empty scheduling regions.
-      if (I == RegionEnd) {
-        RegionEnd = std::prev(RegionEnd);
-        --RemainingCount;
-        continue;
-      }
-      // Skip regions with one instruction.
-      if (I == std::prev(RegionEnd)) {
-        RegionEnd = std::prev(RegionEnd);
-        continue;
-      }
-
-      Packetizer.PacketizeMIs(MBB, I, RegionEnd);
-      RegionEnd = I;
-    }
-  }
-
-  return true;
-}
-
-} // end anonymous namespace
-
-llvm::FunctionPass *llvm::createR600Packetizer(TargetMachine &tm) {
-  return new R600Packetizer(tm);
-}
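
For illustration, here is a minimal standalone C++ model (not LLVM code; the
boundary predicate and instruction type are stand-ins) of the backward region
walk that runOnMachineFunction performs above: scan back from the end of the
block to the nearest scheduling boundary, packetize the region, then continue
above it. The real pass also skips single-instruction regions, which this
sketch omits for brevity.

    // region_walk.cpp - standalone model of the backward region walk.
    #include <cstdio>
    #include <vector>

    static bool isBoundary(int Insn) { return Insn < 0; } // stand-in predicate

    int main() {
      // Negative entries model scheduling boundaries (e.g. CF instructions).
      std::vector<int> Block = {1, 2, -1, 3, 4, 5, -1, 6};
      size_t RegionEnd = Block.size();
      while (RegionEnd > 0) {
        size_t I = RegionEnd;
        while (I > 0 && !isBoundary(Block[I - 1]))
          --I;
        if (I == RegionEnd) {  // empty region: step over the boundary
          --RegionEnd;
          continue;
        }
        std::printf("packetize region [%zu, %zu)\n", I, RegionEnd);
        RegionEnd = I;         // the next region ends where this one began
      }
      return 0;
    }

This prints the regions [7, 8), [3, 6) and [0, 2) for the sample block.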

Removed: llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp (removed)
@@ -1,91 +0,0 @@
-//===-- R600RegisterInfo.cpp - R600 Register Information ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief R600 implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "R600RegisterInfo.h"
-#include "AMDGPUTargetMachine.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-
-using namespace llvm;
-
-R600RegisterInfo::R600RegisterInfo() : AMDGPURegisterInfo() {
-  RCW.RegWeight = 0;
-  RCW.WeightLimit = 0;
-}
-
-BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
-  BitVector Reserved(getNumRegs());
-
-  const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
-
-  Reserved.set(AMDGPU::ZERO);
-  Reserved.set(AMDGPU::HALF);
-  Reserved.set(AMDGPU::ONE);
-  Reserved.set(AMDGPU::ONE_INT);
-  Reserved.set(AMDGPU::NEG_HALF);
-  Reserved.set(AMDGPU::NEG_ONE);
-  Reserved.set(AMDGPU::PV_X);
-  Reserved.set(AMDGPU::ALU_LITERAL_X);
-  Reserved.set(AMDGPU::ALU_CONST);
-  Reserved.set(AMDGPU::PREDICATE_BIT);
-  Reserved.set(AMDGPU::PRED_SEL_OFF);
-  Reserved.set(AMDGPU::PRED_SEL_ZERO);
-  Reserved.set(AMDGPU::PRED_SEL_ONE);
-  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
-
-  for (TargetRegisterClass::iterator I = AMDGPU::R600_AddrRegClass.begin(),
-                        E = AMDGPU::R600_AddrRegClass.end(); I != E; ++I) {
-    Reserved.set(*I);
-  }
-
-  TII->reserveIndirectRegisters(Reserved, MF);
-
-  return Reserved;
-}
-
-unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const {
-  return this->getEncodingValue(reg) >> HW_CHAN_SHIFT;
-}
-
-unsigned R600RegisterInfo::getHWRegIndex(unsigned Reg) const {
-  return GET_REG_INDEX(getEncodingValue(Reg));
-}
-
-const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
-                                                                   MVT VT) const {
-  switch(VT.SimpleTy) {
-  default:
-  case MVT::i32: return &AMDGPU::R600_TReg32RegClass;
-  }
-}
-
-const RegClassWeight &R600RegisterInfo::getRegClassWeight(
-  const TargetRegisterClass *RC) const {
-  return RCW;
-}
-
-bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
-  assert(!TargetRegisterInfo::isVirtualRegister(Reg));
-
-  switch (Reg) {
-  case AMDGPU::OQAP:
-  case AMDGPU::OQBP:
-  case AMDGPU::AR_X:
-    return false;
-  default:
-    return true;
-  }
-}
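
As a side note, getHWRegChan() and getHWRegIndex() above simply unpack the
HWEncoding fields assigned in R600RegisterInfo.td (sel in bits 8-0, channel in
bits 10-9). A standalone sketch of that decode, assuming HW_CHAN_SHIFT == 9
and a 0x1FF index mask as defined in R600Defines.h:

    #include <cstdint>

    constexpr unsigned HW_CHAN_SHIFT = 9;    // assumed, per R600Defines.h
    constexpr unsigned HW_REG_MASK = 0x1FF;  // assumed, per R600Defines.h

    constexpr unsigned hwRegChan(uint16_t Encoding) {
      return Encoding >> HW_CHAN_SHIFT;
    }
    constexpr unsigned hwRegIndex(uint16_t Encoding) {
      return Encoding & HW_REG_MASK;
    }

    int main() {
      // T5_Z: sel = 5, channel Z = 2, so HWEncoding = (2 << 9) | 5.
      constexpr uint16_t T5_Z = (2u << 9) | 5u;
      static_assert(hwRegChan(T5_Z) == 2, "channel Z");
      static_assert(hwRegIndex(T5_Z) == 5, "register index");
      return 0;
    }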

Removed: llvm/trunk/lib/Target/R600/R600RegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600RegisterInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600RegisterInfo.h (original)
+++ llvm/trunk/lib/Target/R600/R600RegisterInfo.h (removed)
@@ -1,49 +0,0 @@
-//===-- R600RegisterInfo.h - R600 Register Info Interface ------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface definition for R600RegisterInfo
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600REGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_R600REGISTERINFO_H
-
-#include "AMDGPURegisterInfo.h"
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-
-struct R600RegisterInfo : public AMDGPURegisterInfo {
-  RegClassWeight RCW;
-
-  R600RegisterInfo();
-
-  BitVector getReservedRegs(const MachineFunction &MF) const override;
-
-  /// \brief get the HW encoding for a register's channel.
-  unsigned getHWRegChan(unsigned reg) const;
-
-  unsigned getHWRegIndex(unsigned Reg) const override;
-
-  /// \brief get the register class of the specified type to use in the
-  /// CFGStructurizer
-  const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const override;
-
-  const RegClassWeight &
-    getRegClassWeight(const TargetRegisterClass *RC) const override;
-
-  /// \returns true if \p Reg can be defined in one ALU clause and used in another.
-  bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/R600RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600RegisterInfo.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600RegisterInfo.td (original)
+++ llvm/trunk/lib/Target/R600/R600RegisterInfo.td (removed)
@@ -1,252 +0,0 @@
-
-class R600Reg <string name, bits<16> encoding> : Register<name> {
-  let Namespace = "AMDGPU";
-  let HWEncoding = encoding;
-}
-
-class R600RegWithChan <string name, bits<9> sel, string chan> :
-    Register <name> {
-
-  field bits<2> chan_encoding = !if(!eq(chan, "X"), 0,
-                                !if(!eq(chan, "Y"), 1,
-                                !if(!eq(chan, "Z"), 2,
-                                !if(!eq(chan, "W"), 3, 0))));
-  let HWEncoding{8-0}  = sel;
-  let HWEncoding{10-9} = chan_encoding;
-  let Namespace = "AMDGPU";
-}
-
-class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
-    RegisterWithSubRegs<n, subregs> {
-  field bits<2> chan_encoding = 0;
-  let Namespace = "AMDGPU";
-  let SubRegIndices = [sub0, sub1, sub2, sub3];
-  let HWEncoding{8-0} = encoding{8-0};
-  let HWEncoding{10-9} = chan_encoding;
-}
-
-class R600Reg_64<string n, list<Register> subregs, bits<16> encoding> :
-    RegisterWithSubRegs<n, subregs> {
-  field bits<2> chan_encoding = 0;
-  let Namespace = "AMDGPU";
-  let SubRegIndices = [sub0, sub1];
-  let HWEncoding = encoding;
-  let HWEncoding{8-0} = encoding{8-0};
-  let HWEncoding{10-9} = chan_encoding;
-}
-
-class R600Reg_64Vertical<int lo, int hi, string chan> : R600Reg_64 <
-  "V"#lo#hi#"_"#chan,
-  [!cast<Register>("T"#lo#"_"#chan), !cast<Register>("T"#hi#"_"#chan)],
-  lo
->;
-
-foreach Index = 0-127 in {
-  foreach Chan = [ "X", "Y", "Z", "W" ] in {
-    // 32-bit Temporary Registers
-    def T#Index#_#Chan : R600RegWithChan <"T"#Index#"."#Chan, Index, Chan>;
-
-    // Indirect addressing offset registers
-    def Addr#Index#_#Chan : R600RegWithChan <"T("#Index#" + AR.x)."#Chan,
-                                              Index, Chan>;
-  }
-  // 128-bit Temporary Registers
-  def T#Index#_XYZW : R600Reg_128 <"T"#Index#"",
-                                   [!cast<Register>("T"#Index#"_X"),
-                                    !cast<Register>("T"#Index#"_Y"),
-                                    !cast<Register>("T"#Index#"_Z"),
-                                    !cast<Register>("T"#Index#"_W")],
-                                   Index>;
-
-  def T#Index#_XY : R600Reg_64 <"T"#Index#"",
-                                   [!cast<Register>("T"#Index#"_X"),
-                                    !cast<Register>("T"#Index#"_Y")],
-                                   Index>;
-}
-
-foreach Chan = [ "X", "Y", "Z", "W"] in {
-
-  let chan_encoding = !if(!eq(Chan, "X"), 0,
-                      !if(!eq(Chan, "Y"), 1,
-                      !if(!eq(Chan, "Z"), 2,
-                      !if(!eq(Chan, "W"), 3, 0)))) in {
-    def V0123_#Chan : R600Reg_128 <"V0123_"#Chan,
-                                   [!cast<Register>("T0_"#Chan),
-                                    !cast<Register>("T1_"#Chan),
-                                    !cast<Register>("T2_"#Chan),
-                                    !cast<Register>("T3_"#Chan)],
-                                    0>;
-    def V01_#Chan : R600Reg_64Vertical<0, 1, Chan>;
-    def V23_#Chan : R600Reg_64Vertical<2, 3, Chan>;
-  }
-}
-
-
-// KCACHE_BANK0
-foreach Index = 159-128 in {
-  foreach Chan = [ "X", "Y", "Z", "W" ] in {
-    // 32-bit Temporary Registers
-    def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#!add(Index,-128)#"]."#Chan, Index, Chan>;
-  }
-  // 128-bit Temporary Registers
-  def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#!add(Index, -128)#"].XYZW",
-                                 [!cast<Register>("KC0_"#Index#"_X"),
-                                  !cast<Register>("KC0_"#Index#"_Y"),
-                                  !cast<Register>("KC0_"#Index#"_Z"),
-                                  !cast<Register>("KC0_"#Index#"_W")],
-                                 Index>;
-}
-
-// KCACHE_BANK1
-foreach Index = 191-160 in {
-  foreach Chan = [ "X", "Y", "Z", "W" ] in {
-    // 32-bit Temporary Registers
-    def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#!add(Index,-160)#"]."#Chan, Index, Chan>;
-  }
-  // 128-bit Temporary Registers
-  def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#!add(Index, -160)#"].XYZW",
-                                 [!cast<Register>("KC1_"#Index#"_X"),
-                                  !cast<Register>("KC1_"#Index#"_Y"),
-                                  !cast<Register>("KC1_"#Index#"_Z"),
-                                  !cast<Register>("KC1_"#Index#"_W")],
-                                 Index>;
-}
-
-
-// Array Base Registers holding inputs in FS
-foreach Index = 448-480 in {
-  def ArrayBase#Index :  R600Reg<"ARRAY_BASE", Index>;
-}
-
-
-// Special Registers
-
-def OQA : R600Reg<"OQA", 219>;
-def OQB : R600Reg<"OQB", 220>;
-def OQAP : R600Reg<"OQAP", 221>;
-def OQBP : R600Reg<"OQAP", 222>;
-def LDS_DIRECT_A : R600Reg<"LDS_DIRECT_A", 223>;
-def LDS_DIRECT_B : R600Reg<"LDS_DIRECT_B", 224>;
-def ZERO : R600Reg<"0.0", 248>;
-def ONE : R600Reg<"1.0", 249>;
-def NEG_ONE : R600Reg<"-1.0", 249>;
-def ONE_INT : R600Reg<"1", 250>;
-def HALF : R600Reg<"0.5", 252>;
-def NEG_HALF : R600Reg<"-0.5", 252>;
-def ALU_LITERAL_X : R600RegWithChan<"literal.x", 253, "X">;
-def ALU_LITERAL_Y : R600RegWithChan<"literal.y", 253, "Y">;
-def ALU_LITERAL_Z : R600RegWithChan<"literal.z", 253, "Z">;
-def ALU_LITERAL_W : R600RegWithChan<"literal.w", 253, "W">;
-def PV_X : R600RegWithChan<"PV.X", 254, "X">;
-def PV_Y : R600RegWithChan<"PV.Y", 254, "Y">;
-def PV_Z : R600RegWithChan<"PV.Z", 254, "Z">;
-def PV_W : R600RegWithChan<"PV.W", 254, "W">;
-def PS: R600Reg<"PS", 255>;
-def PREDICATE_BIT : R600Reg<"PredicateBit", 0>;
-def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>;
-def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>;
-def PRED_SEL_ONE : R600Reg<"Pred_sel_one", 3>;
-def AR_X : R600Reg<"AR.x", 0>;
-
-def R600_ArrayBase : RegisterClass <"AMDGPU", [f32, i32], 32,
-                          (add (sequence "ArrayBase%u", 448, 480))>;
-// special registers for ALU src operands
-// const buffer reference, SRCx_SEL contains index
-def ALU_CONST : R600Reg<"CBuf", 0>;
-// interpolation param reference, SRCx_SEL contains index
-def ALU_PARAM : R600Reg<"Param", 0>;
-
-let isAllocatable = 0 in {
-
-def R600_Addr : RegisterClass <"AMDGPU", [i32], 32, (add (sequence "Addr%u_X", 0, 127))>;
-
-// We only use Addr_[YZW] for vertical vectors.
-// FIXME: if we add more vertical vector registers we will need to add more
-// registers to these classes.
-def R600_Addr_Y : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Y)>;
-def R600_Addr_Z : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Z)>;
-def R600_Addr_W : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_W)>;
-
-def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
-  (add OQA, OQB, OQAP, OQBP, LDS_DIRECT_A, LDS_DIRECT_B)>;
-
-def R600_KC0_X : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC0_%u_X", 128, 159))>;
-
-def R600_KC0_Y : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC0_%u_Y", 128, 159))>;
-
-def R600_KC0_Z : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC0_%u_Z", 128, 159))>;
-
-def R600_KC0_W : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC0_%u_W", 128, 159))>;
-
-def R600_KC0 : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (interleave R600_KC0_X, R600_KC0_Y,
-                                               R600_KC0_Z, R600_KC0_W)>;
-
-def R600_KC1_X : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC1_%u_X", 160, 191))>;
-
-def R600_KC1_Y : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC1_%u_Y", 160, 191))>;
-
-def R600_KC1_Z : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC1_%u_Z", 160, 191))>;
-
-def R600_KC1_W : RegisterClass <"AMDGPU", [f32, i32], 32,
-                              (add (sequence "KC1_%u_W", 160, 191))>;
-
-def R600_KC1 : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (interleave R600_KC1_X, R600_KC1_Y,
-                                               R600_KC1_Z, R600_KC1_W)>;
-
-} // End isAllocatable = 0
-
-def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (add (sequence "T%u_X", 0, 127), AR_X)>;
-
-def R600_TReg32_Y : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (add (sequence "T%u_Y", 0, 127))>;
-
-def R600_TReg32_Z : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (add (sequence "T%u_Z", 0, 127))>;
-
-def R600_TReg32_W : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (add (sequence "T%u_W", 0, 127))>;
-
-def R600_TReg32 : RegisterClass <"AMDGPU", [f32, i32], 32,
-                                   (interleave R600_TReg32_X, R600_TReg32_Y,
-                                               R600_TReg32_Z, R600_TReg32_W)>;
-
-def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
-    R600_TReg32,
-    R600_ArrayBase,
-    R600_Addr,
-    R600_KC0, R600_KC1,
-    ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF,
-    ALU_CONST, ALU_PARAM, OQAP
-    )>;
-
-def R600_Predicate : RegisterClass <"AMDGPU", [i32], 32, (add
-    PRED_SEL_OFF, PRED_SEL_ZERO, PRED_SEL_ONE)>;
-
-def R600_Predicate_Bit: RegisterClass <"AMDGPU", [i32], 32, (add
-    PREDICATE_BIT)>;
-
-def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
-                                (add (sequence "T%u_XYZW", 0, 127))> {
-  let CopyCost = -1;
-}
-
-def R600_Reg128Vertical : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
-  (add V0123_W, V0123_Z, V0123_Y, V0123_X)
->;
-
-def R600_Reg64 : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
-                                (add (sequence "T%u_XY", 0, 63))>;
-
-def R600_Reg64Vertical : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
-                                      (add V01_X, V01_Y, V01_Z, V01_W,
-                                           V23_X, V23_Y, V23_Z, V23_W)>;
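The chan_encoding chain of !if operators above maps X/Y/Z/W to 0-3 and packs
the result into bits 10-9 of HWEncoding, next to sel in bits 8-0. A small
standalone C++ model of that composition (illustrative only; it mirrors the
TableGen, it is not generated from it):

    #include <cstdint>

    constexpr unsigned chanEncoding(char Chan) {
      return Chan == 'Y' ? 1 : Chan == 'Z' ? 2 : Chan == 'W' ? 3 : 0;
    }

    constexpr uint16_t hwEncoding(unsigned Sel, char Chan) {
      return static_cast<uint16_t>((chanEncoding(Chan) << 9) | (Sel & 0x1FF));
    }

    int main() {
      static_assert(hwEncoding(5, 'Z') == ((2u << 9) | 5u), "T5_Z");
      static_assert(hwEncoding(130, 'Y') == ((1u << 9) | 130u), "KC0_130_Y");
      return 0;
    }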

Removed: llvm/trunk/lib/Target/R600/R600Schedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600Schedule.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600Schedule.td (original)
+++ llvm/trunk/lib/Target/R600/R600Schedule.td (removed)
@@ -1,49 +0,0 @@
-//===-- R600Schedule.td - R600 Scheduling definitions ------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// R600 has a VLIW architecture.  On pre-Cayman cards there are 5 instruction
-// slots: ALU.X, ALU.Y, ALU.Z, ALU.W, and TRANS.  For Cayman cards, the TRANS
-// slot has been removed.
-//
-//===----------------------------------------------------------------------===//
-
-
-def ALU_X : FuncUnit;
-def ALU_Y : FuncUnit;
-def ALU_Z : FuncUnit;
-def ALU_W : FuncUnit;
-def TRANS : FuncUnit;
-
-def AnyALU : InstrItinClass;
-def VecALU : InstrItinClass;
-def TransALU : InstrItinClass;
-def XALU : InstrItinClass;
-
-def R600_VLIW5_Itin : ProcessorItineraries <
-  [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL],
-  [],
-  [
-    InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>,
-    InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
-    InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>,
-    InstrItinData<XALU, [InstrStage<1, [ALU_X]>]>,
-    InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
-  ]
->;
-
-def R600_VLIW4_Itin : ProcessorItineraries <
-  [ALU_X, ALU_Y, ALU_Z, ALU_W, ALU_NULL],
-  [],
-  [
-    InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
-    InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
-    InstrItinData<TransALU, [InstrStage<1, [ALU_NULL]>]>,
-    InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
-  ]
->;
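
The two itineraries differ only in where TransALU work can issue: on the VLIW5
machine it goes to the TRANS unit, while on VLIW4 (Cayman) that unit no longer
exists. A small illustrative lookup capturing that difference (unit names
mirror the definitions above; the ALU_NULL fallback is how the VLIW4 itinerary
models the missing slot):

    #include <cstdio>
    #include <string>
    #include <vector>

    enum class Itin { AnyALU, VecALU, TransALU };

    static std::vector<std::string> unitsFor(Itin Class, bool IsVLIW5) {
      std::vector<std::string> Vec = {"ALU_X", "ALU_Y", "ALU_Z", "ALU_W"};
      switch (Class) {
      case Itin::AnyALU:
        if (IsVLIW5)
          Vec.push_back("TRANS");
        return Vec;
      case Itin::VecALU:
        return Vec;
      case Itin::TransALU:
        return IsVLIW5 ? std::vector<std::string>{"TRANS"}
                       : std::vector<std::string>{"ALU_NULL"};
      }
      return {};
    }

    int main() {
      for (const std::string &U : unitsFor(Itin::TransALU, /*IsVLIW5=*/false))
        std::printf("TransALU on VLIW4 -> %s\n", U.c_str());
      return 0;
    }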

Removed: llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp (removed)
@@ -1,303 +0,0 @@
-//===-- R600TextureIntrinsicsReplacer.cpp ---------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass translates TGSI-like texture intrinsics into R600 texture
-/// intrinsics that are closer to the hardware.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstVisitor.h"
-
-using namespace llvm;
-
-namespace {
-class R600TextureIntrinsicsReplacer :
-    public FunctionPass, public InstVisitor<R600TextureIntrinsicsReplacer> {
-  static char ID;
-
-  Module *Mod;
-  Type *FloatType;
-  Type *Int32Type;
-  Type *V4f32Type;
-  Type *V4i32Type;
-  FunctionType *TexSign;
-  FunctionType *TexQSign;
-
-  void getAdjustmentFromTextureTarget(unsigned TextureType, bool hasLOD,
-                                      unsigned SrcSelect[4], unsigned CT[4],
-                                      bool &useShadowVariant) {
-    enum TextureTypes {
-      TEXTURE_1D = 1,
-      TEXTURE_2D,
-      TEXTURE_3D,
-      TEXTURE_CUBE,
-      TEXTURE_RECT,
-      TEXTURE_SHADOW1D,
-      TEXTURE_SHADOW2D,
-      TEXTURE_SHADOWRECT,
-      TEXTURE_1D_ARRAY,
-      TEXTURE_2D_ARRAY,
-      TEXTURE_SHADOW1D_ARRAY,
-      TEXTURE_SHADOW2D_ARRAY,
-      TEXTURE_SHADOWCUBE,
-      TEXTURE_2D_MSAA,
-      TEXTURE_2D_ARRAY_MSAA,
-      TEXTURE_CUBE_ARRAY,
-      TEXTURE_SHADOWCUBE_ARRAY
-    };
-
-    switch (TextureType) {
-    case 0:
-      useShadowVariant = false;
-      return;
-    case TEXTURE_RECT:
-    case TEXTURE_1D:
-    case TEXTURE_2D:
-    case TEXTURE_3D:
-    case TEXTURE_CUBE:
-    case TEXTURE_1D_ARRAY:
-    case TEXTURE_2D_ARRAY:
-    case TEXTURE_CUBE_ARRAY:
-    case TEXTURE_2D_MSAA:
-    case TEXTURE_2D_ARRAY_MSAA:
-      useShadowVariant = false;
-      break;
-    case TEXTURE_SHADOW1D:
-    case TEXTURE_SHADOW2D:
-    case TEXTURE_SHADOWRECT:
-    case TEXTURE_SHADOW1D_ARRAY:
-    case TEXTURE_SHADOW2D_ARRAY:
-    case TEXTURE_SHADOWCUBE:
-    case TEXTURE_SHADOWCUBE_ARRAY:
-      useShadowVariant = true;
-      break;
-    default:
-      llvm_unreachable("Unknow Texture Type");
-    }
-
-    if (TextureType == TEXTURE_RECT ||
-        TextureType == TEXTURE_SHADOWRECT) {
-      CT[0] = 0;
-      CT[1] = 0;
-    }
-
-    if (TextureType == TEXTURE_CUBE_ARRAY ||
-        TextureType == TEXTURE_SHADOWCUBE_ARRAY)
-      CT[2] = 0;
-
-    if (TextureType == TEXTURE_1D_ARRAY ||
-        TextureType == TEXTURE_SHADOW1D_ARRAY) {
-      if (hasLOD && useShadowVariant) {
-        CT[1] = 0;
-      } else {
-        CT[2] = 0;
-        SrcSelect[2] = 1;
-      }
-    } else if (TextureType == TEXTURE_2D_ARRAY ||
-        TextureType == TEXTURE_SHADOW2D_ARRAY) {
-      CT[2] = 0;
-    }
-
-    if ((TextureType == TEXTURE_SHADOW1D ||
-        TextureType == TEXTURE_SHADOW2D ||
-        TextureType == TEXTURE_SHADOWRECT ||
-        TextureType == TEXTURE_SHADOW1D_ARRAY) &&
-        !(hasLOD && useShadowVariant))
-      SrcSelect[3] = 2;
-  }
-
-  void ReplaceCallInst(CallInst &I, FunctionType *FT, const char *Name,
-                       unsigned SrcSelect[4], Value *Offset[3], Value *Resource,
-                       Value *Sampler, unsigned CT[4], Value *Coord) {
-    IRBuilder<> Builder(&I);
-    Constant *Mask[] = {
-      ConstantInt::get(Int32Type, SrcSelect[0]),
-      ConstantInt::get(Int32Type, SrcSelect[1]),
-      ConstantInt::get(Int32Type, SrcSelect[2]),
-      ConstantInt::get(Int32Type, SrcSelect[3])
-    };
-    Value *SwizzleMask = ConstantVector::get(Mask);
-    Value *SwizzledCoord =
-        Builder.CreateShuffleVector(Coord, Coord, SwizzleMask);
-
-    Value *Args[] = {
-      SwizzledCoord,
-      Offset[0],
-      Offset[1],
-      Offset[2],
-      Resource,
-      Sampler,
-      ConstantInt::get(Int32Type, CT[0]),
-      ConstantInt::get(Int32Type, CT[1]),
-      ConstantInt::get(Int32Type, CT[2]),
-      ConstantInt::get(Int32Type, CT[3])
-    };
-
-    Function *F = Mod->getFunction(Name);
-    if (!F) {
-      F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, Mod);
-      F->addFnAttr(Attribute::ReadNone);
-    }
-    I.replaceAllUsesWith(Builder.CreateCall(F, Args));
-    I.eraseFromParent();
-  }
-
-  void ReplaceTexIntrinsic(CallInst &I, bool hasLOD, FunctionType *FT,
-                           const char *VanillaInt,
-                           const char *ShadowInt) {
-    Value *Coord = I.getArgOperand(0);
-    Value *ResourceId = I.getArgOperand(1);
-    Value *SamplerId = I.getArgOperand(2);
-
-    unsigned TextureType =
-        cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
-
-    unsigned SrcSelect[4] = { 0, 1, 2, 3 };
-    unsigned CT[4] = {1, 1, 1, 1};
-    Value *Offset[3] = {
-      ConstantInt::get(Int32Type, 0),
-      ConstantInt::get(Int32Type, 0),
-      ConstantInt::get(Int32Type, 0)
-    };
-    bool useShadowVariant;
-
-    getAdjustmentFromTextureTarget(TextureType, hasLOD, SrcSelect, CT,
-                                   useShadowVariant);
-
-    ReplaceCallInst(I, FT, useShadowVariant ? ShadowInt : VanillaInt, SrcSelect,
-                    Offset, ResourceId, SamplerId, CT, Coord);
-  }
-
-  void ReplaceTXF(CallInst &I) {
-    Value *Coord = I.getArgOperand(0);
-    Value *ResourceId = I.getArgOperand(4);
-    Value *SamplerId = I.getArgOperand(5);
-
-    unsigned TextureType =
-        cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
-
-    unsigned SrcSelect[4] = { 0, 1, 2, 3 };
-    unsigned CT[4] = {1, 1, 1, 1};
-    Value *Offset[3] = {
-      I.getArgOperand(1),
-      I.getArgOperand(2),
-      I.getArgOperand(3),
-    };
-    bool useShadowVariant;
-
-    getAdjustmentFromTextureTarget(TextureType, false, SrcSelect, CT,
-                                   useShadowVariant);
-
-    ReplaceCallInst(I, TexQSign, "llvm.R600.txf", SrcSelect,
-                    Offset, ResourceId, SamplerId, CT, Coord);
-  }
-
-public:
-  R600TextureIntrinsicsReplacer():
-    FunctionPass(ID) {
-  }
-
-  bool doInitialization(Module &M) override {
-    LLVMContext &Ctx = M.getContext();
-    Mod = &M;
-    FloatType = Type::getFloatTy(Ctx);
-    Int32Type = Type::getInt32Ty(Ctx);
-    V4f32Type = VectorType::get(FloatType, 4);
-    V4i32Type = VectorType::get(Int32Type, 4);
-    Type *ArgsType[] = {
-      V4f32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-    };
-    TexSign = FunctionType::get(V4f32Type, ArgsType, /*isVarArg=*/false);
-    Type *ArgsQType[] = {
-      V4i32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-      Int32Type,
-    };
-    TexQSign = FunctionType::get(V4f32Type, ArgsQType, /*isVarArg=*/false);
-    return false;
-  }
-
-  bool runOnFunction(Function &F) override {
-    visit(F);
-    return false;
-  }
-
-  const char *getPassName() const override {
-    return "R600 Texture Intrinsics Replacer";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-  }
-
-  void visitCallInst(CallInst &I) {
-    if (!I.getCalledFunction())
-      return;
-
-    StringRef Name = I.getCalledFunction()->getName();
-    if (Name == "llvm.AMDGPU.tex") {
-      ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.tex", "llvm.R600.texc");
-      return;
-    }
-    if (Name == "llvm.AMDGPU.txl") {
-      ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txl", "llvm.R600.txlc");
-      return;
-    }
-    if (Name == "llvm.AMDGPU.txb") {
-      ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txb", "llvm.R600.txbc");
-      return;
-    }
-    if (Name == "llvm.AMDGPU.txf") {
-      ReplaceTXF(I);
-      return;
-    }
-    if (Name == "llvm.AMDGPU.txq") {
-      ReplaceTexIntrinsic(I, false, TexQSign, "llvm.R600.txq", "llvm.R600.txq");
-      return;
-    }
-    if (Name == "llvm.AMDGPU.ddx") {
-      ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddx", "llvm.R600.ddx");
-      return;
-    }
-    if (Name == "llvm.AMDGPU.ddy") {
-      ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddy", "llvm.R600.ddy");
-      return;
-    }
-  }
-
-};
-
-char R600TextureIntrinsicsReplacer::ID = 0;
-
-}
-
-FunctionPass *llvm::createR600TextureIntrinsicsReplacer() {
-  return new R600TextureIntrinsicsReplacer();
-}
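
An illustrative standalone trace (assumed semantics, inferred from the pass
above) of the SrcSelect adjustment getAdjustmentFromTextureTarget makes for
TEXTURE_SHADOW2D without LOD: the shadow ("texc") variant is selected and the
compare value is taken from coordinate channel 2 by routing it into the W slot
of the swizzle:

    #include <cassert>

    int main() {
      unsigned SrcSelect[4] = {0, 1, 2, 3}; // identity swizzle .xyzw
      bool HasLOD = false;
      bool UseShadowVariant = true;         // TEXTURE_SHADOW2D

      if (!(HasLOD && UseShadowVariant))
        SrcSelect[3] = 2;                   // mirrors the pass's final clause

      // The pass then emits a shufflevector with this mask, so the sampled
      // coordinate becomes .xyzz and the comparison reference rides in W.
      assert(SrcSelect[0] == 0 && SrcSelect[1] == 1 &&
             SrcSelect[2] == 2 && SrcSelect[3] == 2);
      return 0;
    }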

Removed: llvm/trunk/lib/Target/R600/R700Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R700Instructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R700Instructions.td (original)
+++ llvm/trunk/lib/Target/R600/R700Instructions.td (removed)
@@ -1,21 +0,0 @@
-//===-- R700Instructions.td - R700 Instruction defs  -------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// TableGen definitions for instructions which are:
-// - Available to R700 and newer VLIW4/VLIW5 GPUs
-// - Available only on R700 family GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-def isR700 : Predicate<"Subtarget->getGeneration() == AMDGPUSubtarget::R700">;
-
-let Predicates = [isR700] in {
-  def SIN_r700 : SIN_Common<0x6E>;
-  def COS_r700 : COS_Common<0x6F>;
-}

Removed: llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp (removed)
@@ -1,365 +0,0 @@
-//===-- SIAnnotateControlFlow.cpp - Annotate control flow ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Annotates the control flow with hardware-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Pass.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "si-annotate-control-flow"
-
-namespace {
-
-// Complex types used in this pass
-typedef std::pair<BasicBlock *, Value *> StackEntry;
-typedef SmallVector<StackEntry, 16> StackVector;
-
-// Intrinsic names the control flow is annotated with
-static const char *const IfIntrinsic = "llvm.SI.if";
-static const char *const ElseIntrinsic = "llvm.SI.else";
-static const char *const BreakIntrinsic = "llvm.SI.break";
-static const char *const IfBreakIntrinsic = "llvm.SI.if.break";
-static const char *const ElseBreakIntrinsic = "llvm.SI.else.break";
-static const char *const LoopIntrinsic = "llvm.SI.loop";
-static const char *const EndCfIntrinsic = "llvm.SI.end.cf";
-
-class SIAnnotateControlFlow : public FunctionPass {
-
-  static char ID;
-
-  Type *Boolean;
-  Type *Void;
-  Type *Int64;
-  Type *ReturnStruct;
-
-  ConstantInt *BoolTrue;
-  ConstantInt *BoolFalse;
-  UndefValue *BoolUndef;
-  Constant *Int64Zero;
-
-  Constant *If;
-  Constant *Else;
-  Constant *Break;
-  Constant *IfBreak;
-  Constant *ElseBreak;
-  Constant *Loop;
-  Constant *EndCf;
-
-  DominatorTree *DT;
-  StackVector Stack;
-
-  LoopInfo *LI;
-
-  bool isTopOfStack(BasicBlock *BB);
-
-  Value *popSaved();
-
-  void push(BasicBlock *BB, Value *Saved);
-
-  bool isElse(PHINode *Phi);
-
-  void eraseIfUnused(PHINode *Phi);
-
-  void openIf(BranchInst *Term);
-
-  void insertElse(BranchInst *Term);
-
-  Value *handleLoopCondition(Value *Cond, PHINode *Broken, llvm::Loop *L);
-
-  void handleLoop(BranchInst *Term);
-
-  void closeControlFlow(BasicBlock *BB);
-
-public:
-  SIAnnotateControlFlow():
-    FunctionPass(ID) { }
-
-  bool doInitialization(Module &M) override;
-
-  bool runOnFunction(Function &F) override;
-
-  const char *getPassName() const override {
-    return "SI annotate control flow";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<LoopInfoWrapperPass>();
-    AU.addRequired<DominatorTreeWrapperPass>();
-    AU.addPreserved<DominatorTreeWrapperPass>();
-    FunctionPass::getAnalysisUsage(AU);
-  }
-
-};
-
-} // end anonymous namespace
-
-char SIAnnotateControlFlow::ID = 0;
-
-/// \brief Initialize all the types and constants used in the pass
-bool SIAnnotateControlFlow::doInitialization(Module &M) {
-  LLVMContext &Context = M.getContext();
-
-  Void = Type::getVoidTy(Context);
-  Boolean = Type::getInt1Ty(Context);
-  Int64 = Type::getInt64Ty(Context);
-  ReturnStruct = StructType::get(Boolean, Int64, (Type *)nullptr);
-
-  BoolTrue = ConstantInt::getTrue(Context);
-  BoolFalse = ConstantInt::getFalse(Context);
-  BoolUndef = UndefValue::get(Boolean);
-  Int64Zero = ConstantInt::get(Int64, 0);
-
-  If = M.getOrInsertFunction(
-    IfIntrinsic, ReturnStruct, Boolean, (Type *)nullptr);
-
-  Else = M.getOrInsertFunction(
-    ElseIntrinsic, ReturnStruct, Int64, (Type *)nullptr);
-
-  Break = M.getOrInsertFunction(
-    BreakIntrinsic, Int64, Int64, (Type *)nullptr);
-
-  IfBreak = M.getOrInsertFunction(
-    IfBreakIntrinsic, Int64, Boolean, Int64, (Type *)nullptr);
-
-  ElseBreak = M.getOrInsertFunction(
-    ElseBreakIntrinsic, Int64, Int64, Int64, (Type *)nullptr);
-
-  Loop = M.getOrInsertFunction(
-    LoopIntrinsic, Boolean, Int64, (Type *)nullptr);
-
-  EndCf = M.getOrInsertFunction(
-    EndCfIntrinsic, Void, Int64, (Type *)nullptr);
-
-  return false;
-}
-
-/// \brief Is BB the last block saved on the stack?
-bool SIAnnotateControlFlow::isTopOfStack(BasicBlock *BB) {
-  return !Stack.empty() && Stack.back().first == BB;
-}
-
-/// \brief Pop the last saved value from the control flow stack
-Value *SIAnnotateControlFlow::popSaved() {
-  return Stack.pop_back_val().second;
-}
-
-/// \brief Push a BB and saved value to the control flow stack
-void SIAnnotateControlFlow::push(BasicBlock *BB, Value *Saved) {
-  Stack.push_back(std::make_pair(BB, Saved));
-}
-
-/// \brief Can the condition represented by this PHI node be treated like
-/// an "Else" block?
-bool SIAnnotateControlFlow::isElse(PHINode *Phi) {
-  BasicBlock *IDom = DT->getNode(Phi->getParent())->getIDom()->getBlock();
-  for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
-    if (Phi->getIncomingBlock(i) == IDom) {
-
-      if (Phi->getIncomingValue(i) != BoolTrue)
-        return false;
-
-    } else {
-      if (Phi->getIncomingValue(i) != BoolFalse)
-        return false;
-
-    }
-  }
-  return true;
-}
-
-// \brief Erase "Phi" if it is not used any more
-void SIAnnotateControlFlow::eraseIfUnused(PHINode *Phi) {
-  if (!Phi->hasNUsesOrMore(1))
-    Phi->eraseFromParent();
-}
-
-/// \brief Open a new "If" block
-void SIAnnotateControlFlow::openIf(BranchInst *Term) {
-  Value *Ret = CallInst::Create(If, Term->getCondition(), "", Term);
-  Term->setCondition(ExtractValueInst::Create(Ret, 0, "", Term));
-  push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term));
-}
-
-/// \brief Close the last "If" block and open a new "Else" block
-void SIAnnotateControlFlow::insertElse(BranchInst *Term) {
-  Value *Ret = CallInst::Create(Else, popSaved(), "", Term);
-  Term->setCondition(ExtractValueInst::Create(Ret, 0, "", Term));
-  push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term));
-}
-
-/// \brief Recursively handle the condition leading to a loop
-Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
-                                                  llvm::Loop *L) {
-
-  // Only search through PHI nodes which are inside the loop.  If we try this
-  // with PHI nodes that are outside of the loop, we end up inserting new PHI
-  // nodes outside of the loop which depend on values defined inside the loop.
-  // This will break the module with
-  // 'Instruction does not dominate all users!' errors.
-  PHINode *Phi = nullptr;
-  if ((Phi = dyn_cast<PHINode>(Cond)) && L->contains(Phi)) {
-
-    BasicBlock *Parent = Phi->getParent();
-    PHINode *NewPhi = PHINode::Create(Int64, 0, "", &Parent->front());
-    Value *Ret = NewPhi;
-
-    // Handle all non-constant incoming values first
-    for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
-      Value *Incoming = Phi->getIncomingValue(i);
-      BasicBlock *From = Phi->getIncomingBlock(i);
-      if (isa<ConstantInt>(Incoming)) {
-        NewPhi->addIncoming(Broken, From);
-        continue;
-      }
-
-      Phi->setIncomingValue(i, BoolFalse);
-      Value *PhiArg = handleLoopCondition(Incoming, Broken, L);
-      NewPhi->addIncoming(PhiArg, From);
-    }
-
-    BasicBlock *IDom = DT->getNode(Parent)->getIDom()->getBlock();
-
-    for (unsigned i = 0, e = Phi->getNumIncomingValues(); i != e; ++i) {
-
-      Value *Incoming = Phi->getIncomingValue(i);
-      if (Incoming != BoolTrue)
-        continue;
-
-      BasicBlock *From = Phi->getIncomingBlock(i);
-      if (From == IDom) {
-        CallInst *OldEnd = dyn_cast<CallInst>(Parent->getFirstInsertionPt());
-        if (OldEnd && OldEnd->getCalledFunction() == EndCf) {
-          Value *Args[] = { OldEnd->getArgOperand(0), NewPhi };
-          Ret = CallInst::Create(ElseBreak, Args, "", OldEnd);
-          continue;
-        }
-      }
-      TerminatorInst *Insert = From->getTerminator();
-      Value *PhiArg = CallInst::Create(Break, Broken, "", Insert);
-      NewPhi->setIncomingValue(i, PhiArg);
-    }
-    eraseIfUnused(Phi);
-    return Ret;
-
-  } else if (Instruction *Inst = dyn_cast<Instruction>(Cond)) {
-    BasicBlock *Parent = Inst->getParent();
-    Instruction *Insert;
-    if (L->contains(Inst)) {
-      Insert = Parent->getTerminator();
-    } else {
-      Insert = L->getHeader()->getFirstNonPHIOrDbgOrLifetime();
-    }
-    Value *Args[] = { Cond, Broken };
-    return CallInst::Create(IfBreak, Args, "", Insert);
-
-  } else {
-    llvm_unreachable("Unhandled loop condition!");
-  }
-  return nullptr;
-}
-
-/// \brief Handle a back edge (loop)
-void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
-  BasicBlock *BB = Term->getParent();
-  llvm::Loop *L = LI->getLoopFor(BB);
-  BasicBlock *Target = Term->getSuccessor(1);
-  PHINode *Broken = PHINode::Create(Int64, 0, "", &Target->front());
-
-  Value *Cond = Term->getCondition();
-  Term->setCondition(BoolTrue);
-  Value *Arg = handleLoopCondition(Cond, Broken, L);
-
-  for (pred_iterator PI = pred_begin(Target), PE = pred_end(Target);
-       PI != PE; ++PI) {
-
-    Broken->addIncoming(*PI == BB ? Arg : Int64Zero, *PI);
-  }
-
-  Term->setCondition(CallInst::Create(Loop, Arg, "", Term));
-  push(Term->getSuccessor(0), Arg);
-}
-
-/// \brief Close the last opened control flow
-void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
-  llvm::Loop *L = LI->getLoopFor(BB);
-
-  if (L && L->getHeader() == BB) {
-    // We can't insert an EndCF call into a loop header, because it will
-    // get executed on every iteration of the loop, when it should be
-    // executed only once before the loop.
-    SmallVector <BasicBlock*, 8> Latches;
-    L->getLoopLatches(Latches);
-
-    std::vector<BasicBlock*> Preds;
-    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
-      if (std::find(Latches.begin(), Latches.end(), *PI) == Latches.end())
-        Preds.push_back(*PI);
-    }
-    BB = llvm::SplitBlockPredecessors(BB, Preds, "endcf.split", nullptr, DT,
-                                      LI, false);
-  }
-
-  CallInst::Create(EndCf, popSaved(), "", BB->getFirstInsertionPt());
-}
-
-/// \brief Annotate the control flow with intrinsics so the backend can
-/// recognize if/then/else and loops.
-bool SIAnnotateControlFlow::runOnFunction(Function &F) {
-  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
-
-  for (df_iterator<BasicBlock *> I = df_begin(&F.getEntryBlock()),
-       E = df_end(&F.getEntryBlock()); I != E; ++I) {
-
-    BranchInst *Term = dyn_cast<BranchInst>((*I)->getTerminator());
-
-    if (!Term || Term->isUnconditional()) {
-      if (isTopOfStack(*I))
-        closeControlFlow(*I);
-      continue;
-    }
-
-    if (I.nodeVisited(Term->getSuccessor(1))) {
-      if (isTopOfStack(*I))
-        closeControlFlow(*I);
-      handleLoop(Term);
-      continue;
-    }
-
-    if (isTopOfStack(*I)) {
-      PHINode *Phi = dyn_cast<PHINode>(Term->getCondition());
-      if (Phi && Phi->getParent() == *I && isElse(Phi)) {
-        insertElse(Term);
-        eraseIfUnused(Phi);
-        continue;
-      }
-      closeControlFlow(*I);
-    }
-    openIf(Term);
-  }
-
-  assert(Stack.empty());
-  return true;
-}
-
-/// \brief Create the annotation pass
-FunctionPass *llvm::createSIAnnotateControlFlowPass() {
-  return new SIAnnotateControlFlow();
-}
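
The pass drives everything off a small stack of (merge block, saved value)
pairs: openIf pushes the branch's second successor together with the value
extracted from the llvm.SI.if call, and closeControlFlow pops it once the
depth-first walk reaches that block. A standalone model of just that stack
discipline (block ids and saved values are arbitrary stand-ins):

    #include <cassert>
    #include <utility>
    #include <vector>

    using StackEntry = std::pair<int, int>; // (block id, saved value)

    int main() {
      std::vector<StackEntry> Stack;
      auto push = [&](int BB, int Saved) { Stack.emplace_back(BB, Saved); };
      auto isTopOfStack = [&](int BB) {
        return !Stack.empty() && Stack.back().first == BB;
      };
      auto popSaved = [&] {
        int Saved = Stack.back().second;
        Stack.pop_back();
        return Saved;
      };

      push(/*merge block*/ 3, /*saved mask*/ 42); // openIf on 'br i1 %c, %bb1, %bb3'
      assert(!isTopOfStack(1)); // still inside the "then" region
      assert(isTopOfStack(3));  // reached the merge point
      assert(popSaved() == 42); // closeControlFlow emits llvm.SI.end.cf(saved)
      return 0;
    }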

Removed: llvm/trunk/lib/Target/R600/SIDefines.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIDefines.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIDefines.h (original)
+++ llvm/trunk/lib/Target/R600/SIDefines.h (removed)
@@ -1,172 +0,0 @@
-//===-- SIDefines.h - SI Helper Macros ----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#include "llvm/MC/MCInstrDesc.h"
-
-#ifndef LLVM_LIB_TARGET_R600_SIDEFINES_H
-#define LLVM_LIB_TARGET_R600_SIDEFINES_H
-
-namespace SIInstrFlags {
-// This needs to be kept in sync with the field bits in InstSI.
-enum {
-  SALU = 1 << 3,
-  VALU = 1 << 4,
-
-  SOP1 = 1 << 5,
-  SOP2 = 1 << 6,
-  SOPC = 1 << 7,
-  SOPK = 1 << 8,
-  SOPP = 1 << 9,
-
-  VOP1 = 1 << 10,
-  VOP2 = 1 << 11,
-  VOP3 = 1 << 12,
-  VOPC = 1 << 13,
-
-  MUBUF = 1 << 14,
-  MTBUF = 1 << 15,
-  SMRD = 1 << 16,
-  DS = 1 << 17,
-  MIMG = 1 << 18,
-  FLAT = 1 << 19,
-  WQM = 1 << 20,
-  VGPRSpill = 1 << 21
-};
-}
-
-namespace llvm {
-namespace AMDGPU {
-  enum OperandType {
-    /// Operand with register or 32-bit immediate
-    OPERAND_REG_IMM32 = llvm::MCOI::OPERAND_FIRST_TARGET,
-    /// Operand with register or inline constant
-    OPERAND_REG_INLINE_C
-  };
-}
-}
-
-namespace SIInstrFlags {
-  enum Flags {
-    // First 4 bits are the instruction encoding
-    VM_CNT = 1 << 0,
-    EXP_CNT = 1 << 1,
-    LGKM_CNT = 1 << 2
-  };
-
-  // v_cmp_class_* etc. use a 10-bit mask for what operation is checked.
-  // The result is true if any of these tests are true.
-  enum ClassFlags {
-    S_NAN = 1 << 0,        // Signaling NaN
-    Q_NAN = 1 << 1,        // Quiet NaN
-    N_INFINITY = 1 << 2,   // Negative infinity
-    N_NORMAL = 1 << 3,     // Negative normal
-    N_SUBNORMAL = 1 << 4,  // Negative subnormal
-    N_ZERO = 1 << 5,       // Negative zero
-    P_ZERO = 1 << 6,       // Positive zero
-    P_SUBNORMAL = 1 << 7,  // Positive subnormal
-    P_NORMAL = 1 << 8,     // Positive normal
-    P_INFINITY = 1 << 9    // Positive infinity
-  };
-}
-
-namespace SISrcMods {
-  enum {
-   NEG = 1 << 0,
-   ABS = 1 << 1
-  };
-}
-
-namespace SIOutMods {
-  enum {
-    NONE = 0,
-    MUL2 = 1,
-    MUL4 = 2,
-    DIV2 = 3
-  };
-}
-
-#define R_00B028_SPI_SHADER_PGM_RSRC1_PS                                0x00B028
-#define R_00B02C_SPI_SHADER_PGM_RSRC2_PS                                0x00B02C
-#define   S_00B02C_EXTRA_LDS_SIZE(x)                                  (((x) & 0xFF) << 8)
-#define R_00B128_SPI_SHADER_PGM_RSRC1_VS                                0x00B128
-#define R_00B228_SPI_SHADER_PGM_RSRC1_GS                                0x00B228
-#define R_00B848_COMPUTE_PGM_RSRC1                                      0x00B848
-#define   S_00B028_VGPRS(x)                                           (((x) & 0x3F) << 0)
-#define   S_00B028_SGPRS(x)                                           (((x) & 0x0F) << 6)
-#define R_00B84C_COMPUTE_PGM_RSRC2                                      0x00B84C
-#define   S_00B84C_SCRATCH_EN(x)                                      (((x) & 0x1) << 0)
-#define   S_00B84C_USER_SGPR(x)                                       (((x) & 0x1F) << 1)
-#define   S_00B84C_TGID_X_EN(x)                                       (((x) & 0x1) << 7)
-#define   S_00B84C_TGID_Y_EN(x)                                       (((x) & 0x1) << 8)
-#define   S_00B84C_TGID_Z_EN(x)                                       (((x) & 0x1) << 9)
-#define   S_00B84C_TG_SIZE_EN(x)                                      (((x) & 0x1) << 10)
-#define   S_00B84C_TIDIG_COMP_CNT(x)                                  (((x) & 0x03) << 11)
-
-#define   S_00B84C_LDS_SIZE(x)                                        (((x) & 0x1FF) << 15)
-#define R_0286CC_SPI_PS_INPUT_ENA                                       0x0286CC
-
-
-#define R_00B848_COMPUTE_PGM_RSRC1                                      0x00B848
-#define   S_00B848_VGPRS(x)                                           (((x) & 0x3F) << 0)
-#define   G_00B848_VGPRS(x)                                           (((x) >> 0) & 0x3F)
-#define   C_00B848_VGPRS                                              0xFFFFFFC0
-#define   S_00B848_SGPRS(x)                                           (((x) & 0x0F) << 6)
-#define   G_00B848_SGPRS(x)                                           (((x) >> 6) & 0x0F)
-#define   C_00B848_SGPRS                                              0xFFFFFC3F
-#define   S_00B848_PRIORITY(x)                                        (((x) & 0x03) << 10)
-#define   G_00B848_PRIORITY(x)                                        (((x) >> 10) & 0x03)
-#define   C_00B848_PRIORITY                                           0xFFFFF3FF
-#define   S_00B848_FLOAT_MODE(x)                                      (((x) & 0xFF) << 12)
-#define   G_00B848_FLOAT_MODE(x)                                      (((x) >> 12) & 0xFF)
-#define   C_00B848_FLOAT_MODE                                         0xFFF00FFF
-#define   S_00B848_PRIV(x)                                            (((x) & 0x1) << 20)
-#define   G_00B848_PRIV(x)                                            (((x) >> 20) & 0x1)
-#define   C_00B848_PRIV                                               0xFFEFFFFF
-#define   S_00B848_DX10_CLAMP(x)                                      (((x) & 0x1) << 21)
-#define   G_00B848_DX10_CLAMP(x)                                      (((x) >> 21) & 0x1)
-#define   C_00B848_DX10_CLAMP                                         0xFFDFFFFF
-#define   S_00B848_DEBUG_MODE(x)                                      (((x) & 0x1) << 22)
-#define   G_00B848_DEBUG_MODE(x)                                      (((x) >> 22) & 0x1)
-#define   C_00B848_DEBUG_MODE                                         0xFFBFFFFF
-#define   S_00B848_IEEE_MODE(x)                                       (((x) & 0x1) << 23)
-#define   G_00B848_IEEE_MODE(x)                                       (((x) >> 23) & 0x1)
-#define   C_00B848_IEEE_MODE                                          0xFF7FFFFF
-
-
-// Helpers for setting FLOAT_MODE
-#define FP_ROUND_ROUND_TO_NEAREST 0
-#define FP_ROUND_ROUND_TO_INF 1
-#define FP_ROUND_ROUND_TO_NEGINF 2
-#define FP_ROUND_ROUND_TO_ZERO 3
-
-// Bits 3:0 control rounding mode. 1:0 control single precision, 3:2 double
-// precision.
-#define FP_ROUND_MODE_SP(x) ((x) & 0x3)
-#define FP_ROUND_MODE_DP(x) (((x) & 0x3) << 2)
-
-#define FP_DENORM_FLUSH_IN_FLUSH_OUT 0
-#define FP_DENORM_FLUSH_OUT 1
-#define FP_DENORM_FLUSH_IN 2
-#define FP_DENORM_FLUSH_NONE 3
-
-
-// Bits 7:4 control denormal handling. 5:4 control single precision, 7:6 double
-// precision.
-#define FP_DENORM_MODE_SP(x) (((x) & 0x3) << 4)
-#define FP_DENORM_MODE_DP(x) (((x) & 0x3) << 6)
-
-#define R_00B860_COMPUTE_TMPRING_SIZE                                   0x00B860
-#define   S_00B860_WAVESIZE(x)                                        (((x) & 0x1FFF) << 12)
-
-#define R_0286E8_SPI_TMPRING_SIZE                                       0x0286E8
-#define   S_0286E8_WAVESIZE(x)                                        (((x) & 0x1FFF) << 12)
-
-
-#endif
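
Since the FLOAT_MODE field is built by OR-ing these helpers together, a worked
example may help. The macros below are copied from the header above; the final
value would then be placed into COMPUTE_PGM_RSRC1 via S_00B848_FLOAT_MODE():

    #include <cassert>

    #define FP_ROUND_ROUND_TO_NEAREST 0
    #define FP_ROUND_ROUND_TO_ZERO 3
    #define FP_ROUND_MODE_SP(x) ((x) & 0x3)
    #define FP_ROUND_MODE_DP(x) (((x) & 0x3) << 2)
    #define FP_DENORM_FLUSH_IN_FLUSH_OUT 0
    #define FP_DENORM_MODE_SP(x) (((x) & 0x3) << 4)
    #define FP_DENORM_MODE_DP(x) (((x) & 0x3) << 6)

    int main() {
      // Round-to-nearest for single precision, round-to-zero for double,
      // flush denormals in and out for both.
      unsigned FloatMode = FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
                           FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_ZERO) |
                           FP_DENORM_MODE_SP(FP_DENORM_FLUSH_IN_FLUSH_OUT) |
                           FP_DENORM_MODE_DP(FP_DENORM_FLUSH_IN_FLUSH_OUT);
      // SP round bits 1:0 = 0, DP round bits 3:2 = 3, denorm bits 7:4 = 0.
      assert(FloatMode == 0xC);
      return 0;
    }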

Removed: llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp (removed)
@@ -1,96 +0,0 @@
-//===-- SIFixControlFlowLiveIntervals.cpp - Fix CF live intervals ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Spilling of EXEC masks used for control flow messes up control flow
-/// lowering, so mark all live intervals associated with CF instructions as
-/// non-spillable.
-///
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachinePostDominators.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "si-fix-cf-live-intervals"
-
-namespace {
-
-class SIFixControlFlowLiveIntervals : public MachineFunctionPass {
-public:
-  static char ID;
-
-public:
-  SIFixControlFlowLiveIntervals() : MachineFunctionPass(ID) {
-    initializeSIFixControlFlowLiveIntervalsPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Fix CF Live Intervals";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<LiveIntervals>();
-    AU.setPreservesAll();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SIFixControlFlowLiveIntervals, DEBUG_TYPE,
-                      "SI Fix CF Live Intervals", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_END(SIFixControlFlowLiveIntervals, DEBUG_TYPE,
-                    "SI Fix CF Live Intervals", false, false)
-
-char SIFixControlFlowLiveIntervals::ID = 0;
-
-char &llvm::SIFixControlFlowLiveIntervalsID = SIFixControlFlowLiveIntervals::ID;
-
-FunctionPass *llvm::createSIFixControlFlowLiveIntervalsPass() {
-  return new SIFixControlFlowLiveIntervals();
-}
-
-bool SIFixControlFlowLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
-  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
-
-  for (const MachineBasicBlock &MBB : MF) {
-    for (const MachineInstr &MI : MBB) {
-      switch (MI.getOpcode()) {
-        case AMDGPU::SI_IF:
-        case AMDGPU::SI_ELSE:
-        case AMDGPU::SI_BREAK:
-        case AMDGPU::SI_IF_BREAK:
-        case AMDGPU::SI_ELSE_BREAK:
-        case AMDGPU::SI_END_CF: {
-          unsigned Reg = MI.getOperand(0).getReg();
-          LIS->getInterval(Reg).markNotSpillable();
-          break;
-        }
-        default:
-          break;
-      }
-    }
-  }
-
-  return false;
-}

Removed: llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp (removed)
@@ -1,338 +0,0 @@
-//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Copies from VGPR to SGPR registers are illegal and the register coalescer
-/// will sometimes generate these illegal copies in situations like this:
-///
-///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
-///
-/// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
-///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
-///    ...
-///    BRANCH %cond BB1, BB2
-///  BB1:
-///    %vreg2 <vgpr> = VECTOR_INST
-///    %vreg3 <vsrc> = COPY %vreg2 <vgpr>
-///  BB2:
-///    %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
-///    %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
-///
-///
-/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
-/// code will look like this:
-///
-/// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
-///    ...
-///    BRANCH %cond BB1, BB2
-/// BB1:
-///   %vreg2 <vgpr> = VECTOR_INST
-///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
-/// BB2:
-///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
-///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
-///
-/// Now that the result of the PHI instruction is an SGPR, the register
-/// allocator is now forced to constrain the register class of %vreg3 to
-/// <sgpr> so we end up with final code like this:
-///
-/// BB0:
-///   %vreg0 <sgpr> = SCALAR_INST
-///    ...
-///    BRANCH %cond BB1, BB2
-/// BB1:
-///   %vreg2 <vgpr> = VECTOR_INST
-///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
-/// BB2:
-///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
-///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
-///
-/// Now this code contains an illegal copy from a VGPR to an SGPR.
-///
-/// In order to avoid this problem, this pass searches for PHI instructions
-/// which define a <vsrc> register and constrains its definition class to
-/// <vgpr> if the user of the PHI's definition register is a vector instruction.
-/// If the PHI's definition class is constrained to <vgpr> then the coalescer
-/// will be unable to perform the COPY removal from the above example, which
-/// ultimately led to the creation of an illegal COPY.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "sgpr-copies"
-
-namespace {
-
-class SIFixSGPRCopies : public MachineFunctionPass {
-
-private:
-  static char ID;
-  const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
-                                           const MachineRegisterInfo &MRI,
-                                           unsigned Reg,
-                                           unsigned SubReg) const;
-  const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
-                                                 const MachineRegisterInfo &MRI,
-                                                 unsigned Reg,
-                                                 unsigned SubReg) const;
-  bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
-                        const MachineRegisterInfo &MRI) const;
-
-public:
-  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Fix SGPR copies";
-  }
-
-};
-
-} // End anonymous namespace
-
-char SIFixSGPRCopies::ID = 0;
-
-FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
-  return new SIFixSGPRCopies(tm);
-}
-
-static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
-  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
-    if (!MI.getOperand(i).isReg() ||
-        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
-      continue;
-
-    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
-      return true;
-  }
-  return false;
-}
-
-/// This function walks the use list of Reg until it finds an Instruction
-/// that isn't a COPY and returns the register class of that instruction.
-/// \return The register class of the first non-COPY instruction.
-const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
-                                                 const SIRegisterInfo *TRI,
-                                                 const MachineRegisterInfo &MRI,
-                                                 unsigned Reg,
-                                                 unsigned SubReg) const {
-
-  const TargetRegisterClass *RC
-    = TargetRegisterInfo::isVirtualRegister(Reg) ?
-    MRI.getRegClass(Reg) :
-    TRI->getPhysRegClass(Reg);
-
-  RC = TRI->getSubRegClass(RC, SubReg);
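-  // Walk through chains of COPYs, intersecting the class required by each
-  // transitive user with the class inferred so far.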
-  for (MachineRegisterInfo::use_instr_iterator
-       I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
-    switch (I->getOpcode()) {
-    case AMDGPU::COPY:
-      RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
-                                  I->getOperand(0).getReg(),
-                                  I->getOperand(0).getSubReg()));
-      break;
-    }
-  }
-
-  return RC;
-}
-
-const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
-                                                 const SIRegisterInfo *TRI,
-                                                 const MachineRegisterInfo &MRI,
-                                                 unsigned Reg,
-                                                 unsigned SubReg) const {
-  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
-    const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
-    return TRI->getSubRegClass(RC, SubReg);
-  }
-  MachineInstr *Def = MRI.getVRegDef(Reg);
-  if (Def->getOpcode() != AMDGPU::COPY) {
-    return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
-  }
-
-  return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
-                                   Def->getOperand(1).getSubReg());
-}
-
-bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
-                                      const SIRegisterInfo *TRI,
-                                      const MachineRegisterInfo &MRI) const {
-
-  unsigned DstReg = Copy.getOperand(0).getReg();
-  unsigned SrcReg = Copy.getOperand(1).getReg();
-  unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
-
-  if (!TargetRegisterInfo::isVirtualRegister(DstReg)) {
-    // If the destination register is a physical register there isn't really
-    // much we can do to fix this.
-    return false;
-  }
-
-  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
-
-  const TargetRegisterClass *SrcRC;
-
-  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
-      MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
-    return false;
-
-  SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
-  return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
-}
-
-bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-                                                  BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
-                                                      I != E; ++I) {
-      MachineInstr &MI = *I;
-      if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
-        DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
-        DEBUG(MI.print(dbgs()));
-        TII->moveToVALU(MI);
-
-      }
-
-      switch (MI.getOpcode()) {
-      default: continue;
-      case AMDGPU::PHI: {
-        DEBUG(dbgs() << "Fixing PHI: " << MI);
-
-        for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
-          const MachineOperand &Op = MI.getOperand(i);
-          unsigned Reg = Op.getReg();
-          const TargetRegisterClass *RC
-            = inferRegClassFromDef(TRI, MRI, Reg, Op.getSubReg());
-
-          MRI.constrainRegClass(Op.getReg(), RC);
-        }
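-        // Infer a class for the PHI result from its uses: if the uses allow
-        // a VGPR class, constrain the result to VGPR_32 so the coalescer
-        // cannot later fold the PHI back into an SGPR def (see file comment).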
-        unsigned Reg = MI.getOperand(0).getReg();
-        const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
-                                                  MI.getOperand(0).getSubReg());
-        if (TRI->getCommonSubClass(RC, &AMDGPU::VGPR_32RegClass)) {
-          MRI.constrainRegClass(Reg, &AMDGPU::VGPR_32RegClass);
-        }
-
-        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
-          break;
-
-        // If a PHI node defines an SGPR and any of its operands are VGPRs,
-        // then we need to move it to the VALU.
-        //
-        // Also, if a PHI node defines an SGPR and has all SGPR operands
-        // we must move it to the VALU, because the SGPR operands will
-        // all end up being assigned the same register, which means
-        // there is a potential for a conflict if different threads take
-        // different control flow paths.
-        //
-        // For Example:
-        //
-        // sgpr0 = def;
-        // ...
-        // sgpr1 = def;
-        // ...
-        // sgpr2 = PHI sgpr0, sgpr1
-        // use sgpr2;
-        //
-        // Will Become:
-        //
-        // sgpr2 = def;
-        // ...
-        // sgpr2 = def;
-        // ...
-        // use sgpr2
-        //
-        // FIXME: This is OK if the branching decision is made based on an
-        // SGPR value.
-        bool SGPRBranch = false;
-
-        // The one exception to this rule is when one of the operands
-        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
-        // instruction.  In this case, we know the program will
-        // never enter the second block (the loop) without entering
-        // the first block (where the condition is computed), so there
-        // is no chance for values to be overwritten.
-
-        bool HasBreakDef = false;
-        for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
-          unsigned Reg = MI.getOperand(i).getReg();
-          if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
-            TII->moveToVALU(MI);
-            break;
-          }
-          MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
-          assert(DefInstr);
-          switch(DefInstr->getOpcode()) {
-
-          case AMDGPU::SI_BREAK:
-          case AMDGPU::SI_IF_BREAK:
-          case AMDGPU::SI_ELSE_BREAK:
-          // If we see a PHI instruction that defines an SGPR, then that PHI
-          // instruction has already been considered and should have
-          // a *_BREAK as an operand.
-          case AMDGPU::PHI:
-            HasBreakDef = true;
-            break;
-          }
-        }
-
-        if (!SGPRBranch && !HasBreakDef)
-          TII->moveToVALU(MI);
-        break;
-      }
-      case AMDGPU::REG_SEQUENCE: {
-        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
-            !hasVGPROperands(MI, TRI))
-          continue;
-
-        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);
-
-        TII->moveToVALU(MI);
-        break;
-      }
-      case AMDGPU::INSERT_SUBREG: {
-        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
-        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
-        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
-        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
-        if (TRI->isSGPRClass(DstRC) &&
-            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
-          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
-          TII->moveToVALU(MI);
-        }
-        break;
-      }
-      }
-    }
-  }
-
-  return true;
-}
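
For context, a pass like this is handed to the backend pipeline through its
factory function, typically right after instruction selection so the illegal
copies are rewritten before the coalescer runs. A minimal sketch, assuming the
LLVM 3.7-era TargetPassConfig hooks (the exact hook and class names here are
assumptions, not something this diff shows):

    // Sketch: wiring SIFixSGPRCopies into the GCN codegen pipeline.
    bool GCNPassConfig::addInstSelector() {
      AMDGPUPassConfig::addInstSelector();
      // TM is the TargetPassConfig's TargetMachine pointer; the factory
      // signature (TargetMachine &) comes from the file above.
      addPass(createSIFixSGPRCopiesPass(*TM));
      return false;
    }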

Removed: llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp (removed)
@@ -1,192 +0,0 @@
-//===-- SIFixSGPRLiveRanges.cpp - Fix SGPR live ranges ----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// SALU instructions ignore control flow, so we need to modify the live ranges
-/// of the registers they define in some cases.
-///
-/// The main case we need to handle is when a def is used in one side of a
-/// branch and not another.  For example:
-///
-/// %def
-/// IF
-///   ...
-///   ...
-/// ELSE
-///   %use
-///   ...
-/// ENDIF
-///
-/// Here we need the register allocator to avoid assigning any of the defs
-/// inside of the IF to the same register as %def.  In traditional live
-/// interval analysis %def is not live inside the IF branch, however, since
-/// SALU instructions inside of IF will be executed even if the branch is not
-/// taken, there is the chance that one of the instructions will overwrite the
-/// value of %def, so the use in ELSE will see the wrong value.
-///
-/// The strategy we use for solving this is to add an extra use after the ENDIF:
-///
-/// %def
-/// IF
-///   ...
-///   ...
-/// ELSE
-///   %use
-///   ...
-/// ENDIF
-/// %use
-///
-/// Adding this use will make the def live throughout the IF branch, which is
-/// what we want.
-
-#include "AMDGPU.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachinePostDominators.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "si-fix-sgpr-live-ranges"
-
-namespace {
-
-class SIFixSGPRLiveRanges : public MachineFunctionPass {
-public:
-  static char ID;
-
-  SIFixSGPRLiveRanges() : MachineFunctionPass(ID) {
-    initializeSIFixSGPRLiveRangesPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Fix SGPR live ranges";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<LiveIntervals>();
-    AU.addRequired<MachinePostDominatorTree>();
-    AU.setPreservesCFG();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SIFixSGPRLiveRanges, DEBUG_TYPE,
-                      "SI Fix SGPR Live Ranges", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
-INITIALIZE_PASS_END(SIFixSGPRLiveRanges, DEBUG_TYPE,
-                    "SI Fix SGPR Live Ranges", false, false)
-
-char SIFixSGPRLiveRanges::ID = 0;
-
-char &llvm::SIFixSGPRLiveRangesID = SIFixSGPRLiveRanges::ID;
-
-FunctionPass *llvm::createSIFixSGPRLiveRangesPass() {
-  return new SIFixSGPRLiveRanges();
-}
-
-bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
-  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
-      MF.getSubtarget().getRegisterInfo());
-  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
-  MachinePostDominatorTree *PDT = &getAnalysis<MachinePostDominatorTree>();
-  std::vector<std::pair<unsigned, LiveRange *>> SGPRLiveRanges;
-
-  // First pass, collect all live intervals for SGPRs
-  for (const MachineBasicBlock &MBB : MF) {
-    for (const MachineInstr &MI : MBB) {
-      for (const MachineOperand &MO : MI.defs()) {
-        if (MO.isImplicit())
-          continue;
-        unsigned Def = MO.getReg();
-        if (TargetRegisterInfo::isVirtualRegister(Def)) {
-          if (TRI->isSGPRClass(MRI.getRegClass(Def)))
-            SGPRLiveRanges.push_back(
-                std::make_pair(Def, &LIS->getInterval(Def)));
-        } else if (TRI->isSGPRClass(TRI->getPhysRegClass(Def))) {
-          SGPRLiveRanges.push_back(
-              std::make_pair(Def, &LIS->getRegUnit(Def)));
-        }
-      }
-    }
-  }
-
-  // Second pass, fix the intervals
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-                                                  BI != BE; ++BI) {
-    MachineBasicBlock &MBB = *BI;
-    if (MBB.succ_size() < 2)
-      continue;
-
-    // We have structured control flow, so the number of successors should
-    // be two.
-    assert(MBB.succ_size() == 2);
-    MachineBasicBlock *SuccA = *MBB.succ_begin();
-    MachineBasicBlock *SuccB = *(++MBB.succ_begin());
-    MachineBasicBlock *NCD = PDT->findNearestCommonDominator(SuccA, SuccB);
-
-    if (!NCD)
-      continue;
-
-    MachineBasicBlock::iterator NCDTerm = NCD->getFirstTerminator();
-
-    if (NCDTerm != NCD->end() && NCDTerm->getOpcode() == AMDGPU::SI_ELSE) {
-      assert(NCD->succ_size() == 2);
-      // We want to make sure we insert the Use after the ENDIF, not after
-      // the ELSE.
-      NCD = PDT->findNearestCommonDominator(*NCD->succ_begin(),
-                                            *(++NCD->succ_begin()));
-    }
-    assert(SuccA && SuccB);
-    for (std::pair<unsigned, LiveRange*> RegLR : SGPRLiveRanges) {
-      unsigned Reg = RegLR.first;
-      LiveRange *LR = RegLR.second;
-
-      // FIXME: We could be smarter here.  If the register is Live-In to
-      // one block, but the other doesn't have any SGPR defs, then there
-      // won't be a conflict.  Also, if the branch decision is based on
-      // a value in an SGPR, then there will be no conflict.
-      bool LiveInToA = LIS->isLiveInToMBB(*LR, SuccA);
-      bool LiveInToB = LIS->isLiveInToMBB(*LR, SuccB);
-
-      if ((!LiveInToA && !LiveInToB) ||
-          (LiveInToA && LiveInToB))
-        continue;
-
-      // This interval is live in to one successor, but not the other, so
-      // we need to update its range so it is live in to both.
-      DEBUG(dbgs() << "Possible SGPR conflict detected in " << *LR <<
-                      " BB#" << SuccA->getNumber() << ", BB#" <<
-                      SuccB->getNumber() <<
-                      " with NCD = " << NCD->getNumber() << '\n');
-
-      // FIXME: Need to figure out how to update LiveRange here so this pass
-      // will be able to preserve LiveInterval analysis.
-      BuildMI(*NCD, NCD->getFirstNonPHI(), DebugLoc(),
-              TII->get(AMDGPU::SGPR_USE))
-              .addReg(Reg, RegState::Implicit);
-      DEBUG(NCD->getFirstNonPHI()->dump());
-    }
-  }
-
-  return false;
-}
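
The heart of the pass above is locating the reconvergence point of a two-way
branch before inserting the artificial use. Distilled into a standalone helper
(a sketch built only from the APIs already used in this file):

    #include <iterator>
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachinePostDominators.h"
    using namespace llvm;

    // Returns the block where both successors of a structured two-way branch
    // rejoin, i.e. where the extra SGPR use has to be placed, or null if no
    // common post-dominator exists.
    static MachineBasicBlock *findJoin(MachineBasicBlock &MBB,
                                       MachinePostDominatorTree &PDT) {
      if (MBB.succ_size() != 2)
        return nullptr;
      MachineBasicBlock *SuccA = *MBB.succ_begin();
      MachineBasicBlock *SuccB = *std::next(MBB.succ_begin());
      return PDT.findNearestCommonDominator(SuccA, SuccB);
    }

(Note the pass additionally steps past an SI_ELSE terminator so the use lands
after the ENDIF rather than after the ELSE.)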

Removed: llvm/trunk/lib/Target/R600/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIFoldOperands.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIFoldOperands.cpp (removed)
@@ -1,288 +0,0 @@
-//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-#define DEBUG_TYPE "si-fold-operands"
-using namespace llvm;
-
-namespace {
-
-class SIFoldOperands : public MachineFunctionPass {
-public:
-  static char ID;
-
-  SIFoldOperands() : MachineFunctionPass(ID) {
-    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Fold Operands";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<MachineDominatorTree>();
-    AU.setPreservesCFG();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-struct FoldCandidate {
-  MachineInstr *UseMI;
-  unsigned UseOpNo;
-  MachineOperand *OpToFold;
-  uint64_t ImmToFold;
-
-  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
-                UseMI(MI), UseOpNo(OpNo) {
-
-    if (FoldOp->isImm()) {
-      OpToFold = nullptr;
-      ImmToFold = FoldOp->getImm();
-    } else {
-      assert(FoldOp->isReg());
-      OpToFold = FoldOp;
-    }
-  }
-
-  bool isImm() const {
-    return !OpToFold;
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SIFoldOperands, DEBUG_TYPE,
-                      "SI Fold Operands", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_END(SIFoldOperands, DEBUG_TYPE,
-                    "SI Fold Operands", false, false)
-
-char SIFoldOperands::ID = 0;
-
-char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
-
-FunctionPass *llvm::createSIFoldOperandsPass() {
-  return new SIFoldOperands();
-}
-
-static bool isSafeToFold(unsigned Opcode) {
-  switch(Opcode) {
-  case AMDGPU::V_MOV_B32_e32:
-  case AMDGPU::V_MOV_B32_e64:
-  case AMDGPU::V_MOV_B64_PSEUDO:
-  case AMDGPU::S_MOV_B32:
-  case AMDGPU::S_MOV_B64:
-  case AMDGPU::COPY:
-    return true;
-  default:
-    return false;
-  }
-}
-
-static bool updateOperand(FoldCandidate &Fold,
-                          const TargetRegisterInfo &TRI) {
-  MachineInstr *MI = Fold.UseMI;
-  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
-  assert(Old.isReg());
-
-  if (Fold.isImm()) {
-    Old.ChangeToImmediate(Fold.ImmToFold);
-    return true;
-  }
-
-  MachineOperand *New = Fold.OpToFold;
-  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
-      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
-    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
-    return true;
-  }
-
-  // FIXME: Handle physical registers.
-
-  return false;
-}
-
-static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
-                             MachineInstr *MI, unsigned OpNo,
-                             MachineOperand *OpToFold,
-                             const SIInstrInfo *TII) {
-  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
-    // Operand is not legal, so try to commute the instruction to
-    // see if this makes it possible to fold.
-    unsigned CommuteIdx0;
-    unsigned CommuteIdx1;
-    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
-
-    if (CanCommute) {
-      if (CommuteIdx0 == OpNo)
-        OpNo = CommuteIdx1;
-      else if (CommuteIdx1 == OpNo)
-        OpNo = CommuteIdx0;
-    }
-
-    if (!CanCommute || !TII->commuteInstruction(MI))
-      return false;
-
-    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
-      return false;
-  }
-
-  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
-  return true;
-}
-
-bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  const SIRegisterInfo &TRI = TII->getRegisterInfo();
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-                                                  BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    MachineBasicBlock::iterator I, Next;
-    for (I = MBB.begin(); I != MBB.end(); I = Next) {
-      Next = std::next(I);
-      MachineInstr &MI = *I;
-
-      if (!isSafeToFold(MI.getOpcode()))
-        continue;
-
-      unsigned OpSize = TII->getOpSize(MI, 1);
-      MachineOperand &OpToFold = MI.getOperand(1);
-      bool FoldingImm = OpToFold.isImm();
-
-      // FIXME: We could also be folding things like FrameIndexes and
-      // TargetIndexes.
-      if (!FoldingImm && !OpToFold.isReg())
-        continue;
-
-      // Folding immediates with more than one use will increase program size.
-      // FIXME: This will also reduce register usage, which may be better
-      // in some cases.  A better heuristic is needed.
-      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
-          !MRI.hasOneUse(MI.getOperand(0).getReg()))
-        continue;
-
-      // FIXME: Fold operands with subregs.
-      if (OpToFold.isReg() &&
-          (!TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()) ||
-           OpToFold.getSubReg()))
-        continue;
-
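-      // First gather every use that can legally take the folded operand;
-      // the actual operand rewriting happens in the loop further below.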
-      std::vector<FoldCandidate> FoldList;
-      for (MachineRegisterInfo::use_iterator
-           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
-           Use != E; ++Use) {
-
-        MachineInstr *UseMI = Use->getParent();
-        const MachineOperand &UseOp = UseMI->getOperand(Use.getOperandNo());
-
-        // FIXME: Fold operands with subregs.
-        if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
-            UseOp.isImplicit())) {
-          continue;
-        }
-
-        APInt Imm;
-
-        if (FoldingImm) {
-          unsigned UseReg = UseOp.getReg();
-          const TargetRegisterClass *UseRC
-            = TargetRegisterInfo::isVirtualRegister(UseReg) ?
-            MRI.getRegClass(UseReg) :
-            TRI.getPhysRegClass(UseReg);
-
-          Imm = APInt(64, OpToFold.getImm());
-
-          // Split 64-bit constants into 32-bits for folding.
-          if (UseOp.getSubReg()) {
-            if (UseRC->getSize() != 8)
-              continue;
-
-            if (UseOp.getSubReg() == AMDGPU::sub0) {
-              Imm = Imm.getLoBits(32);
-            } else {
-              assert(UseOp.getSubReg() == AMDGPU::sub1);
-              Imm = Imm.getHiBits(32);
-            }
-          }
-
-          // In order to fold immediates into copies, we need to change the
-          // copy to a MOV.
-          if (UseMI->getOpcode() == AMDGPU::COPY) {
-            unsigned DestReg = UseMI->getOperand(0).getReg();
-            const TargetRegisterClass *DestRC
-              = TargetRegisterInfo::isVirtualRegister(DestReg) ?
-              MRI.getRegClass(DestReg) :
-              TRI.getPhysRegClass(DestReg);
-
-            unsigned MovOp = TII->getMovOpcode(DestRC);
-            if (MovOp == AMDGPU::COPY)
-              continue;
-
-            UseMI->setDesc(TII->get(MovOp));
-          }
-        }
-
-        const MCInstrDesc &UseDesc = UseMI->getDesc();
-
-        // Don't fold into target independent nodes.  Target independent opcodes
-        // don't have defined register classes.
-        if (UseDesc.isVariadic() ||
-            UseDesc.OpInfo[Use.getOperandNo()].RegClass == -1)
-          continue;
-
-        if (FoldingImm) {
-          MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
-          tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &ImmOp, TII);
-          continue;
-        }
-
-        tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &OpToFold, TII);
-
-        // FIXME: We could try to change the instruction from 64-bit to 32-bit
-        // to enable more folding opportunities.  The shrink operands pass
-        // already does this.
-      }
-
-      for (FoldCandidate &Fold : FoldList) {
-        if (updateOperand(Fold, TRI)) {
-          // Clear kill flags.
-          if (!Fold.isImm()) {
-            assert(Fold.OpToFold && Fold.OpToFold->isReg());
-            Fold.OpToFold->setIsKill(false);
-          }
-          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
-                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
-        }
-      }
-    }
-  }
-  return false;
-}
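
One detail worth pulling out of tryAddToFoldList() above: when the operand is
not legal in its current slot, the pass commutes the instruction and retries
the swapped slot. A distilled sketch of just that fallback, using only the
SIInstrInfo calls visible in this file (an illustration, not the pass's exact
control flow):

    #include "SIInstrInfo.h"
    using namespace llvm;

    // Try to make OpToFold legal in operand slot OpNo of MI, commuting the
    // instruction if the operand is rejected in its original position.
    static bool foldWithCommute(MachineInstr *MI, unsigned OpNo,
                                MachineOperand *OpToFold,
                                const SIInstrInfo *TII) {
      if (TII->isOperandLegal(MI, OpNo, OpToFold))
        return true;   // Legal as-is, no commute needed.

      unsigned Idx0, Idx1;
      if (!TII->findCommutedOpIndices(MI, Idx0, Idx1))
        return false;  // Instruction is not commutable.

      // Track which slot our operand occupies after the swap.
      if (Idx0 == OpNo)
        OpNo = Idx1;
      else if (Idx1 == OpNo)
        OpNo = Idx0;

      if (!TII->commuteInstruction(MI))
        return false;  // Commute failed; MI is unchanged.

      return TII->isOperandLegal(MI, OpNo, OpToFold);
    }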

Removed: llvm/trunk/lib/Target/R600/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIISelLowering.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIISelLowering.cpp (removed)
@@ -1,2241 +0,0 @@
-//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Custom DAG lowering for SI
-//
-//===----------------------------------------------------------------------===//
-
-#ifdef _MSC_VER
-// Provide M_PI.
-#define _USE_MATH_DEFINES
-#include <cmath>
-#endif
-
-#include "SIISelLowering.h"
-#include "AMDGPU.h"
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/IR/Function.h"
-#include "llvm/ADT/SmallString.h"
-
-using namespace llvm;
-
-SITargetLowering::SITargetLowering(TargetMachine &TM,
-                                   const AMDGPUSubtarget &STI)
-    : AMDGPUTargetLowering(TM, STI) {
-  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
-  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
-
-  addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
-  addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
-
-  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
-  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
-
-  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
-  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
-  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
-
-  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
-  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
-
-  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
-  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
-
-  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
-  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
-
-  computeRegisterProperties(STI.getRegisterInfo());
-
-  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
-  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
-  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
-  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
-
-  setOperationAction(ISD::ADD, MVT::i32, Legal);
-  setOperationAction(ISD::ADDC, MVT::i32, Legal);
-  setOperationAction(ISD::ADDE, MVT::i32, Legal);
-  setOperationAction(ISD::SUBC, MVT::i32, Legal);
-  setOperationAction(ISD::SUBE, MVT::i32, Legal);
-
-  setOperationAction(ISD::FSIN, MVT::f32, Custom);
-  setOperationAction(ISD::FCOS, MVT::f32, Custom);
-
-  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
-  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
-
-  // We need to custom lower vector stores from local memory
-  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
-  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
-
-  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
-  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
-
-  setOperationAction(ISD::STORE, MVT::i1, Custom);
-  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
-
-  setOperationAction(ISD::SELECT, MVT::i64, Custom);
-  setOperationAction(ISD::SELECT, MVT::f64, Promote);
-  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
-
-  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
-  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
-  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
-  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
-
-  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
-  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
-
-  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
-
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
-
-  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
-  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
-
-  for (MVT VT : MVT::integer_valuetypes()) {
-    if (VT == MVT::i64)
-      continue;
-
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
-
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
-
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
-  }
-
-  for (MVT VT : MVT::integer_vector_valuetypes()) {
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand);
-  }
-
-  for (MVT VT : MVT::fp_valuetypes())
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
-
-  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
-  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
-  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
-
-  setOperationAction(ISD::LOAD, MVT::i1, Custom);
-
-  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
-  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
-
-  // These should use UDIVREM, so set them to expand
-  setOperationAction(ISD::UDIV, MVT::i64, Expand);
-  setOperationAction(ISD::UREM, MVT::i64, Expand);
-
-  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
-  setOperationAction(ISD::SELECT, MVT::i1, Promote);
-
-  // We only support LOAD/STORE and vector manipulation ops for vectors
-  // with > 4 elements.
-  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32}) {
-    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
-      switch(Op) {
-      case ISD::LOAD:
-      case ISD::STORE:
-      case ISD::BUILD_VECTOR:
-      case ISD::BITCAST:
-      case ISD::EXTRACT_VECTOR_ELT:
-      case ISD::INSERT_VECTOR_ELT:
-      case ISD::INSERT_SUBVECTOR:
-      case ISD::EXTRACT_SUBVECTOR:
-        break;
-      case ISD::CONCAT_VECTORS:
-        setOperationAction(Op, VT, Custom);
-        break;
-      default:
-        setOperationAction(Op, VT, Expand);
-        break;
-      }
-    }
-  }
-
-  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
-    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
-    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
-    setOperationAction(ISD::FRINT, MVT::f64, Legal);
-  }
-
-  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
-  setOperationAction(ISD::FDIV, MVT::f32, Custom);
-  setOperationAction(ISD::FDIV, MVT::f64, Custom);
-
-  setTargetDAGCombine(ISD::FADD);
-  setTargetDAGCombine(ISD::FSUB);
-  setTargetDAGCombine(ISD::FMINNUM);
-  setTargetDAGCombine(ISD::FMAXNUM);
-  setTargetDAGCombine(ISD::SMIN);
-  setTargetDAGCombine(ISD::SMAX);
-  setTargetDAGCombine(ISD::UMIN);
-  setTargetDAGCombine(ISD::UMAX);
-  setTargetDAGCombine(ISD::SELECT_CC);
-  setTargetDAGCombine(ISD::SETCC);
-  setTargetDAGCombine(ISD::AND);
-  setTargetDAGCombine(ISD::OR);
-  setTargetDAGCombine(ISD::UINT_TO_FP);
-
-  // All memory operations. Some folding on the pointer operand is done to
-  // help match the constant offsets in the addressing modes.
-  setTargetDAGCombine(ISD::LOAD);
-  setTargetDAGCombine(ISD::STORE);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD);
-  setTargetDAGCombine(ISD::ATOMIC_STORE);
-  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
-  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
-  setTargetDAGCombine(ISD::ATOMIC_SWAP);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
-  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
-
-  setSchedulingPreference(Sched::RegPressure);
-}
-
-//===----------------------------------------------------------------------===//
-// TargetLowering queries
-//===----------------------------------------------------------------------===//
-
-bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
-                                          EVT) const {
-  // SI has some legal vector types, but no legal vector operations. Say no
-  // shuffles are legal in order to prefer scalarizing some vector operations.
-  return false;
-}
-
-bool SITargetLowering::isLegalAddressingMode(const AddrMode &AM,
-                                             Type *Ty, unsigned AS) const {
-  // No global is ever allowed as a base.
-  if (AM.BaseGV)
-    return false;
-
-  switch (AS) {
-  case AMDGPUAS::GLOBAL_ADDRESS:
-  case AMDGPUAS::CONSTANT_ADDRESS: // XXX - Should we assume SMRD instructions?
-  case AMDGPUAS::PRIVATE_ADDRESS:
-  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE: {
-    // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
-    // additionally can do r + r + i with addr64. 32-bit has more addressing
-    // mode options. Depending on the resource constant, it can also do
-    // (i64 r0) + (i32 r1) * (i14 i).
-    //
-    // SMRD instructions have an 8-bit, dword offset.
-    //
-    // Assume non-uniform access, since the address space isn't enough to know
-    // what instruction we will use, and since we don't know if this is a load
-    // or a store and scalar stores are only available on VI.
-    //
-    // We also know that if we are doing an extload, we can't use a scalar load.
-    //
-    // Private arrays end up using a scratch buffer most of the time, so also
-    // assume those use MUBUF instructions. Scratch loads / stores are currently
-    // implemented as MUBUF instructions with the offen bit set, so they are
-    // slightly different from the normal addr64 mode.
-    if (!isUInt<12>(AM.BaseOffs))
-      return false;
-
-    // FIXME: Since we can split immediate into soffset and immediate offset,
-    // would it make sense to allow any immediate?
-
-    switch (AM.Scale) {
-    case 0: // r + i or just i, depending on HasBaseReg.
-      return true;
-    case 1:
-      return true; // We have r + r or r + i.
-    case 2:
-      if (AM.HasBaseReg) {
-        // Reject 2 * r + r.
-        return false;
-      }
-
-      // Allow 2 * r as r + r
-      // Or  2 * r + i is allowed as r + r + i.
-      return true;
-    default: // Don't allow n * r
-      return false;
-    }
-  }
-  case AMDGPUAS::LOCAL_ADDRESS:
-  case AMDGPUAS::REGION_ADDRESS: {
-    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
-    // field.
-    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
-    // an 8-bit dword offset but we don't know the alignment here.
-    if (!isUInt<16>(AM.BaseOffs))
-      return false;
-
-    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
-      return true;
-
-    if (AM.Scale == 1 && AM.HasBaseReg)
-      return true;
-
-    return false;
-  }
-  case AMDGPUAS::FLAT_ADDRESS: {
-    // Flat instructions do not have offsets, and only have the register
-    // address.
-    return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
-  }
-  default:
-    llvm_unreachable("unhandled address space");
-  }
-}
-
-bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
-                                                      unsigned AddrSpace,
-                                                      unsigned Align,
-                                                      bool *IsFast) const {
-  if (IsFast)
-    *IsFast = false;
-
-  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
-  // which isn't a simple VT.
-  if (!VT.isSimple() || VT == MVT::Other)
-    return false;
-
-  // TODO - CI+ supports unaligned memory accesses, but this requires driver
-  // support.
-
-  // XXX - The only mention I see of this in the ISA manual is for LDS direct
-  // reads, where the byte address "must be dword aligned". Is it also true for
-  // the normal loads and stores?
-  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
-    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
-    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
-    // with adjacent offsets.
-    return Align % 4 == 0;
-  }
-
-  // Values smaller than a dword must be aligned.
-  // FIXME: This should be allowed on CI+
-  if (VT.bitsLT(MVT::i32))
-    return false;
-
-  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
-  // byte-address are ignored, thus forcing Dword alignment.
-  // This applies to private, global, and constant memory.
-  if (IsFast)
-    *IsFast = true;
-
-  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
-}
-
-EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
-                                          unsigned SrcAlign, bool IsMemset,
-                                          bool ZeroMemset,
-                                          bool MemcpyStrSrc,
-                                          MachineFunction &MF) const {
-  // FIXME: Should account for address space here.
-
-  // The default fallback uses the private pointer size as a guess for a type to
-  // use. Make sure we switch these to 64-bit accesses.
-
-  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
-    return MVT::v4i32;
-
-  if (Size >= 8 && DstAlign >= 4)
-    return MVT::v2i32;
-
-  // Use the default.
-  return MVT::Other;
-}
-
-TargetLoweringBase::LegalizeTypeAction
-SITargetLowering::getPreferredVectorAction(EVT VT) const {
-  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
-    return TypeSplitVector;
-
-  return TargetLoweringBase::getPreferredVectorAction(VT);
-}
-
-bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
-                                                         Type *Ty) const {
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-  return TII->isInlineConstant(Imm);
-}
-
-static EVT toIntegerVT(EVT VT) {
-  if (VT.isVector())
-    return VT.changeVectorElementTypeToInteger();
-  return MVT::getIntegerVT(VT.getSizeInBits());
-}
-
-SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
-                                         SDLoc SL, SDValue Chain,
-                                         unsigned Offset, bool Signed) const {
-  const DataLayout *DL = getDataLayout();
-  MachineFunction &MF = DAG.getMachineFunction();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
-  unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::INPUT_PTR);
-
-  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
-
-  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
-  MVT PtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);
-  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
-  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
-                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
-  SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
-                            DAG.getConstant(Offset, SL, PtrVT));
-  SDValue PtrOffset = DAG.getUNDEF(getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
-  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
-
-  unsigned Align = DL->getABITypeAlignment(Ty);
-
-  if (VT != MemVT && VT.isFloatingPoint()) {
-    // Do an integer load and convert.
-    // FIXME: This is mostly because load legalization after type legalization
-    // doesn't handle FP extloads.
-    assert(VT.getScalarType() == MVT::f32 &&
-           MemVT.getScalarType() == MVT::f16);
-
-    EVT IVT = toIntegerVT(VT);
-    EVT MemIVT = toIntegerVT(MemVT);
-    SDValue Load = DAG.getLoad(ISD::UNINDEXED, ISD::ZEXTLOAD,
-                               IVT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemIVT,
-                               false, // isVolatile
-                               true, // isNonTemporal
-                               true, // isInvariant
-                               Align); // Alignment
-    return DAG.getNode(ISD::FP16_TO_FP, SL, VT, Load);
-  }
-
-  ISD::LoadExtType ExtTy = Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
-  return DAG.getLoad(ISD::UNINDEXED, ExtTy,
-                     VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT,
-                     false, // isVolatile
-                     true, // isNonTemporal
-                     true, // isInvariant
-                     Align); // Alignment
-}
-
-SDValue SITargetLowering::LowerFormalArguments(
-    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
-    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
-    SmallVectorImpl<SDValue> &InVals) const {
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
-
-  MachineFunction &MF = DAG.getMachineFunction();
-  FunctionType *FType = MF.getFunction()->getFunctionType();
-  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
-
-  assert(CallConv == CallingConv::C);
-
-  SmallVector<ISD::InputArg, 16> Splits;
-  BitVector Skipped(Ins.size());
-
-  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
-    const ISD::InputArg &Arg = Ins[i];
-
-    // First, check if it's a PS input addr.
-    if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
-        !Arg.Flags.isByVal()) {
-
-      assert((PSInputNum <= 15) && "Too many PS inputs!");
-
-      if (!Arg.Used) {
-        // We can safely skip PS inputs
-        Skipped.set(i);
-        ++PSInputNum;
-        continue;
-      }
-
-      Info->PSInputAddr |= 1 << PSInputNum++;
-    }
-
-    // Second, split vertices into their elements.
-    if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) {
-      ISD::InputArg NewArg = Arg;
-      NewArg.Flags.setSplit();
-      NewArg.VT = Arg.VT.getVectorElementType();
-
-      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
-      // three or five element vertex only needs three or five registers,
-      // NOT four or eight.
-      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
-      unsigned NumElements = ParamType->getVectorNumElements();
-
-      for (unsigned j = 0; j != NumElements; ++j) {
-        Splits.push_back(NewArg);
-        NewArg.PartOffset += NewArg.VT.getStoreSize();
-      }
-
-    } else if (Info->getShaderType() != ShaderType::COMPUTE) {
-      Splits.push_back(Arg);
-    }
-  }
-
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                 *DAG.getContext());
-
-  // At least one interpolation mode must be enabled or else the GPU will hang.
-  if (Info->getShaderType() == ShaderType::PIXEL &&
-      (Info->PSInputAddr & 0x7F) == 0) {
-    Info->PSInputAddr |= 1;
-    CCInfo.AllocateReg(AMDGPU::VGPR0);
-    CCInfo.AllocateReg(AMDGPU::VGPR1);
-  }
-
-  // The pointer to the list of arguments is stored in SGPR0, SGPR1.
-  // The pointer to the scratch buffer is stored in SGPR2, SGPR3.
-  if (Info->getShaderType() == ShaderType::COMPUTE) {
-    if (Subtarget->isAmdHsaOS())
-      Info->NumUserSGPRs = 2;  // FIXME: Need to support scratch buffers.
-    else
-      Info->NumUserSGPRs = 4;
-
-    unsigned InputPtrReg =
-        TRI->getPreloadedValue(MF, SIRegisterInfo::INPUT_PTR);
-    unsigned InputPtrRegLo =
-        TRI->getPhysRegSubReg(InputPtrReg, &AMDGPU::SReg_32RegClass, 0);
-    unsigned InputPtrRegHi =
-        TRI->getPhysRegSubReg(InputPtrReg, &AMDGPU::SReg_32RegClass, 1);
-
-    unsigned ScratchPtrReg =
-        TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
-    unsigned ScratchPtrRegLo =
-        TRI->getPhysRegSubReg(ScratchPtrReg, &AMDGPU::SReg_32RegClass, 0);
-    unsigned ScratchPtrRegHi =
-        TRI->getPhysRegSubReg(ScratchPtrReg, &AMDGPU::SReg_32RegClass, 1);
-
-    CCInfo.AllocateReg(InputPtrRegLo);
-    CCInfo.AllocateReg(InputPtrRegHi);
-    CCInfo.AllocateReg(ScratchPtrRegLo);
-    CCInfo.AllocateReg(ScratchPtrRegHi);
-    MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
-    MF.addLiveIn(ScratchPtrReg, &AMDGPU::SReg_64RegClass);
-  }
-
-  if (Info->getShaderType() == ShaderType::COMPUTE) {
-    getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
-                            Splits);
-  }
-
-  AnalyzeFormalArguments(CCInfo, Splits);
-
-  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
-
-    const ISD::InputArg &Arg = Ins[i];
-    if (Skipped[i]) {
-      InVals.push_back(DAG.getUNDEF(Arg.VT));
-      continue;
-    }
-
-    CCValAssign &VA = ArgLocs[ArgIdx++];
-    MVT VT = VA.getLocVT();
-
-    if (VA.isMemLoc()) {
-      VT = Ins[i].VT;
-      EVT MemVT = Splits[i].VT;
-      // The first 36 bytes of the input buffer contain information about
-      // thread group and global sizes.
-      const unsigned Offset = 36 + VA.getLocMemOffset();
-      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
-                                   Offset, Ins[i].Flags.isSExt());
-
-      const PointerType *ParamTy =
-        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
-      if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
-          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
-        // On SI local pointers are just offsets into LDS, so they are always
-        // less than 16-bits.  On CI and newer they could potentially be
-        // real pointers, so we can't guarantee their size.
-        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
-                          DAG.getValueType(MVT::i16));
-      }
-
-      InVals.push_back(Arg);
-      Info->ABIArgOffset = Offset + MemVT.getStoreSize();
-      continue;
-    }
-    assert(VA.isRegLoc() && "Parameter must be in a register!");
-
-    unsigned Reg = VA.getLocReg();
-
-    if (VT == MVT::i64) {
-      // For now assume it is a pointer
-      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
-                                     &AMDGPU::SReg_64RegClass);
-      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
-      InVals.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
-      continue;
-    }
-
-    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
-
-    Reg = MF.addLiveIn(Reg, RC);
-    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
-
-    if (Arg.VT.isVector()) {
-
-      // Build a vector from the registers
-      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
-      unsigned NumElements = ParamType->getVectorNumElements();
-
-      SmallVector<SDValue, 4> Regs;
-      Regs.push_back(Val);
-      for (unsigned j = 1; j != NumElements; ++j) {
-        Reg = ArgLocs[ArgIdx++].getLocReg();
-        Reg = MF.addLiveIn(Reg, RC);
-        Regs.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
-      }
-
-      // Fill up the missing vector elements
-      NumElements = Arg.VT.getVectorNumElements() - NumElements;
-      Regs.append(NumElements, DAG.getUNDEF(VT));
-
-      InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
-      continue;
-    }
-
-    InVals.push_back(Val);
-  }
-
-  if (Info->getShaderType() != ShaderType::COMPUTE) {
-    unsigned ScratchIdx = CCInfo.getFirstUnallocated(ArrayRef<MCPhysReg>(
-        AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs()));
-    Info->ScratchOffsetReg = AMDGPU::SGPR_32RegClass.getRegister(ScratchIdx);
-  }
-  return Chain;
-}
-
-MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
-    MachineInstr * MI, MachineBasicBlock * BB) const {
-
-  MachineBasicBlock::iterator I = *MI;
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-
-  switch (MI->getOpcode()) {
-  default:
-    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
-  case AMDGPU::BRANCH:
-    return BB;
-  case AMDGPU::SI_RegisterStorePseudo: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
-    MachineInstrBuilder MIB =
-        BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::SI_RegisterStore),
-                Reg);
-    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
-      MIB.addOperand(MI->getOperand(i));
-
-    MI->eraseFromParent();
-    break;
-  }
-  }
-  return BB;
-}
-
-bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
-  // This currently forces unfolding various combinations of fsub into fma with
-  // free fneg'd operands. As long as we have fast FMA (controlled by
-  // isFMAFasterThanFMulAndFAdd), we should perform these.
-
-  // When fma is quarter rate, for f64 where add / sub are at best half rate,
-  // most of these combines appear to be cycle neutral but save on instruction
-  // count / code size.
-  return true;
-}
-
-EVT SITargetLowering::getSetCCResultType(LLVMContext &Ctx, EVT VT) const {
-  if (!VT.isVector()) {
-    return MVT::i1;
-  }
-  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
-}
-
-MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const {
-  return MVT::i32;
-}
-
-// Answering this is somewhat tricky, as different devices have different
-// rates for fma and for all f64 operations.
-//
-// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
-// regardless of which device (although the number of cycles differs between
-// devices), so it is always profitable for f64.
-//
-// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
-// only on full rate devices. Normally, we should prefer selecting v_mad_f32
-// which we can always do even without fused FP ops since it returns the same
-// result as the separate operations and since it is always full
-// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32,
-// however, does not support denormals, so we do report fma as faster if we
-// have a fast fma device and require denormals.
-//
-bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
-  VT = VT.getScalarType();
-
-  if (!VT.isSimple())
-    return false;
-
-  switch (VT.getSimpleVT().SimpleTy) {
-  case MVT::f32:
-    // This is as fast on some subtargets. However, we always have full rate f32
-    // mad available which returns the same result as the separate operations
-    // which we should prefer over fma. We can't use this if we want to support
-    // denormals, so only report this in these cases.
-    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
-  case MVT::f64:
-    return true;
-  default:
-    break;
-  }
-
-  return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Custom DAG Lowering Operations
-//===----------------------------------------------------------------------===//
-
-SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
-  switch (Op.getOpcode()) {
-  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
-  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
-  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
-  case ISD::LOAD: {
-    SDValue Result = LowerLOAD(Op, DAG);
-    assert((!Result.getNode() ||
-            Result.getNode()->getNumValues() == 2) &&
-           "Load should return a value and a chain");
-    return Result;
-  }
-
-  case ISD::FSIN:
-  case ISD::FCOS:
-    return LowerTrig(Op, DAG);
-  case ISD::SELECT: return LowerSELECT(Op, DAG);
-  case ISD::FDIV: return LowerFDIV(Op, DAG);
-  case ISD::STORE: return LowerSTORE(Op, DAG);
-  case ISD::GlobalAddress: {
-    MachineFunction &MF = DAG.getMachineFunction();
-    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-    return LowerGlobalAddress(MFI, Op, DAG);
-  }
-  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
-  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
-  }
-  return SDValue();
-}
-
-/// \brief Helper function for LowerBRCOND
-static SDNode *findUser(SDValue Value, unsigned Opcode) {
-
-  SDNode *Parent = Value.getNode();
-  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
-       I != E; ++I) {
-
-    if (I.getUse().get() != Value)
-      continue;
-
-    if (I->getOpcode() == Opcode)
-      return *I;
-  }
-  return nullptr;
-}
-
-SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
-
-  FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op);
-  unsigned FrameIndex = FINode->getIndex();
-
-  return DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
-}
-
-/// This transforms the control flow intrinsics to get the branch destination
-/// as the last parameter. It also switches the branch target with BR if the
-/// need arises.
-SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
-                                      SelectionDAG &DAG) const {
-
-  SDLoc DL(BRCOND);
-
-  SDNode *Intr = BRCOND.getOperand(1).getNode();
-  SDValue Target = BRCOND.getOperand(2);
-  SDNode *BR = nullptr;
-
-  if (Intr->getOpcode() == ISD::SETCC) {
-    // As long as we negate the condition everything is fine
-    SDNode *SetCC = Intr;
-    assert(SetCC->getConstantOperandVal(1) == 1);
-    assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
-           ISD::SETNE);
-    Intr = SetCC->getOperand(0).getNode();
-
-  } else {
-    // Get the target from BR if we don't negate the condition
-    BR = findUser(BRCOND, ISD::BR);
-    Target = BR->getOperand(1);
-  }
-
-  assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);
-
-  // Build the result types of the new intrinsic call.
-  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
-
-  // Build the operands of the new intrinsic call.
-  SmallVector<SDValue, 4> Ops;
-  Ops.push_back(BRCOND.getOperand(0));
-  Ops.append(Intr->op_begin() + 1, Intr->op_end());
-  Ops.push_back(Target);
-
-  // Build the new intrinsic call.
-  SDNode *Result = DAG.getNode(
-    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
-    DAG.getVTList(Res), Ops).getNode();
-
-  if (BR) {
-    // Give the branch instruction our target
-    SDValue Ops[] = {
-      BR->getOperand(0),
-      BRCOND.getOperand(2)
-    };
-    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
-    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
-    BR = NewBR.getNode();
-  }
-
-  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);
-
-  // Copy the intrinsic results to registers
-  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
-    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
-    if (!CopyToReg)
-      continue;
-
-    Chain = DAG.getCopyToReg(
-      Chain, DL,
-      CopyToReg->getOperand(1),
-      SDValue(Result, i - 1),
-      SDValue());
-
-    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
-  }
-
-  // Remove the old intrinsic from the chain
-  DAG.ReplaceAllUsesOfValueWith(
-    SDValue(Intr, Intr->getNumValues() - 1),
-    Intr->getOperand(0));
-
-  return Chain;
-}
-
-SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
-                                             SDValue Op,
-                                             SelectionDAG &DAG) const {
-  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
-
-  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
-    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
-
-  SDLoc DL(GSD);
-  const GlobalValue *GV = GSD->getGlobal();
-  MVT PtrVT = getPointerTy(GSD->getAddressSpace());
-
-  SDValue Ptr = DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT);
-  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
-
-  SDValue PtrLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
-                              DAG.getConstant(0, DL, MVT::i32));
-  SDValue PtrHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr,
-                              DAG.getConstant(1, DL, MVT::i32));
-
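-  // The 64-bit pointer + 32-bit offset is computed as a 32-bit add with
-  // carry: Lo = PtrLo + GA (ADDC produces the carry), Hi = PtrHi + 0 + carry
-  // (ADDE consumes it).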
-  SDValue Lo = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i32, MVT::Glue),
-                           PtrLo, GA);
-  SDValue Hi = DAG.getNode(ISD::ADDE, DL, DAG.getVTList(MVT::i32, MVT::Glue),
-                           PtrHi, DAG.getConstant(0, DL, MVT::i32),
-                           SDValue(Lo.getNode(), 1));
-  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
-}
-
-SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
-                                   SDValue V) const {
-  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
-  // so we will end up with redundant moves to m0.
-  //
-  // We can't use S_MOV_B32, because there is no way to specify m0 as the
-  // destination register.
-  //
-  // We have to use both.  MachineCSE will combine all the S_MOV_B32
-  // instructions and the register coalescer will eliminate the extra copies.
-  SDNode *M0 = DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, V.getValueType(), V);
-  return DAG.getCopyToReg(Chain, DL, DAG.getRegister(AMDGPU::M0, MVT::i32),
-                          SDValue(M0, 0), SDValue()); // Glue
-                                                      // A Null SDValue creates
-                                                      // a glue result.
-}
-
-SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
-                                                  SelectionDAG &DAG) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
-
-  EVT VT = Op.getValueType();
-  SDLoc DL(Op);
-  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
-
-  switch (IntrinsicID) {
-  case Intrinsic::r600_read_ngroups_x:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::NGROUPS_X, false);
-  case Intrinsic::r600_read_ngroups_y:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::NGROUPS_Y, false);
-  case Intrinsic::r600_read_ngroups_z:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::NGROUPS_Z, false);
-  case Intrinsic::r600_read_global_size_x:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
-  case Intrinsic::r600_read_global_size_y:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
-  case Intrinsic::r600_read_global_size_z:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
-  case Intrinsic::r600_read_local_size_x:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::LOCAL_SIZE_X, false);
-  case Intrinsic::r600_read_local_size_y:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::LOCAL_SIZE_Y, false);
-  case Intrinsic::r600_read_local_size_z:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          SI::KernelInputOffsets::LOCAL_SIZE_Z, false);
-
-  case Intrinsic::AMDGPU_read_workdim:
-    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          MF.getInfo<SIMachineFunctionInfo>()->ABIArgOffset,
-                          false);
-
-  case Intrinsic::r600_read_tgid_x:
-    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_X), VT);
-  case Intrinsic::r600_read_tgid_y:
-    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_Y), VT);
-  case Intrinsic::r600_read_tgid_z:
-    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_Z), VT);
-  case Intrinsic::r600_read_tidig_x:
-    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_X), VT);
-  case Intrinsic::r600_read_tidig_y:
-    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Y), VT);
-  case Intrinsic::r600_read_tidig_z:
-    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
-      TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Z), VT);
-  case AMDGPUIntrinsic::SI_load_const: {
-    SDValue Ops[] = {
-      Op.getOperand(1),
-      Op.getOperand(2)
-    };
-
-    MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(),
-      MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
-      VT.getStoreSize(), 4);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
-                                   Op->getVTList(), Ops, VT, MMO);
-  }
-  case AMDGPUIntrinsic::SI_sample:
-    return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
-  case AMDGPUIntrinsic::SI_sampleb:
-    return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
-  case AMDGPUIntrinsic::SI_sampled:
-    return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
-  case AMDGPUIntrinsic::SI_samplel:
-    return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
-  case AMDGPUIntrinsic::SI_vs_load_input:
-    return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
-                       Op.getOperand(1),
-                       Op.getOperand(2),
-                       Op.getOperand(3));
-
-  case AMDGPUIntrinsic::AMDGPU_fract:
-  case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
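-    // fract(x) is expanded as x - floor(x).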
-    return DAG.getNode(ISD::FSUB, DL, VT, Op.getOperand(1),
-                       DAG.getNode(ISD::FFLOOR, DL, VT, Op.getOperand(1)));
-  case AMDGPUIntrinsic::SI_fs_constant: {
-    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
-    SDValue Glue = M0.getValue(1);
-    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
-                       DAG.getConstant(2, DL, MVT::i32), // P0
-                       Op.getOperand(1), Op.getOperand(2), Glue);
-  }
-  case AMDGPUIntrinsic::SI_fs_interp: {
-    SDValue IJ = Op.getOperand(4);
-    SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
-                            DAG.getConstant(0, DL, MVT::i32));
-    SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
-                            DAG.getConstant(1, DL, MVT::i32));
-    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
-    SDValue Glue = M0.getValue(1);
-    SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL,
-                             DAG.getVTList(MVT::f32, MVT::Glue),
-                             I, Op.getOperand(1), Op.getOperand(2), Glue);
-    Glue = SDValue(P1.getNode(), 1);
-    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J,
-                             Op.getOperand(1), Op.getOperand(2), Glue);
-  }
-  default:
-    return AMDGPUTargetLowering::LowerOperation(Op, DAG);
-  }
-}
-
-SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
-                                              SelectionDAG &DAG) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  SDLoc DL(Op);
-  SDValue Chain = Op.getOperand(0);
-  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-
-  switch (IntrinsicID) {
-  case AMDGPUIntrinsic::SI_sendmsg: {
-    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
-    SDValue Glue = Chain.getValue(1);
-    return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain,
-                       Op.getOperand(2), Glue);
-  }
-  case AMDGPUIntrinsic::SI_tbuffer_store: {
-    SDValue Ops[] = {
-      Chain,
-      Op.getOperand(2),
-      Op.getOperand(3),
-      Op.getOperand(4),
-      Op.getOperand(5),
-      Op.getOperand(6),
-      Op.getOperand(7),
-      Op.getOperand(8),
-      Op.getOperand(9),
-      Op.getOperand(10),
-      Op.getOperand(11),
-      Op.getOperand(12),
-      Op.getOperand(13),
-      Op.getOperand(14)
-    };
-
-    EVT VT = Op.getOperand(3).getValueType();
-
-    MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(),
-      MachineMemOperand::MOStore,
-      VT.getStoreSize(), 4);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
-                                   Op->getVTList(), Ops, VT, MMO);
-  }
-  default:
-    return SDValue();
-  }
-}
-
-SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  LoadSDNode *Load = cast<LoadSDNode>(Op);
-
-  if (Op.getValueType().isVector()) {
-    assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
-           "Custom lowering for non-i32 vectors hasn't been implemented.");
-    unsigned NumElements = Op.getValueType().getVectorNumElements();
-    assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
-    switch (Load->getAddressSpace()) {
-      default: break;
-      case AMDGPUAS::GLOBAL_ADDRESS:
-      case AMDGPUAS::PRIVATE_ADDRESS:
-        // v4 loads are supported for private and global memory.
-        if (NumElements <= 4)
-          break;
-        // fall-through
-      case AMDGPUAS::LOCAL_ADDRESS:
-        return ScalarizeVectorLoad(Op, DAG);
-    }
-  }
-
-  return AMDGPUTargetLowering::LowerLOAD(Op, DAG);
-}
-
-SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
-                                               const SDValue &Op,
-                                               SelectionDAG &DAG) const {
-  return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
-                     Op.getOperand(2),
-                     Op.getOperand(3),
-                     Op.getOperand(4));
-}
-
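-// i64 SELECT is split into two i32 selects over the bitcast halves, and the
-// result is reassembled via BUILD_VECTOR plus a bitcast back to i64; the
-// hardware conditional move (v_cndmask_b32) only operates on 32-bit values.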
-SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
-  if (Op.getValueType() != MVT::i64)
-    return SDValue();
-
-  SDLoc DL(Op);
-  SDValue Cond = Op.getOperand(0);
-
-  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
-  SDValue One = DAG.getConstant(1, DL, MVT::i32);
-
-  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
-  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
-
-  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
-  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
-
-  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
-
-  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
-  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
-
-  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
-
-  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi);
-  return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
-}
-
-// Catch division cases where we can use shortcuts with rcp and rsq
-// instructions.
-SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
-  EVT VT = Op.getValueType();
-  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
-
-  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
-    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) &&
-        CLHS->isExactlyValue(1.0)) {
-      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
-      // the CI documentation they have a worst-case error of 1 ulp.
-      // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
-      // use it as long as we aren't trying to use denormals.
-
-      // 1.0 / sqrt(x) -> rsq(x)
-      //
-      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
-      // error seems really high at 2^29 ULP.
-      if (RHS.getOpcode() == ISD::FSQRT)
-        return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
-
-      // 1.0 / x -> rcp(x)
-      return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
-    }
-  }
-
-  if (Unsafe) {
-    // Turn into multiply by the reciprocal.
-    // x / y -> x * (1.0 / y)
-    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
-    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip);
-  }
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
-  SDValue FastLowered = LowerFastFDIV(Op, DAG);
-  if (FastLowered.getNode())
-    return FastLowered;
-
-  // This uses v_rcp_f32 which does not handle denormals. Let this hit a
-  // selection error for now rather than do something incorrect.
-  if (Subtarget->hasFP32Denormals())
-    return SDValue();
-
-  SDLoc SL(Op);
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
-
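-  // Scaling trick (a reading of the constants below): K0 = 0x6f800000 is
-  // 2^96 and K1 = 0x2f800000 is 2^-32. Denominators larger than 2^96 are
-  // pre-scaled by 2^-32 to keep the reciprocal away from the denormal range,
-  // and the same factor (r3) is multiplied back into the final result.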
-  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
-
-  const APFloat K0Val(BitsToFloat(0x6f800000));
-  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
-
-  const APFloat K1Val(BitsToFloat(0x2f800000));
-  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
-
-  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
-
-  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
-
-  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
-
-  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
-
-  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
-
-  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
-
-  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
-}
-
-SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
-  if (DAG.getTarget().Options.UnsafeFPMath)
-    return LowerFastFDIV(Op, DAG);
-
-  SDLoc SL(Op);
-  SDValue X = Op.getOperand(0);
-  SDValue Y = Op.getOperand(1);
-
-  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
-
-  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
-
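-  // The expansion below is the usual scale / refine / fixup sequence:
-  // Fma0 computes the error e = 1 - y*rcp(y), Fma1 and Fma2/Fma3 are
-  // Newton-Raphson refinement steps, Mul is the quotient estimate, Fma4 is
-  // its residual, and DIV_FMAS/DIV_FIXUP produce the final rounded result.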
-  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
-
-  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
-
-  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
-
-  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
-
-  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
-
-  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
-
-  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
-
-  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
-  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
-
-  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
-                             NegDivScale0, Mul, DivScale1);
-
-  SDValue Scale;
-
-  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-    // Work around a hardware bug on SI where the condition output from
-    // div_scale is not usable.
-
-    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
-
-    // Figure out which scale to use for div_fmas.
-    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
-    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
-    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
-    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
-
-    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
-    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
-
-    SDValue Scale0Hi
-      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
-    SDValue Scale1Hi
-      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
-
-    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
-    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
-    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
-  } else {
-    Scale = DivScale1.getValue(1);
-  }
-
-  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
-                             Fma4, Fma3, Mul, Scale);
-
-  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
-}
-
-SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
-  EVT VT = Op.getValueType();
-
-  if (VT == MVT::f32)
-    return LowerFDIV32(Op, DAG);
-
-  if (VT == MVT::f64)
-    return LowerFDIV64(Op, DAG);
-
-  llvm_unreachable("Unexpected type for fdiv");
-}
-
-SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  StoreSDNode *Store = cast<StoreSDNode>(Op);
-  EVT VT = Store->getMemoryVT();
-
-  // These stores are legal.
-  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
-    if (VT.isVector() && VT.getVectorNumElements() > 4)
-      return ScalarizeVectorStore(Op, DAG);
-    return SDValue();
-  }
-
-  SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Ret.getNode())
-    return Ret;
-
-  if (VT.isVector() && VT.getVectorNumElements() >= 8)
-      return ScalarizeVectorStore(Op, DAG);
-
-  if (VT == MVT::i1)
-    return DAG.getTruncStore(Store->getChain(), DL,
-                        DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
-                        Store->getBasePtr(), MVT::i1, Store->getMemOperand());
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-  SDValue Arg = Op.getOperand(0);
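-  // The hardware sin/cos take the angle in units of turns: scale the radian
-  // input by 1/(2*pi) and keep only the fractional part (FRACT) so the
-  // argument lands in [0, 1).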
-  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
-                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
-                                              DAG.getConstantFP(0.5/M_PI, DL,
-                                                                VT)));
-
-  switch (Op.getOpcode()) {
-  case ISD::FCOS:
-    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
-  case ISD::FSIN:
-    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
-  default:
-    llvm_unreachable("Wrong trig opcode");
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Custom DAG optimizations
-//===----------------------------------------------------------------------===//
-
-SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
-                                                     DAGCombinerInfo &DCI) const {
-  EVT VT = N->getValueType(0);
-  EVT ScalarVT = VT.getScalarType();
-  if (ScalarVT != MVT::f32)
-    return SDValue();
-
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc DL(N);
-
-  SDValue Src = N->getOperand(0);
-  EVT SrcVT = Src.getValueType();
-
-  // TODO: We could try to match extracting the higher bytes, which would be
-  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
-  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
-  // about in practice.
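-  // For example (illustrative): (uint_to_fp x) where the top 24 bits of x
-  // are known zero becomes (cvt_f32_ubyte0 x).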
-  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
-    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
-      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
-      DCI.AddToWorklist(Cvt.getNode());
-      return Cvt;
-    }
-  }
-
-  // We are primarily trying to catch operations on illegal vector types
-  // before they are expanded.
-  // For scalars, we can use the more flexible method of checking masked bits
-  // after legalization.
-  if (!DCI.isBeforeLegalize() ||
-      !SrcVT.isVector() ||
-      SrcVT.getVectorElementType() != MVT::i8) {
-    return SDValue();
-  }
-
-  assert(DCI.isBeforeLegalize() && "Unexpected legal type");
-
-  // Weird-sized vectors are a pain to handle, but we know 3 is really the
-  // same size as 4.
-  unsigned NElts = SrcVT.getVectorNumElements();
-  if (!SrcVT.isSimple() && NElts != 3)
-    return SDValue();
-
-  // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to
-  // prevent a mess from expanding to v4i32 and repacking.
-  if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
-    EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
-    EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
-    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
-    LoadSDNode *Load = cast<LoadSDNode>(Src);
-
-    unsigned AS = Load->getAddressSpace();
-    unsigned Align = Load->getAlignment();
-    Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
-
-    // Don't try to replace the load if we have to expand it due to alignment
-    // problems. Otherwise we will end up scalarizing the load, and trying to
-    // repack into the vector for no real reason.
-    if (Align < ABIAlignment &&
-        !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) {
-      return SDValue();
-    }
-
-    SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
-                                     Load->getChain(),
-                                     Load->getBasePtr(),
-                                     LoadVT,
-                                     Load->getMemOperand());
-
-    // Make sure successors of the original load stay after it by updating
-    // them to use the new Chain.
-    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));
-
-    SmallVector<SDValue, 4> Elts;
-    if (RegVT.isVector())
-      DAG.ExtractVectorElements(NewLoad, Elts);
-    else
-      Elts.push_back(NewLoad);
-
-    SmallVector<SDValue, 4> Ops;
-
-    unsigned EltIdx = 0;
-    for (SDValue Elt : Elts) {
-      unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
-      for (unsigned I = 0; I < ComponentsInElt; ++I) {
-        unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
-        SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
-        DCI.AddToWorklist(Cvt.getNode());
-        Ops.push_back(Cvt);
-      }
-
-      ++EltIdx;
-    }
-
-    assert(Ops.size() == NElts);
-
-    return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
-  }
-
-  return SDValue();
-}
-
-/// \brief Return true if the given offset Size in bytes can be folded into
-/// the immediate offsets of a memory instruction for the given address space.
-static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
-                          const AMDGPUSubtarget &STI) {
-  switch (AS) {
-  case AMDGPUAS::GLOBAL_ADDRESS: {
-    // MUBUF instructions have a 12-bit offset in bytes.
-    return isUInt<12>(OffsetSize);
-  }
-  case AMDGPUAS::CONSTANT_ADDRESS: {
-    // SMRD instructions have an 8-bit offset in dwords on SI and
-    // a 20-bit offset in bytes on VI.
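-    // For example (illustrative): on SI an offset of 1020 bytes (255 dwords)
-    // folds, but 1024 bytes (256 dwords) does not; on VI any byte offset
-    // below 2^20 folds.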
-    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
-      return isUInt<20>(OffsetSize);
-    else
-      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
-  }
-  case AMDGPUAS::LOCAL_ADDRESS:
-  case AMDGPUAS::REGION_ADDRESS: {
-    // The single offset versions have a 16-bit offset in bytes.
-    return isUInt<16>(OffsetSize);
-  }
-  case AMDGPUAS::PRIVATE_ADDRESS:
-  // Indirect register addressing does not use any offsets.
-  default:
-    return false;
-  }
-}
-
-// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
-
-// This is a variant of
-// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
-//
-// The normal DAG combiner will do this, but only if the add has one use since
-// that would increase the number of instructions.
-//
-// This prevents us from seeing a constant offset that can be folded into a
-// memory instruction's addressing mode. If we know the resulting add offset of
-// a pointer can be folded into an addressing offset, we can replace the pointer
-// operand with the add of new constant offset. This eliminates one of the uses,
-// and may allow the remaining use to also be simplified.
-//
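-// For example (illustrative): (shl (add x, 16), 2) becomes
-// (add (shl x, 2), 64) when 64 fits the addressing-mode offset field.
-//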
-SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
-                                               unsigned AddrSpace,
-                                               DAGCombinerInfo &DCI) const {
-  SDValue N0 = N->getOperand(0);
-  SDValue N1 = N->getOperand(1);
-
-  if (N0.getOpcode() != ISD::ADD)
-    return SDValue();
-
-  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
-  if (!CN1)
-    return SDValue();
-
-  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
-  if (!CAdd)
-    return SDValue();
-
-  // If the resulting offset is too large, we can't fold it into the addressing
-  // mode offset.
-  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
-  if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget))
-    return SDValue();
-
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc SL(N);
-  EVT VT = N->getValueType(0);
-
-  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
-  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
-
-  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
-}
-
-SDValue SITargetLowering::performAndCombine(SDNode *N,
-                                            DAGCombinerInfo &DCI) const {
-  if (DCI.isBeforeLegalize())
-    return SDValue();
-
-  SelectionDAG &DAG = DCI.DAG;
-
-  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
-  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-
-  if (LHS.getOpcode() == ISD::SETCC &&
-      RHS.getOpcode() == ISD::SETCC) {
-    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
-    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
-
-    SDValue X = LHS.getOperand(0);
-    SDValue Y = RHS.getOperand(0);
-    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
-      return SDValue();
-
-    if (LCC == ISD::SETO) {
-      if (X != LHS.getOperand(1))
-        return SDValue();
-
-      if (RCC == ISD::SETUNE) {
-        const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
-        if (!C1 || !C1->isInfinity() || C1->isNegative())
-          return SDValue();
-
-        const uint32_t Mask = SIInstrFlags::N_NORMAL |
-                              SIInstrFlags::N_SUBNORMAL |
-                              SIInstrFlags::N_ZERO |
-                              SIInstrFlags::P_ZERO |
-                              SIInstrFlags::P_SUBNORMAL |
-                              SIInstrFlags::P_NORMAL;
-
-        static_assert(((~(SIInstrFlags::S_NAN |
-                          SIInstrFlags::Q_NAN |
-                          SIInstrFlags::N_INFINITY |
-                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
-                      "mask not equal");
-
-        SDLoc DL(N);
-        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
-                           X, DAG.getConstant(Mask, DL, MVT::i32));
-      }
-    }
-  }
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::performOrCombine(SDNode *N,
-                                           DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-
-  // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
-  if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
-      RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
-    SDValue Src = LHS.getOperand(0);
-    if (Src != RHS.getOperand(0))
-      return SDValue();
-
-    const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
-    const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
-    if (!CLHS || !CRHS)
-      return SDValue();
-
-    // Only 10 bits are used.
-    static const uint32_t MaxMask = 0x3ff;
-
-    uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
-    SDLoc DL(N);
-    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
-                       Src, DAG.getConstant(NewMask, DL, MVT::i32));
-  }
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::performClassCombine(SDNode *N,
-                                              DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  SDValue Mask = N->getOperand(1);
-
-  // fp_class x, 0 -> false
-  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
-    if (CMask->isNullValue())
-      return DAG.getConstant(0, SDLoc(N), MVT::i1);
-  }
-
-  return SDValue();
-}
-
-static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
-  switch (Opc) {
-  case ISD::FMAXNUM:
-    return AMDGPUISD::FMAX3;
-  case ISD::SMAX:
-    return AMDGPUISD::SMAX3;
-  case ISD::UMAX:
-    return AMDGPUISD::UMAX3;
-  case ISD::FMINNUM:
-    return AMDGPUISD::FMIN3;
-  case ISD::SMIN:
-    return AMDGPUISD::SMIN3;
-  case ISD::UMIN:
-    return AMDGPUISD::UMIN3;
-  default:
-    llvm_unreachable("Not a min/max opcode");
-  }
-}
-
-SDValue SITargetLowering::performMin3Max3Combine(SDNode *N,
-                                                 DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-
-  unsigned Opc = N->getOpcode();
-  SDValue Op0 = N->getOperand(0);
-  SDValue Op1 = N->getOperand(1);
-
-  // Only do this if the inner op has one use, since otherwise it just
-  // increases register pressure for no benefit.
-
-  // max(max(a, b), c)
-  if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
-    SDLoc DL(N);
-    return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
-                       DL,
-                       N->getValueType(0),
-                       Op0.getOperand(0),
-                       Op0.getOperand(1),
-                       Op1);
-  }
-
-  // max(a, max(b, c))
-  if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
-    SDLoc DL(N);
-    return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
-                       DL,
-                       N->getValueType(0),
-                       Op0,
-                       Op1.getOperand(0),
-                       Op1.getOperand(1));
-  }
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::performSetCCCombine(SDNode *N,
-                                              DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc SL(N);
-
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-  EVT VT = LHS.getValueType();
-
-  if (VT != MVT::f32 && VT != MVT::f64)
-    return SDValue();
-
-  // Match isinf pattern
-  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
-  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
-    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
-    if (!CRHS)
-      return SDValue();
-
-    const APFloat &APF = CRHS->getValueAPF();
-    if (APF.isInfinity() && !APF.isNegative()) {
-      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
-      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
-                         DAG.getConstant(Mask, SL, MVT::i32));
-    }
-  }
-
-  return SDValue();
-}
-
-SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
-                                            DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc DL(N);
-
-  switch (N->getOpcode()) {
-  default:
-    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-  case ISD::SETCC:
-    return performSetCCCombine(N, DCI);
-  case ISD::FMAXNUM: // TODO: What about fmax_legacy?
-  case ISD::FMINNUM:
-  case ISD::SMAX:
-  case ISD::SMIN:
-  case ISD::UMAX:
-  case ISD::UMIN: {
-    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
-        N->getValueType(0) != MVT::f64 &&
-        getTargetMachine().getOptLevel() > CodeGenOpt::None)
-      return performMin3Max3Combine(N, DCI);
-    break;
-  }
-
-  case AMDGPUISD::CVT_F32_UBYTE0:
-  case AMDGPUISD::CVT_F32_UBYTE1:
-  case AMDGPUISD::CVT_F32_UBYTE2:
-  case AMDGPUISD::CVT_F32_UBYTE3: {
-    unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
-
-    SDValue Src = N->getOperand(0);
-    APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
-
-    APInt KnownZero, KnownOne;
-    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
-                                          !DCI.isBeforeLegalizeOps());
-    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-    if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
-        TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
-      DCI.CommitTargetLoweringOpt(TLO);
-    }
-
-    break;
-  }
-
-  case ISD::UINT_TO_FP:
-    return performUCharToFloatCombine(N, DCI);
-
-  case ISD::FADD: {
-    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
-      break;
-
-    EVT VT = N->getValueType(0);
-    if (VT != MVT::f32)
-      break;
-
-    // Only do this if we are not trying to support denormals. v_mad_f32 does
-    // not support denormals ever.
-    if (Subtarget->hasFP32Denormals())
-      break;
-
-    SDValue LHS = N->getOperand(0);
-    SDValue RHS = N->getOperand(1);
-
-    // These should really be instruction patterns, but writing patterns with
-    // source modifiers is a pain.
-
-    // fadd (fadd (a, a), b) -> mad 2.0, a, b
-    if (LHS.getOpcode() == ISD::FADD) {
-      SDValue A = LHS.getOperand(0);
-      if (A == LHS.getOperand(1)) {
-        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
-        return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
-      }
-    }
-
-    // fadd (b, fadd (a, a)) -> mad 2.0, a, b
-    if (RHS.getOpcode() == ISD::FADD) {
-      SDValue A = RHS.getOperand(0);
-      if (A == RHS.getOperand(1)) {
-        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
-        return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
-      }
-    }
-
-    return SDValue();
-  }
-  case ISD::FSUB: {
-    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
-      break;
-
-    EVT VT = N->getValueType(0);
-
-    // Try to get the fneg to fold into the source modifier. This undoes generic
-    // DAG combines and folds them into the mad.
-    //
-    // Only do this if we are not trying to support denormals. v_mad_f32 does
-    // not support denormals ever.
-    if (VT == MVT::f32 &&
-        !Subtarget->hasFP32Denormals()) {
-      SDValue LHS = N->getOperand(0);
-      SDValue RHS = N->getOperand(1);
-      if (LHS.getOpcode() == ISD::FADD) {
-        // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
-
-        SDValue A = LHS.getOperand(0);
-        if (A == LHS.getOperand(1)) {
-          const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
-          SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
-
-          return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
-        }
-      }
-
-      if (RHS.getOpcode() == ISD::FADD) {
-        // (fsub c, (fadd a, a)) -> mad -2.0, a, c
-
-        SDValue A = RHS.getOperand(0);
-        if (A == RHS.getOperand(1)) {
-          const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32);
-          return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
-        }
-      }
-
-      return SDValue();
-    }
-
-    break;
-  }
-  case ISD::LOAD:
-  case ISD::STORE:
-  case ISD::ATOMIC_LOAD:
-  case ISD::ATOMIC_STORE:
-  case ISD::ATOMIC_CMP_SWAP:
-  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
-  case ISD::ATOMIC_SWAP:
-  case ISD::ATOMIC_LOAD_ADD:
-  case ISD::ATOMIC_LOAD_SUB:
-  case ISD::ATOMIC_LOAD_AND:
-  case ISD::ATOMIC_LOAD_OR:
-  case ISD::ATOMIC_LOAD_XOR:
-  case ISD::ATOMIC_LOAD_NAND:
-  case ISD::ATOMIC_LOAD_MIN:
-  case ISD::ATOMIC_LOAD_MAX:
-  case ISD::ATOMIC_LOAD_UMIN:
-  case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics.
-    if (DCI.isBeforeLegalize())
-      break;
-
-    MemSDNode *MemNode = cast<MemSDNode>(N);
-    SDValue Ptr = MemNode->getBasePtr();
-
-    // TODO: We could also do this for multiplies.
-    unsigned AS = MemNode->getAddressSpace();
-    if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
-      SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
-      if (NewPtr) {
-        SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end());
-
-        NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
-        return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
-      }
-    }
-    break;
-  }
-  case ISD::AND:
-    return performAndCombine(N, DCI);
-  case ISD::OR:
-    return performOrCombine(N, DCI);
-  case AMDGPUISD::FP_CLASS:
-    return performClassCombine(N, DCI);
-  }
-  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-}
-
-/// \brief Analyze the possible immediate value Op
-///
-/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
-/// and the immediate value if it's a literal immediate.
-int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
-
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-
-  if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) {
-    if (TII->isInlineConstant(Node->getAPIntValue()))
-      return 0;
-
-    uint64_t Val = Node->getZExtValue();
-    return isUInt<32>(Val) ? Val : -1;
-  }
-
-  if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) {
-    if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt()))
-      return 0;
-
-    if (Node->getValueType(0) == MVT::f32)
-      return FloatToBits(Node->getValueAPF().convertToFloat());
-
-    return -1;
-  }
-
-  return -1;
-}
-
-/// \brief Helper function for adjustWritemask
-static unsigned SubIdx2Lane(unsigned Idx) {
-  switch (Idx) {
-  default: return 0;
-  case AMDGPU::sub0: return 0;
-  case AMDGPU::sub1: return 1;
-  case AMDGPU::sub2: return 2;
-  case AMDGPU::sub3: return 3;
-  }
-}
-
-/// \brief Adjust the writemask of MIMG instructions
-void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
-                                       SelectionDAG &DAG) const {
-  SDNode *Users[4] = { };
-  unsigned Lane = 0;
-  unsigned OldDmask = Node->getConstantOperandVal(0);
-  unsigned NewDmask = 0;
-
-  // Try to figure out the used register components
-  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
-       I != E; ++I) {
-
-    // Abort if we can't understand the usage
-    if (!I->isMachineOpcode() ||
-        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
-      return;
-
-    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
-    // Note that subregs are packed, i.e. Lane==0 is the first bit set
-    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
-    // set, etc.
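-    // For example (illustrative): with OldDmask = 0b1010 (Y and W enabled),
-    // Lane 0 reads component Y and Lane 1 reads component W.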
-    Lane = SubIdx2Lane(I->getConstantOperandVal(1));
-
-    // Set which texture component corresponds to the lane.
-    unsigned Comp;
-    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
-      assert(Dmask);
-      Comp = countTrailingZeros(Dmask);
-      Dmask &= ~(1 << Comp);
-    }
-
-    // Abort if we have more than one user per component
-    if (Users[Lane])
-      return;
-
-    Users[Lane] = *I;
-    NewDmask |= 1 << Comp;
-  }
-
-  // Abort if there's no change
-  if (NewDmask == OldDmask)
-    return;
-
-  // Adjust the writemask in the node
-  std::vector<SDValue> Ops;
-  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
-  Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end());
-  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
-
-  // If we only got one lane, replace it with a copy
-  // (if NewDmask has only one bit set...)
-  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
-    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
-                                       MVT::i32);
-    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
-                                      SDLoc(), Users[Lane]->getValueType(0),
-                                      SDValue(Node, 0), RC);
-    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
-    return;
-  }
-
-  // Update the users of the node with the new indices
-  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
-
-    SDNode *User = Users[i];
-    if (!User)
-      continue;
-
-    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
-    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
-
-    switch (Idx) {
-    default: break;
-    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
-    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
-    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
-    }
-  }
-}
-
-/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
-/// with frame index operands.
-/// LLVM assumes that inputs to these instructions are registers.
-void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
-                                                     SelectionDAG &DAG) const {
-
-  SmallVector<SDValue, 8> Ops;
-  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
-    if (!isa<FrameIndexSDNode>(Node->getOperand(i))) {
-      Ops.push_back(Node->getOperand(i));
-      continue;
-    }
-
-    SDLoc DL(Node);
-    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
-                                     Node->getOperand(i).getValueType(),
-                                     Node->getOperand(i)), 0));
-  }
-
-  DAG.UpdateNodeOperands(Node, Ops);
-}
-
-/// \brief Fold the instructions after selecting them.
-SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
-                                          SelectionDAG &DAG) const {
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-
-  if (TII->isMIMG(Node->getMachineOpcode()))
-    adjustWritemask(Node, DAG);
-
-  if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG ||
-      Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) {
-    legalizeTargetIndependentNode(Node, DAG);
-    return Node;
-  }
-  return Node;
-}
-
-/// \brief Assign the register class depending on the number of
-/// bits set in the writemask
-void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
-                                                     SDNode *Node) const {
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-
-  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-  TII->legalizeOperands(MI);
-
-  if (TII->isMIMG(MI->getOpcode())) {
-    unsigned VReg = MI->getOperand(0).getReg();
-    unsigned Writemask = MI->getOperand(1).getImm();
-    unsigned BitsSet = 0;
-    for (unsigned i = 0; i < 4; ++i)
-      BitsSet += Writemask & (1 << i) ? 1 : 0;
-
-    const TargetRegisterClass *RC;
-    switch (BitsSet) {
-    default: return;
-    case 1:  RC = &AMDGPU::VGPR_32RegClass; break;
-    case 2:  RC = &AMDGPU::VReg_64RegClass; break;
-    case 3:  RC = &AMDGPU::VReg_96RegClass; break;
-    }
-
-    unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
-    MI->setDesc(TII->get(NewOpcode));
-    MRI.setRegClass(VReg, RC);
-    return;
-  }
-
-  // Replace unused atomics with the no return version.
-  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode());
-  if (NoRetAtomicOp != -1) {
-    if (!Node->hasAnyUseOfValue(0)) {
-      MI->setDesc(TII->get(NoRetAtomicOp));
-      MI->RemoveOperand(0);
-    }
-
-    return;
-  }
-}
-
-static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) {
-  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
-  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
-}
-
-MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
-                                                SDLoc DL,
-                                                SDValue Ptr) const {
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-#if 1
-    // XXX - Workaround for moveToVALU not handling different register class
-    // inserts for REG_SEQUENCE.
-
-    // Build the half of the subregister with the constants.
-    const SDValue Ops0[] = {
-      DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
-      buildSMovImm32(DAG, DL, 0),
-      DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
-      buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
-      DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
-    };
-
-    SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
-                                                  MVT::v2i32, Ops0), 0);
-
-    // Combine the constants and the pointer.
-    const SDValue Ops1[] = {
-      DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
-      Ptr,
-      DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
-      SubRegHi,
-      DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
-    };
-
-    return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
-#else
-    const SDValue Ops[] = {
-      DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
-      Ptr,
-      DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
-      buildSMovImm32(DAG, DL, 0),
-      DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
-      buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
-      DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
-    };
-
-    return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
-
-#endif
-}
-
-/// \brief Return a resource descriptor with the 'Add TID' bit enabled
-///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
-///        of the resource descriptor) to create an offset, which is added to
-///        the resource pointer.
-MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
-                                           SDLoc DL,
-                                           SDValue Ptr,
-                                           uint32_t RsrcDword1,
-                                           uint64_t RsrcDword2And3) const {
-  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
-  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
-  if (RsrcDword1) {
-    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
-                                     DAG.getConstant(RsrcDword1, DL, MVT::i32)),
-                    0);
-  }
-
-  SDValue DataLo = buildSMovImm32(DAG, DL,
-                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
-  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
-
-  const SDValue Ops[] = {
-    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
-    PtrLo,
-    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
-    PtrHi,
-    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
-    DataLo,
-    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
-    DataHi,
-    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
-  };
-
-  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
-}
-
-MachineSDNode *SITargetLowering::buildScratchRSRC(SelectionDAG &DAG,
-                                                  SDLoc DL,
-                                                  SDValue Ptr) const {
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-  uint64_t Rsrc = TII->getDefaultRsrcDataFormat() | AMDGPU::RSRC_TID_ENABLE |
-                  0xffffffff; // Size
-
-  return buildRSRC(DAG, DL, Ptr, 0, Rsrc);
-}
-
-SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
-                                               const TargetRegisterClass *RC,
-                                               unsigned Reg, EVT VT) const {
-  SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);
-
-  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
-                            cast<RegisterSDNode>(VReg)->getReg(), VT);
-}
-
-//===----------------------------------------------------------------------===//
-//                         SI Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-std::pair<unsigned, const TargetRegisterClass *>
-SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
-                                               const std::string &Constraint,
-                                               MVT VT) const {
-  if (Constraint == "r") {
-    switch(VT.SimpleTy) {
-      default: llvm_unreachable("Unhandled type for 'r' inline asm constraint");
-      case MVT::i64:
-        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
-      case MVT::i32:
-        return std::make_pair(0U, &AMDGPU::SGPR_32RegClass);
-    }
-  }
-
-  if (Constraint.size() > 1) {
-    const TargetRegisterClass *RC = nullptr;
-    if (Constraint[1] == 'v') {
-      RC = &AMDGPU::VGPR_32RegClass;
-    } else if (Constraint[1] == 's') {
-      RC = &AMDGPU::SGPR_32RegClass;
-    }
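-    // For example (illustrative): a "{v5}" constraint selects VGPR5 and
-    // "{s3}" selects SGPR3 (the register index is parsed from position 2
-    // onward of the brace-wrapped constraint string).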
-
-    if (RC) {
-      unsigned Idx = std::atoi(Constraint.substr(2).c_str());
-      if (Idx < RC->getNumRegs())
-        return std::make_pair(RC->getRegister(Idx), RC);
-    }
-  }
-  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
-}

Removed: llvm/trunk/lib/Target/R600/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIISelLowering.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/R600/SIISelLowering.h (removed)
@@ -1,125 +0,0 @@
-//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief SI DAG Lowering interface definition
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_SIISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_SIISELLOWERING_H
-
-#include "AMDGPUISelLowering.h"
-#include "SIInstrInfo.h"
-
-namespace llvm {
-
-class SITargetLowering : public AMDGPUTargetLowering {
-  SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
-                         SDValue Chain, unsigned Offset, bool Signed) const;
-  SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
-                               SelectionDAG &DAG) const;
-  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
-                             SelectionDAG &DAG) const override;
-
-  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool Signed) const;
-  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
-
-  void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
-
-  SDValue performUCharToFloatCombine(SDNode *N,
-                                     DAGCombinerInfo &DCI) const;
-  SDValue performSHLPtrCombine(SDNode *N,
-                               unsigned AS,
-                               DAGCombinerInfo &DCI) const;
-  SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
-  SDValue performMin3Max3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
-public:
-  SITargetLowering(TargetMachine &tm, const AMDGPUSubtarget &STI);
-
-  bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
-                          EVT /*VT*/) const override;
-
-  bool isLegalAddressingMode(const AddrMode &AM,
-                             Type *Ty, unsigned AS) const override;
-
-  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
-                                      unsigned Align,
-                                      bool *IsFast) const override;
-
-  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
-                          unsigned SrcAlign, bool IsMemset,
-                          bool ZeroMemset,
-                          bool MemcpyStrSrc,
-                          MachineFunction &MF) const override;
-
-  TargetLoweringBase::LegalizeTypeAction
-  getPreferredVectorAction(EVT VT) const override;
-
-  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
-                                        Type *Ty) const override;
-
-  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
-                               bool isVarArg,
-                               const SmallVectorImpl<ISD::InputArg> &Ins,
-                               SDLoc DL, SelectionDAG &DAG,
-                               SmallVectorImpl<SDValue> &InVals) const override;
-
-  MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
-                                      MachineBasicBlock * BB) const override;
-  bool enableAggressiveFMAFusion(EVT VT) const override;
-  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
-  MVT getScalarShiftAmountTy(EVT VT) const override;
-  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
-  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
-  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
-  SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
-  void AdjustInstrPostInstrSelection(MachineInstr *MI,
-                                     SDNode *Node) const override;
-
-  int32_t analyzeImmediate(const SDNode *N) const;
-  SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
-                               unsigned Reg, EVT VT) const override;
-  void legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
-
-  MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, SDLoc DL, SDValue Ptr) const;
-  MachineSDNode *buildRSRC(SelectionDAG &DAG,
-                           SDLoc DL,
-                           SDValue Ptr,
-                           uint32_t RsrcDword1,
-                           uint64_t RsrcDword2And3) const;
-  MachineSDNode *buildScratchRSRC(SelectionDAG &DAG,
-                                  SDLoc DL,
-                                  SDValue Ptr) const;
-
-  std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(
-                                   const TargetRegisterInfo *TRI,
-                                   const std::string &Constraint, MVT VT) const override;
-  SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL, SDValue V) const;
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/SIInsertWaits.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInsertWaits.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInsertWaits.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIInsertWaits.cpp (removed)
@@ -1,480 +0,0 @@
-//===-- SIInsertWaits.cpp - Insert waits for memory reads and writes ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Insert wait instructions for memory reads and writes.
-///
-/// Memory reads and writes are issued asynchronously, so we need to insert
-/// S_WAITCNT instructions when we want to access any of their results or
-/// overwrite any register that's used asynchronously.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIDefines.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-namespace {
-
-/// \brief One variable for each of the hardware counters
-typedef union {
-  struct {
-    unsigned VM;
-    unsigned EXP;
-    unsigned LGKM;
-  } Named;
-  unsigned Array[3];
-
-} Counters;
-
-typedef enum {
-  OTHER,
-  SMEM,
-  VMEM
-} InstType;
-
-typedef Counters RegCounters[512];
-typedef std::pair<unsigned, unsigned> RegInterval;
-
-class SIInsertWaits : public MachineFunctionPass {
-
-private:
-  static char ID;
-  const SIInstrInfo *TII;
-  const SIRegisterInfo *TRI;
-  const MachineRegisterInfo *MRI;
-
-  /// \brief Constant hardware limits
-  static const Counters WaitCounts;
-
-  /// \brief Constant zero value
-  static const Counters ZeroCounts;
-
-  /// \brief Counter values we have already waited on.
-  Counters WaitedOn;
-
-  /// \brief Counter values for last instruction issued.
-  Counters LastIssued;
-
-  /// \brief Registers used by async instructions.
-  RegCounters UsedRegs;
-
-  /// \brief Registers defined by async instructions.
-  RegCounters DefinedRegs;
-
-  /// \brief Different export instruction types seen since last wait.
-  unsigned ExpInstrTypesSeen;
-
-  /// \brief Type of the last opcode.
-  InstType LastOpcodeType;
-
-  bool LastInstWritesM0;
-
-  /// \brief Get increment/decrement amount for this instruction.
-  Counters getHwCounts(MachineInstr &MI);
-
-  /// \brief Is operand relevant for async execution?
-  bool isOpRelevant(MachineOperand &Op);
-
-  /// \brief Get register interval an operand affects.
-  RegInterval getRegInterval(MachineOperand &Op);
-
-  /// \brief Handle an instruction's async components.
-  void pushInstruction(MachineBasicBlock &MBB,
-                       MachineBasicBlock::iterator I);
-
-  /// \brief Insert the actual wait instruction
-  bool insertWait(MachineBasicBlock &MBB,
-                  MachineBasicBlock::iterator I,
-                  const Counters &Counts);
-
-  /// \brief Do we need def2def checks?
-  bool unorderedDefines(MachineInstr &MI);
-
-  /// \brief Resolve all operand dependencies to counter requirements
-  Counters handleOperands(MachineInstr &MI);
-
-  /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
-  void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);
-
-public:
-  SIInsertWaits(TargetMachine &tm) :
-    MachineFunctionPass(ID),
-    TII(nullptr),
-    TRI(nullptr),
-    ExpInstrTypesSeen(0) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI insert wait  instructions";
-  }
-
-};
-
-} // End anonymous namespace
-
-char SIInsertWaits::ID = 0;
-
-const Counters SIInsertWaits::WaitCounts = { { 15, 7, 7 } };
-const Counters SIInsertWaits::ZeroCounts = { { 0, 0, 0 } };
-
-FunctionPass *llvm::createSIInsertWaits(TargetMachine &tm) {
-  return new SIInsertWaits(tm);
-}
-
-Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
-
-  uint64_t TSFlags = TII->get(MI.getOpcode()).TSFlags;
-  Counters Result;
-
-  Result.Named.VM = !!(TSFlags & SIInstrFlags::VM_CNT);
-
-  // Only consider stores or EXP for EXP_CNT
-  Result.Named.EXP = !!(TSFlags & SIInstrFlags::EXP_CNT &&
-      (MI.getOpcode() == AMDGPU::EXP || MI.getDesc().mayStore()));
-
-  // LGKM may use larger values.
-  if (TSFlags & SIInstrFlags::LGKM_CNT) {
-
-    if (TII->isSMRD(MI.getOpcode())) {
-
-      MachineOperand &Op = MI.getOperand(0);
-      assert(Op.isReg() && "First LGKM operand must be a register!");
-
-      unsigned Reg = Op.getReg();
-      unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
-      Result.Named.LGKM = Size > 4 ? 2 : 1;
-
-    } else {
-      // DS
-      Result.Named.LGKM = 1;
-    }
-
-  } else {
-    Result.Named.LGKM = 0;
-  }
-
-  return Result;
-}
-
-bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
-
-  // Constants are always irrelevant
-  if (!Op.isReg())
-    return false;
-
-  // Defines are always relevant
-  if (Op.isDef())
-    return true;
-
-  // For exports all registers are relevant
-  MachineInstr &MI = *Op.getParent();
-  if (MI.getOpcode() == AMDGPU::EXP)
-    return true;
-
-  // For stores the stored value is also relevant
-  if (!MI.getDesc().mayStore())
-    return false;
-
-  // Check if this operand is the value being stored.
-  // Special case for DS instructions, since the address
-  // operand comes before the value operand and it may have
-  // multiple data operands.
-
-  if (TII->isDS(MI.getOpcode())) {
-    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data);
-    if (Data && Op.isIdenticalTo(*Data))
-      return true;
-
-    MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
-    if (Data0 && Op.isIdenticalTo(*Data0))
-      return true;
-
-    MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
-    if (Data1 && Op.isIdenticalTo(*Data1))
-      return true;
-
-    return false;
-  }
-
-  // NOTE: This assumes that the value operand is before the
-  // address operand, and that there is only one value operand.
-  for (MachineInstr::mop_iterator I = MI.operands_begin(),
-       E = MI.operands_end(); I != E; ++I) {
-
-    if (I->isReg() && I->isUse())
-      return Op.isIdenticalTo(*I);
-  }
-
-  return false;
-}
-
-RegInterval SIInsertWaits::getRegInterval(MachineOperand &Op) {
-
-  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
-    return std::make_pair(0, 0);
-
-  unsigned Reg = Op.getReg();
-  unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
-
-  assert(Size >= 4);
-
-  RegInterval Result;
-  Result.first = TRI->getEncodingValue(Reg);
-  Result.second = Result.first + Size / 4;
-
-  return Result;
-}
-
-void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator I) {
-
-  // Get the hardware counter increments and sum them up
-  Counters Increment = getHwCounts(*I);
-  unsigned Sum = 0;
-
-  for (unsigned i = 0; i < 3; ++i) {
-    LastIssued.Array[i] += Increment.Array[i];
-    Sum += Increment.Array[i];
-  }
-
-  // If we don't increase anything then that's it
-  if (Sum == 0) {
-    LastOpcodeType = OTHER;
-    return;
-  }
-
-  if (MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
-      AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-    // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
-    // or SMEM clause, respectively.
-    //
-    // The temporary workaround is to break the clauses with S_NOP.
-    //
-    // The proper solution would be to allocate registers such that all source
-    // and destination registers don't overlap, e.g. this is illegal:
-    //   r0 = load r2
-    //   r2 = load r0
-    if ((LastOpcodeType == SMEM && TII->isSMRD(I->getOpcode())) ||
-        (LastOpcodeType == VMEM && Increment.Named.VM)) {
-      // Insert a NOP to break the clause.
-      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
-          .addImm(0);
-      LastInstWritesM0 = false;
-    }
-
-    if (TII->isSMRD(I->getOpcode()))
-      LastOpcodeType = SMEM;
-    else if (Increment.Named.VM)
-      LastOpcodeType = VMEM;
-  }
-
-  // Remember which export instructions we have seen
-  if (Increment.Named.EXP) {
-    ExpInstrTypesSeen |= I->getOpcode() == AMDGPU::EXP ? 1 : 2;
-  }
-
-  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
-
-    MachineOperand &Op = I->getOperand(i);
-    if (!isOpRelevant(Op))
-      continue;
-
-    RegInterval Interval = getRegInterval(Op);
-    for (unsigned j = Interval.first; j < Interval.second; ++j) {
-
-      // Remember which registers we define
-      if (Op.isDef())
-        DefinedRegs[j] = LastIssued;
-
-      // and which one we are using
-      if (Op.isUse())
-        UsedRegs[j] = LastIssued;
-    }
-  }
-}
-
-bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
-                               MachineBasicBlock::iterator I,
-                               const Counters &Required) {
-
-  // End of program? No need to wait on anything
-  if (I != MBB.end() && I->getOpcode() == AMDGPU::S_ENDPGM)
-    return false;
-
-  // Figure out if the async instructions execute in order
-  bool Ordered[3];
-
-  // VM_CNT is always ordered
-  Ordered[0] = true;
-
-  // EXP_CNT is unordered if we have both EXP & VM-writes
-  Ordered[1] = ExpInstrTypesSeen == 3;
-
-  // LGKM_CNT is handled as always unordered. TODO: Handle LDS and GDS
-  Ordered[2] = false;
-
-  // The values we are going to put into the S_WAITCNT instruction
-  Counters Counts = WaitCounts;
-
-  // Do we really need to wait?
-  bool NeedWait = false;
-
-  for (unsigned i = 0; i < 3; ++i) {
-
-    if (Required.Array[i] <= WaitedOn.Array[i])
-      continue;
-
-    NeedWait = true;
-
-    if (Ordered[i]) {
-      unsigned Value = LastIssued.Array[i] - Required.Array[i];
-
-      // Adjust the value to the real hardware possibilities.
-      Counts.Array[i] = std::min(Value, WaitCounts.Array[i]);
-
-    } else
-      Counts.Array[i] = 0;
-
-    // Remember what we have waited on.
-    WaitedOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
-  }
-
-  if (!NeedWait)
-    return false;
-
-  // Reset EXP_CNT instruction types
-  if (Counts.Named.EXP == 0)
-    ExpInstrTypesSeen = 0;
-
-  // Build the wait instruction
-  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
-          .addImm((Counts.Named.VM & 0xF) |
-                  ((Counts.Named.EXP & 0x7) << 4) |
-                  ((Counts.Named.LGKM & 0x7) << 8));
-
-  LastOpcodeType = OTHER;
-  LastInstWritesM0 = false;
-  return true;
-}
-
-/// \brief Helper function for handleOperands.
-static void increaseCounters(Counters &Dst, const Counters &Src) {
-
-  for (unsigned i = 0; i < 3; ++i)
-    Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]);
-}
-
-Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
-
-  Counters Result = ZeroCounts;
-
-  // S_SENDMSG implicitly waits for all outstanding LGKM transfers to finish,
-  // but we also want to wait for any other outstanding transfers before
-  // signalling other hardware blocks
-  if (MI.getOpcode() == AMDGPU::S_SENDMSG)
-    return LastIssued;
-
-  // For each register affected by this instruction, raise the result
-  // counters to the values recorded for that register.
-  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
-
-    MachineOperand &Op = MI.getOperand(i);
-    RegInterval Interval = getRegInterval(Op);
-    for (unsigned j = Interval.first; j < Interval.second; ++j) {
-
-      if (Op.isDef()) {
-        increaseCounters(Result, UsedRegs[j]);
-        increaseCounters(Result, DefinedRegs[j]);
-      }
-
-      if (Op.isUse())
-        increaseCounters(Result, DefinedRegs[j]);
-    }
-  }
-
-  return Result;
-}
-
-void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
-                                  MachineBasicBlock::iterator I) {
-  if (MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <
-      AMDGPUSubtarget::VOLCANIC_ISLANDS)
-    return;
-
-  // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
-  if (LastInstWritesM0 && I->getOpcode() == AMDGPU::S_SENDMSG) {
-    BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
-    LastInstWritesM0 = false;
-    return;
-  }
-
-  // Set whether this instruction sets M0
-  LastInstWritesM0 = false;
-
-  unsigned NumOperands = I->getNumOperands();
-  for (unsigned i = 0; i < NumOperands; i++) {
-    const MachineOperand &Op = I->getOperand(i);
-
-    if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0)
-      LastInstWritesM0 = true;
-  }
-}
-
-// FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States"
-// around other non-memory instructions.
-bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
-  bool Changes = false;
-
-  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  TRI =
-      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-
-  MRI = &MF.getRegInfo();
-
-  WaitedOn = ZeroCounts;
-  LastIssued = ZeroCounts;
-  LastOpcodeType = OTHER;
-  LastInstWritesM0 = false;
-
-  memset(&UsedRegs, 0, sizeof(UsedRegs));
-  memset(&DefinedRegs, 0, sizeof(DefinedRegs));
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-       BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
-         I != E; ++I) {
-
-      // Wait for everything before a barrier.
-      if (I->getOpcode() == AMDGPU::S_BARRIER)
-        Changes |= insertWait(MBB, I, LastIssued);
-      else
-        Changes |= insertWait(MBB, I, handleOperands(*I));
-
-      pushInstruction(MBB, I);
-      handleSendMsg(MBB, I);
-    }
-
-    // Wait for everything at the end of the MBB
-    Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued);
-  }
-
-  return Changes;
-}
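
(A note on the S_WAITCNT immediate built in insertWait() above: the three
counters are packed into one immediate -- VM in bits 3:0, EXP in bits 6:4,
LGKM in bits 10:8 -- with the hardware maxima from WaitCounts (15/7/7). A
small standalone C++ sketch of the packing; packWaitcnt and the struct are
illustrative names, not LLVM API:

  #include <cassert>
  #include <cstdint>

  struct WaitCnt {
    unsigned VM;   // vector memory ops outstanding
    unsigned EXP;  // exports / VM writes outstanding
    unsigned LGKM; // LDS/GDS/constant/message ops outstanding
  };

  // Mirrors the BuildMI(..., S_WAITCNT).addImm(...) expression above.
  static uint16_t packWaitcnt(const WaitCnt &C) {
    assert(C.VM <= 15 && C.EXP <= 7 && C.LGKM <= 7 && "over hardware limits");
    return (C.VM & 0xF) | ((C.EXP & 0x7) << 4) | ((C.LGKM & 0x7) << 8);
  }

For example, packWaitcnt({0, 7, 7}) yields 0x770: wait until all vector
memory operations retire, while leaving EXP and LGKM at their maxima, i.e.
not waited on.)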

Removed: llvm/trunk/lib/Target/R600/SIInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstrFormats.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstrFormats.td (original)
+++ llvm/trunk/lib/Target/R600/SIInstrFormats.td (removed)
@@ -1,673 +0,0 @@
-//===-- SIInstrFormats.td - SI Instruction Encodings ----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// SI Instruction format definitions.
-//
-//===----------------------------------------------------------------------===//
-
-class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
-    AMDGPUInst<outs, ins, asm, pattern>, PredicateControl {
-
-  field bits<1> VM_CNT = 0;
-  field bits<1> EXP_CNT = 0;
-  field bits<1> LGKM_CNT = 0;
-
-  field bits<1> SALU = 0;
-  field bits<1> VALU = 0;
-
-  field bits<1> SOP1 = 0;
-  field bits<1> SOP2 = 0;
-  field bits<1> SOPC = 0;
-  field bits<1> SOPK = 0;
-  field bits<1> SOPP = 0;
-
-  field bits<1> VOP1 = 0;
-  field bits<1> VOP2 = 0;
-  field bits<1> VOP3 = 0;
-  field bits<1> VOPC = 0;
-
-  field bits<1> MUBUF = 0;
-  field bits<1> MTBUF = 0;
-  field bits<1> SMRD = 0;
-  field bits<1> DS = 0;
-  field bits<1> MIMG = 0;
-  field bits<1> FLAT = 0;
-  field bits<1> WQM = 0;
-  field bits<1> VGPRSpill = 0;
-
-  // These need to be kept in sync with the enum in SIInstrFlags.
-  let TSFlags{0} = VM_CNT;
-  let TSFlags{1} = EXP_CNT;
-  let TSFlags{2} = LGKM_CNT;
-
-  let TSFlags{3} = SALU;
-  let TSFlags{4} = VALU;
-
-  let TSFlags{5} = SOP1;
-  let TSFlags{6} = SOP2;
-  let TSFlags{7} = SOPC;
-  let TSFlags{8} = SOPK;
-  let TSFlags{9} = SOPP;
-
-  let TSFlags{10} = VOP1;
-  let TSFlags{11} = VOP2;
-  let TSFlags{12} = VOP3;
-  let TSFlags{13} = VOPC;
-
-  let TSFlags{14} = MUBUF;
-  let TSFlags{15} = MTBUF;
-  let TSFlags{16} = SMRD;
-  let TSFlags{17} = DS;
-  let TSFlags{18} = MIMG;
-  let TSFlags{19} = FLAT;
-  let TSFlags{20} = WQM;
-  let TSFlags{21} = VGPRSpill;
-
-  // Most instructions require adjustments after selection to satisfy
-  // operand requirements.
-  let hasPostISelHook = 1;
-  let SchedRW = [Write32Bit];
-}
-
-class Enc32 {
-  field bits<32> Inst;
-  int Size = 4;
-}
-
-class Enc64 {
-  field bits<64> Inst;
-  int Size = 8;
-}
-
-class VOPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVOPDst">;
-def VOPDstVCC : VOPDstOperand <VCCReg>;
-
-let Uses = [EXEC] in {
-
-class VOPAnyCommon <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern> {
-
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let UseNamedOperandTable = 1;
-  let VALU = 1;
-}
-
-class VOPCCommon <dag ins, string asm, list<dag> pattern> :
-    VOPAnyCommon <(outs VOPDstVCC:$dst), ins, asm, pattern> {
-
-  let DisableEncoding = "$dst";
-  let VOPC = 1;
-  let Size = 4;
-}
-
-class VOP1Common <dag outs, dag ins, string asm, list<dag> pattern> :
-    VOPAnyCommon <outs, ins, asm, pattern> {
-
-  let VOP1 = 1;
-  let Size = 4;
-}
-
-class VOP2Common <dag outs, dag ins, string asm, list<dag> pattern> :
-    VOPAnyCommon <outs, ins, asm, pattern> {
-
-  let VOP2 = 1;
-  let Size = 4;
-}
-
-class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
-    VOPAnyCommon <outs, ins, asm, pattern> {
-
-  // Using complex patterns gives VOP3 patterns a very high complexity rating,
-  // but standalone patterns are almost always preferred, so we need to lower
-  // the priority. The goal is a negative number large enough to reduce the
-  // complexity to zero (or below).
-  let AddedComplexity = -1000;
-
-  let VOP3 = 1;
-  let VALU = 1;
-
-  let AsmMatchConverter = "cvtVOP3";
-  let isCodeGenOnly = 0;
-
-  int Size = 8;
-}
-
-} // End Uses = [EXEC]
-
-//===----------------------------------------------------------------------===//
-// Scalar operations
-//===----------------------------------------------------------------------===//
-
-class SOP1e <bits<8> op> : Enc32 {
-  bits<7> sdst;
-  bits<8> ssrc0;
-
-  let Inst{7-0} = ssrc0;
-  let Inst{15-8} = op;
-  let Inst{22-16} = sdst;
-  let Inst{31-23} = 0x17d; // encoding
-}
-
-class SOP2e <bits<7> op> : Enc32 {
-  bits<7> sdst;
-  bits<8> ssrc0;
-  bits<8> ssrc1;
-
-  let Inst{7-0} = ssrc0;
-  let Inst{15-8} = ssrc1;
-  let Inst{22-16} = sdst;
-  let Inst{29-23} = op;
-  let Inst{31-30} = 0x2; // encoding
-}
-
-class SOPCe <bits<7> op> : Enc32 {
-  bits<8> ssrc0;
-  bits<8> ssrc1;
-
-  let Inst{7-0} = ssrc0;
-  let Inst{15-8} = ssrc1;
-  let Inst{22-16} = op;
-  let Inst{31-23} = 0x17e;
-}
-
-class SOPKe <bits<5> op> : Enc32 {
-  bits <7> sdst;
-  bits <16> simm16;
-
-  let Inst{15-0} = simm16;
-  let Inst{22-16} = sdst;
-  let Inst{27-23} = op;
-  let Inst{31-28} = 0xb; //encoding
-}
-
-class SOPK64e <bits<5> op> : Enc64 {
-  bits <7> sdst = 0;
-  bits <16> simm16;
-  bits <32> imm;
-
-  let Inst{15-0} = simm16;
-  let Inst{22-16} = sdst;
-  let Inst{27-23} = op;
-  let Inst{31-28} = 0xb;
-
-  let Inst{63-32} = imm;
-}
-
-class SOPPe <bits<7> op> : Enc32 {
-  bits <16> simm16;
-
-  let Inst{15-0} = simm16;
-  let Inst{22-16} = op;
-  let Inst{31-23} = 0x17f; // encoding
-}
-
-class SMRDe <bits<5> op, bits<1> imm> : Enc32 {
-  bits<7> sdst;
-  bits<7> sbase;
-  bits<8> offset;
-
-  let Inst{7-0} = offset;
-  let Inst{8} = imm;
-  let Inst{14-9} = sbase{6-1};
-  let Inst{21-15} = sdst;
-  let Inst{26-22} = op;
-  let Inst{31-27} = 0x18; //encoding
-}
-
-let SchedRW = [WriteSALU] in {
-class SOP1 <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI<outs, ins, asm, pattern> {
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let isCodeGenOnly = 0;
-  let SALU = 1;
-  let SOP1 = 1;
-}
-
-class SOP2 <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern> {
-
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let isCodeGenOnly = 0;
-  let SALU = 1;
-  let SOP2 = 1;
-
-  let UseNamedOperandTable = 1;
-}
-
-class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-  InstSI<outs, ins, asm, pattern>, SOPCe <op> {
-
-  let DisableEncoding = "$dst";
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let SALU = 1;
-  let SOPC = 1;
-  let isCodeGenOnly = 0;
-
-  let UseNamedOperandTable = 1;
-}
-
-class SOPK <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern> {
-
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let SALU = 1;
-  let SOPK = 1;
-
-  let UseNamedOperandTable = 1;
-}
-
-class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern = []> :
-    InstSI <(outs), ins, asm, pattern>, SOPPe <op> {
-
-  let mayLoad = 0;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-  let SALU = 1;
-  let SOPP = 1;
-
-  let UseNamedOperandTable = 1;
-}
-
-} // let SchedRW = [WriteSALU]
-
-class SMRD <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI<outs, ins, asm, pattern> {
-
-  let LGKM_CNT = 1;
-  let SMRD = 1;
-  let mayStore = 0;
-  let mayLoad = 1;
-  let hasSideEffects = 0;
-  let UseNamedOperandTable = 1;
-  let SchedRW = [WriteSMEM];
-}
-
-//===----------------------------------------------------------------------===//
-// Vector ALU operations
-//===----------------------------------------------------------------------===//
-
-class VOP1e <bits<8> op> : Enc32 {
-  bits<8> vdst;
-  bits<9> src0;
-
-  let Inst{8-0} = src0;
-  let Inst{16-9} = op;
-  let Inst{24-17} = vdst;
-  let Inst{31-25} = 0x3f; //encoding
-}
-
-class VOP2e <bits<6> op> : Enc32 {
-  bits<8> vdst;
-  bits<9> src0;
-  bits<8> src1;
-
-  let Inst{8-0} = src0;
-  let Inst{16-9} = src1;
-  let Inst{24-17} = vdst;
-  let Inst{30-25} = op;
-  let Inst{31} = 0x0; //encoding
-}
-
-class VOP2_MADKe <bits<6> op> : Enc64 {
-
-  bits<8>  vdst;
-  bits<9>  src0;
-  bits<8>  vsrc1;
-  bits<32> src2;
-
-  let Inst{8-0} = src0;
-  let Inst{16-9} = vsrc1;
-  let Inst{24-17} = vdst;
-  let Inst{30-25} = op;
-  let Inst{31} = 0x0; // encoding
-  let Inst{63-32} = src2;
-}
-
-class VOP3e <bits<9> op> : Enc64 {
-  bits<8> vdst;
-  bits<2> src0_modifiers;
-  bits<9> src0;
-  bits<2> src1_modifiers;
-  bits<9> src1;
-  bits<2> src2_modifiers;
-  bits<9> src2;
-  bits<1> clamp;
-  bits<2> omod;
-
-  let Inst{7-0} = vdst;
-  let Inst{8} = src0_modifiers{1};
-  let Inst{9} = src1_modifiers{1};
-  let Inst{10} = src2_modifiers{1};
-  let Inst{11} = clamp;
-  let Inst{25-17} = op;
-  let Inst{31-26} = 0x34; //encoding
-  let Inst{40-32} = src0;
-  let Inst{49-41} = src1;
-  let Inst{58-50} = src2;
-  let Inst{60-59} = omod;
-  let Inst{61} = src0_modifiers{0};
-  let Inst{62} = src1_modifiers{0};
-  let Inst{63} = src2_modifiers{0};
-}
-
-class VOP3be <bits<9> op> : Enc64 {
-  bits<8> vdst;
-  bits<2> src0_modifiers;
-  bits<9> src0;
-  bits<2> src1_modifiers;
-  bits<9> src1;
-  bits<2> src2_modifiers;
-  bits<9> src2;
-  bits<7> sdst;
-  bits<2> omod;
-
-  let Inst{7-0} = vdst;
-  let Inst{14-8} = sdst;
-  let Inst{25-17} = op;
-  let Inst{31-26} = 0x34; //encoding
-  let Inst{40-32} = src0;
-  let Inst{49-41} = src1;
-  let Inst{58-50} = src2;
-  let Inst{60-59} = omod;
-  let Inst{61} = src0_modifiers{0};
-  let Inst{62} = src1_modifiers{0};
-  let Inst{63} = src2_modifiers{0};
-}
-
-class VOPCe <bits<8> op> : Enc32 {
-  bits<9> src0;
-  bits<8> vsrc1;
-
-  let Inst{8-0} = src0;
-  let Inst{16-9} = vsrc1;
-  let Inst{24-17} = op;
-  let Inst{31-25} = 0x3e;
-}
-
-class VINTRPe <bits<2> op> : Enc32 {
-  bits<8> vdst;
-  bits<8> vsrc;
-  bits<2> attrchan;
-  bits<6> attr;
-
-  let Inst{7-0} = vsrc;
-  let Inst{9-8} = attrchan;
-  let Inst{15-10} = attr;
-  let Inst{17-16} = op;
-  let Inst{25-18} = vdst;
-  let Inst{31-26} = 0x32; // encoding
-}
-
-class DSe <bits<8> op> : Enc64 {
-  bits<8> vdst;
-  bits<1> gds;
-  bits<8> addr;
-  bits<8> data0;
-  bits<8> data1;
-  bits<8> offset0;
-  bits<8> offset1;
-
-  let Inst{7-0} = offset0;
-  let Inst{15-8} = offset1;
-  let Inst{17} = gds;
-  let Inst{25-18} = op;
-  let Inst{31-26} = 0x36; //encoding
-  let Inst{39-32} = addr;
-  let Inst{47-40} = data0;
-  let Inst{55-48} = data1;
-  let Inst{63-56} = vdst;
-}
-
-class MUBUFe <bits<7> op> : Enc64 {
-  bits<12> offset;
-  bits<1> offen;
-  bits<1> idxen;
-  bits<1> glc;
-  bits<1> addr64;
-  bits<1> lds;
-  bits<8> vaddr;
-  bits<8> vdata;
-  bits<7> srsrc;
-  bits<1> slc;
-  bits<1> tfe;
-  bits<8> soffset;
-
-  let Inst{11-0} = offset;
-  let Inst{12} = offen;
-  let Inst{13} = idxen;
-  let Inst{14} = glc;
-  let Inst{15} = addr64;
-  let Inst{16} = lds;
-  let Inst{24-18} = op;
-  let Inst{31-26} = 0x38; //encoding
-  let Inst{39-32} = vaddr;
-  let Inst{47-40} = vdata;
-  let Inst{52-48} = srsrc{6-2};
-  let Inst{54} = slc;
-  let Inst{55} = tfe;
-  let Inst{63-56} = soffset;
-}
-
-class MTBUFe <bits<3> op> : Enc64 {
-  bits<8> vdata;
-  bits<12> offset;
-  bits<1> offen;
-  bits<1> idxen;
-  bits<1> glc;
-  bits<1> addr64;
-  bits<4> dfmt;
-  bits<3> nfmt;
-  bits<8> vaddr;
-  bits<7> srsrc;
-  bits<1> slc;
-  bits<1> tfe;
-  bits<8> soffset;
-
-  let Inst{11-0} = offset;
-  let Inst{12} = offen;
-  let Inst{13} = idxen;
-  let Inst{14} = glc;
-  let Inst{15} = addr64;
-  let Inst{18-16} = op;
-  let Inst{22-19} = dfmt;
-  let Inst{25-23} = nfmt;
-  let Inst{31-26} = 0x3a; //encoding
-  let Inst{39-32} = vaddr;
-  let Inst{47-40} = vdata;
-  let Inst{52-48} = srsrc{6-2};
-  let Inst{54} = slc;
-  let Inst{55} = tfe;
-  let Inst{63-56} = soffset;
-}
-
-class MIMGe <bits<7> op> : Enc64 {
-  bits<8> vdata;
-  bits<4> dmask;
-  bits<1> unorm;
-  bits<1> glc;
-  bits<1> da;
-  bits<1> r128;
-  bits<1> tfe;
-  bits<1> lwe;
-  bits<1> slc;
-  bits<8> vaddr;
-  bits<7> srsrc;
-  bits<7> ssamp;
-
-  let Inst{11-8} = dmask;
-  let Inst{12} = unorm;
-  let Inst{13} = glc;
-  let Inst{14} = da;
-  let Inst{15} = r128;
-  let Inst{16} = tfe;
-  let Inst{17} = lwe;
-  let Inst{24-18} = op;
-  let Inst{25} = slc;
-  let Inst{31-26} = 0x3c;
-  let Inst{39-32} = vaddr;
-  let Inst{47-40} = vdata;
-  let Inst{52-48} = srsrc{6-2};
-  let Inst{57-53} = ssamp{6-2};
-}
-
-class FLATe<bits<7> op> : Enc64 {
-  bits<8> addr;
-  bits<8> data;
-  bits<8> vdst;
-  bits<1> slc;
-  bits<1> glc;
-  bits<1> tfe;
-
-  // 15-0 is reserved.
-  let Inst{16} = glc;
-  let Inst{17} = slc;
-  let Inst{24-18} = op;
-  let Inst{31-26} = 0x37; // Encoding.
-  let Inst{39-32} = addr;
-  let Inst{47-40} = data;
-  // 54-48 is reserved.
-  let Inst{55} = tfe;
-  let Inst{63-56} = vdst;
-}
-
-class EXPe : Enc64 {
-  bits<4> en;
-  bits<6> tgt;
-  bits<1> compr;
-  bits<1> done;
-  bits<1> vm;
-  bits<8> vsrc0;
-  bits<8> vsrc1;
-  bits<8> vsrc2;
-  bits<8> vsrc3;
-
-  let Inst{3-0} = en;
-  let Inst{9-4} = tgt;
-  let Inst{10} = compr;
-  let Inst{11} = done;
-  let Inst{12} = vm;
-  let Inst{31-26} = 0x3e;
-  let Inst{39-32} = vsrc0;
-  let Inst{47-40} = vsrc1;
-  let Inst{55-48} = vsrc2;
-  let Inst{63-56} = vsrc3;
-}
-
-let Uses = [EXEC] in {
-
-class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    VOP1Common <outs, ins, asm, pattern>,
-    VOP1e<op> {
-  let isCodeGenOnly = 0;
-}
-
-class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    VOP2Common <outs, ins, asm, pattern>, VOP2e<op> {
-  let isCodeGenOnly = 0;
-}
-
-class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
-    VOPCCommon <ins, asm, pattern>, VOPCe <op>;
-
-class VINTRPCommon <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern> {
-  let mayLoad = 1;
-  let mayStore = 0;
-  let hasSideEffects = 0;
-}
-
-} // End Uses = [EXEC]
-
-//===----------------------------------------------------------------------===//
-// Vector I/O operations
-//===----------------------------------------------------------------------===//
-
-let Uses = [EXEC] in {
-
-class DS <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern> {
-
-  let LGKM_CNT = 1;
-  let DS = 1;
-  let UseNamedOperandTable = 1;
-  let Uses = [M0];
-
-  // Most instructions load and store data, so set this as the default.
-  let mayLoad = 1;
-  let mayStore = 1;
-
-  let hasSideEffects = 0;
-  let AsmMatchConverter = "cvtDS";
-  let SchedRW = [WriteLDS];
-}
-
-class MUBUF <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI<outs, ins, asm, pattern> {
-
-  let VM_CNT = 1;
-  let EXP_CNT = 1;
-  let MUBUF = 1;
-
-  let hasSideEffects = 0;
-  let UseNamedOperandTable = 1;
-  let AsmMatchConverter = "cvtMubuf";
-  let SchedRW = [WriteVMEM];
-}
-
-class MTBUF <dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI<outs, ins, asm, pattern> {
-
-  let VM_CNT = 1;
-  let EXP_CNT = 1;
-  let MTBUF = 1;
-
-  let hasSideEffects = 0;
-  let UseNamedOperandTable = 1;
-  let SchedRW = [WriteVMEM];
-}
-
-class FLAT <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI<outs, ins, asm, pattern>, FLATe <op> {
-  let FLAT = 1;
-  // Internally, FLAT instructions are executed as both an LDS and a
-  // Buffer instruction; so, they increment both VM_CNT and LGKM_CNT
-  // and are not considered done until both have been decremented.
-  let VM_CNT = 1;
-  let LGKM_CNT = 1;
-
-  let Uses = [EXEC, FLAT_SCR]; // M0
-
-  let UseNamedOperandTable = 1;
-  let hasSideEffects = 0;
-  let AsmMatchConverter = "cvtFlat";
-  let SchedRW = [WriteVMEM];
-}
-
-class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-    InstSI <outs, ins, asm, pattern>, MIMGe <op> {
-
-  let VM_CNT = 1;
-  let EXP_CNT = 1;
-  let MIMG = 1;
-
-  let hasSideEffects = 0; // XXX ????
-}
-
-
-} // End Uses = [EXEC]
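
(The `let TSFlags{N}` assignments in InstSI at the top of this file must stay
in sync with the SIInstrFlags enum in SIDefines.h, as the comment there notes.
Here is a reconstructed C++ sketch of just the bits set in this file -- the
real enum lives in SIDefines.h and may carry more members:

  #include <cstdint>

  namespace SIInstrFlagsSketch {
  enum : uint64_t {
    VM_CNT   = 1ull << 0,  EXP_CNT = 1ull << 1,  LGKM_CNT = 1ull << 2,
    SALU     = 1ull << 3,  VALU    = 1ull << 4,
    SOP1     = 1ull << 5,  SOP2    = 1ull << 6,  SOPC     = 1ull << 7,
    SOPK     = 1ull << 8,  SOPP    = 1ull << 9,
    VOP1     = 1ull << 10, VOP2    = 1ull << 11, VOP3     = 1ull << 12,
    VOPC     = 1ull << 13,
    MUBUF    = 1ull << 14, MTBUF   = 1ull << 15, SMRD     = 1ull << 16,
    DS       = 1ull << 17, MIMG    = 1ull << 18, FLAT     = 1ull << 19,
    WQM      = 1ull << 20, VGPRSpill = 1ull << 21
  };
  } // namespace SIInstrFlagsSketch

  // Typical use, in the style of the isSMRD/isDS queries seen elsewhere in
  // this patch: test one bit of an instruction's TSFlags word.
  static bool isSMRDFlags(uint64_t TSFlags) {
    return TSFlags & SIInstrFlagsSketch::SMRD;
  }

)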

Removed: llvm/trunk/lib/Target/R600/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstrInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIInstrInfo.cpp (removed)
@@ -1,2723 +0,0 @@
-//===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief SI Implementation of TargetInstrInfo.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "SIInstrInfo.h"
-#include "AMDGPUTargetMachine.h"
-#include "SIDefines.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Function.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/MC/MCInstrDesc.h"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-
-SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
-    : AMDGPUInstrInfo(st), RI() {}
-
-//===----------------------------------------------------------------------===//
-// TargetInstrInfo callbacks
-//===----------------------------------------------------------------------===//
-
-static unsigned getNumOperandsNoGlue(SDNode *Node) {
-  unsigned N = Node->getNumOperands();
-  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
-    --N;
-  return N;
-}
-
-static SDValue findChainOperand(SDNode *Load) {
-  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
-  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
-  return LastOp;
-}
-
-/// \brief Returns true if both nodes have the same value for the given
-///        operand \p Op, or if both nodes do not have this operand.
-static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
-  unsigned Opc0 = N0->getMachineOpcode();
-  unsigned Opc1 = N1->getMachineOpcode();
-
-  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
-  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
-
-  if (Op0Idx == -1 && Op1Idx == -1)
-    return true;
-
-
-  if ((Op0Idx == -1 && Op1Idx != -1) ||
-      (Op1Idx == -1 && Op0Idx != -1))
-    return false;
-
-  // getNamedOperandIdx returns the index for the MachineInstr's operands,
-  // which includes the result as the first operand. We are indexing into the
-  // MachineSDNode's operands, so we need to skip the result operand to get
-  // the real index.
-  --Op0Idx;
-  --Op1Idx;
-
-  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
-}
-
-bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
-                                                    AliasAnalysis *AA) const {
-  // TODO: The generic check fails for VALU instructions that should be
-  // rematerializable due to implicit reads of exec. We really want all of the
-  // generic logic for this except for this.
-  switch (MI->getOpcode()) {
-  case AMDGPU::V_MOV_B32_e32:
-  case AMDGPU::V_MOV_B32_e64:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
-                                          int64_t &Offset0,
-                                          int64_t &Offset1) const {
-  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
-    return false;
-
-  unsigned Opc0 = Load0->getMachineOpcode();
-  unsigned Opc1 = Load1->getMachineOpcode();
-
-  // Make sure both are actually loads.
-  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
-    return false;
-
-  if (isDS(Opc0) && isDS(Opc1)) {
-
-    // FIXME: Handle this case:
-    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
-      return false;
-
-    // Check base reg.
-    if (Load0->getOperand(1) != Load1->getOperand(1))
-      return false;
-
-    // Check chain.
-    if (findChainOperand(Load0) != findChainOperand(Load1))
-      return false;
-
-    // Skip read2 / write2 variants for simplicity.
-    // TODO: We should report true if the used offsets are adjacent (excluding
-    // st64 versions).
-    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
-        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
-      return false;
-
-    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
-    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
-    return true;
-  }
-
-  if (isSMRD(Opc0) && isSMRD(Opc1)) {
-    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
-
-    // Check base reg.
-    if (Load0->getOperand(0) != Load1->getOperand(0))
-      return false;
-
-    const ConstantSDNode *Load0Offset =
-        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
-    const ConstantSDNode *Load1Offset =
-        dyn_cast<ConstantSDNode>(Load1->getOperand(1));
-
-    if (!Load0Offset || !Load1Offset)
-      return false;
-
-    // Check chain.
-    if (findChainOperand(Load0) != findChainOperand(Load1))
-      return false;
-
-    Offset0 = Load0Offset->getZExtValue();
-    Offset1 = Load1Offset->getZExtValue();
-    return true;
-  }
-
-  // MUBUF and MTBUF can access the same addresses.
-  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
-
-    // MUBUF and MTBUF have vaddr at different indices.
-    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
-        findChainOperand(Load0) != findChainOperand(Load1) ||
-        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
-        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
-      return false;
-
-    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
-    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
-
-    if (OffIdx0 == -1 || OffIdx1 == -1)
-      return false;
-
-    // getNamedOperandIdx returns the index for MachineInstrs.  Since they
-    // include the output in the operand list but SDNodes don't, we need to
-    // subtract one from the index.
-    --OffIdx0;
-    --OffIdx1;
-
-    SDValue Off0 = Load0->getOperand(OffIdx0);
-    SDValue Off1 = Load1->getOperand(OffIdx1);
-
-    // The offset might be a FrameIndexSDNode.
-    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
-      return false;
-
-    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
-    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
-    return true;
-  }
-
-  return false;
-}
-
-static bool isStride64(unsigned Opc) {
-  switch (Opc) {
-  case AMDGPU::DS_READ2ST64_B32:
-  case AMDGPU::DS_READ2ST64_B64:
-  case AMDGPU::DS_WRITE2ST64_B32:
-  case AMDGPU::DS_WRITE2ST64_B64:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                                       unsigned &BaseReg, unsigned &Offset,
-                                       const TargetRegisterInfo *TRI) const {
-  unsigned Opc = LdSt->getOpcode();
-  if (isDS(Opc)) {
-    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
-                                                      AMDGPU::OpName::offset);
-    if (OffsetImm) {
-      // Normal, single offset LDS instruction.
-      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
-                                                      AMDGPU::OpName::addr);
-
-      BaseReg = AddrReg->getReg();
-      Offset = OffsetImm->getImm();
-      return true;
-    }
-
-    // The 2 offset instructions use offset0 and offset1 instead. We can treat
-    // these as a load with a single offset if the 2 offsets are consecutive. We
-    // will use this for some partially aligned loads.
-    const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
-                                                       AMDGPU::OpName::offset0);
-    const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
-                                                       AMDGPU::OpName::offset1);
-
-    uint8_t Offset0 = Offset0Imm->getImm();
-    uint8_t Offset1 = Offset1Imm->getImm();
-    assert(Offset1 > Offset0);
-
-    if (Offset1 - Offset0 == 1) {
-      // Each of these offsets is in element sized units, so we need to convert
-      // to bytes of the individual reads.
-
-      unsigned EltSize;
-      if (LdSt->mayLoad())
-        EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
-      else {
-        assert(LdSt->mayStore());
-        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
-        EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
-      }
-
-      if (isStride64(Opc))
-        EltSize *= 64;
-
-      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
-                                                      AMDGPU::OpName::addr);
-      BaseReg = AddrReg->getReg();
-      Offset = EltSize * Offset0;
-      return true;
-    }
-
-    return false;
-  }
-
-  if (isMUBUF(Opc) || isMTBUF(Opc)) {
-    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
-      return false;
-
-    const MachineOperand *AddrReg = getNamedOperand(*LdSt,
-                                                    AMDGPU::OpName::vaddr);
-    if (!AddrReg)
-      return false;
-
-    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
-                                                      AMDGPU::OpName::offset);
-    BaseReg = AddrReg->getReg();
-    Offset = OffsetImm->getImm();
-    return true;
-  }
-
-  if (isSMRD(Opc)) {
-    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
-                                                      AMDGPU::OpName::offset);
-    if (!OffsetImm)
-      return false;
-
-    const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
-                                                     AMDGPU::OpName::sbase);
-    BaseReg = SBaseReg->getReg();
-    Offset = OffsetImm->getImm();
-    return true;
-  }
-
-  return false;
-}
-
-bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
-                                     MachineInstr *SecondLdSt,
-                                     unsigned NumLoads) const {
-  unsigned Opc0 = FirstLdSt->getOpcode();
-  unsigned Opc1 = SecondLdSt->getOpcode();
-
-  // TODO: This needs finer tuning
-  if (NumLoads > 4)
-    return false;
-
-  if (isDS(Opc0) && isDS(Opc1))
-    return true;
-
-  if (isSMRD(Opc0) && isSMRD(Opc1))
-    return true;
-
-  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1)))
-    return true;
-
-  return false;
-}
-
-void
-SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
-                         MachineBasicBlock::iterator MI, DebugLoc DL,
-                         unsigned DestReg, unsigned SrcReg,
-                         bool KillSrc) const {
-
-  // If we are trying to copy to or from SCC, there is a bug somewhere else in
-  // the backend.  While it may be theoretically possible to do this, it should
-  // never be necessary.
-  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
-
-  static const int16_t Sub0_15[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
-    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
-    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
-    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
-  };
-
-  static const int16_t Sub0_7[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
-    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
-  };
-
-  static const int16_t Sub0_3[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
-  };
-
-  static const int16_t Sub0_2[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
-  };
-
-  static const int16_t Sub0_1[] = {
-    AMDGPU::sub0, AMDGPU::sub1, 0
-  };
-
-  unsigned Opcode;
-  const int16_t *SubIndices;
-
-  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
-    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
-            .addReg(SrcReg, getKillRegState(KillSrc));
-    return;
-
-  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
-    if (DestReg == AMDGPU::VCC) {
-      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
-        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-      } else {
-        // FIXME: Hack until VReg_1 removed.
-        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
-        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32), AMDGPU::VCC)
-          .addImm(0)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-      }
-
-      return;
-    }
-
-    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
-            .addReg(SrcReg, getKillRegState(KillSrc));
-    return;
-
-  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
-    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
-    Opcode = AMDGPU::S_MOV_B32;
-    SubIndices = Sub0_3;
-
-  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
-    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
-    Opcode = AMDGPU::S_MOV_B32;
-    SubIndices = Sub0_7;
-
-  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
-    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
-    Opcode = AMDGPU::S_MOV_B32;
-    SubIndices = Sub0_15;
-
-  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
-    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
-           AMDGPU::SReg_32RegClass.contains(SrcReg));
-    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
-            .addReg(SrcReg, getKillRegState(KillSrc));
-    return;
-
-  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
-    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
-           AMDGPU::SReg_64RegClass.contains(SrcReg));
-    Opcode = AMDGPU::V_MOV_B32_e32;
-    SubIndices = Sub0_1;
-
-  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
-    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
-    Opcode = AMDGPU::V_MOV_B32_e32;
-    SubIndices = Sub0_2;
-
-  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
-    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
-           AMDGPU::SReg_128RegClass.contains(SrcReg));
-    Opcode = AMDGPU::V_MOV_B32_e32;
-    SubIndices = Sub0_3;
-
-  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
-    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
-           AMDGPU::SReg_256RegClass.contains(SrcReg));
-    Opcode = AMDGPU::V_MOV_B32_e32;
-    SubIndices = Sub0_7;
-
-  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
-    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
-           AMDGPU::SReg_512RegClass.contains(SrcReg));
-    Opcode = AMDGPU::V_MOV_B32_e32;
-    SubIndices = Sub0_15;
-
-  } else {
-    llvm_unreachable("Can't copy register!");
-  }
-
-  while (unsigned SubIdx = *SubIndices++) {
-    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
-      get(Opcode), RI.getSubReg(DestReg, SubIdx));
-
-    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
-
-    if (*SubIndices)
-      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
-  }
-}
-
-unsigned SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
-  const unsigned Opcode = MI.getOpcode();
-
-  int NewOpc;
-
-  // Try to map original to commuted opcode
-  NewOpc = AMDGPU::getCommuteRev(Opcode);
-  // Check if the commuted (REV) opcode exists on the target.
-  if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
-    return NewOpc;
-
-  // Try to map commuted to original opcode
-  NewOpc = AMDGPU::getCommuteOrig(Opcode);
-  // Check if the original (non-REV) opcode exists on the target.
-  if (NewOpc != -1 && pseudoToMCOpcode(NewOpc) != -1)
-    return NewOpc;
-
-  return Opcode;
-}
-
-unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
-
-  if (DstRC->getSize() == 4) {
-    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
-  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
-    return AMDGPU::S_MOV_B64;
-  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
-    return AMDGPU::V_MOV_B64_PSEUDO;
-  }
-  return AMDGPU::COPY;
-}
-
-void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
-                                      MachineBasicBlock::iterator MI,
-                                      unsigned SrcReg, bool isKill,
-                                      int FrameIndex,
-                                      const TargetRegisterClass *RC,
-                                      const TargetRegisterInfo *TRI) const {
-  MachineFunction *MF = MBB.getParent();
-  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
-  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
-  int Opcode = -1;
-
-  if (RI.isSGPRClass(RC)) {
-    // We are only allowed to create one new instruction when spilling
-    // registers, so we need to use a pseudo instruction for spilling
-    // SGPRs.
-    switch (RC->getSize() * 8) {
-      case 32:  Opcode = AMDGPU::SI_SPILL_S32_SAVE;  break;
-      case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
-      case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
-      case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
-      case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
-    }
-  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
-    MFI->setHasSpilledVGPRs();
-
-    switch (RC->getSize() * 8) {
-      case 32: Opcode = AMDGPU::SI_SPILL_V32_SAVE; break;
-      case 64: Opcode = AMDGPU::SI_SPILL_V64_SAVE; break;
-      case 96: Opcode = AMDGPU::SI_SPILL_V96_SAVE; break;
-      case 128: Opcode = AMDGPU::SI_SPILL_V128_SAVE; break;
-      case 256: Opcode = AMDGPU::SI_SPILL_V256_SAVE; break;
-      case 512: Opcode = AMDGPU::SI_SPILL_V512_SAVE; break;
-    }
-  }
-
-  if (Opcode != -1) {
-    FrameInfo->setObjectAlignment(FrameIndex, 4);
-    BuildMI(MBB, MI, DL, get(Opcode))
-            .addReg(SrcReg)
-            .addFrameIndex(FrameIndex)
-            // Place-holder registers; these will be filled in by
-            // SIPrepareScratchRegs.
-            .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
-            .addReg(AMDGPU::SGPR0, RegState::Undef);
-  } else {
-    LLVMContext &Ctx = MF->getFunction()->getContext();
-    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
-                  " spill register");
-    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
-            .addReg(SrcReg);
-  }
-}
-
-void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                       MachineBasicBlock::iterator MI,
-                                       unsigned DestReg, int FrameIndex,
-                                       const TargetRegisterClass *RC,
-                                       const TargetRegisterInfo *TRI) const {
-  MachineFunction *MF = MBB.getParent();
-  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
-  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
-  int Opcode = -1;
-
-  if (RI.isSGPRClass(RC)) {
-    switch (RC->getSize() * 8) {
-      case 32:  Opcode = AMDGPU::SI_SPILL_S32_RESTORE; break;
-      case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
-      case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
-      case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
-      case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
-    }
-  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
-    switch (RC->getSize() * 8) {
-      case 32: Opcode = AMDGPU::SI_SPILL_V32_RESTORE; break;
-      case 64: Opcode = AMDGPU::SI_SPILL_V64_RESTORE; break;
-      case 96: Opcode = AMDGPU::SI_SPILL_V96_RESTORE; break;
-      case 128: Opcode = AMDGPU::SI_SPILL_V128_RESTORE; break;
-      case 256: Opcode = AMDGPU::SI_SPILL_V256_RESTORE; break;
-      case 512: Opcode = AMDGPU::SI_SPILL_V512_RESTORE; break;
-    }
-  }
-
-  if (Opcode != -1) {
-    FrameInfo->setObjectAlignment(FrameIndex, 4);
-    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
-            .addFrameIndex(FrameIndex)
-            // Place-holder registers; these will be filled in by
-            // SIPrepareScratchRegs.
-            .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
-            .addReg(AMDGPU::SGPR0, RegState::Undef);
-
-  } else {
-    LLVMContext &Ctx = MF->getFunction()->getContext();
-    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
-                  " restore register");
-    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
-  }
-}
-
-/// \param FrameOffset Offset in bytes of the FrameIndex being spilled.
-unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
-                                               MachineBasicBlock::iterator MI,
-                                               RegScavenger *RS, unsigned TmpReg,
-                                               unsigned FrameOffset,
-                                               unsigned Size) const {
-  MachineFunction *MF = MBB.getParent();
-  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
-  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
-  DebugLoc DL = MBB.findDebugLoc(MI);
-  unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
-  unsigned WavefrontSize = ST.getWavefrontSize();
-
-  unsigned TIDReg = MFI->getTIDReg();
-  if (!MFI->hasCalculatedTID()) {
-    MachineBasicBlock &Entry = MBB.getParent()->front();
-    MachineBasicBlock::iterator Insert = Entry.front();
-    DebugLoc DL = Insert->getDebugLoc();
-
-    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
-    if (TIDReg == AMDGPU::NoRegister)
-      return TIDReg;
-
-
-    if (MFI->getShaderType() == ShaderType::COMPUTE &&
-        WorkGroupSize > WavefrontSize) {
-
-      unsigned TIDIGXReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_X);
-      unsigned TIDIGYReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Y);
-      unsigned TIDIGZReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Z);
-      unsigned InputPtrReg =
-          TRI->getPreloadedValue(*MF, SIRegisterInfo::INPUT_PTR);
-      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
-        if (!Entry.isLiveIn(Reg))
-          Entry.addLiveIn(Reg);
-      }
-
-      RS->enterBasicBlock(&Entry);
-      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
-      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
-      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
-              .addReg(InputPtrReg)
-              .addImm(SI::KernelInputOffsets::NGROUPS_Z);
-      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
-              .addReg(InputPtrReg)
-              .addImm(SI::KernelInputOffsets::NGROUPS_Y);
-
-      // NGROUPS.X * NGROUPS.Y
-      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
-              .addReg(STmp1)
-              .addReg(STmp0);
-      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
-      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
-              .addReg(STmp1)
-              .addReg(TIDIGXReg);
-      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
-      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
-              .addReg(STmp0)
-              .addReg(TIDIGYReg)
-              .addReg(TIDReg);
-      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
-      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
-              .addReg(TIDReg)
-              .addReg(TIDIGZReg);
-    } else {
-      // Get the wave id
-      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
-              TIDReg)
-              .addImm(-1)
-              .addImm(0);
-
-      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
-              TIDReg)
-              .addImm(-1)
-              .addReg(TIDReg);
-    }
-
-    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
-            TIDReg)
-            .addImm(2)
-            .addReg(TIDReg);
-    MFI->setTIDReg(TIDReg);
-  }
-
-  // Add FrameIndex to LDS offset
-  unsigned LDSOffset = MFI->LDSSize + (FrameOffset * WorkGroupSize);
-  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
-          .addImm(LDSOffset)
-          .addReg(TIDReg);
-
-  return TmpReg;
-}
-
-void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
-                             int Count) const {
-  while (Count > 0) {
-    int Arg;
-    if (Count >= 8)
-      Arg = 7;
-    else
-      Arg = Count - 1;
-    Count -= 8;
-    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
-            .addImm(Arg);
-  }
-}
-
-bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
-  MachineBasicBlock &MBB = *MI->getParent();
-  DebugLoc DL = MBB.findDebugLoc(MI);
-  switch (MI->getOpcode()) {
-  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
-
-  case AMDGPU::SI_CONSTDATA_PTR: {
-    unsigned Reg = MI->getOperand(0).getReg();
-    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
-    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
-
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);
-
-    // Add 32-bit offset from this instruction to the start of the constant data.
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_U32), RegLo)
-            .addReg(RegLo)
-            .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
-            .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
-            .addReg(RegHi)
-            .addImm(0)
-            .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
-            .addReg(AMDGPU::SCC, RegState::Implicit);
-    MI->eraseFromParent();
-    break;
-  }
-  case AMDGPU::SGPR_USE:
-    // This is just a placeholder for register allocation.
-    MI->eraseFromParent();
-    break;
-
-  case AMDGPU::V_MOV_B64_PSEUDO: {
-    unsigned Dst = MI->getOperand(0).getReg();
-    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
-    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
-
-    const MachineOperand &SrcOp = MI->getOperand(1);
-    // FIXME: Will this work for 64-bit floating point immediates?
-    assert(!SrcOp.isFPImm());
-    if (SrcOp.isImm()) {
-      APInt Imm(64, SrcOp.getImm());
-      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
-              .addImm(Imm.getLoBits(32).getZExtValue())
-              .addReg(Dst, RegState::Implicit);
-      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
-              .addImm(Imm.getHiBits(32).getZExtValue())
-              .addReg(Dst, RegState::Implicit);
-    } else {
-      assert(SrcOp.isReg());
-      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
-              .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
-              .addReg(Dst, RegState::Implicit);
-      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
-              .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
-              .addReg(Dst, RegState::Implicit);
-    }
-    MI->eraseFromParent();
-    break;
-  }
-
-  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
-    unsigned Dst = MI->getOperand(0).getReg();
-    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
-    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
-    unsigned Src0 = MI->getOperand(1).getReg();
-    unsigned Src1 = MI->getOperand(2).getReg();
-    const MachineOperand &SrcCond = MI->getOperand(3);
-
-    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
-        .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
-        .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
-        .addOperand(SrcCond);
-    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
-        .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
-        .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
-        .addOperand(SrcCond);
-    MI->eraseFromParent();
-    break;
-  }
-  }
-  return true;
-}
-
-MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
-                                              bool NewMI) const {
-
-  if (MI->getNumOperands() < 3)
-    return nullptr;
-
-  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                           AMDGPU::OpName::src0);
-  assert(Src0Idx != -1 && "Should always have src0 operand");
-
-  MachineOperand &Src0 = MI->getOperand(Src0Idx);
-  if (!Src0.isReg())
-    return nullptr;
-
-  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                           AMDGPU::OpName::src1);
-  if (Src1Idx == -1)
-    return nullptr;
-
-  MachineOperand &Src1 = MI->getOperand(Src1Idx);
-
-  // Make sure it's legal to commute operands for VOP2.
-  if (isVOP2(MI->getOpcode()) &&
-      (!isOperandLegal(MI, Src0Idx, &Src1) ||
-       !isOperandLegal(MI, Src1Idx, &Src0))) {
-    return nullptr;
-  }
-
-  if (!Src1.isReg()) {
-    // Allow commuting instructions with Imm operands.
-    if (NewMI || !Src1.isImm() ||
-       (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
-      return nullptr;
-    }
-
-    // Be sure to copy the source modifiers to the right place.
-    if (MachineOperand *Src0Mods
-          = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
-      MachineOperand *Src1Mods
-        = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);
-
-      // The modifier operands come in pairs; if src1_modifiers is somehow
-      // missing, bail out instead of dereferencing it below.
-      if (!Src1Mods)
-        return nullptr;
-
-      int Src0ModsVal = Src0Mods->getImm();
-
-      // XXX - This assert might be a lie. It might be useful to have a neg
-      // modifier with 0.0.
-      int Src1ModsVal = Src1Mods->getImm();
-      assert((Src1ModsVal == 0) && "Not expecting modifiers with immediates");
-
-      Src1Mods->setImm(Src0ModsVal);
-      Src0Mods->setImm(Src1ModsVal);
-    }
-
-    unsigned Reg = Src0.getReg();
-    unsigned SubReg = Src0.getSubReg();
-    if (Src1.isImm())
-      Src0.ChangeToImmediate(Src1.getImm());
-    else
-      llvm_unreachable("Should only have immediates");
-
-    Src1.ChangeToRegister(Reg, false);
-    Src1.setSubReg(SubReg);
-  } else {
-    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
-  }
-
-  if (MI)
-    MI->setDesc(get(commuteOpcode(*MI)));
-
-  return MI;
-}
-
-// This needs to be implemented because the source modifiers may be inserted
-// between the true commutable operands, and the base
-// TargetInstrInfo::commuteInstruction uses it.
-bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
-                                        unsigned &SrcOpIdx1,
-                                        unsigned &SrcOpIdx2) const {
-  const MCInstrDesc &MCID = MI->getDesc();
-  if (!MCID.isCommutable())
-    return false;
-
-  unsigned Opc = MI->getOpcode();
-  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
-  if (Src0Idx == -1)
-    return false;
-
-  // FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
-  // immediate.
-  if (!MI->getOperand(Src0Idx).isReg())
-    return false;
-
-  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
-  if (Src1Idx == -1)
-    return false;
-
-  if (!MI->getOperand(Src1Idx).isReg())
-    return false;
-
-  // If any source modifiers are set, the generic instruction commuting won't
-  // understand how to copy the source modifiers.
-  if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
-      hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
-    return false;
-
-  SrcOpIdx1 = Src0Idx;
-  SrcOpIdx2 = Src1Idx;
-  return true;
-}
-
-MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
-                                         MachineBasicBlock::iterator I,
-                                         unsigned DstReg,
-                                         unsigned SrcReg) const {
-  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
-                 DstReg).addReg(SrcReg);
-}
-
-bool SIInstrInfo::isMov(unsigned Opcode) const {
-  switch(Opcode) {
-  default: return false;
-  case AMDGPU::S_MOV_B32:
-  case AMDGPU::S_MOV_B64:
-  case AMDGPU::V_MOV_B32_e32:
-  case AMDGPU::V_MOV_B32_e64:
-    return true;
-  }
-}
-
-bool
-SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
-  return RC != &AMDGPU::EXECRegRegClass;
-}
-
-static void removeModOperands(MachineInstr &MI) {
-  unsigned Opc = MI.getOpcode();
-  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
-                                              AMDGPU::OpName::src0_modifiers);
-  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
-                                              AMDGPU::OpName::src1_modifiers);
-  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
-                                              AMDGPU::OpName::src2_modifiers);
-
-  MI.RemoveOperand(Src2ModIdx);
-  MI.RemoveOperand(Src1ModIdx);
-  MI.RemoveOperand(Src0ModIdx);
-}
-
-bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
-                                unsigned Reg, MachineRegisterInfo *MRI) const {
-  if (!MRI->hasOneNonDBGUse(Reg))
-    return false;
-
-  unsigned Opc = UseMI->getOpcode();
-  if (Opc == AMDGPU::V_MAD_F32) {
-    // Don't fold if we are using source modifiers. The new VOP2 instructions
-    // don't have them.
-    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
-        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
-        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
-      return false;
-    }
-
-    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
-    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
-    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);
-
-    // Multiplied part is the constant: Use v_madmk_f32
-    // We should only expect these to be on src0 due to canonicalizations.
-    if (Src0->isReg() && Src0->getReg() == Reg) {
-      if (!Src1->isReg() ||
-          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
-        return false;
-
-      if (!Src2->isReg() ||
-          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
-        return false;
-
-      // We need to do some weird looking operand shuffling since the madmk
-      // operands are out of the normal expected order with the multiplied
-      // constant as the last operand.
-      //
-      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 (which computes
-      // src0 * K + src1, with the constant K carried in the src2 slot):
-      // src0 -> src2 (its constant value becomes the immediate K)
-      // src1 -> src0
-      // src2 -> src1
-
-      const int64_t Imm = DefMI->getOperand(1).getImm();
-
-      // FIXME: This would be a lot easier if we could return a new instruction
-      // instead of having to modify in place.
-
-      // Remove these first since they are at the end.
-      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
-                                                      AMDGPU::OpName::omod));
-      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
-                                                      AMDGPU::OpName::clamp));
-
-      unsigned Src1Reg = Src1->getReg();
-      unsigned Src1SubReg = Src1->getSubReg();
-      unsigned Src2Reg = Src2->getReg();
-      unsigned Src2SubReg = Src2->getSubReg();
-      Src0->setReg(Src1Reg);
-      Src0->setSubReg(Src1SubReg);
-      Src0->setIsKill(Src1->isKill());
-
-      Src1->setReg(Src2Reg);
-      Src1->setSubReg(Src2SubReg);
-      Src1->setIsKill(Src2->isKill());
-
-      Src2->ChangeToImmediate(Imm);
-
-      removeModOperands(*UseMI);
-      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));
-
-      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
-      if (DeleteDef)
-        DefMI->eraseFromParent();
-
-      return true;
-    }
-
-    // Added part is the constant: Use v_madak_f32
-    if (Src2->isReg() && Src2->getReg() == Reg) {
-      // Not allowed to use constant bus for another operand.
-      // We can however allow an inline immediate as src0.
-      if (!Src0->isImm() &&
-          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
-        return false;
-
-      if (!Src1->isReg() ||
-          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
-        return false;
-
-      const int64_t Imm = DefMI->getOperand(1).getImm();
-
-      // FIXME: This would be a lot easier if we could return a new instruction
-      // instead of having to modify in place.
-
-      // Remove these first since they are at the end.
-      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
-                                                      AMDGPU::OpName::omod));
-      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
-                                                      AMDGPU::OpName::clamp));
-
-      Src2->ChangeToImmediate(Imm);
-
-      // These come before src2.
-      removeModOperands(*UseMI);
-      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));
-
-      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
-      if (DeleteDef)
-        DefMI->eraseFromParent();
-
-      return true;
-    }
-  }
-
-  return false;
-}
-
-bool
-SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const {
-  switch(MI->getOpcode()) {
-  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
-  case AMDGPU::S_MOV_B32:
-  case AMDGPU::S_MOV_B64:
-  case AMDGPU::V_MOV_B32_e32:
-    return MI->getOperand(1).isImm();
-  }
-}
-
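-// Returns true when [OffsetA, OffsetA + WidthA) and [OffsetB, OffsetB + WidthB)
-// are disjoint. E.g. WidthA = 4, OffsetA = 0 vs. WidthB = 4, OffsetB = 4 do
-// not overlap, because 0 + 4 <= 4.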
-static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
-                                int WidthB, int OffsetB) {
-  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
-  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
-  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
-  return LowOffset + LowWidth <= HighOffset;
-}
-
-bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
-                                               MachineInstr *MIb) const {
-  unsigned BaseReg0, Offset0;
-  unsigned BaseReg1, Offset1;
-
-  if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
-      getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
-    assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
-           "read2 / write2 not expected here yet");
-    unsigned Width0 = (*MIa->memoperands_begin())->getSize();
-    unsigned Width1 = (*MIb->memoperands_begin())->getSize();
-    if (BaseReg0 == BaseReg1 &&
-        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
-                                                  MachineInstr *MIb,
-                                                  AliasAnalysis *AA) const {
-  unsigned Opc0 = MIa->getOpcode();
-  unsigned Opc1 = MIb->getOpcode();
-
-  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
-         "MIa must load from or modify a memory location");
-  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
-         "MIb must load from or modify a memory location");
-
-  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
-    return false;
-
-  // XXX - Can we relax this between address spaces?
-  if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
-    return false;
-
-  // TODO: Should we check the address space from the MachineMemOperand? That
-  // would allow us to distinguish objects we know don't alias based on the
-  // underlying address space, even if it was lowered to a different one,
-  // e.g. private accesses lowered to use MUBUF instructions on a scratch
-  // buffer.
-  if (isDS(Opc0)) {
-    if (isDS(Opc1))
-      return checkInstOffsetsDoNotOverlap(MIa, MIb);
-
-    return !isFLAT(Opc1);
-  }
-
-  if (isMUBUF(Opc0) || isMTBUF(Opc0)) {
-    if (isMUBUF(Opc1) || isMTBUF(Opc1))
-      return checkInstOffsetsDoNotOverlap(MIa, MIb);
-
-    return !isFLAT(Opc1) && !isSMRD(Opc1);
-  }
-
-  if (isSMRD(Opc0)) {
-    if (isSMRD(Opc1))
-      return checkInstOffsetsDoNotOverlap(MIa, MIb);
-
-    return !isFLAT(Opc1) && !isMUBUF(Opc1) && !isMTBUF(Opc1);
-  }
-
-  if (isFLAT(Opc0)) {
-    if (isFLAT(Opc1))
-      return checkInstOffsetsDoNotOverlap(MIa, MIb);
-
-    return false;
-  }
-
-  return false;
-}
-
-bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
-  int64_t SVal = Imm.getSExtValue();
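-  // The ISA's inline constants cover the small integers -16..64 plus a
-  // fixed set of floating-point values, checked below.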
-  if (SVal >= -16 && SVal <= 64)
-    return true;
-
-  if (Imm.getBitWidth() == 64) {
-    uint64_t Val = Imm.getZExtValue();
-    return (DoubleToBits(0.0) == Val) ||
-           (DoubleToBits(1.0) == Val) ||
-           (DoubleToBits(-1.0) == Val) ||
-           (DoubleToBits(0.5) == Val) ||
-           (DoubleToBits(-0.5) == Val) ||
-           (DoubleToBits(2.0) == Val) ||
-           (DoubleToBits(-2.0) == Val) ||
-           (DoubleToBits(4.0) == Val) ||
-           (DoubleToBits(-4.0) == Val);
-  }
-
-  // The actual type of the operand does not seem to matter as long
-  // as the bits match one of the inline immediate values.  For example:
-  //
-  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
-  // so it is a legal inline immediate.
-  //
-  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
-  // floating-point, so it is a legal inline immediate.
-  uint32_t Val = Imm.getZExtValue();
-
-  return (FloatToBits(0.0f) == Val) ||
-         (FloatToBits(1.0f) == Val) ||
-         (FloatToBits(-1.0f) == Val) ||
-         (FloatToBits(0.5f) == Val) ||
-         (FloatToBits(-0.5f) == Val) ||
-         (FloatToBits(2.0f) == Val) ||
-         (FloatToBits(-2.0f) == Val) ||
-         (FloatToBits(4.0f) == Val) ||
-         (FloatToBits(-4.0f) == Val);
-}
-
-bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
-                                   unsigned OpSize) const {
-  if (MO.isImm()) {
-    // MachineOperand provides no way to tell the true operand size, since it
-    // only records a 64-bit value. We need to know the size to determine if a
-    // 32-bit floating point immediate bit pattern is legal for an integer
-    // immediate. It would be for any 32-bit integer operand, but would not be
-    // for a 64-bit one.
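-    //
-    // For example, 0x3f800000 (1.0f) is an inline constant as a 32-bit
-    // operand, but as a 64-bit operand the same bits match none of the
-    // inline patterns and must be encoded as a literal.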
-
-    unsigned BitSize = 8 * OpSize;
-    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
-  }
-
-  return false;
-}
-
-bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
-                                    unsigned OpSize) const {
-  return MO.isImm() && !isInlineConstant(MO, OpSize);
-}
-
-static bool compareMachineOp(const MachineOperand &Op0,
-                             const MachineOperand &Op1) {
-  if (Op0.getType() != Op1.getType())
-    return false;
-
-  switch (Op0.getType()) {
-  case MachineOperand::MO_Register:
-    return Op0.getReg() == Op1.getReg();
-  case MachineOperand::MO_Immediate:
-    return Op0.getImm() == Op1.getImm();
-  default:
-    llvm_unreachable("Didn't expect to be comparing these operand types");
-  }
-}
-
-bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
-                                 const MachineOperand &MO) const {
-  const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
-
-  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
-
-  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
-    return true;
-
-  if (OpInfo.RegClass < 0)
-    return false;
-
-  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
-  if (isLiteralConstant(MO, OpSize))
-    return RI.opCanUseLiteralConstant(OpInfo.OperandType);
-
-  return RI.opCanUseInlineConstant(OpInfo.OperandType);
-}
-
-bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
-  int Op32 = AMDGPU::getVOPe32(Opcode);
-  if (Op32 == -1)
-    return false;
-
-  return pseudoToMCOpcode(Op32) != -1;
-}
-
-bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
-  // The src0_modifier operand is present on all instructions
-  // that have modifiers.
-
-  return AMDGPU::getNamedOperandIdx(Opcode,
-                                    AMDGPU::OpName::src0_modifiers) != -1;
-}
-
-bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
-                                  unsigned OpName) const {
-  const MachineOperand *Mods = getNamedOperand(MI, OpName);
-  return Mods && Mods->getImm();
-}
-
-bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
-                                  const MachineOperand &MO,
-                                  unsigned OpSize) const {
-  // Literal constants use the constant bus.
-  if (isLiteralConstant(MO, OpSize))
-    return true;
-
-  if (!MO.isReg() || !MO.isUse())
-    return false;
-
-  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
-    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
-
-  // FLAT_SCR is just an SGPR pair.
-  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
-    return true;
-
-  // EXEC register uses the constant bus.
-  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
-    return true;
-
-  // SGPRs use the constant bus
-  if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
-      (!MO.isImplicit() &&
-      (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
-       AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
-    return true;
-  }
-
-  return false;
-}
-
-bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
-                                    StringRef &ErrInfo) const {
-  uint16_t Opcode = MI->getOpcode();
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
-  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
-  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
-
-  // Make sure the number of operands is correct.
-  const MCInstrDesc &Desc = get(Opcode);
-  if (!Desc.isVariadic() &&
-      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
-     ErrInfo = "Instruction has wrong number of operands.";
-     return false;
-  }
-
-  // Make sure the register classes are correct
-  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
-    if (MI->getOperand(i).isFPImm()) {
-      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
-                "all fp values to integers.";
-      return false;
-    }
-
-    int RegClass = Desc.OpInfo[i].RegClass;
-
-    switch (Desc.OpInfo[i].OperandType) {
-    case MCOI::OPERAND_REGISTER:
-      if (MI->getOperand(i).isImm()) {
-        ErrInfo = "Illegal immediate value for operand.";
-        return false;
-      }
-      break;
-    case AMDGPU::OPERAND_REG_IMM32:
-      break;
-    case AMDGPU::OPERAND_REG_INLINE_C:
-      if (isLiteralConstant(MI->getOperand(i),
-                            RI.getRegClass(RegClass)->getSize())) {
-        ErrInfo = "Illegal immediate value for operand.";
-        return false;
-      }
-      break;
-    case MCOI::OPERAND_IMMEDIATE:
-      // Check if this operand is an immediate.
-      // FrameIndex operands will be replaced by immediates, so they are
-      // allowed.
-      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
-        ErrInfo = "Expected immediate, but got non-immediate";
-        return false;
-      }
-      // Fall-through
-    default:
-      continue;
-    }
-
-    if (!MI->getOperand(i).isReg())
-      continue;
-
-    if (RegClass != -1) {
-      unsigned Reg = MI->getOperand(i).getReg();
-      if (TargetRegisterInfo::isVirtualRegister(Reg))
-        continue;
-
-      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
-      if (!RC->contains(Reg)) {
-        ErrInfo = "Operand has incorrect register class.";
-        return false;
-      }
-    }
-  }
-
-  // Verify VOP*
-  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
-    // Only look at the true operands. Only a real operand can use the constant
-    // bus, and we don't want to check pseudo-operands like the source modifier
-    // flags.
-    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
-
-    unsigned ConstantBusCount = 0;
-    unsigned SGPRUsed = AMDGPU::NoRegister;
-    for (int OpIdx : OpIndices) {
-      if (OpIdx == -1)
-        break;
-      const MachineOperand &MO = MI->getOperand(OpIdx);
-      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
-        if (MO.isReg()) {
-          if (MO.getReg() != SGPRUsed)
-            ++ConstantBusCount;
-          SGPRUsed = MO.getReg();
-        } else {
-          ++ConstantBusCount;
-        }
-      }
-    }
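-    // Repeated reads of one SGPR (e.g. v_add_f32 v0, s0, s0) only count as
-    // a single use of the constant bus.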
-    if (ConstantBusCount > 1) {
-      ErrInfo = "VOP* instruction uses the constant bus more than once";
-      return false;
-    }
-  }
-
-  // Verify misc. restrictions on specific instructions.
-  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
-      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
-    const MachineOperand &Src0 = MI->getOperand(Src0Idx);
-    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
-    const MachineOperand &Src2 = MI->getOperand(Src2Idx);
-    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
-      if (!compareMachineOp(Src0, Src1) &&
-          !compareMachineOp(Src0, Src2)) {
-        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
-        return false;
-      }
-    }
-  }
-
-  return true;
-}
-
-unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
-  switch (MI.getOpcode()) {
-  default: return AMDGPU::INSTRUCTION_LIST_END;
-  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
-  case AMDGPU::COPY: return AMDGPU::COPY;
-  case AMDGPU::PHI: return AMDGPU::PHI;
-  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
-  case AMDGPU::S_MOV_B32:
-    return MI.getOperand(1).isReg() ?
-           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
-  case AMDGPU::S_ADD_I32:
-  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
-  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
-  case AMDGPU::S_SUB_I32:
-  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
-  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
-  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
-  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
-  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
-  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
-  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
-  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
-  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
-  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
-  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
-  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
-  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
-  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
-  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
-  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
-  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
-  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
-  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
-  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
-  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
-  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
-  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
-  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
-  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
-  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
-  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
-  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
-  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
-  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
-  case AMDGPU::S_LOAD_DWORD_IMM:
-  case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
-  case AMDGPU::S_LOAD_DWORDX2_IMM:
-  case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
-  case AMDGPU::S_LOAD_DWORDX4_IMM:
-  case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
-  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
-  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
-  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
-  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
-  }
-}
-
-bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
-  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
-}
-
-const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
-                                                      unsigned OpNo) const {
-  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
-  const MCInstrDesc &Desc = get(MI.getOpcode());
-  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
-      Desc.OpInfo[OpNo].RegClass == -1) {
-    unsigned Reg = MI.getOperand(OpNo).getReg();
-
-    if (TargetRegisterInfo::isVirtualRegister(Reg))
-      return MRI.getRegClass(Reg);
-    return RI.getPhysRegClass(Reg);
-  }
-
-  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
-  return RI.getRegClass(RCID);
-}
-
-bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
-  switch (MI.getOpcode()) {
-  case AMDGPU::COPY:
-  case AMDGPU::REG_SEQUENCE:
-  case AMDGPU::PHI:
-  case AMDGPU::INSERT_SUBREG:
-    return RI.hasVGPRs(getOpRegClass(MI, 0));
-  default:
-    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
-  }
-}
-
-void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
-  MachineBasicBlock::iterator I = MI;
-  MachineBasicBlock *MBB = MI->getParent();
-  MachineOperand &MO = MI->getOperand(OpIdx);
-  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
-  const TargetRegisterClass *RC = RI.getRegClass(RCID);
-  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
-  if (MO.isReg())
-    Opcode = AMDGPU::COPY;
-  else if (RI.isSGPRClass(RC))
-    Opcode = AMDGPU::S_MOV_B32;
-
-  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
-  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
-    VRC = &AMDGPU::VReg_64RegClass;
-  else
-    VRC = &AMDGPU::VGPR_32RegClass;
-
-  unsigned Reg = MRI.createVirtualRegister(VRC);
-  DebugLoc DL = MBB->findDebugLoc(I);
-  BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
-    .addOperand(MO);
-  MO.ChangeToRegister(Reg, false);
-}
-
-unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
-                                         MachineRegisterInfo &MRI,
-                                         MachineOperand &SuperReg,
-                                         const TargetRegisterClass *SuperRC,
-                                         unsigned SubIdx,
-                                         const TargetRegisterClass *SubRC)
-                                         const {
-  assert(SuperReg.isReg());
-
-  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
-  unsigned SubReg = MRI.createVirtualRegister(SubRC);
-
-  // Just in case the super register is itself a sub-register, copy it to a new
-  // value so we don't need to worry about merging its subreg index with the
-  // SubIdx passed to this function. The register coalescer should be able to
-  // eliminate this extra copy.
-  MachineBasicBlock *MBB = MI->getParent();
-  DebugLoc DL = MI->getDebugLoc();
-
-  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
-    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
-
-  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
-    .addReg(NewSuperReg, 0, SubIdx);
-
-  return SubReg;
-}
-
-MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
-  MachineBasicBlock::iterator MII,
-  MachineRegisterInfo &MRI,
-  MachineOperand &Op,
-  const TargetRegisterClass *SuperRC,
-  unsigned SubIdx,
-  const TargetRegisterClass *SubRC) const {
-  if (Op.isImm()) {
-    // XXX - Is there a better way to do this?
-    if (SubIdx == AMDGPU::sub0)
-      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
-    if (SubIdx == AMDGPU::sub1)
-      return MachineOperand::CreateImm(Op.getImm() >> 32);
-
-    llvm_unreachable("Unhandled register index for immediate");
-  }
-
-  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
-                                       SubIdx, SubRC);
-  return MachineOperand::CreateReg(SubReg, false);
-}
-
-unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
-                                    MachineBasicBlock::iterator MI,
-                                    MachineRegisterInfo &MRI,
-                                    const TargetRegisterClass *RC,
-                                    const MachineOperand &Op) const {
-  MachineBasicBlock *MBB = MI->getParent();
-  DebugLoc DL = MI->getDebugLoc();
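-  // E.g. Op.getImm() == 0x0000000100000002 splits into LoDst = 2 and
-  // HiDst = 1, recombined below with a REG_SEQUENCE.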
-  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-  unsigned Dst = MRI.createVirtualRegister(RC);
-
-  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
-                             LoDst)
-    .addImm(Op.getImm() & 0xFFFFFFFF);
-  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
-                             HiDst)
-    .addImm(Op.getImm() >> 32);
-
-  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
-    .addReg(LoDst)
-    .addImm(AMDGPU::sub0)
-    .addReg(HiDst)
-    .addImm(AMDGPU::sub1);
-
-  Worklist.push_back(Lo);
-  Worklist.push_back(Hi);
-
-  return Dst;
-}
-
-// Change the order of operands from (0, 1, 2) to (0, 2, 1)
-void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
-  assert(Inst->getNumExplicitOperands() == 3);
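-  // Removing operand 1 shifts operand 2 down into slot 1; re-appending the
-  // old operand 1 then lands it in slot 2.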
-  MachineOperand Op1 = Inst->getOperand(1);
-  Inst->RemoveOperand(1);
-  Inst->addOperand(Op1);
-}
-
-bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
-                                 const MachineOperand *MO) const {
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-  const MCInstrDesc &InstDesc = get(MI->getOpcode());
-  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
-  const TargetRegisterClass *DefinedRC =
-      OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
-  if (!MO)
-    MO = &MI->getOperand(OpIdx);
-
-  if (isVALU(InstDesc.Opcode) &&
-      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
-    unsigned SGPRUsed =
-        MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
-    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-      if (i == OpIdx)
-        continue;
-      const MachineOperand &Op = MI->getOperand(i);
-      if (Op.isReg() && Op.getReg() != SGPRUsed &&
-          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
-        return false;
-      }
-    }
-  }
-
-  if (MO->isReg()) {
-    assert(DefinedRC);
-    const TargetRegisterClass *RC = MRI.getRegClass(MO->getReg());
-
-    // In order to be legal, the common sub-class must be equal to the
-    // class of the current operand.  For example:
-    //
-    // v_mov_b32 s0 ; Operand defined as vsrc_32
-    //              ; RI.getCommonSubClass(s0,vsrc_32) = sgpr ; LEGAL
-    //
-    // s_sendmsg 0, s0 ; Operand defined as m0reg
-    //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
-
-    return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
-  }
-
-  // Handle non-register types that are treated like immediates.
-  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
-
-  if (!DefinedRC) {
-    // This operand expects an immediate.
-    return true;
-  }
-
-  return isImmOperandLegal(MI, OpIdx, *MO);
-}
-
-void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
-  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-
-  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                           AMDGPU::OpName::src0);
-  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                           AMDGPU::OpName::src1);
-  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                           AMDGPU::OpName::src2);
-
-  // Legalize VOP2
-  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
-    // Legalize src0
-    if (!isOperandLegal(MI, Src0Idx))
-      legalizeOpWithMove(MI, Src0Idx);
-
-    // Legalize src1
-    if (isOperandLegal(MI, Src1Idx))
-      return;
-
-    // Usually src0 of VOP2 instructions allows more types of inputs
-    // than src1, so try to commute the instruction to decrease our
-    // chances of having to insert a MOV instruction to legalize src1.
-    if (MI->isCommutable()) {
-      if (commuteInstruction(MI))
-        // If we are successful in commuting, then we know MI is legal, so
-        // we are done.
-        return;
-    }
-
-    legalizeOpWithMove(MI, Src1Idx);
-    return;
-  }
-
-  // XXX - Do any VOP3 instructions read VCC?
-  // Legalize VOP3
-  if (isVOP3(MI->getOpcode())) {
-    int VOP3Idx[3] = { Src0Idx, Src1Idx, Src2Idx };
-
-    // Find the one SGPR operand we are allowed to use.
-    unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
-
-    for (unsigned i = 0; i < 3; ++i) {
-      int Idx = VOP3Idx[i];
-      if (Idx == -1)
-        break;
-      MachineOperand &MO = MI->getOperand(Idx);
-
-      if (MO.isReg()) {
-        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
-          continue; // VGPRs are legal
-
-        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
-
-        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
-          SGPRReg = MO.getReg();
-          // We can use one SGPR in each VOP3 instruction.
-          continue;
-        }
-      } else if (!isLiteralConstant(MO, getOpSize(MI->getOpcode(), Idx))) {
-        // If it is not a register and not a literal constant, then it must be
-        // an inline constant which is always legal.
-        continue;
-      }
-      // If we make it this far, then the operand is not legal and we must
-      // legalize it.
-      legalizeOpWithMove(MI, Idx);
-    }
-  }
-
-  // Legalize REG_SEQUENCE and PHI
-  // The register class of the operands must be the same type as the register
-  // class of the output.
-  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
-      MI->getOpcode() == AMDGPU::PHI) {
-    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
-    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
-      if (!MI->getOperand(i).isReg() ||
-          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
-        continue;
-      const TargetRegisterClass *OpRC =
-              MRI.getRegClass(MI->getOperand(i).getReg());
-      if (RI.hasVGPRs(OpRC)) {
-        VRC = OpRC;
-      } else {
-        SRC = OpRC;
-      }
-    }
-
-    // If any of the operands are VGPR registers, then they all must be;
-    // otherwise we will create illegal VGPR->SGPR copies when legalizing
-    // them.
-    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
-      if (!VRC) {
-        assert(SRC);
-        VRC = RI.getEquivalentVGPRClass(SRC);
-      }
-      RC = VRC;
-    } else {
-      RC = SRC;
-    }
-
-    // Update all the operands so they have the same type.
-    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
-      if (!MI->getOperand(i).isReg() ||
-          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
-        continue;
-      unsigned DstReg = MRI.createVirtualRegister(RC);
-      MachineBasicBlock *InsertBB;
-      MachineBasicBlock::iterator Insert;
-      if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
-        InsertBB = MI->getParent();
-        Insert = MI;
-      } else {
-        // MI is a PHI instruction.
-        InsertBB = MI->getOperand(i + 1).getMBB();
-        Insert = InsertBB->getFirstTerminator();
-      }
-      BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
-              get(AMDGPU::COPY), DstReg)
-              .addOperand(MI->getOperand(i));
-      MI->getOperand(i).setReg(DstReg);
-    }
-  }
-
-  // Legalize INSERT_SUBREG
-  // src0 must have the same register class as dst
-  if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
-    unsigned Dst = MI->getOperand(0).getReg();
-    unsigned Src0 = MI->getOperand(1).getReg();
-    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
-    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
-    if (DstRC != Src0RC) {
-      MachineBasicBlock &MBB = *MI->getParent();
-      unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
-      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
-              .addReg(Src0);
-      MI->getOperand(1).setReg(NewSrc0);
-    }
-    return;
-  }
-
-  // Legalize MUBUF* instructions
-  // FIXME: If we start using the non-addr64 instructions for compute, we
-  // may need to legalize them here.
-  int SRsrcIdx =
-      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
-  if (SRsrcIdx != -1) {
-    // We have an MUBUF instruction
-    MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
-    unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
-    if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
-                                             RI.getRegClass(SRsrcRC))) {
-      // The operands are legal.
-      // FIXME: We may need to legalize operands besides srsrc.
-      return;
-    }
-
-    MachineBasicBlock &MBB = *MI->getParent();
-    // Extract the ptr from the resource descriptor.
-
-    // SRsrcPtrLo = srsrc:sub0
-    unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
-        &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VGPR_32RegClass);
-
-    // SRsrcPtrHi = srsrc:sub1
-    unsigned SRsrcPtrHi = buildExtractSubReg(MI, MRI, *SRsrc,
-        &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VGPR_32RegClass);
-
-    // Create an empty resource descriptor
-    unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
-    unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-    unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-    unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
-    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
-
-    // Zero64 = 0
-    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
-            Zero64)
-            .addImm(0);
-
-    // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
-    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
-            SRsrcFormatLo)
-            .addImm(RsrcDataFormat & 0xFFFFFFFF);
-
-    // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
-    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
-            SRsrcFormatHi)
-            .addImm(RsrcDataFormat >> 32);
-
-    // NewSRsrc = {Zero64, SRsrcFormat}
-    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
-            NewSRsrc)
-            .addReg(Zero64)
-            .addImm(AMDGPU::sub0_sub1)
-            .addReg(SRsrcFormatLo)
-            .addImm(AMDGPU::sub2)
-            .addReg(SRsrcFormatHi)
-            .addImm(AMDGPU::sub3);
-
-    MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
-    unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
-    unsigned NewVAddrLo;
-    unsigned NewVAddrHi;
-    if (VAddr) {
-      // This is already an ADDR64 instruction so we need to add the pointer
-      // extracted from the resource descriptor to the current value of VAddr.
-      NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-      NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-
-      // NewVaddrLo = SRsrcPtrLo + VAddr:sub0
-      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
-              NewVAddrLo)
-              .addReg(SRsrcPtrLo)
-              .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
-              .addReg(AMDGPU::VCC, RegState::ImplicitDefine);
-
-      // NewVaddrHi = SRsrcPtrHi + VAddr:sub1
-      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
-              NewVAddrHi)
-              .addReg(SRsrcPtrHi)
-              .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
-              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
-              .addReg(AMDGPU::VCC, RegState::Implicit);
-
-    } else {
-      // This instruction is the _OFFSET variant, so we need to convert it to
-      // ADDR64.
-      MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
-      MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
-      MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
-
-      // Create the new instruction.
-      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
-      MachineInstr *Addr64 =
-          BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
-                  .addOperand(*VData)
-                  .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
-                                              // This will be replaced later
-                                              // with the new value of vaddr.
-                  .addOperand(*SRsrc)
-                  .addOperand(*SOffset)
-                  .addOperand(*Offset)
-                  .addImm(0) // glc
-                  .addImm(0) // slc
-                  .addImm(0); // tfe
-
-      MI->removeFromParent();
-      MI = Addr64;
-
-      NewVAddrLo = SRsrcPtrLo;
-      NewVAddrHi = SRsrcPtrHi;
-      VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
-      SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
-    }
-
-    // NewVaddr = {NewVaddrHi, NewVaddrLo}
-    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
-            NewVAddr)
-            .addReg(NewVAddrLo)
-            .addImm(AMDGPU::sub0)
-            .addReg(NewVAddrHi)
-            .addImm(AMDGPU::sub1);
-
-    // Update the instruction to use NewVaddr
-    VAddr->setReg(NewVAddr);
-    // Update the instruction to use NewSRsrc
-    SRsrc->setReg(NewSRsrc);
-  }
-}
-
-void SIInstrInfo::splitSMRD(MachineInstr *MI,
-                            const TargetRegisterClass *HalfRC,
-                            unsigned HalfImmOp, unsigned HalfSGPROp,
-                            MachineInstr *&Lo, MachineInstr *&Hi) const {
-
-  DebugLoc DL = MI->getDebugLoc();
-  MachineBasicBlock *MBB = MI->getParent();
-  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-  unsigned RegLo = MRI.createVirtualRegister(HalfRC);
-  unsigned RegHi = MRI.createVirtualRegister(HalfRC);
-  unsigned HalfSize = HalfRC->getSize();
-  const MachineOperand *OffOp =
-      getNamedOperand(*MI, AMDGPU::OpName::offset);
-  const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
-
-  // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
-  // on VI.
-
-  bool IsKill = SBase->isKill();
-  if (OffOp) {
-    bool isVI =
-        MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
-        AMDGPUSubtarget::VOLCANIC_ISLANDS;
-    unsigned OffScale = isVI ? 1 : 4;
-    // Handle the _IMM variant
-    unsigned LoOffset = OffOp->getImm() * OffScale;
-    unsigned HiOffset = LoOffset + HalfSize;
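-    // E.g. splitting S_LOAD_DWORDX8 (HalfSize = 16 bytes) with imm 0 on SI:
-    // LoOffset = 0 and HiOffset = 16, so the high half's immediate is
-    // 16 / 4 = 4 dwords. (Illustrative values only.)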
-    Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
-                  // Use addReg instead of addOperand
-                  // to make sure kill flag is cleared.
-                  .addReg(SBase->getReg(), 0, SBase->getSubReg())
-                  .addImm(LoOffset / OffScale);
-
-    if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
-      unsigned OffsetSGPR =
-          MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-      BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
-              .addImm(HiOffset); // The offset in register is in bytes.
-      Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
-                    .addReg(SBase->getReg(), getKillRegState(IsKill),
-                            SBase->getSubReg())
-                    .addReg(OffsetSGPR);
-    } else {
-      Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
-                     .addReg(SBase->getReg(), getKillRegState(IsKill),
-                             SBase->getSubReg())
-                     .addImm(HiOffset / OffScale);
-    }
-  } else {
-    // Handle the _SGPR variant
-    MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
-    Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
-                  .addReg(SBase->getReg(), 0, SBase->getSubReg())
-                  .addOperand(*SOff);
-    unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-    BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
-            .addOperand(*SOff)
-            .addImm(HalfSize);
-    Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp))
-                  .addReg(SBase->getReg(), getKillRegState(IsKill),
-                          SBase->getSubReg())
-                  .addReg(OffsetSGPR);
-  }
-
-  unsigned SubLo, SubHi;
-  switch (HalfSize) {
-    case 4:
-      SubLo = AMDGPU::sub0;
-      SubHi = AMDGPU::sub1;
-      break;
-    case 8:
-      SubLo = AMDGPU::sub0_sub1;
-      SubHi = AMDGPU::sub2_sub3;
-      break;
-    case 16:
-      SubLo = AMDGPU::sub0_sub1_sub2_sub3;
-      SubHi = AMDGPU::sub4_sub5_sub6_sub7;
-      break;
-    case 32:
-      SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
-      SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
-      break;
-    default:
-      llvm_unreachable("Unhandled HalfSize");
-  }
-
-  BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE))
-          .addOperand(MI->getOperand(0))
-          .addReg(RegLo)
-          .addImm(SubLo)
-          .addReg(RegHi)
-          .addImm(SubHi);
-}
-
-void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const {
-  MachineBasicBlock *MBB = MI->getParent();
-  switch (MI->getOpcode()) {
-    case AMDGPU::S_LOAD_DWORD_IMM:
-    case AMDGPU::S_LOAD_DWORD_SGPR:
-    case AMDGPU::S_LOAD_DWORDX2_IMM:
-    case AMDGPU::S_LOAD_DWORDX2_SGPR:
-    case AMDGPU::S_LOAD_DWORDX4_IMM:
-    case AMDGPU::S_LOAD_DWORDX4_SGPR: {
-      unsigned NewOpcode = getVALUOp(*MI);
-      unsigned RegOffset;
-      unsigned ImmOffset;
-
-      if (MI->getOperand(2).isReg()) {
-        RegOffset = MI->getOperand(2).getReg();
-        ImmOffset = 0;
-      } else {
-        assert(MI->getOperand(2).isImm());
-        // SMRD instructions take a dword offset on SI and a byte offset on
-        // VI, while MUBUF instructions always take a byte offset.
-        ImmOffset = MI->getOperand(2).getImm();
-        if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
-            AMDGPUSubtarget::SEA_ISLANDS)
-          ImmOffset <<= 2;
-        RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-
-        if (isUInt<12>(ImmOffset)) {
-          BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
-                  RegOffset)
-                  .addImm(0);
-        } else {
-          BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
-                  RegOffset)
-                  .addImm(ImmOffset);
-          ImmOffset = 0;
-        }
-      }
-
-      unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
-      unsigned DWord0 = RegOffset;
-      unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-      unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-      unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-      uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
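-      // Assemble a buffer resource descriptor { DWord0 = offset/base,
-      // DWord1 = 0, DWords 2-3 = default data format }, mirroring the one
-      // built in legalizeOperands() above.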
-
-      BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
-              .addImm(0);
-      BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
-              .addImm(RsrcDataFormat & 0xFFFFFFFF);
-      BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
-              .addImm(RsrcDataFormat >> 32);
-      BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
-              .addReg(DWord0)
-              .addImm(AMDGPU::sub0)
-              .addReg(DWord1)
-              .addImm(AMDGPU::sub1)
-              .addReg(DWord2)
-              .addImm(AMDGPU::sub2)
-              .addReg(DWord3)
-              .addImm(AMDGPU::sub3);
-      MI->setDesc(get(NewOpcode));
-      if (MI->getOperand(2).isReg()) {
-        MI->getOperand(2).setReg(SRsrc);
-      } else {
-        MI->getOperand(2).ChangeToRegister(SRsrc, false);
-      }
-      MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0));
-      MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
-      MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // glc
-      MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // slc
-      MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // tfe
-
-      const TargetRegisterClass *NewDstRC =
-          RI.getRegClass(get(NewOpcode).OpInfo[0].RegClass);
-
-      unsigned DstReg = MI->getOperand(0).getReg();
-      unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
-      MRI.replaceRegWith(DstReg, NewDstReg);
-      break;
-    }
-    case AMDGPU::S_LOAD_DWORDX8_IMM:
-    case AMDGPU::S_LOAD_DWORDX8_SGPR: {
-      MachineInstr *Lo, *Hi;
-      splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
-                AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
-      MI->eraseFromParent();
-      moveSMRDToVALU(Lo, MRI);
-      moveSMRDToVALU(Hi, MRI);
-      break;
-    }
-
-    case AMDGPU::S_LOAD_DWORDX16_IMM:
-    case AMDGPU::S_LOAD_DWORDX16_SGPR: {
-      MachineInstr *Lo, *Hi;
-      splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
-                AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
-      MI->eraseFromParent();
-      moveSMRDToVALU(Lo, MRI);
-      moveSMRDToVALU(Hi, MRI);
-      break;
-    }
-  }
-}
-
-void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
-  SmallVector<MachineInstr *, 128> Worklist;
-  Worklist.push_back(&TopInst);
-
-  while (!Worklist.empty()) {
-    MachineInstr *Inst = Worklist.pop_back_val();
-    MachineBasicBlock *MBB = Inst->getParent();
-    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-
-    unsigned Opcode = Inst->getOpcode();
-    unsigned NewOpcode = getVALUOp(*Inst);
-
-    // Handle some special cases
-    switch (Opcode) {
-    default:
-      if (isSMRD(Inst->getOpcode())) {
-        moveSMRDToVALU(Inst, MRI);
-      }
-      break;
-    case AMDGPU::S_MOV_B64: {
-      DebugLoc DL = Inst->getDebugLoc();
-
-      // If the source operand is a register we can replace this with a
-      // copy.
-      if (Inst->getOperand(1).isReg()) {
-        MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
-          .addOperand(Inst->getOperand(0))
-          .addOperand(Inst->getOperand(1));
-        Worklist.push_back(Copy);
-      } else {
-        // Otherwise, we need to split this into two movs, because there is
-        // no 64-bit VALU move instruction.
-        unsigned Reg = Inst->getOperand(0).getReg();
-        unsigned Dst = split64BitImm(Worklist,
-                                     Inst,
-                                     MRI,
-                                     MRI.getRegClass(Reg),
-                                     Inst->getOperand(1));
-        MRI.replaceRegWith(Reg, Dst);
-      }
-      Inst->eraseFromParent();
-      continue;
-    }
-    case AMDGPU::S_AND_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
-      Inst->eraseFromParent();
-      continue;
-
-    case AMDGPU::S_OR_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
-      Inst->eraseFromParent();
-      continue;
-
-    case AMDGPU::S_XOR_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
-      Inst->eraseFromParent();
-      continue;
-
-    case AMDGPU::S_NOT_B64:
-      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
-      Inst->eraseFromParent();
-      continue;
-
-    case AMDGPU::S_BCNT1_I32_B64:
-      splitScalar64BitBCNT(Worklist, Inst);
-      Inst->eraseFromParent();
-      continue;
-
-    case AMDGPU::S_BFE_I64: {
-      splitScalar64BitBFE(Worklist, Inst);
-      Inst->eraseFromParent();
-      continue;
-    }
-
-    case AMDGPU::S_LSHL_B32:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
-        swapOperands(Inst);
-      }
-      break;
-    case AMDGPU::S_ASHR_I32:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
-        swapOperands(Inst);
-      }
-      break;
-    case AMDGPU::S_LSHR_B32:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
-        swapOperands(Inst);
-      }
-      break;
-    case AMDGPU::S_LSHL_B64:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_LSHLREV_B64;
-        swapOperands(Inst);
-      }
-      break;
-    case AMDGPU::S_ASHR_I64:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_ASHRREV_I64;
-        swapOperands(Inst);
-      }
-      break;
-    case AMDGPU::S_LSHR_B64:
-      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        NewOpcode = AMDGPU::V_LSHRREV_B64;
-        swapOperands(Inst);
-      }
-      break;
-
-    case AMDGPU::S_BFE_U64:
-    case AMDGPU::S_BFM_B64:
-      llvm_unreachable("Moving this op to VALU not implemented");
-    }
-
-    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
-      // We cannot move this instruction to the VALU, so we should try to
-      // legalize its operands instead.
-      legalizeOperands(Inst);
-      continue;
-    }
-
-    // Use the new VALU Opcode.
-    const MCInstrDesc &NewDesc = get(NewOpcode);
-    Inst->setDesc(NewDesc);
-
-    // Remove any references to SCC. Vector instructions can't read from it,
-    // and we're just about to add the implicit use / defs of VCC, and we
-    // don't want both.
-    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
-      MachineOperand &Op = Inst->getOperand(i);
-      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
-        Inst->RemoveOperand(i);
-    }
-
-    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
-      // We are converting these to a BFE, so we need to add the missing
-      // operands for the size and offset.
-      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
-      Inst->addOperand(MachineOperand::CreateImm(0));
-      Inst->addOperand(MachineOperand::CreateImm(Size));
-
-    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
-      // The VALU version adds the second operand to the result, so insert an
-      // extra 0 operand.
-      Inst->addOperand(MachineOperand::CreateImm(0));
-    }
-
-    addDescImplicitUseDef(NewDesc, Inst);
-
-    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
-      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
-      // If we need to move this to VGPRs, we need to unpack the second operand
-      // back into the 2 separate ones for bit offset and width.
-      assert(OffsetWidthOp.isImm() &&
-             "Scalar BFE is only implemented for constant width and offset");
-      uint32_t Imm = OffsetWidthOp.getImm();
-
-      uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
-      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
-      Inst->RemoveOperand(2); // Remove old immediate.
-      Inst->addOperand(MachineOperand::CreateImm(Offset));
-      Inst->addOperand(MachineOperand::CreateImm(BitWidth));
-    }
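The packed S_BFE control word decoded above keeps the bit offset in bits [5:0] and the field width in bits [22:16]. As a standalone illustration (plain C++; packBFE/unpackBFE are hypothetical names, not part of the patch):

  #include <cassert>
  #include <cstdint>

  // Pack and unpack the scalar BFE immediate, mirroring the masks above.
  uint32_t packBFE(uint32_t Offset, uint32_t Width) {
    assert(Offset <= 0x3f && Width <= 0x7f);
    return (Width << 16) | Offset;
  }

  void unpackBFE(uint32_t Imm, uint32_t &Offset, uint32_t &Width) {
    Offset = Imm & 0x3f;             // bits [5:0]
    Width = (Imm & 0x7f0000) >> 16;  // bits [22:16]
  }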
-
-    // Update the destination register class.
-
-    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
-
-    switch (Opcode) {
-      // For target instructions, getOpRegClass just returns the virtual
-      // register class associated with the operand, so we need to find an
-      // equivalent VGPR register class in order to move the instruction to the
-      // VALU.
-    case AMDGPU::COPY:
-    case AMDGPU::PHI:
-    case AMDGPU::REG_SEQUENCE:
-    case AMDGPU::INSERT_SUBREG:
-      if (RI.hasVGPRs(NewDstRC))
-        continue;
-      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
-      if (!NewDstRC)
-        continue;
-      break;
-    default:
-      break;
-    }
-
-    unsigned DstReg = Inst->getOperand(0).getReg();
-    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
-    MRI.replaceRegWith(DstReg, NewDstReg);
-
-    // Legalize the operands
-    legalizeOperands(Inst);
-
-    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
-           E = MRI.use_end(); I != E; ++I) {
-      MachineInstr &UseMI = *I->getParent();
-      if (!canReadVGPR(UseMI, I.getOperandNo())) {
-        Worklist.push_back(&UseMI);
-      }
-    }
-  }
-}
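The loop that ends here is a fixed-point worklist: each SALU instruction is rewritten to its VALU equivalent (swapping operands for the *REV shift forms, whose operand order is reversed, hence the swapOperands calls), its destination is moved to a VGPR class, and any user that cannot read a VGPR is queued to be converted in turn. A minimal sketch of just the control flow, with the two callbacks standing in for the real conversion and use-scanning logic:

  #include <functional>
  #include <vector>

  struct Instr { /* stand-in for MachineInstr */ };

  // Generic fixed-point worklist: convert one instruction, then queue any
  // users that the conversion made illegal, until nothing changes.
  void moveAllToVALU(
      Instr *Root, const std::function<bool(Instr *)> &ConvertToVALU,
      const std::function<std::vector<Instr *>(Instr *)> &IllegalUsers) {
    std::vector<Instr *> Worklist{Root};
    while (!Worklist.empty()) {
      Instr *I = Worklist.back();
      Worklist.pop_back();
      if (!ConvertToVALU(I))
        continue;                  // no VALU form; operands were legalized instead
      for (Instr *U : IllegalUsers(I))
        Worklist.push_back(U);     // propagate until a fixed point
    }
  }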
-
-//===----------------------------------------------------------------------===//
-// Indirect addressing callbacks
-//===----------------------------------------------------------------------===//
-
-unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
-                                                 unsigned Channel) const {
-  assert(Channel == 0);
-  return RegIndex;
-}
-
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
-  return &AMDGPU::VGPR_32RegClass;
-}
-
-void SIInstrInfo::splitScalar64BitUnaryOp(
-  SmallVectorImpl<MachineInstr *> &Worklist,
-  MachineInstr *Inst,
-  unsigned Opcode) const {
-  MachineBasicBlock &MBB = *Inst->getParent();
-  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-
-  MachineOperand &Dest = Inst->getOperand(0);
-  MachineOperand &Src0 = Inst->getOperand(1);
-  DebugLoc DL = Inst->getDebugLoc();
-
-  MachineBasicBlock::iterator MII = Inst;
-
-  const MCInstrDesc &InstDesc = get(Opcode);
-  const TargetRegisterClass *Src0RC = Src0.isReg() ?
-    MRI.getRegClass(Src0.getReg()) :
-    &AMDGPU::SGPR_32RegClass;
-
-  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
-
-  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
-                                                       AMDGPU::sub0, Src0SubRC);
-
-  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
-  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
-
-  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
-  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
-    .addOperand(SrcReg0Sub0);
-
-  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
-                                                       AMDGPU::sub1, Src0SubRC);
-
-  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
-  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
-    .addOperand(SrcReg0Sub1);
-
-  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
-  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
-    .addReg(DestSub0)
-    .addImm(AMDGPU::sub0)
-    .addReg(DestSub1)
-    .addImm(AMDGPU::sub1);
-
-  MRI.replaceRegWith(Dest.getReg(), FullDestReg);
-
-  // Try to legalize the operands in case we need to swap the order to keep it
-  // valid.
-  Worklist.push_back(LoHalf);
-  Worklist.push_back(HiHalf);
-}
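The split relies on a bitwise 64-bit unary op being the same 32-bit op applied to each half, recombined with REG_SEQUENCE. A plain C++ sanity check of that identity for the S_NOT_B64 case handled earlier:

  #include <cstdint>

  uint64_t not64_split(uint64_t X) {
    uint32_t Lo = ~static_cast<uint32_t>(X);        // sub0 half
    uint32_t Hi = ~static_cast<uint32_t>(X >> 32);  // sub1 half
    return (static_cast<uint64_t>(Hi) << 32) | Lo;  // REG_SEQUENCE analogue
  }
  // not64_split(X) == ~X for any X.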
-
-void SIInstrInfo::splitScalar64BitBinaryOp(
-  SmallVectorImpl<MachineInstr *> &Worklist,
-  MachineInstr *Inst,
-  unsigned Opcode) const {
-  MachineBasicBlock &MBB = *Inst->getParent();
-  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-
-  MachineOperand &Dest = Inst->getOperand(0);
-  MachineOperand &Src0 = Inst->getOperand(1);
-  MachineOperand &Src1 = Inst->getOperand(2);
-  DebugLoc DL = Inst->getDebugLoc();
-
-  MachineBasicBlock::iterator MII = Inst;
-
-  const MCInstrDesc &InstDesc = get(Opcode);
-  const TargetRegisterClass *Src0RC = Src0.isReg() ?
-    MRI.getRegClass(Src0.getReg()) :
-    &AMDGPU::SGPR_32RegClass;
-
-  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
-  const TargetRegisterClass *Src1RC = Src1.isReg() ?
-    MRI.getRegClass(Src1.getReg()) :
-    &AMDGPU::SGPR_32RegClass;
-
-  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
-
-  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
-                                                       AMDGPU::sub0, Src0SubRC);
-  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
-                                                       AMDGPU::sub0, Src1SubRC);
-
-  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
-  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
-
-  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
-  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
-    .addOperand(SrcReg0Sub0)
-    .addOperand(SrcReg1Sub0);
-
-  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
-                                                       AMDGPU::sub1, Src0SubRC);
-  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
-                                                       AMDGPU::sub1, Src1SubRC);
-
-  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
-  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
-    .addOperand(SrcReg0Sub1)
-    .addOperand(SrcReg1Sub1);
-
-  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
-  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
-    .addReg(DestSub0)
-    .addImm(AMDGPU::sub0)
-    .addReg(DestSub1)
-    .addImm(AMDGPU::sub1);
-
-  MRI.replaceRegWith(Dest.getReg(), FullDestReg);
-
-  // Try to legalize the operands in case we need to swap the order to keep it
-  // valid.
-  Worklist.push_back(LoHalf);
-  Worklist.push_back(HiHalf);
-}
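The binary variant is the same decomposition with two sources. It is only valid for operations that act independently on each 32-bit half (AND/OR/XOR and the like), which is what the callers pass in; for example, in plain C++:

  #include <cstdint>

  uint64_t and64_split(uint64_t A, uint64_t B) {
    uint32_t Lo = static_cast<uint32_t>(A) & static_cast<uint32_t>(B);
    uint32_t Hi = static_cast<uint32_t>(A >> 32) & static_cast<uint32_t>(B >> 32);
    return (static_cast<uint64_t>(Hi) << 32) | Lo;
  }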
-
-void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
-                                       MachineInstr *Inst) const {
-  MachineBasicBlock &MBB = *Inst->getParent();
-  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-
-  MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst->getDebugLoc();
-
-  MachineOperand &Dest = Inst->getOperand(0);
-  MachineOperand &Src = Inst->getOperand(1);
-
-  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
-  const TargetRegisterClass *SrcRC = Src.isReg() ?
-    MRI.getRegClass(Src.getReg()) :
-    &AMDGPU::SGPR_32RegClass;
-
-  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-
-  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
-
-  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
-                                                      AMDGPU::sub0, SrcSubRC);
-  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
-                                                      AMDGPU::sub1, SrcSubRC);
-
-  MachineInstr *First = BuildMI(MBB, MII, DL, InstDesc, MidReg)
-    .addOperand(SrcRegSub0)
-    .addImm(0);
-
-  MachineInstr *Second = BuildMI(MBB, MII, DL, InstDesc, ResultReg)
-    .addOperand(SrcRegSub1)
-    .addReg(MidReg);
-
-  MRI.replaceRegWith(Dest.getReg(), ResultReg);
-
-  Worklist.push_back(First);
-  Worklist.push_back(Second);
-}
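A 64-bit population count is the sum of the counts of the two halves, and the VALU V_BCNT_U32_B32 adds its second operand to the count, so chaining MidReg into the second instruction produces the sum for free. The identity in plain C++ (a sketch, not the patch's code):

  #include <cstdint>

  uint32_t popcount32(uint32_t X) {
    uint32_t N = 0;
    for (; X; X &= X - 1)  // clear the lowest set bit
      ++N;
    return N;
  }

  uint32_t popcount64_split(uint64_t X) {
    uint32_t Mid = popcount32(static_cast<uint32_t>(X)) + 0;  // first V_BCNT, addImm(0)
    return popcount32(static_cast<uint32_t>(X >> 32)) + Mid;  // second V_BCNT, addReg(MidReg)
  }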
-
-void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
-                                      MachineInstr *Inst) const {
-  MachineBasicBlock &MBB = *Inst->getParent();
-  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
-  MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst->getDebugLoc();
-
-  MachineOperand &Dest = Inst->getOperand(0);
-  uint32_t Imm = Inst->getOperand(2).getImm();
-  uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
-  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
-
-  (void) Offset;
-
-  // Only sext_inreg cases handled.
-  assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 &&
-         BitWidth <= 32 &&
-         Offset == 0 &&
-         "Not implemented");
-
-  if (BitWidth < 32) {
-    unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-    unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-    unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
-
-    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
-      .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0)
-      .addImm(0)
-      .addImm(BitWidth);
-
-    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
-      .addImm(31)
-      .addReg(MidRegLo);
-
-    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
-      .addReg(MidRegLo)
-      .addImm(AMDGPU::sub0)
-      .addReg(MidRegHi)
-      .addImm(AMDGPU::sub1);
-
-    MRI.replaceRegWith(Dest.getReg(), ResultReg);
-    return;
-  }
-
-  MachineOperand &Src = Inst->getOperand(1);
-  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
-
-  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
-    .addImm(31)
-    .addReg(Src.getReg(), 0, AMDGPU::sub0);
-
-  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
-    .addReg(Src.getReg(), 0, AMDGPU::sub0)
-    .addImm(AMDGPU::sub0)
-    .addReg(TmpReg)
-    .addImm(AMDGPU::sub1);
-
-  MRI.replaceRegWith(Dest.getReg(), ResultReg);
-}
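Both paths implement a 64-bit sign-extend-in-register from the low BitWidth bits: the narrow case sign-extends within the low dword first (V_BFE_I32), and either way the high dword is the (sign-extended) low dword shifted arithmetically right by 31. Equivalently, in plain C++, assuming the usual two's-complement arithmetic right shift on signed values:

  #include <cassert>
  #include <cstdint>

  int64_t sextInReg64(uint64_t X, unsigned Width) {
    assert(Width >= 1 && Width <= 32);
    uint64_t Lo = X & 0xffffffffu;  // only the low dword is read
    unsigned Shift = 64 - Width;
    // Sign-extend bits [Width-1:0] to 64 bits.
    return static_cast<int64_t>(Lo << Shift) >> Shift;
  }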
-
-void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
-                                        MachineInstr *Inst) const {
-  // Add the implicit and explicit register definitions.
-  if (NewDesc.ImplicitUses) {
-    for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
-      unsigned Reg = NewDesc.ImplicitUses[i];
-      Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
-    }
-  }
-
-  if (NewDesc.ImplicitDefs) {
-    for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
-      unsigned Reg = NewDesc.ImplicitDefs[i];
-      Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
-    }
-  }
-}
-
-unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
-                                   int OpIndices[3]) const {
-  const MCInstrDesc &Desc = get(MI->getOpcode());
-
-  // Find the one SGPR operand we are allowed to use.
-  unsigned SGPRReg = AMDGPU::NoRegister;
-
-  // First we need to consider the instruction's operand requirements before
-  // legalizing. Some operands are required to be SGPRs, such as implicit uses
-  // of VCC, but we are still bound by the constant bus requirement to only use
-  // one.
-  //
-  // If the operand's class is an SGPR, we can never move it.
-
-  for (const MachineOperand &MO : MI->implicit_operands()) {
-    // We only care about reads.
-    if (MO.isDef())
-      continue;
-
-    if (MO.getReg() == AMDGPU::VCC)
-      return AMDGPU::VCC;
-
-    if (MO.getReg() == AMDGPU::FLAT_SCR)
-      return AMDGPU::FLAT_SCR;
-  }
-
-  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
-
-  for (unsigned i = 0; i < 3; ++i) {
-    int Idx = OpIndices[i];
-    if (Idx == -1)
-      break;
-
-    const MachineOperand &MO = MI->getOperand(Idx);
-    if (RI.isSGPRClassID(Desc.OpInfo[Idx].RegClass))
-      SGPRReg = MO.getReg();
-
-    if (MO.isReg() && RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
-      UsedSGPRs[i] = MO.getReg();
-  }
-
-  if (SGPRReg != AMDGPU::NoRegister)
-    return SGPRReg;
-
-  // We don't have a required SGPR operand, so we have a bit more freedom in
-  // selecting operands to move.
-
-  // Try to select the most used SGPR. If an SGPR is equal to one of the
-  // others, we choose that.
-  //
-  // e.g.
-  // V_FMA_F32 v0, s0, s0, s0 -> No moves
-  // V_FMA_F32 v0, s0, s1, s0 -> Move s1
-
-  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
-    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
-      SGPRReg = UsedSGPRs[0];
-  }
-
-  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
-    if (UsedSGPRs[1] == UsedSGPRs[2])
-      SGPRReg = UsedSGPRs[1];
-  }
-
-  return SGPRReg;
-}
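The heuristic above exists because the constant-bus restriction is on distinct SGPRs, not on SGPR operand slots: several reads of one SGPR are fine, so keeping the most-reused SGPR minimizes the number of operands that must be copied to VGPRs. Stated as a predicate (a sketch; 0 plays the role of AMDGPU::NoRegister):

  bool constantBusLegal(const unsigned UsedSGPRs[3]) {
    unsigned Seen = 0;
    for (int i = 0; i < 3; ++i) {
      if (UsedSGPRs[i] == 0)
        continue;                       // not an SGPR operand
      if (Seen != 0 && UsedSGPRs[i] != Seen)
        return false;                   // a second distinct SGPR
      Seen = UsedSGPRs[i];
    }
    return true;                        // at most one distinct SGPR read
  }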
-
-MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
-                                   MachineBasicBlock *MBB,
-                                   MachineBasicBlock::iterator I,
-                                   unsigned ValueReg,
-                                   unsigned Address, unsigned OffsetReg) const {
-  const DebugLoc &DL = MBB->findDebugLoc(I);
-  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
-                                      getIndirectIndexBegin(*MBB->getParent()));
-
-  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
-          .addReg(IndirectBaseReg, RegState::Define)
-          .addOperand(I->getOperand(0))
-          .addReg(IndirectBaseReg)
-          .addReg(OffsetReg)
-          .addImm(0)
-          .addReg(ValueReg);
-}
-
-MachineInstrBuilder SIInstrInfo::buildIndirectRead(
-                                   MachineBasicBlock *MBB,
-                                   MachineBasicBlock::iterator I,
-                                   unsigned ValueReg,
-                                   unsigned Address, unsigned OffsetReg) const {
-  const DebugLoc &DL = MBB->findDebugLoc(I);
-  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
-                                      getIndirectIndexBegin(*MBB->getParent()));
-
-  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
-          .addOperand(I->getOperand(0))
-          .addOperand(I->getOperand(1))
-          .addReg(IndirectBaseReg)
-          .addReg(OffsetReg)
-          .addImm(0);
-}
-
-void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
-                                            const MachineFunction &MF) const {
-  int End = getIndirectIndexEnd(MF);
-  int Begin = getIndirectIndexBegin(MF);
-
-  if (End == -1)
-    return;
-
-  for (int Index = Begin; Index <= End; ++Index)
-    Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));
-
-  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
-    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
-
-  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
-    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
-
-  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
-    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
-
-  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
-    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
-
-  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
-    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
-}
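The staggered "Begin - (K - 1)" start indices reserve every wider register tuple that overlaps the scalar range: a K-dword tuple starting at index I covers I .. I+K-1, so it intersects [Begin, End] exactly when I >= Begin - (K - 1) and I <= End. As a one-line check:

  // K = 2 for VReg_64, 3 for VReg_96, then 4, 8, 16 for the wider classes.
  bool overlapsReserved(int I, int K, int Begin, int End) {
    return I + K - 1 >= Begin && I <= End;
  }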
-
-MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
-                                             unsigned OperandName) const {
-  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
-  if (Idx == -1)
-    return nullptr;
-
-  return &MI.getOperand(Idx);
-}
-
-uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
-  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
-  if (ST.isAmdHsaOS())
-    RsrcDataFormat |= (1ULL << 56);
-
-  return RsrcDataFormat;
-}

Removed: llvm/trunk/lib/Target/R600/SIInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstrInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstrInfo.h (original)
+++ llvm/trunk/lib/Target/R600/SIInstrInfo.h (removed)
@@ -1,391 +0,0 @@
-//===-- SIInstrInfo.h - SI Instruction Info Interface -----------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface definition for SIInstrInfo.
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_LIB_TARGET_R600_SIINSTRINFO_H
-#define LLVM_LIB_TARGET_R600_SIINSTRINFO_H
-
-#include "AMDGPUInstrInfo.h"
-#include "SIDefines.h"
-#include "SIRegisterInfo.h"
-
-namespace llvm {
-
-class SIInstrInfo : public AMDGPUInstrInfo {
-private:
-  const SIRegisterInfo RI;
-
-  unsigned buildExtractSubReg(MachineBasicBlock::iterator MI,
-                              MachineRegisterInfo &MRI,
-                              MachineOperand &SuperReg,
-                              const TargetRegisterClass *SuperRC,
-                              unsigned SubIdx,
-                              const TargetRegisterClass *SubRC) const;
-  MachineOperand buildExtractSubRegOrImm(MachineBasicBlock::iterator MI,
-                                         MachineRegisterInfo &MRI,
-                                         MachineOperand &SuperReg,
-                                         const TargetRegisterClass *SuperRC,
-                                         unsigned SubIdx,
-                                         const TargetRegisterClass *SubRC) const;
-
-  unsigned split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
-                         MachineBasicBlock::iterator MI,
-                         MachineRegisterInfo &MRI,
-                         const TargetRegisterClass *RC,
-                         const MachineOperand &Op) const;
-
-  void swapOperands(MachineBasicBlock::iterator Inst) const;
-
-  void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
-                               MachineInstr *Inst, unsigned Opcode) const;
-
-  void splitScalar64BitBinaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
-                                MachineInstr *Inst, unsigned Opcode) const;
-
-  void splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
-                            MachineInstr *Inst) const;
-  void splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
-                           MachineInstr *Inst) const;
-
-  void addDescImplicitUseDef(const MCInstrDesc &Desc, MachineInstr *MI) const;
-
-  bool checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
-                                    MachineInstr *MIb) const;
-
-  unsigned findUsedSGPR(const MachineInstr *MI, int OpIndices[3]) const;
-
-public:
-  explicit SIInstrInfo(const AMDGPUSubtarget &st);
-
-  const SIRegisterInfo &getRegisterInfo() const override {
-    return RI;
-  }
-
-  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const override;
-
-  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
-                               int64_t &Offset1,
-                               int64_t &Offset2) const override;
-
-  bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                            unsigned &BaseReg, unsigned &Offset,
-                            const TargetRegisterInfo *TRI) const final;
-
-  bool shouldClusterLoads(MachineInstr *FirstLdSt,
-                          MachineInstr *SecondLdSt,
-                          unsigned NumLoads) const final;
-
-  void copyPhysReg(MachineBasicBlock &MBB,
-                   MachineBasicBlock::iterator MI, DebugLoc DL,
-                   unsigned DestReg, unsigned SrcReg,
-                   bool KillSrc) const override;
-
-  unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MI,
-                                    RegScavenger *RS,
-                                    unsigned TmpReg,
-                                    unsigned Offset,
-                                    unsigned Size) const;
-
-  void storeRegToStackSlot(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MI,
-                           unsigned SrcReg, bool isKill, int FrameIndex,
-                           const TargetRegisterClass *RC,
-                           const TargetRegisterInfo *TRI) const override;
-
-  void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                            MachineBasicBlock::iterator MI,
-                            unsigned DestReg, int FrameIndex,
-                            const TargetRegisterClass *RC,
-                            const TargetRegisterInfo *TRI) const override;
-
-  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
-
-  /// \brief Returns an opcode that can be used to move a value to a \p DstRC
-  /// register.  If there is no hardware instruction that can store to \p
-  /// DstRC, then AMDGPU::COPY is returned.
-  unsigned getMovOpcode(const TargetRegisterClass *DstRC) const;
-  unsigned commuteOpcode(const MachineInstr &MI) const;
-
-  MachineInstr *commuteInstruction(MachineInstr *MI,
-                                   bool NewMI = false) const override;
-  bool findCommutedOpIndices(MachineInstr *MI,
-                             unsigned &SrcOpIdx1,
-                             unsigned &SrcOpIdx2) const override;
-
-  bool isTriviallyReMaterializable(const MachineInstr *MI,
-                                   AliasAnalysis *AA = nullptr) const;
-
-  bool areMemAccessesTriviallyDisjoint(
-    MachineInstr *MIa, MachineInstr *MIb,
-    AliasAnalysis *AA = nullptr) const override;
-
-  MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
-                              MachineBasicBlock::iterator I,
-                              unsigned DstReg, unsigned SrcReg) const override;
-  bool isMov(unsigned Opcode) const override;
-
-  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
-
-  bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
-                     unsigned Reg, MachineRegisterInfo *MRI) const final;
-
-  unsigned getMachineCSELookAheadLimit() const override { return 500; }
-
-  bool isSALU(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SALU;
-  }
-
-  bool isVALU(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VALU;
-  }
-
-  bool isSOP1(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SOP1;
-  }
-
-  bool isSOP2(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SOP2;
-  }
-
-  bool isSOPC(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SOPC;
-  }
-
-  bool isSOPK(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SOPK;
-  }
-
-  bool isSOPP(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SOPP;
-  }
-
-  bool isVOP1(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VOP1;
-  }
-
-  bool isVOP2(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VOP2;
-  }
-
-  bool isVOP3(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VOP3;
-  }
-
-  bool isVOPC(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VOPC;
-  }
-
-  bool isMUBUF(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
-  }
-
-  bool isMTBUF(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
-  }
-
-  bool isSMRD(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::SMRD;
-  }
-
-  bool isDS(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::DS;
-  }
-
-  bool isMIMG(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::MIMG;
-  }
-
-  bool isFLAT(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::FLAT;
-  }
-
-  bool isWQM(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::WQM;
-  }
-
-  bool isVGPRSpill(uint16_t Opcode) const {
-    return get(Opcode).TSFlags & SIInstrFlags::VGPRSpill;
-  }
-
-  bool isInlineConstant(const APInt &Imm) const;
-  bool isInlineConstant(const MachineOperand &MO, unsigned OpSize) const;
-  bool isLiteralConstant(const MachineOperand &MO, unsigned OpSize) const;
-
-  bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
-                         const MachineOperand &MO) const;
-
-  /// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
-  /// This function will return false if you pass it a 32-bit instruction.
-  bool hasVALU32BitEncoding(unsigned Opcode) const;
-
-  /// \brief Returns true if this operand uses the constant bus.
-  bool usesConstantBus(const MachineRegisterInfo &MRI,
-                       const MachineOperand &MO,
-                       unsigned OpSize) const;
-
-  /// \brief Return true if this instruction has any modifiers.
-  ///  e.g. src[012]_mod, omod, clamp.
-  bool hasModifiers(unsigned Opcode) const;
-
-  bool hasModifiersSet(const MachineInstr &MI,
-                       unsigned OpName) const;
-
-  bool verifyInstruction(const MachineInstr *MI,
-                         StringRef &ErrInfo) const override;
-
-  static unsigned getVALUOp(const MachineInstr &MI);
-
-  bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
-
-  /// \brief Return the correct register class for \p OpNo.  For target-specific
-  /// instructions, this will return the register class that has been defined
-  /// in tablegen.  For generic instructions, like REG_SEQUENCE, it will return
-  /// the register class of its machine operand, using the other operands to
-  /// infer the correct register class.
-  const TargetRegisterClass *getOpRegClass(const MachineInstr &MI,
-                                           unsigned OpNo) const;
-
-  /// \brief Return the size in bytes of the operand \p OpNo for the given
-  /// instruction opcode.
-  unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
-    const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo];
-
-    if (OpInfo.RegClass == -1) {
-      // If this is an immediate operand, this must be a 32-bit literal.
-      assert(OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE);
-      return 4;
-    }
-
-    return RI.getRegClass(OpInfo.RegClass)->getSize();
-  }
-
-  /// \brief This form should usually be preferred since it handles operands
-  /// with unknown register classes.
-  unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const {
-    return getOpRegClass(MI, OpNo)->getSize();
-  }
-
-  /// \returns true if it is legal for the operand at index \p OpNo
-  /// to read a VGPR.
-  bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const;
-
-  /// \brief Legalize the \p OpIndex operand of this instruction by inserting
-  /// a MOV.  For example:
-  /// ADD_I32_e32 VGPR0, 15
-  /// to
-  /// MOV VGPR1, 15
-  /// ADD_I32_e32 VGPR0, VGPR1
-  ///
-  /// If the operand being legalized is a register, then a COPY will be used
-  /// instead of MOV.
-  void legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const;
-
-  /// \brief Check if \p MO would be a legal operand at index \p OpIdx of
-  /// \p MI.
-  bool isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
-                      const MachineOperand *MO = nullptr) const;
-
-  /// \brief Legalize all operands in this instruction.  This function may
-  /// create new instructions and insert them before \p MI.
-  void legalizeOperands(MachineInstr *MI) const;
-
-  /// \brief Split an SMRD instruction into two smaller loads of half the
-  /// size, storing the results in \p Lo and \p Hi.
-  void splitSMRD(MachineInstr *MI, const TargetRegisterClass *HalfRC,
-                 unsigned HalfImmOp, unsigned HalfSGPROp,
-                 MachineInstr *&Lo, MachineInstr *&Hi) const;
-
-  void moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const;
-
-  /// \brief Replace this instruction's opcode with the equivalent VALU
-  /// opcode.  This function will also move the users of \p MI to the
-  /// VALU if necessary.
-  void moveToVALU(MachineInstr &MI) const;
-
-  unsigned calculateIndirectAddress(unsigned RegIndex,
-                                    unsigned Channel) const override;
-
-  const TargetRegisterClass *getIndirectAddrRegClass() const override;
-
-  MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
-                                         MachineBasicBlock::iterator I,
-                                         unsigned ValueReg,
-                                         unsigned Address,
-                                         unsigned OffsetReg) const override;
-
-  MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
-                                        MachineBasicBlock::iterator I,
-                                        unsigned ValueReg,
-                                        unsigned Address,
-                                        unsigned OffsetReg) const override;
-  void reserveIndirectRegisters(BitVector &Reserved,
-                                const MachineFunction &MF) const;
-
-  void LoadM0(MachineInstr *MoveRel, MachineBasicBlock::iterator I,
-              unsigned SavReg, unsigned IndexReg) const;
-
-  void insertNOPs(MachineBasicBlock::iterator MI, int Count) const;
-
-  /// \brief Returns the operand named \p Op.  If \p MI does not have an
-  /// operand named \p Op, this function returns nullptr.
-  MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const;
-
-  const MachineOperand *getNamedOperand(const MachineInstr &MI,
-                                        unsigned OpName) const {
-    return getNamedOperand(const_cast<MachineInstr &>(MI), OpName);
-  }
-
-  uint64_t getDefaultRsrcDataFormat() const;
-
-};
-
-namespace AMDGPU {
-
-  int getVOPe64(uint16_t Opcode);
-  int getVOPe32(uint16_t Opcode);
-  int getCommuteRev(uint16_t Opcode);
-  int getCommuteOrig(uint16_t Opcode);
-  int getAddr64Inst(uint16_t Opcode);
-  int getAtomicRetOp(uint16_t Opcode);
-  int getAtomicNoRetOp(uint16_t Opcode);
-
-  const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
-  const uint64_t RSRC_TID_ENABLE = 1LL << 55;
-
-} // End namespace AMDGPU
-
-namespace SI {
-namespace KernelInputOffsets {
-
-/// Offsets in bytes from the start of the input buffer
-enum Offsets {
-  NGROUPS_X = 0,
-  NGROUPS_Y = 4,
-  NGROUPS_Z = 8,
-  GLOBAL_SIZE_X = 12,
-  GLOBAL_SIZE_Y = 16,
-  GLOBAL_SIZE_Z = 20,
-  LOCAL_SIZE_X = 24,
-  LOCAL_SIZE_Y = 28,
-  LOCAL_SIZE_Z = 32
-};
-
-} // End namespace KernelInputOffsets
-} // End namespace SI
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstrInfo.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/R600/SIInstrInfo.td (removed)
@@ -1,2647 +0,0 @@
-//===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-def isCI : Predicate<"Subtarget->getGeneration() "
-                      ">= AMDGPUSubtarget::SEA_ISLANDS">;
-def isVI : Predicate <
-  "Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS">,
-  AssemblerPredicate<"FeatureGCN3Encoding">;
-
-def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;
-
-class vop {
-  field bits<9> SI3;
-  field bits<10> VI3;
-}
-
-class vopc <bits<8> si, bits<8> vi = !add(0x40, si)> : vop {
-  field bits<8> SI = si;
-  field bits<8> VI = vi;
-
-  field bits<9>  SI3 = {0, si{7-0}};
-  field bits<10> VI3 = {0, 0, vi{7-0}};
-}
-
-class vop1 <bits<8> si, bits<8> vi = si> : vop {
-  field bits<8> SI = si;
-  field bits<8> VI = vi;
-
-  field bits<9>  SI3 = {1, 1, si{6-0}};
-  field bits<10> VI3 = !add(0x140, vi);
-}
-
-class vop2 <bits<6> si, bits<6> vi = si> : vop {
-  field bits<6> SI = si;
-  field bits<6> VI = vi;
-
-  field bits<9>  SI3 = {1, 0, 0, si{5-0}};
-  field bits<10> VI3 = {0, 1, 0, 0, vi{5-0}};
-}
-
-// Specify a VOP2 opcode for SI and a VOP3 opcode for VI, for an
-// instruction that has no VOP2 encoding on VI.
-class vop23 <bits<6> si, bits<10> vi> : vop2 <si> {
-  let VI3 = vi;
-}
-
-class vop3 <bits<9> si, bits<10> vi = {0, si}> : vop {
-  let SI3 = si;
-  let VI3 = vi;
-}
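The brace-list initializers above are TableGen bit concatenations. Written out as plain integer arithmetic (a sketch mirroring the patterns above, not any LLVM API), the VOP3 opcodes derive from the 32-bit ones as follows, and a vopc's VI opcode additionally defaults to si + 0x40:

  #include <cstdint>

  uint16_t vopc_si3(uint8_t si) { return si; }                  // {0, si[7:0]}
  uint16_t vopc_vi3(uint8_t vi) { return vi; }                  // {0, 0, vi[7:0]}
  uint16_t vop1_si3(uint8_t si) { return 0x180 | (si & 0x7f); } // {1, 1, si[6:0]}
  uint16_t vop1_vi3(uint8_t vi) { return 0x140 + vi; }          // !add(0x140, vi)
  uint16_t vop2_si3(uint8_t si) { return 0x100 | (si & 0x3f); } // {1, 0, 0, si[5:0]}
  uint16_t vop2_vi3(uint8_t vi) { return 0x100 | (vi & 0x3f); } // {0, 1, 0, 0, vi[5:0]}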
-
-class sop1 <bits<8> si, bits<8> vi = si> {
-  field bits<8> SI = si;
-  field bits<8> VI = vi;
-}
-
-class sop2 <bits<7> si, bits<7> vi = si> {
-  field bits<7> SI = si;
-  field bits<7> VI = vi;
-}
-
-class sopk <bits<5> si, bits<5> vi = si> {
-  field bits<5> SI = si;
-  field bits<5> VI = vi;
-}
-
-// Except for the NONE field, this must be kept in sync with the SISubtarget
-// enum in AMDGPUInstrInfo.cpp.
-def SISubtarget {
-  int NONE = -1;
-  int SI = 0;
-  int VI = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// SI DAG Nodes
-//===----------------------------------------------------------------------===//
-
-def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
-  SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i32>]>,
-                      [SDNPMayLoad, SDNPMemOperand]
->;
-
-def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
-  SDTypeProfile<0, 13,
-    [SDTCisVT<0, v4i32>,   // rsrc(SGPR)
-     SDTCisVT<1, iAny>,   // vdata(VGPR)
-     SDTCisVT<2, i32>,    // num_channels(imm)
-     SDTCisVT<3, i32>,    // vaddr(VGPR)
-     SDTCisVT<4, i32>,    // soffset(SGPR)
-     SDTCisVT<5, i32>,    // inst_offset(imm)
-     SDTCisVT<6, i32>,    // dfmt(imm)
-     SDTCisVT<7, i32>,    // nfmt(imm)
-     SDTCisVT<8, i32>,    // offen(imm)
-     SDTCisVT<9, i32>,    // idxen(imm)
-     SDTCisVT<10, i32>,   // glc(imm)
-     SDTCisVT<11, i32>,   // slc(imm)
-     SDTCisVT<12, i32>    // tfe(imm)
-    ]>,
-  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]
->;
-
-def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
-  SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i16>,
-                       SDTCisVT<3, i32>]>
->;
-
-class SDSample<string opcode> : SDNode <opcode,
-  SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
-                       SDTCisVT<3, v4i32>, SDTCisVT<4, i32>]>
->;
-
-def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
-def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
-def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
-def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
-
-def SIconstdata_ptr : SDNode<
-  "AMDGPUISD::CONST_DATA_PTR", SDTypeProfile <1, 0, [SDTCisVT<0, i64>]>
->;
-
-//===----------------------------------------------------------------------===//
-// SDNodes and PatFrag for local loads and stores to enable s_mov_b32 m0, -1
-// to be glued to the memory instructions.
-//===----------------------------------------------------------------------===//
-
-def SIld_local : SDNode <"ISD::LOAD", SDTLoad,
-  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
->;
-
-def si_ld_local : PatFrag <(ops node:$ptr), (SIld_local node:$ptr), [{
-  return isLocalLoad(cast<LoadSDNode>(N));
-}]>;
-
-def si_load_local : PatFrag <(ops node:$ptr), (si_ld_local node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED &&
-         cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-}]>;
-
-def si_load_local_align8 : Aligned8Bytes <
-  (ops node:$ptr), (si_load_local node:$ptr)
->;
-
-def si_sextload_local : PatFrag <(ops node:$ptr), (si_ld_local node:$ptr), [{
-  return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
-}]>;
-def si_az_extload_local : AZExtLoadBase <si_ld_local>;
-
-multiclass SIExtLoadLocal <PatFrag ld_node> {
-
-  def _i8 : PatFrag <(ops node:$ptr), (ld_node node:$ptr),
-                     [{return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;}]
-  >;
-
-  def _i16 : PatFrag <(ops node:$ptr), (ld_node node:$ptr),
-                     [{return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;}]
-  >;
-}
-
-defm si_sextload_local : SIExtLoadLocal <si_sextload_local>;
-defm si_az_extload_local : SIExtLoadLocal <si_az_extload_local>;
-
-def SIst_local : SDNode <"ISD::STORE", SDTStore,
-  [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
->;
-
-def si_st_local : PatFrag <
-  (ops node:$val, node:$ptr), (SIst_local node:$val, node:$ptr), [{
-  return isLocalStore(cast<StoreSDNode>(N));
-}]>;
-
-def si_store_local : PatFrag <
-  (ops node:$val, node:$ptr), (si_st_local node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED &&
-         !cast<StoreSDNode>(N)->isTruncatingStore();
-}]>;
-
-def si_store_local_align8 : Aligned8Bytes <
-  (ops node:$val, node:$ptr), (si_store_local node:$val, node:$ptr)
->;
-
-def si_truncstore_local : PatFrag <
-  (ops node:$val, node:$ptr), (si_st_local node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->isTruncatingStore();
-}]>;
-
-def si_truncstore_local_i8 : PatFrag <
-  (ops node:$val, node:$ptr), (si_truncstore_local node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-}]>;
-
-def si_truncstore_local_i16 : PatFrag <
-  (ops node:$val, node:$ptr), (si_truncstore_local node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-}]>;
-
-multiclass SIAtomicM0Glue2 <string op_name> {
-
-  def _glue : SDNode <"ISD::ATOMIC_"#op_name, SDTAtomic2,
-    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
-  >;
-
-  def _local : local_binary_atomic_op <!cast<SDNode>(NAME#"_glue")>;
-}
-
-defm si_atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
-defm si_atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
-defm si_atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
-defm si_atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
-defm si_atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
-defm si_atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
-defm si_atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
-defm si_atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
-defm si_atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
-defm si_atomic_swap : SIAtomicM0Glue2 <"SWAP">;
-
-def si_atomic_cmp_swap_glue : SDNode <"ISD::ATOMIC_CMP_SWAP", SDTAtomic3,
-  [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
->;
-
-defm si_atomic_cmp_swap : AtomicCmpSwapLocal <si_atomic_cmp_swap_glue>;
-
-// Transformation function: extract the lower 32 bits of a 64-bit immediate.
-def LO32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, SDLoc(N),
-                                   MVT::i32);
-}]>;
-
-def LO32f : SDNodeXForm<fpimm, [{
-  APInt V = N->getValueAPF().bitcastToAPInt().trunc(32);
-  return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
-}]>;
-
-// Transformation function: extract the upper 32 bits of a 64-bit immediate.
-def HI32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() >> 32, SDLoc(N), MVT::i32);
-}]>;
-
-def HI32f : SDNodeXForm<fpimm, [{
-  APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32);
-  return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), SDLoc(N),
-                                     MVT::f32);
-}]>;
-
-def IMM8bitDWORD : PatLeaf <(imm),
-  [{return (N->getZExtValue() & ~0x3FC) == 0;}]
->;
-
-def as_dword_i32imm : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() >> 2, SDLoc(N), MVT::i32);
-}]>;
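IMM8bitDWORD and as_dword_i32imm work as a pair: the predicate accepts byte offsets whose only set bits lie in [9:2] (i.e. multiples of 4 below 1024), and the transform then encodes the offset divided by 4, which fits in 8 bits. In plain C++ (illustrative names):

  #include <cstdint>

  bool isImm8bitDword(uint64_t V) { return (V & ~0x3FCull) == 0; }
  uint32_t asDwordImm(uint64_t V) { return static_cast<uint32_t>(V >> 2); }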
-
-def as_i1imm : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
-}]>;
-
-def as_i8imm : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
-}]>;
-
-def as_i16imm : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
-}]>;
-
-def as_i32imm: SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def as_i64imm: SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
-}]>;
-
-// Copied from the AArch64 backend:
-def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
-return CurDAG->getTargetConstant(
-  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-// Copied from the AArch64 backend:
-def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
-return CurDAG->getTargetConstant(
-  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
-}]>;
-
-def IMM8bit : PatLeaf <(imm),
-  [{return isUInt<8>(N->getZExtValue());}]
->;
-
-def IMM12bit : PatLeaf <(imm),
-  [{return isUInt<12>(N->getZExtValue());}]
->;
-
-def IMM16bit : PatLeaf <(imm),
-  [{return isUInt<16>(N->getZExtValue());}]
->;
-
-def IMM20bit : PatLeaf <(imm),
-  [{return isUInt<20>(N->getZExtValue());}]
->;
-
-def IMM32bit : PatLeaf <(imm),
-  [{return isUInt<32>(N->getZExtValue());}]
->;
-
-def mubuf_vaddr_offset : PatFrag<
-  (ops node:$ptr, node:$offset, node:$imm_offset),
-  (add (add node:$ptr, node:$offset), node:$imm_offset)
->;
-
-class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
-  return isInlineImmediate(N);
-}]>;
-
-class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
-  return isInlineImmediate(N);
-}]>;
-
-class SGPRImm <dag frag> : PatLeaf<frag, [{
-  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-    return false;
-  }
-  const SIRegisterInfo *SIRI =
-      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
-  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
-                                                U != E; ++U) {
-    if (SIRI->isSGPRClass(getOperandRegClass(*U, U.getOperandNo()))) {
-      return true;
-    }
-  }
-  return false;
-}]>;
-
-//===----------------------------------------------------------------------===//
-// Custom Operands
-//===----------------------------------------------------------------------===//
-
-def FRAMEri32 : Operand<iPTR> {
-  let MIOperandInfo = (ops i32:$ptr, i32imm:$index);
-}
-
-def SoppBrTarget : AsmOperandClass {
-  let Name = "SoppBrTarget";
-  let ParserMethod = "parseSOppBrTarget";
-}
-
-def sopp_brtarget : Operand<OtherVT> {
-  let EncoderMethod = "getSOPPBrEncoding";
-  let OperandType = "OPERAND_PCREL";
-  let ParserMatchClass = SoppBrTarget;
-}
-
-include "SIInstrFormats.td"
-include "VIInstrFormats.td"
-
-def MubufOffsetMatchClass : AsmOperandClass {
-  let Name = "MubufOffset";
-  let ParserMethod = "parseMubufOptionalOps";
-  let RenderMethod = "addImmOperands";
-}
-
-class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
-  let Name = "DSOffset"#parser;
-  let ParserMethod = parser;
-  let RenderMethod = "addImmOperands";
-  let PredicateMethod = "isDSOffset";
-}
-
-def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
-def DSOffsetGDSMatchClass : DSOffsetBaseMatchClass <"parseDSOffsetOptional">;
-
-def DSOffset01MatchClass : AsmOperandClass {
-  let Name = "DSOffset1";
-  let ParserMethod = "parseDSOff01OptionalOps";
-  let RenderMethod = "addImmOperands";
-  let PredicateMethod = "isDSOffset01";
-}
-
-class GDSBaseMatchClass <string parser> : AsmOperandClass {
-  let Name = "GDS"#parser;
-  let PredicateMethod = "isImm";
-  let ParserMethod = parser;
-  let RenderMethod = "addImmOperands";
-}
-
-def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
-def GDS01MatchClass : GDSBaseMatchClass <"parseDSOff01OptionalOps">;
-
-class GLCBaseMatchClass <string parser> : AsmOperandClass {
-  let Name = "GLC"#parser;
-  let PredicateMethod = "isImm";
-  let ParserMethod = parser; 
-  let RenderMethod = "addImmOperands";
-}
-
-def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
-def GLCFlatMatchClass : GLCBaseMatchClass <"parseFlatOptionalOps">;
-
-class SLCBaseMatchClass <string parser> : AsmOperandClass {
-  let Name = "SLC"#parser;
-  let PredicateMethod = "isImm";
-  let ParserMethod = parser;
-  let RenderMethod = "addImmOperands";
-}
-
-def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
-def SLCFlatMatchClass : SLCBaseMatchClass <"parseFlatOptionalOps">;
-def SLCFlatAtomicMatchClass : SLCBaseMatchClass <"parseFlatAtomicOptionalOps">;
-
-class TFEBaseMatchClass <string parser> : AsmOperandClass {
-  let Name = "TFE"#parser;
-  let PredicateMethod = "isImm";
-  let ParserMethod = parser;
-  let RenderMethod = "addImmOperands";
-}
-
-def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
-def TFEFlatMatchClass : TFEBaseMatchClass <"parseFlatOptionalOps">;
-def TFEFlatAtomicMatchClass : TFEBaseMatchClass <"parseFlatAtomicOptionalOps">;
-
-def OModMatchClass : AsmOperandClass {
-  let Name = "OMod";
-  let PredicateMethod = "isImm";
-  let ParserMethod = "parseVOP3OptionalOps";
-  let RenderMethod = "addImmOperands";
-}
-
-def ClampMatchClass : AsmOperandClass {
-  let Name = "Clamp";
-  let PredicateMethod = "isImm";
-  let ParserMethod = "parseVOP3OptionalOps";
-  let RenderMethod = "addImmOperands";
-}
-
-let OperandType = "OPERAND_IMMEDIATE" in {
-
-def offen : Operand<i1> {
-  let PrintMethod = "printOffen";
-}
-def idxen : Operand<i1> {
-  let PrintMethod = "printIdxen";
-}
-def addr64 : Operand<i1> {
-  let PrintMethod = "printAddr64";
-}
-def mbuf_offset : Operand<i16> {
-  let PrintMethod = "printMBUFOffset";
-  let ParserMatchClass = MubufOffsetMatchClass;
-}
-class ds_offset_base <AsmOperandClass mc> : Operand<i16> {
-  let PrintMethod = "printDSOffset";
-  let ParserMatchClass = mc;
-}
-def ds_offset : ds_offset_base <DSOffsetMatchClass>;
-def ds_offset_gds : ds_offset_base <DSOffsetGDSMatchClass>;
-
-def ds_offset0 : Operand<i8> {
-  let PrintMethod = "printDSOffset0";
-  let ParserMatchClass = DSOffset01MatchClass;
-}
-def ds_offset1 : Operand<i8> {
-  let PrintMethod = "printDSOffset1";
-  let ParserMatchClass = DSOffset01MatchClass;
-}
-class gds_base <AsmOperandClass mc> : Operand <i1> {
-  let PrintMethod = "printGDS";
-  let ParserMatchClass = mc;
-}
-def gds : gds_base <GDSMatchClass>;
-
-def gds01 : gds_base <GDS01MatchClass>;
-
-class glc_base <AsmOperandClass mc> : Operand <i1> {
-  let PrintMethod = "printGLC";
-  let ParserMatchClass = mc;
-}
-
-def glc : glc_base <GLCMubufMatchClass>;
-def glc_flat : glc_base <GLCFlatMatchClass>;
-
-class slc_base <AsmOperandClass mc> : Operand <i1> {
-  let PrintMethod = "printSLC";
-  let ParserMatchClass = mc;
-}
-
-def slc : slc_base <SLCMubufMatchClass>;
-def slc_flat : slc_base <SLCFlatMatchClass>;
-def slc_flat_atomic : slc_base <SLCFlatAtomicMatchClass>;
-
-class tfe_base <AsmOperandClass mc> : Operand <i1> {
-  let PrintMethod = "printTFE";
-  let ParserMatchClass = mc;
-}
-
-def tfe : tfe_base <TFEMubufMatchClass>;
-def tfe_flat : tfe_base <TFEFlatMatchClass>;
-def tfe_flat_atomic : tfe_base <TFEFlatAtomicMatchClass>;
-
-def omod : Operand <i32> {
-  let PrintMethod = "printOModSI";
-  let ParserMatchClass = OModMatchClass;
-}
-
-def ClampMod : Operand <i1> {
-  let PrintMethod = "printClampSI";
-  let ParserMatchClass = ClampMatchClass;
-}
-
-} // End OperandType = "OPERAND_IMMEDIATE"
-
-def VOPDstS64 : VOPDstOperand <SReg_64>;
-
-//===----------------------------------------------------------------------===//
-// Complex patterns
-//===----------------------------------------------------------------------===//
-
-def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
-def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
-
-def MUBUFAddr32 : ComplexPattern<i64, 9, "SelectMUBUFAddr32">;
-def MUBUFAddr64 : ComplexPattern<i64, 7, "SelectMUBUFAddr64">;
-def MUBUFAddr64Atomic : ComplexPattern<i64, 5, "SelectMUBUFAddr64">;
-def MUBUFScratch : ComplexPattern<i64, 4, "SelectMUBUFScratch">;
-def MUBUFOffset : ComplexPattern<i64, 6, "SelectMUBUFOffset">;
-def MUBUFOffsetAtomic : ComplexPattern<i64, 4, "SelectMUBUFOffset">;
-
-def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
-def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
-def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
-def VOP3Mods  : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
-
-//===----------------------------------------------------------------------===//
-// SI assembler operands
-//===----------------------------------------------------------------------===//
-
-def SIOperand {
-  int ZERO = 0x80;
-  int VCC = 0x6A;
-  int FLAT_SCR = 0x68;
-}
-
-def SRCMODS {
-  int NONE = 0;
-  int NEG = 1;
-}
-
-def DSTCLAMP {
-  int NONE = 0;
-}
-
-def DSTOMOD {
-  int NONE = 0;
-}
-
-//===----------------------------------------------------------------------===//
-//
-// SI Instruction multiclass helpers.
-//
-// Instructions with _32 take 32-bit operands.
-// Instructions with _64 take 64-bit operands.
-//
-// VOP_* instructions can use either a 32-bit or 64-bit encoding.  The 32-bit
-// encoding is the standard encoding, but instructions that make use of
-// any of the instruction modifiers must use the 64-bit encoding.
-//
-// Instructions with _e32 use the 32-bit encoding.
-// Instructions with _e64 use the 64-bit encoding.
-//
-//===----------------------------------------------------------------------===//
-
-class SIMCInstr <string pseudo, int subtarget> {
-  string PseudoInstr = pseudo;
-  int Subtarget = subtarget;
-}
-
-//===----------------------------------------------------------------------===//
-// EXP classes
-//===----------------------------------------------------------------------===//
-
-class EXPCommon : InstSI<
-  (outs),
-  (ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
-       VGPR_32:$src0, VGPR_32:$src1, VGPR_32:$src2, VGPR_32:$src3),
-  "exp $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
-  [] > {
-
-  let EXP_CNT = 1;
-  let Uses = [EXEC];
-}
-
-multiclass EXP_m {
-
-  let isPseudo = 1, isCodeGenOnly = 1 in {
-    def "" : EXPCommon, SIMCInstr <"exp", SISubtarget.NONE> ;
-  }
-
-  def _si : EXPCommon, SIMCInstr <"exp", SISubtarget.SI>, EXPe;
-
-  def _vi : EXPCommon, SIMCInstr <"exp", SISubtarget.VI>, EXPe_vi;
-}
-
-//===----------------------------------------------------------------------===//
-// Scalar classes
-//===----------------------------------------------------------------------===//
-
-class SOP1_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  SOP1 <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class SOP1_Real_si <sop1 op, string opName, dag outs, dag ins, string asm> :
-  SOP1 <outs, ins, asm, []>,
-  SOP1e <op.SI>,
-  SIMCInstr<opName, SISubtarget.SI> {
-  let isCodeGenOnly = 0;
-  let AssemblerPredicates = [isSICI];
-}
-
-class SOP1_Real_vi <sop1 op, string opName, dag outs, dag ins, string asm> :
-  SOP1 <outs, ins, asm, []>,
-  SOP1e <op.VI>,
-  SIMCInstr<opName, SISubtarget.VI> {
-  let isCodeGenOnly = 0;
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass SOP1_m <sop1 op, string opName, dag outs, dag ins, string asm,
-                   list<dag> pattern> {
-
-  def "" : SOP1_Pseudo <opName, outs, ins, pattern>;
-
-  def _si : SOP1_Real_si <op, opName, outs, ins, asm>;
-
-  def _vi : SOP1_Real_vi <op, opName, outs, ins, asm>;
-
-}
-
-multiclass SOP1_32 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
-    op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0),
-    opName#" $dst, $src0", pattern
->;
-
-multiclass SOP1_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
-    op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0),
-    opName#" $dst, $src0", pattern
->;
-
-// no input, 64-bit output.
-multiclass SOP1_64_0 <sop1 op, string opName, list<dag> pattern> {
-  def "" : SOP1_Pseudo <opName, (outs SReg_64:$dst), (ins), pattern>;
-
-  def _si : SOP1_Real_si <op, opName, (outs SReg_64:$dst), (ins),
-    opName#" $dst"> {
-    let ssrc0 = 0;
-  }
-
-  def _vi : SOP1_Real_vi <op, opName, (outs SReg_64:$dst), (ins),
-    opName#" $dst"> {
-    let ssrc0 = 0;
-  }
-}
-
-// 64-bit input, no output
-multiclass SOP1_1 <sop1 op, string opName, list<dag> pattern> {
-  def "" : SOP1_Pseudo <opName, (outs), (ins SReg_64:$src0), pattern>;
-
-  def _si : SOP1_Real_si <op, opName, (outs), (ins SReg_64:$src0),
-    opName#" $src0"> {
-    let sdst = 0;
-  }
-
-  def _vi : SOP1_Real_vi <op, opName, (outs), (ins SReg_64:$src0),
-    opName#" $src0"> {
-    let sdst = 0;
-  }
-}
-
-// 64-bit input, 32-bit output.
-multiclass SOP1_32_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
-    op, opName, (outs SReg_32:$dst), (ins SSrc_64:$src0),
-    opName#" $dst, $src0", pattern
->;
-
-class SOP2_Pseudo<string opName, dag outs, dag ins, list<dag> pattern> :
-  SOP2<outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-  let Size = 4;
-
-  // Pseudo instructions have no encodings, but adding this field here allows
-  // us to write:
-  //   let sdst = xxx in { ... }
-  // around multiclasses that include both real and pseudo instructions.
-  field bits<7> sdst = 0;
-}
-
-class SOP2_Real_si<sop2 op, string opName, dag outs, dag ins, string asm> :
-  SOP2<outs, ins, asm, []>,
-  SOP2e<op.SI>,
-  SIMCInstr<opName, SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-}
-
-class SOP2_Real_vi<sop2 op, string opName, dag outs, dag ins, string asm> :
-  SOP2<outs, ins, asm, []>,
-  SOP2e<op.VI>,
-  SIMCInstr<opName, SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass SOP2_SELECT_32 <sop2 op, string opName, list<dag> pattern> {
-  def "" : SOP2_Pseudo <opName, (outs SReg_32:$dst),
-    (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc), pattern>;
-
-  def _si : SOP2_Real_si <op, opName, (outs SReg_32:$dst),
-    (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc),
-    opName#" $dst, $src0, $src1 [$scc]">;
-
-  def _vi : SOP2_Real_vi <op, opName, (outs SReg_32:$dst),
-    (ins SSrc_32:$src0, SSrc_32:$src1, SCCReg:$scc),
-    opName#" $dst, $src0, $src1 [$scc]">;
-}
-
-multiclass SOP2_m <sop2 op, string opName, dag outs, dag ins, string asm,
-                   list<dag> pattern> {
-
-  def "" : SOP2_Pseudo <opName, outs, ins, pattern>;
-
-  def _si : SOP2_Real_si <op, opName, outs, ins, asm>;
-
-  def _vi : SOP2_Real_vi <op, opName, outs, ins, asm>;
-
-}
-
-multiclass SOP2_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
-    op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
-    opName#" $dst, $src0, $src1", pattern
->;
-
-multiclass SOP2_64 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
-    op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_64:$src1),
-    opName#" $dst, $src0, $src1", pattern
->;
-
-multiclass SOP2_64_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
-    op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
-    opName#" $dst, $src0, $src1", pattern
->;
-
-class SOPC_Helper <bits<7> op, RegisterOperand rc, ValueType vt,
-                    string opName, PatLeaf cond> : SOPC <
-  op, (outs SCCReg:$dst), (ins rc:$src0, rc:$src1),
-  opName#" $src0, $src1", []>;
-
-class SOPC_32<bits<7> op, string opName, PatLeaf cond = COND_NULL>
-  : SOPC_Helper<op, SSrc_32, i32, opName, cond>;
-
-class SOPC_64<bits<7> op, string opName, PatLeaf cond = COND_NULL>
-  : SOPC_Helper<op, SSrc_64, i64, opName, cond>;
-
-class SOPK_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  SOPK <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class SOPK_Real_si <sopk op, string opName, dag outs, dag ins, string asm> :
-  SOPK <outs, ins, asm, []>,
-  SOPKe <op.SI>,
-  SIMCInstr<opName, SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-  let isCodeGenOnly = 0;
-}
-
-class SOPK_Real_vi <sopk op, string opName, dag outs, dag ins, string asm> :
-  SOPK <outs, ins, asm, []>,
-  SOPKe <op.VI>,
-  SIMCInstr<opName, SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-  let isCodeGenOnly = 0;
-}
-
-multiclass SOPK_m <sopk op, string opName, dag outs, dag ins, string opAsm,
-                   string asm = opName#opAsm> {
-  def "" : SOPK_Pseudo <opName, outs, ins, []>;
-
-  def _si : SOPK_Real_si <op, opName, outs, ins, asm>;
-
-  def _vi : SOPK_Real_vi <op, opName, outs, ins, asm>;
-
-}
-
-multiclass SOPK_32 <sopk op, string opName, list<dag> pattern> {
-  def "" : SOPK_Pseudo <opName, (outs SReg_32:$dst), (ins u16imm:$src0),
-    pattern>;
-
-  def _si : SOPK_Real_si <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
-    opName#" $dst, $src0">;
-
-  def _vi : SOPK_Real_vi <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
-    opName#" $dst, $src0">;
-}
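-
-// Illustrative only (hypothetical opcode/name): a 16-bit-immediate scalar op
-// defined via SOPK_32; the multiclass expands to pseudo, _si and _vi defs.
-defm S_EXAMPLE_MOVK_I32 : SOPK_32 <sopk<0x00>, "s_example_movk_i32", []>;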
-
-multiclass SOPK_SCC <sopk op, string opName, list<dag> pattern> {
-  def "" : SOPK_Pseudo <opName, (outs SCCReg:$dst),
-    (ins SReg_32:$src0, u16imm:$src1), pattern>;
-
-  let DisableEncoding = "$dst" in {
-    def _si : SOPK_Real_si <op, opName, (outs SCCReg:$dst),
-      (ins SReg_32:$sdst, u16imm:$simm16), opName#" $sdst, $simm16">;
-
-    def _vi : SOPK_Real_vi <op, opName, (outs SCCReg:$dst),
-      (ins SReg_32:$sdst, u16imm:$simm16), opName#" $sdst, $simm16">;
-  }
-}
-
-multiclass SOPK_32TIE <sopk op, string opName, list<dag> pattern> : SOPK_m <
-  op, opName, (outs SReg_32:$sdst), (ins SReg_32:$src0, u16imm:$simm16),
-  " $sdst, $simm16"
->;
-
-multiclass SOPK_IMM32 <sopk op, string opName, dag outs, dag ins,
-                       string argAsm, string asm = opName#argAsm> {
-
-  def "" : SOPK_Pseudo <opName, outs, ins, []>;
-
-  def _si : SOPK <outs, ins, asm, []>,
-            SOPK64e <op.SI>,
-            SIMCInstr<opName, SISubtarget.SI> {
-              let AssemblerPredicates = [isSICI];
-              let isCodeGenOnly = 0;
-            }
-
-  def _vi : SOPK <outs, ins, asm, []>,
-            SOPK64e <op.VI>,
-            SIMCInstr<opName, SISubtarget.VI> {
-              let AssemblerPredicates = [isVI];
-              let isCodeGenOnly = 0;
-            }
-}
-//===----------------------------------------------------------------------===//
-// SMRD classes
-//===----------------------------------------------------------------------===//
-
-class SMRD_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  SMRD <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class SMRD_Real_si <bits<5> op, string opName, bit imm, dag outs, dag ins,
-                    string asm> :
-  SMRD <outs, ins, asm, []>,
-  SMRDe <op, imm>,
-  SIMCInstr<opName, SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-}
-
-class SMRD_Real_vi <bits<8> op, string opName, bit imm, dag outs, dag ins,
-                    string asm> :
-  SMRD <outs, ins, asm, []>,
-  SMEMe_vi <op, imm>,
-  SIMCInstr<opName, SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass SMRD_m <bits<5> op, string opName, bit imm, dag outs, dag ins,
-                   string asm, list<dag> pattern> {
-
-  def "" : SMRD_Pseudo <opName, outs, ins, pattern>;
-
-  def _si : SMRD_Real_si <op, opName, imm, outs, ins, asm>;
-
-  // glc is only applicable to scalar stores, which are not yet
-  // implemented.
-  let glc = 0 in {
-    def _vi : SMRD_Real_vi <{0, 0, 0, op}, opName, imm, outs, ins, asm>;
-  }
-}
-
-multiclass SMRD_Helper <bits<5> op, string opName, RegisterClass baseClass,
-                        RegisterClass dstClass> {
-  defm _IMM : SMRD_m <
-    op, opName#"_IMM", 1, (outs dstClass:$dst),
-    (ins baseClass:$sbase, u32imm:$offset),
-    opName#" $dst, $sbase, $offset", []
-  >;
-
-  defm _SGPR : SMRD_m <
-    op, opName#"_SGPR", 0, (outs dstClass:$dst),
-    (ins baseClass:$sbase, SReg_32:$soff),
-    opName#" $dst, $sbase, $soff", []
-  >;
-}
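-
-// Illustrative only (hypothetical opcode/name): SMRD_Helper expands into
-// _IMM and _SGPR forms, each with pseudo/_si/_vi variants.
-defm S_LOAD_EXAMPLE_DWORD : SMRD_Helper <0x00, "s_load_example_dword",
-  SReg_64, SReg_32
->;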
-
-//===----------------------------------------------------------------------===//
-// Vector ALU classes
-//===----------------------------------------------------------------------===//
-
-// This must always appear immediately before the source operand whose
-// input it modifies.
-def InputMods : OperandWithDefaultOps <i32, (ops (i32 0))> {
-  let PrintMethod = "printOperandAndMods";
-}
-
-def InputModsMatchClass : AsmOperandClass {
-  let Name = "RegWithInputMods";
-}
-
-def InputModsNoDefault : Operand <i32> {
-  let PrintMethod = "printOperandAndMods";
-  let ParserMatchClass = InputModsMatchClass;
-}
-
-class getNumSrcArgs<ValueType Src1, ValueType Src2> {
-  int ret =
-    !if (!eq(Src1.Value, untyped.Value),      1,   // VOP1
-         !if (!eq(Src2.Value, untyped.Value), 2,   // VOP2
-                                              3)); // VOP3
-}
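-
-// Worked examples, per the !if chain above:
-//   getNumSrcArgs<untyped, untyped>.ret = 1   (VOP1)
-//   getNumSrcArgs<f32, untyped>.ret     = 2   (VOP2)
-//   getNumSrcArgs<f32, f32>.ret         = 3   (VOP3)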
-
-// Returns the register class to use for the destination of VOP[123C]
-// instructions for the given VT.
-class getVALUDstForVT<ValueType VT> {
-  RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>,
-                          !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
-                            VOPDstOperand<SReg_64>)); // else VT == i1
-}
-
-// Returns the register class to use for source 0 of VOP[12C]
-// instructions for the given VT.
-class getVOPSrc0ForVT<ValueType VT> {
-  RegisterOperand ret = !if(!eq(VT.Size, 32), VSrc_32, VSrc_64);
-}
-
-// Returns the register class to use for source 1 of VOP[12C] for the
-// given VT.
-class getVOPSrc1ForVT<ValueType VT> {
-  RegisterClass ret = !if(!eq(VT.Size, 32), VGPR_32, VReg_64);
-}
-
-// Returns the register class to use for sources of VOP3 instructions for the
-// given VT.
-class getVOP3SrcForVT<ValueType VT> {
-  RegisterOperand ret = !if(!eq(VT.Size, 32), VCSrc_32, VCSrc_64);
-}
-
-// Returns 1 if the source arguments have modifiers, 0 if they do not.
-class hasModifiers<ValueType SrcVT> {
-  bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
-            !if(!eq(SrcVT.Value, f64.Value), 1, 0));
-}
-
-// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
-class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
-  dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0),               // VOP1
-            !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
-                                    (ins)));
-}
-
-// Returns the input arguments for VOP3 instructions for the given SrcVT.
-class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
-                RegisterOperand Src2RC, int NumSrcArgs,
-                bit HasModifiers> {
-
-  dag ret =
-    !if (!eq(NumSrcArgs, 1),
-      !if (!eq(HasModifiers, 1),
-        // VOP1 with modifiers
-        (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
-             ClampMod:$clamp, omod:$omod)
-      /* else */,
-        // VOP1 without modifiers
-        (ins Src0RC:$src0)
-      /* endif */ ),
-    !if (!eq(NumSrcArgs, 2),
-      !if (!eq(HasModifiers, 1),
-        // VOP 2 with modifiers
-        (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
-             InputModsNoDefault:$src1_modifiers, Src1RC:$src1,
-             ClampMod:$clamp, omod:$omod)
-      /* else */,
-        // VOP2 without modifiers
-        (ins Src0RC:$src0, Src1RC:$src1)
-      /* endif */ )
-    /* NumSrcArgs == 3 */,
-      !if (!eq(HasModifiers, 1),
-        // VOP3 with modifiers
-        (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
-             InputModsNoDefault:$src1_modifiers, Src1RC:$src1,
-             InputModsNoDefault:$src2_modifiers, Src2RC:$src2,
-             ClampMod:$clamp, omod:$omod)
-      /* else */,
-        // VOP3 without modifiers
-        (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)
-      /* endif */ )));
-}
-
-// Returns the assembly string for the inputs and outputs of a VOP[12C]
-// instruction.  This does not add the _e32 suffix, so it can be reused
-// by getAsm64.
-class getAsm32 <int NumSrcArgs> {
-  string src1 = ", $src1";
-  string src2 = ", $src2";
-  string ret = "$dst, $src0"#
-               !if(!eq(NumSrcArgs, 1), "", src1)#
-               !if(!eq(NumSrcArgs, 3), src2, "");
-}
-
-// Returns the assembly string for the inputs and outputs of a VOP3
-// instruction.
-class getAsm64 <int NumSrcArgs, bit HasModifiers> {
-  string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
-  string src1 = !if(!eq(NumSrcArgs, 1), "",
-                   !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
-                                           " $src1_modifiers,"));
-  string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
-  string ret =
-  !if(!eq(HasModifiers, 0),
-      getAsm32<NumSrcArgs>.ret,
-      "$dst, "#src0#src1#src2#"$clamp"#"$omod");
-}
-
-
-class VOPProfile <list<ValueType> _ArgVT> {
-
-  field list<ValueType> ArgVT = _ArgVT;
-
-  field ValueType DstVT = ArgVT[0];
-  field ValueType Src0VT = ArgVT[1];
-  field ValueType Src1VT = ArgVT[2];
-  field ValueType Src2VT = ArgVT[3];
-  field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
-  field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
-  field RegisterClass Src1RC32 = getVOPSrc1ForVT<Src1VT>.ret;
-  field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
-  field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
-  field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
-
-  field int NumSrcArgs = getNumSrcArgs<Src1VT, Src2VT>.ret;
-  field bit HasModifiers = hasModifiers<Src0VT>.ret;
-
-  field dag Outs = (outs DstRC:$dst);
-
-  field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
-  field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
-                             HasModifiers>.ret;
-
-  field string Asm32 = getAsm32<NumSrcArgs>.ret;
-  field string Asm64 = getAsm64<NumSrcArgs, HasModifiers>.ret;
-}
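-
-// As a worked example of the profile machinery: for VOP_F32_F32_F32 (defined
-// below), NumSrcArgs = 2 and HasModifiers = 1, so Asm32 evaluates to
-// "$dst, $src0, $src1" and Asm64 to
-// "$dst, $src0_modifiers, $src1_modifiers$clamp$omod".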
-
-// FIXME: I think these F16/I16 profiles will need to use f16/i16 types in order
-//        for the instruction patterns to work.
-def VOP_F16_F16 : VOPProfile <[f32, f32, untyped, untyped]>;
-def VOP_F16_I16 : VOPProfile <[f32, i32, untyped, untyped]>;
-def VOP_I16_F16 : VOPProfile <[i32, f32, untyped, untyped]>;
-
-def VOP_F16_F16_F16 : VOPProfile <[f32, f32, f32, untyped]>;
-def VOP_F16_F16_I16 : VOPProfile <[f32, f32, i32, untyped]>;
-def VOP_I16_I16_I16 : VOPProfile <[i32, i32, i32, untyped]>;
-
-def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
-def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
-def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
-def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
-def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
-def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
-def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
-def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
-def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;
-
-def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
-def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
-def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
-def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
-def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
-def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
-def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
-def VOP_I32_I32_I32_VCC : VOPProfile <[i32, i32, i32, untyped]> {
-  let Src0RC32 = VCSrc_32;
-}
-
-def VOP_I1_F32_I32 : VOPProfile <[i1, f32, i32, untyped]> {
-  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
-  let Asm64 = "$dst, $src0_modifiers, $src1";
-}
-
-def VOP_I1_F64_I32 : VOPProfile <[i1, f64, i32, untyped]> {
-  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
-  let Asm64 = "$dst, $src0_modifiers, $src1";
-}
-
-def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
-def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
-def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
-def VOP_CNDMASK : VOPProfile <[i32, i32, i32, untyped]> {
-  let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VCCReg:$src2);
-  let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, SSrc_64:$src2);
-  let Asm64 = "$dst, $src0, $src1, $src2";
-}
-
-def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
-def VOP_MADK : VOPProfile <[f32, f32, f32, f32]> {
-  field dag Ins = (ins VCSrc_32:$src0, VGPR_32:$vsrc1, u32imm:$src2);
-  field string Asm = "$dst, $src0, $vsrc1, $src2";
-}
-def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
-def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
-def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
-
-
-class VOP <string opName> {
-  string OpName = opName;
-}
-
-class VOP2_REV <string revOp, bit isOrig> {
-  string RevOp = revOp;
-  bit IsOrig = isOrig;
-}
-
-class AtomicNoRet <string noRetOp, bit isRet> {
-  string NoRetOp = noRetOp;
-  bit IsRet = isRet;
-}
-
-class VOP1_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOP1Common <outs, ins, "", pattern>,
-  VOP <opName>,
-  SIMCInstr <opName#"_e32", SISubtarget.NONE>,
-  MnemonicAlias<opName#"_e32", opName> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-
-  field bits<8> vdst;
-  field bits<9> src0;
-}
-
-class VOP1_Real_si <string opName, vop1 op, dag outs, dag ins, string asm> :
-  VOP1<op.SI, outs, ins, asm, []>,
-  SIMCInstr <opName#"_e32", SISubtarget.SI> {
-  let AssemblerPredicate = SIAssemblerPredicate;
-}
-
-class VOP1_Real_vi <string opName, vop1 op, dag outs, dag ins, string asm> :
-  VOP1<op.VI, outs, ins, asm, []>,
-  SIMCInstr <opName#"_e32", SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass VOP1_m <vop1 op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName> {
-  def "" : VOP1_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOP1_Real_si <opName, op, outs, ins, asm>;
-
-  def _vi : VOP1_Real_vi <opName, op, outs, ins, asm>;
-}
-
-multiclass VOP1SI_m <vop1 op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName> {
-  def "" : VOP1_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOP1_Real_si <opName, op, outs, ins, asm>;
-}
-
-class VOP2_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOP2Common <outs, ins, "", pattern>,
-  VOP <opName>,
-  SIMCInstr<opName#"_e32", SISubtarget.NONE>,
-  MnemonicAlias<opName#"_e32", opName> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class VOP2_Real_si <string opName, vop2 op, dag outs, dag ins, string asm> :
-  VOP2 <op.SI, outs, ins, opName#asm, []>,
-  SIMCInstr <opName#"_e32", SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-}
-
-class VOP2_Real_vi <string opName, vop2 op, dag outs, dag ins, string asm> :
-  VOP2 <op.VI, outs, ins, opName#asm, []>,
-  SIMCInstr <opName#"_e32", SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass VOP2SI_m <vop2 op, dag outs, dag ins, string asm, list<dag> pattern,
-                     string opName, string revOp> {
-  def "" : VOP2_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
-
-  def _si : VOP2_Real_si <opName, op, outs, ins, asm>;
-}
-
-multiclass VOP2_m <vop2 op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, string revOp> {
-  def "" : VOP2_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e32", !eq(revOp, opName)>;
-
-  def _si : VOP2_Real_si <opName, op, outs, ins, asm>;
-
-  def _vi : VOP2_Real_vi <opName, op, outs, ins, asm>;
-
-}
-
-class VOP3DisableFields <bit HasSrc1, bit HasSrc2, bit HasModifiers> {
-
-  bits<2> src0_modifiers = !if(HasModifiers, ?, 0);
-  bits<2> src1_modifiers = !if(HasModifiers, !if(HasSrc1, ?, 0), 0);
-  bits<2> src2_modifiers = !if(HasModifiers, !if(HasSrc2, ?, 0), 0);
-  bits<2> omod = !if(HasModifiers, ?, 0);
-  bits<1> clamp = !if(HasModifiers, ?, 0);
-  bits<9> src1 = !if(HasSrc1, ?, 0);
-  bits<9> src2 = !if(HasSrc2, ?, 0);
-}
-
-class VOP3DisableModFields <bit HasSrc0Mods,
-                            bit HasSrc1Mods = 0,
-                            bit HasSrc2Mods = 0,
-                            bit HasOutputMods = 0> {
-  bits<2> src0_modifiers = !if(HasSrc0Mods, ?, 0);
-  bits<2> src1_modifiers = !if(HasSrc1Mods, ?, 0);
-  bits<2> src2_modifiers = !if(HasSrc2Mods, ?, 0);
-  bits<2> omod = !if(HasOutputMods, ?, 0);
-  bits<1> clamp = !if(HasOutputMods, ?, 0);
-}
-
-class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOP3Common <outs, ins, "", pattern>,
-  VOP <opName>,
-  SIMCInstr<opName#"_e64", SISubtarget.NONE>,
-  MnemonicAlias<opName#"_e64", opName> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
-  VOP3e <op>,
-  SIMCInstr<opName#"_e64", SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-}
-
-class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
-  VOP3e_vi <op>,
-  SIMCInstr <opName#"_e64", SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
-  VOP3be <op>,
-  SIMCInstr<opName#"_e64", SISubtarget.SI> {
-  let AssemblerPredicates = [isSICI];
-}
-
-class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
-  VOP3Common <outs, ins, asm, []>,
-  VOP3be_vi <op>,
-  SIMCInstr <opName#"_e64", SISubtarget.VI> {
-  let AssemblerPredicates = [isVI];
-}
-
-multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, int NumSrcArgs, bit HasMods = 1> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
-                              !if(!eq(NumSrcArgs, 2), 0, 1),
-                              HasMods>;
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
-            VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
-                              !if(!eq(NumSrcArgs, 2), 0, 1),
-                              HasMods>;
-}
-
-// VOP3_m without source modifiers
-multiclass VOP3_m_nomods <vop op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, int NumSrcArgs, bit HasMods = 1> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
-
-  let src0_modifiers = 0,
-      src1_modifiers = 0,
-      src2_modifiers = 0,
-      clamp = 0,
-      omod = 0 in {
-    def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>;
-    def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>;
-  }
-}
-
-multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
-                     list<dag> pattern, string opName, bit HasMods = 1> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<0, 0, HasMods>;
-
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
-            VOP3DisableFields<0, 0, HasMods>;
-}
-
-multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
-                     list<dag> pattern, string opName, bit HasMods = 1> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<0, 0, HasMods>;
-  // No VI instruction. This class is for SI only.
-}
-
-multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
-                     list<dag> pattern, string opName, string revOp,
-                     bit HasMods = 1, bit UseFullOp = 0> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 0, HasMods>;
-
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 0, HasMods>;
-}
-
-multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
-                     list<dag> pattern, string opName, string revOp,
-                     bit HasMods = 1, bit UseFullOp = 0> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 0, HasMods>;
-
-  // No VI instruction. This class is for SI only.
-}
-
-// XXX - Are v_div_scale_{f32|f64} only available in the VOP3b encoding,
-// with no option of implicit VCC use?
-multiclass VOP3b_2_m <vop op, dag outs, dag ins, string asm,
-                      list<dag> pattern, string opName, string revOp,
-                      bit HasMods = 1, bit UseFullOp = 0> {
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
-
-  // The VOP2 variant puts the carry out into VCC; the VOP3 variant
-  // can write it into any SGPR. We currently don't use the carry out,
-  // so for now hardcode it to VCC as well.
-  let sdst = SIOperand.VCC, Defs = [VCC] in {
-    def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName>,
-              VOP3DisableFields<1, 0, HasMods>;
-
-    def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName>,
-              VOP3DisableFields<1, 0, HasMods>;
-  } // End sdst = SIOperand.VCC, Defs = [VCC]
-}
-
-multiclass VOP3b_3_m <vop op, dag outs, dag ins, string asm,
-                      list<dag> pattern, string opName, string revOp,
-                      bit HasMods = 1, bit UseFullOp = 0> {
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
-
-
-  def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 1, HasMods>;
-
-  def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 1, HasMods>;
-}
-
-multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
-                     list<dag> pattern, string opName,
-                     bit HasMods, bit defExec, string revOp> {
-
-  def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
-           VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
-
-  def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 0, HasMods> {
-    let Defs = !if(defExec, [EXEC], []);
-  }
-
-  def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
-            VOP3DisableFields<1, 0, HasMods> {
-    let Defs = !if(defExec, [EXEC], []);
-  }
-}
-
-// An instruction that is VOP2 on SI and VOP3 on VI, with no modifiers.
-multiclass VOP2SI_3VI_m <vop3 op, string opName, dag outs, dag ins,
-                         string asm, list<dag> pattern = []> {
-  let isPseudo = 1, isCodeGenOnly = 1 in {
-    def "" : VOPAnyCommon <outs, ins, "", pattern>,
-             SIMCInstr<opName, SISubtarget.NONE>;
-  }
-
-  def _si : VOP2 <op.SI3{5-0}, outs, ins, asm, []>,
-            SIMCInstr <opName, SISubtarget.SI> {
-            let AssemblerPredicates = [isSICI];
-  }
-
-  def _vi : VOP3Common <outs, ins, asm, []>,
-            VOP3e_vi <op.VI3>,
-            VOP3DisableFields <1, 0, 0>,
-            SIMCInstr <opName, SISubtarget.VI> {
-            let AssemblerPredicates = [isVI];
-  }
-}
-
-multiclass VOP1_Helper <vop1 op, string opName, dag outs,
-                        dag ins32, string asm32, list<dag> pat32,
-                        dag ins64, string asm64, list<dag> pat64,
-                        bit HasMods> {
-
-  defm _e32 : VOP1_m <op, outs, ins32, opName#asm32, pat32, opName>;
-
-  defm _e64 : VOP3_1_m <op, outs, ins64, opName#asm64, pat64, opName, HasMods>;
-}
-
-multiclass VOP1Inst <vop1 op, string opName, VOPProfile P,
-                     SDPatternOperator node = null_frag> : VOP1_Helper <
-  op, opName, P.Outs,
-  P.Ins32, P.Asm32, [],
-  P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
-                                i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0))]),
-  P.HasModifiers
->;
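-
-// Illustrative only (hypothetical opcode/name): a unary VOP1 op built from a
-// profile; VOP1Inst expands to _e32 (VOP1) and _e64 (VOP3) variants.
-defm V_EXAMPLE_MOV_B32 : VOP1Inst <vop1<0x1>, "v_example_mov_b32",
-  VOP_I32_I32
->;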
-
-multiclass VOP1InstSI <vop1 op, string opName, VOPProfile P,
-                       SDPatternOperator node = null_frag> {
-
-  defm _e32 : VOP1SI_m <op, P.Outs, P.Ins32, opName#P.Asm32, [], opName>;
-
-  defm _e64 : VOP3SI_1_m <op, P.Outs, P.Ins64, opName#P.Asm64,
-    !if(P.HasModifiers,
-      [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
-                                i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0))]),
-    opName, P.HasModifiers>;
-}
-
-multiclass VOP2_Helper <vop2 op, string opName, dag outs,
-                        dag ins32, string asm32, list<dag> pat32,
-                        dag ins64, string asm64, list<dag> pat64,
-                        string revOp, bit HasMods> {
-  defm _e32 : VOP2_m <op, outs, ins32, asm32, pat32, opName, revOp>;
-
-  defm _e64 : VOP3_2_m <op,
-    outs, ins64, opName#asm64, pat64, opName, revOp, HasMods
-  >;
-}
-
-multiclass VOP2Inst <vop2 op, string opName, VOPProfile P,
-                     SDPatternOperator node = null_frag,
-                     string revOp = opName> : VOP2_Helper <
-  op, opName, P.Outs,
-  P.Ins32, P.Asm32, [],
-  P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
-           (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                      i1:$clamp, i32:$omod)),
-                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
-  revOp, P.HasModifiers
->;
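-
-// Illustrative only (hypothetical opcode/name): a binary VOP2 op. Passing an
-// SDPatternOperator such as fadd makes the !if above emit the e64 selection
-// pattern with source modifiers.
-defm V_EXAMPLE_ADD_F32 : VOP2Inst <vop2<0x3>, "v_example_add_f32",
-  VOP_F32_F32_F32, fadd
->;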
-
-multiclass VOP2InstSI <vop2 op, string opName, VOPProfile P,
-                       SDPatternOperator node = null_frag,
-                       string revOp = opName> {
-  defm _e32 : VOP2SI_m <op, P.Outs, P.Ins32, P.Asm32, [], opName, revOp>;
-
-  defm _e64 : VOP3SI_2_m <op, P.Outs, P.Ins64, opName#P.Asm64,
-    !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
-             (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                        i1:$clamp, i32:$omod)),
-                   (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
-    opName, revOp, P.HasModifiers>;
-}
-
-multiclass VOP2b_Helper <vop2 op, string opName, dag outs,
-                         dag ins32, string asm32, list<dag> pat32,
-                         dag ins64, string asm64, list<dag> pat64,
-                         string revOp, bit HasMods> {
-
-  defm _e32 : VOP2_m <op, outs, ins32, asm32, pat32, opName, revOp>;
-
-  defm _e64 : VOP3b_2_m <op,
-    outs, ins64, opName#asm64, pat64, opName, revOp, HasMods
-  >;
-}
-
-multiclass VOP2bInst <vop2 op, string opName, VOPProfile P,
-                      SDPatternOperator node = null_frag,
-                      string revOp = opName> : VOP2b_Helper <
-  op, opName, P.Outs,
-  P.Ins32, P.Asm32, [],
-  P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
-           (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                      i1:$clamp, i32:$omod)),
-                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
-  revOp, P.HasModifiers
->;
-
-// A VOP2 instruction that is VOP3-only on VI.
-multiclass VOP2_VI3_Helper <vop23 op, string opName, dag outs,
-                            dag ins32, string asm32, list<dag> pat32,
-                            dag ins64, string asm64, list<dag> pat64,
-                            string revOp, bit HasMods> {
-  defm _e32 : VOP2SI_m <op, outs, ins32, asm32, pat32, opName, revOp>;
-
-  defm _e64 : VOP3_2_m <op, outs, ins64, opName#asm64, pat64, opName,
-                        revOp, HasMods>;
-}
-
-multiclass VOP2_VI3_Inst <vop23 op, string opName, VOPProfile P,
-                          SDPatternOperator node = null_frag,
-                          string revOp = opName>
-                          : VOP2_VI3_Helper <
-  op, opName, P.Outs,
-  P.Ins32, P.Asm32, [],
-  P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set P.DstVT:$dst,
-           (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                      i1:$clamp, i32:$omod)),
-                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-      [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
-  revOp, P.HasModifiers
->;
-
-multiclass VOP2MADK <vop2 op, string opName, list<dag> pattern = []> {
-
-  def "" : VOP2_Pseudo <VOP_MADK.Outs, VOP_MADK.Ins, pattern, opName>;
-
-let isCodeGenOnly = 0 in {
-  def _si : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
-                        !strconcat(opName, VOP_MADK.Asm), []>,
-            SIMCInstr <opName#"_e32", SISubtarget.SI>,
-            VOP2_MADKe <op.SI> {
-            let AssemblerPredicates = [isSICI];
-            }
-
-  def _vi : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
-                        !strconcat(opName, VOP_MADK.Asm), []>,
-            SIMCInstr <opName#"_e32", SISubtarget.VI>,
-            VOP2_MADKe <op.VI> {
-            let AssemblerPredicates = [isVI];
-            }
-} // End isCodeGenOnly = 0
-}
-
-class VOPC_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
-  VOPCCommon <ins, "", pattern>,
-  VOP <opName>,
-  SIMCInstr<opName#"_e32", SISubtarget.NONE>,
-  MnemonicAlias<opName#"_e32", opName> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-multiclass VOPC_m <vopc op, dag outs, dag ins, string asm, list<dag> pattern,
-                   string opName, bit DefExec, string revOpName = ""> {
-  def "" : VOPC_Pseudo <outs, ins, pattern, opName>;
-
-  def _si : VOPC<op.SI, ins, asm, []>,
-            SIMCInstr <opName#"_e32", SISubtarget.SI> {
-    let Defs = !if(DefExec, [EXEC], []);
-    let hasSideEffects = DefExec;
-  }
-
-  def _vi : VOPC<op.VI, ins, asm, []>,
-            SIMCInstr <opName#"_e32", SISubtarget.VI> {
-    let Defs = !if(DefExec, [EXEC], []);
-    let hasSideEffects = DefExec;
-  }
-}
-
-multiclass VOPC_Helper <vopc op, string opName,
-                        dag ins32, string asm32, list<dag> pat32,
-                        dag out64, dag ins64, string asm64, list<dag> pat64,
-                        bit HasMods, bit DefExec, string revOp> {
-  defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
-
-  defm _e64 : VOP3_C_m <op, out64, ins64, opName#asm64, pat64,
-                        opName, HasMods, DefExec, revOp>;
-}
-
-// Special case for class instructions which only have modifiers on
-// the 1st source operand.
-multiclass VOPC_Class_Helper <vopc op, string opName,
-                             dag ins32, string asm32, list<dag> pat32,
-                             dag out64, dag ins64, string asm64, list<dag> pat64,
-                             bit HasMods, bit DefExec, string revOp> {
-  defm _e32 : VOPC_m <op, (outs), ins32, opName#asm32, pat32, opName, DefExec>;
-
-  defm _e64 : VOP3_C_m <op, out64, ins64, opName#asm64, pat64,
-                        opName, HasMods, DefExec, revOp>,
-                        VOP3DisableModFields<1, 0, 0>;
-}
-
-multiclass VOPCInst <vopc op, string opName,
-                     VOPProfile P, PatLeaf cond = COND_NULL,
-                     string revOp = opName,
-                     bit DefExec = 0> : VOPC_Helper <
-  op, opName,
-  P.Ins32, P.Asm32, [],
-  (outs VOPDstS64:$dst), P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set i1:$dst,
-          (setcc (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                      i1:$clamp, i32:$omod)),
-                 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
-                 cond))],
-      [(set i1:$dst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
-  P.HasModifiers, DefExec, revOp
->;
-
-multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
-                     bit DefExec = 0> : VOPC_Class_Helper <
-  op, opName,
-  P.Ins32, P.Asm32, [],
-  (outs VOPDstS64:$dst), P.Ins64, P.Asm64,
-  !if(P.HasModifiers,
-      [(set i1:$dst,
-          (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
-      [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
-  P.HasModifiers, DefExec, opName
->;
-
-
-multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
-  VOPCInst <op, opName, VOP_F32_F32_F32, cond, revOp>;
-
-multiclass VOPC_F64 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
-  VOPCInst <op, opName, VOP_F64_F64_F64, cond, revOp>;
-
-multiclass VOPC_I32 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
-  VOPCInst <op, opName, VOP_I32_I32_I32, cond, revOp>;
-
-multiclass VOPC_I64 <vopc op, string opName, PatLeaf cond = COND_NULL, string revOp = opName> :
-  VOPCInst <op, opName, VOP_I64_I64_I64, cond, revOp>;
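-
-// Illustrative only (hypothetical opcode/name): a float compare; the cond
-// PatLeaf feeds the setcc pattern in VOPCInst.
-defm V_CMP_EXAMPLE_LT_F32 : VOPC_F32 <vopc<0x1>, "v_cmp_example_lt_f32",
-  COND_OLT
->;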
-
-
-multiclass VOPCX <vopc op, string opName, VOPProfile P,
-                  PatLeaf cond = COND_NULL,
-                  string revOp = "">
-  : VOPCInst <op, opName, P, cond, revOp, 1>;
-
-multiclass VOPCX_F32 <vopc op, string opName, string revOp = opName> :
-  VOPCX <op, opName, VOP_F32_F32_F32, COND_NULL, revOp>;
-
-multiclass VOPCX_F64 <vopc op, string opName, string revOp = opName> :
-  VOPCX <op, opName, VOP_F64_F64_F64, COND_NULL, revOp>;
-
-multiclass VOPCX_I32 <vopc op, string opName, string revOp = opName> :
-  VOPCX <op, opName, VOP_I32_I32_I32, COND_NULL, revOp>;
-
-multiclass VOPCX_I64 <vopc op, string opName, string revOp = opName> :
-  VOPCX <op, opName, VOP_I64_I64_I64, COND_NULL, revOp>;
-
-multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
-                        list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
-    op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
->;
-
-multiclass VOPC_CLASS_F32 <vopc op, string opName> :
-  VOPCClassInst <op, opName, VOP_I1_F32_I32, 0>;
-
-multiclass VOPCX_CLASS_F32 <vopc op, string opName> :
-  VOPCClassInst <op, opName, VOP_I1_F32_I32, 1>;
-
-multiclass VOPC_CLASS_F64 <vopc op, string opName> :
-  VOPCClassInst <op, opName, VOP_I1_F64_I32, 0>;
-
-multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
-  VOPCClassInst <op, opName, VOP_I1_F64_I32, 1>;
-
-multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
-                     SDPatternOperator node = null_frag> : VOP3_Helper <
-  op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
-  !if(!eq(P.NumSrcArgs, 3),
-    !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
-            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                       i1:$clamp, i32:$omod)),
-                  (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
-                  (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1,
-                                  P.Src2VT:$src2))]),
-  !if(!eq(P.NumSrcArgs, 2),
-    !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
-            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                       i1:$clamp, i32:$omod)),
-                  (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))])
-  /* P.NumSrcArgs == 1 */,
-    !if(P.HasModifiers,
-        [(set P.DstVT:$dst,
-            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                       i1:$clamp, i32:$omod))))],
-        [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
-  P.NumSrcArgs, P.HasModifiers
->;
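-
-// Illustrative only (placeholder opcodes/name): a three-source VOP3 op; with
-// P.NumSrcArgs == 3 and modifiers, the first !if arm above supplies the
-// selection pattern.
-defm V_EXAMPLE_FMA_F32 : VOP3Inst <vop3<0x141, 0x1cb>, "v_example_fma_f32",
-  VOP_F32_F32_F32_F32, fma
->;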
-
-// Special case for v_div_fmas_{f32|f64}, since it seems to be the
-// only VOP instruction that implicitly reads VCC.
-multiclass VOP3_VCC_Inst <vop3 op, string opName,
-                          VOPProfile P,
-                          SDPatternOperator node = null_frag> : VOP3_Helper <
-  op, opName,
-  (outs P.DstRC.RegClass:$dst),
-  (ins InputModsNoDefault:$src0_modifiers, P.Src0RC64:$src0,
-       InputModsNoDefault:$src1_modifiers, P.Src1RC64:$src1,
-       InputModsNoDefault:$src2_modifiers, P.Src2RC64:$src2,
-       ClampMod:$clamp,
-       omod:$omod),
-  " $dst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
-  [(set P.DstVT:$dst,
-            (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
-                                       i1:$clamp, i32:$omod)),
-                  (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
-                  (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers)),
-                  (i1 VCC)))],
-  3, 1
->;
-
-multiclass VOP3b_Helper <vop op, RegisterClass vrc, RegisterOperand arc,
-                    string opName, list<dag> pattern> :
-  VOP3b_3_m <
-  op, (outs vrc:$vdst, SReg_64:$sdst),
-      (ins InputModsNoDefault:$src0_modifiers, arc:$src0,
-           InputModsNoDefault:$src1_modifiers, arc:$src1,
-           InputModsNoDefault:$src2_modifiers, arc:$src2,
-           ClampMod:$clamp, omod:$omod),
-  opName#" $vdst, $sdst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod", pattern,
-  opName, opName, 1, 1
->;
-
-multiclass VOP3b_64 <vop3 op, string opName, list<dag> pattern> :
-  VOP3b_Helper <op, VReg_64, VSrc_64, opName, pattern>;
-
-multiclass VOP3b_32 <vop3 op, string opName, list<dag> pattern> :
-  VOP3b_Helper <op, VGPR_32, VSrc_32, opName, pattern>;
-
-
-class Vop3ModPat<Instruction Inst, VOPProfile P, SDPatternOperator node> : Pat<
-  (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)),
-        (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
-        (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))),
-  (Inst i32:$src0_modifiers, P.Src0VT:$src0,
-        i32:$src1_modifiers, P.Src1VT:$src1,
-        i32:$src2_modifiers, P.Src2VT:$src2,
-        i1:$clamp,
-        i32:$omod)>;
-
-//===----------------------------------------------------------------------===//
-// Interpolation opcodes
-//===----------------------------------------------------------------------===//
-
-class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  VINTRPCommon <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
-                      string asm> :
-  VINTRPCommon <outs, ins, asm, []>,
-  VINTRPe <op>,
-  SIMCInstr<opName, SISubtarget.SI>;
-
-class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
-                      string asm> :
-  VINTRPCommon <outs, ins, asm, []>,
-  VINTRPe_vi <op>,
-  SIMCInstr<opName, SISubtarget.VI>;
-
-multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
-                     list<dag> pattern = []> {
-  def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>;
-
-  def _si : VINTRP_Real_si <op, NAME, outs, ins, asm>;
-
-  def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>;
-}
-
-//===----------------------------------------------------------------------===//
-// Vector I/O classes
-//===----------------------------------------------------------------------===//
-
-class DS_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  DS <outs, ins, "", pattern>,
-  SIMCInstr <opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class DS_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
-  DS <outs, ins, asm, []>,
-  DSe <op>,
-  SIMCInstr <opName, SISubtarget.SI> {
-  let isCodeGenOnly = 0;
-}
-
-class DS_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
-  DS <outs, ins, asm, []>,
-  DSe_vi <op>,
-  SIMCInstr <opName, SISubtarget.VI>;
-
-class DS_Off16_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
-  DS_Real_si <op, opName, outs, ins, asm> {
-
-  // Single loads interpret the two i8imm operands as one i16 offset.
-  bits<16> offset;
-  let offset0 = offset{7-0};
-  let offset1 = offset{15-8};
-  let isCodeGenOnly = 0;
-}
-
-class DS_Off16_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
-  DS_Real_vi <op, opName, outs, ins, asm> {
-
-  // Single loads interpret the two i8imm operands as one i16 offset.
-  bits<16> offset;
-  let offset0 = offset{7-0};
-  let offset1 = offset{15-8};
-}
-
-multiclass DS_1A_RET <bits<8> op, string opName, RegisterClass rc,
-  dag outs = (outs rc:$vdst),
-  dag ins = (ins VGPR_32:$addr, ds_offset:$offset, gds:$gds),
-  string asm = opName#" $vdst, $addr"#"$offset$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>;
-
-  let data0 = 0, data1 = 0 in {
-    def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
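-
-// Illustrative only (hypothetical opcode/name): a one-address LDS load that
-// returns its result in $vdst, defined via DS_1A_RET.
-defm DS_READ_EXAMPLE_B32 : DS_1A_RET <0x36, "ds_read_example_b32", VGPR_32>;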
-
-multiclass DS_1A_Off8_RET <bits<8> op, string opName, RegisterClass rc,
-  dag outs = (outs rc:$vdst),
-  dag ins = (ins VGPR_32:$addr, ds_offset0:$offset0, ds_offset1:$offset1,
-                 gds01:$gds),
-  string asm = opName#" $vdst, $addr"#"$offset0"#"$offset1$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>;
-
-  let data0 = 0, data1 = 0, AsmMatchConverter = "cvtDSOffset01" in {
-    def _si : DS_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass DS_1A1D_NORET <bits<8> op, string opName, RegisterClass rc,
-  dag outs = (outs),
-  dag ins = (ins VGPR_32:$addr, rc:$data0, ds_offset:$offset, gds:$gds),
-  string asm = opName#" $addr, $data0"#"$offset$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>,
-           AtomicNoRet<opName, 0>;
-
-  let data1 = 0, vdst = 0 in {
-    def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass DS_1A1D_Off8_NORET <bits<8> op, string opName, RegisterClass rc,
-  dag outs = (outs),
-  dag ins = (ins VGPR_32:$addr, rc:$data0, rc:$data1,
-              ds_offset0:$offset0, ds_offset1:$offset1, gds01:$gds),
-  string asm = opName#" $addr, $data0, $data1"#"$offset0"#"$offset1"#"$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>;
-
-  let vdst = 0, AsmMatchConverter = "cvtDSOffset01" in {
-    def _si : DS_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass DS_1A1D_RET <bits<8> op, string opName, RegisterClass rc,
-                        string noRetOp = "",
-  dag outs = (outs rc:$vdst),
-  dag ins = (ins VGPR_32:$addr, rc:$data0, ds_offset:$offset, gds:$gds),
-  string asm = opName#" $vdst, $addr, $data0"#"$offset$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>,
-           AtomicNoRet<noRetOp, 1>;
-
-  let data1 = 0 in {
-    def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass DS_1A2D_RET_m <bits<8> op, string opName, RegisterClass rc,
-                          string noRetOp = "", dag ins,
-  dag outs = (outs rc:$vdst),
-  string asm = opName#" $vdst, $addr, $data0, $data1"#"$offset"#"$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>,
-           AtomicNoRet<noRetOp, 1>;
-
-  def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-  def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-}
-
-multiclass DS_1A2D_RET <bits<8> op, string asm, RegisterClass rc,
-                        string noRetOp = "", RegisterClass src = rc> :
-  DS_1A2D_RET_m <op, asm, rc, noRetOp,
-                 (ins VGPR_32:$addr, src:$data0, src:$data1,
-                      ds_offset:$offset, gds:$gds)
->;
-
-multiclass DS_1A2D_NORET <bits<8> op, string opName, RegisterClass rc,
-                          string noRetOp = opName,
-  dag outs = (outs),
-  dag ins = (ins VGPR_32:$addr, rc:$data0, rc:$data1,
-                 ds_offset:$offset, gds:$gds),
-  string asm = opName#" $addr, $data0, $data1"#"$offset"#"$gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>,
-           AtomicNoRet<noRetOp, 0>;
-
-  let vdst = 0 in {
-    def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass DS_0A_RET <bits<8> op, string opName,
-  dag outs = (outs VGPR_32:$vdst),
-  dag ins = (ins ds_offset:$offset, gds:$gds),
-  string asm = opName#" $vdst"#"$offset"#"$gds"> {
-
-  let mayLoad = 1, mayStore = 1 in {
-    def "" : DS_Pseudo <opName, outs, ins, []>;
-
-    let addr = 0, data0 = 0, data1 = 0 in {
-      def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-      def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-    } // end addr = 0, data0 = 0, data1 = 0
-  } // end mayLoad = 1, mayStore = 1
-}
-
-multiclass DS_1A_RET_GDS <bits<8> op, string opName,
-  dag outs = (outs VGPR_32:$vdst),
-  dag ins = (ins VGPR_32:$addr, ds_offset_gds:$offset),
-  string asm = opName#" $vdst, $addr"#"$offset gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>;
-
-  let data0 = 0, data1 = 0, gds = 1 in {
-    def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-  } // end data0 = 0, data1 = 0, gds = 1
-}
-
-multiclass DS_1A_GDS <bits<8> op, string opName,
-  dag outs = (outs),
-  dag ins = (ins VGPR_32:$addr),
-  string asm = opName#" $addr gds"> {
-
-  def "" : DS_Pseudo <opName, outs, ins, []>;
-
-  let vdst = 0, data0 = 0, data1 = 0, offset0 = 0, offset1 = 0, gds = 1 in {
-    def _si : DS_Real_si <op, opName, outs, ins, asm>;
-    def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
-  } // end vdst = 0, data0 = 0, data1 = 0, offset0 = 0, offset1 = 0, gds = 1
-}
-
-multiclass DS_1A <bits<8> op, string opName,
-  dag outs = (outs),
-  dag ins = (ins VGPR_32:$addr, ds_offset:$offset, gds:$gds),
-  string asm = opName#" $addr"#"$offset"#"$gds"> {
-
-  let mayLoad = 1, mayStore = 1 in {
-    def "" : DS_Pseudo <opName, outs, ins, []>;
-
-    let vdst = 0, data0 = 0, data1 = 0 in {
-      def _si : DS_Off16_Real_si <op, opName, outs, ins, asm>;
-      def _vi : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
-    } // end vdst = 0, data0 = 0, data1 = 0
-  } // end mayLoad = 1, mayStore = 1
-}
-
-//===----------------------------------------------------------------------===//
-// MTBUF classes
-//===----------------------------------------------------------------------===//
-
-class MTBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  MTBUF <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-}
-
-class MTBUF_Real_si <bits<3> op, string opName, dag outs, dag ins,
-                    string asm> :
-  MTBUF <outs, ins, asm, []>,
-  MTBUFe <op>,
-  SIMCInstr<opName, SISubtarget.SI>;
-
-class MTBUF_Real_vi <bits<4> op, string opName, dag outs, dag ins, string asm> :
-  MTBUF <outs, ins, asm, []>,
-  MTBUFe_vi <op>,
-  SIMCInstr <opName, SISubtarget.VI>;
-
-multiclass MTBUF_m <bits<3> op, string opName, dag outs, dag ins, string asm,
-                    list<dag> pattern> {
-
-  def "" : MTBUF_Pseudo <opName, outs, ins, pattern>;
-
-  def _si : MTBUF_Real_si <op, opName, outs, ins, asm>;
-
-  def _vi : MTBUF_Real_vi <{0, op{2}, op{1}, op{0}}, opName, outs, ins, asm>;
-
-}
-
-let mayStore = 1, mayLoad = 0 in {
-
-multiclass MTBUF_Store_Helper <bits<3> op, string opName,
-                               RegisterClass regClass> : MTBUF_m <
-  op, opName, (outs),
-  (ins regClass:$vdata, u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
-   i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VGPR_32:$vaddr,
-   SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SCSrc_32:$soffset),
-  opName#" $vdata, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
-        #" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", []
->;
-
-} // mayStore = 1, mayLoad = 0
-
-let mayLoad = 1, mayStore = 0 in {
-
-multiclass MTBUF_Load_Helper <bits<3> op, string opName,
-                              RegisterClass regClass> : MTBUF_m <
-  op, opName, (outs regClass:$dst),
-  (ins u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
-       i8imm:$dfmt, i8imm:$nfmt, VGPR_32:$vaddr, SReg_128:$srsrc,
-       i1imm:$slc, i1imm:$tfe, SCSrc_32:$soffset),
-  opName#" $dst, $offset, $offen, $idxen, $glc, $addr64, $dfmt,"
-        #" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", []
->;
-
-} // mayLoad = 1, mayStore = 0
-
-//===----------------------------------------------------------------------===//
-// MUBUF classes
-//===----------------------------------------------------------------------===//
-
-class mubuf <bits<7> si, bits<7> vi = si> {
-  field bits<7> SI = si;
-  field bits<7> VI = vi;
-}
-
-let isCodeGenOnly = 0 in {
-
-class MUBUF_si <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-  MUBUF <outs, ins, asm, pattern>, MUBUFe <op> {
-  let lds  = 0;
-}
-
-} // End let isCodeGenOnly = 0
-
-class MUBUF_vi <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
-  MUBUF <outs, ins, asm, pattern>, MUBUFe_vi <op> {
-  let lds = 0;
-}
-
-class MUBUFAddr64Table <bit is_addr64, string suffix = ""> {
-  bit IsAddr64 = is_addr64;
-  string OpName = NAME # suffix;
-}
-
-class MUBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
-  MUBUF <outs, ins, "", pattern>,
-  SIMCInstr<opName, SISubtarget.NONE> {
-  let isPseudo = 1;
-  let isCodeGenOnly = 1;
-
-  // Dummy fields, so that we can use let statements around multiclasses.
-  bits<1> offen;
-  bits<1> idxen;
-  bits<8> vaddr;
-  bits<1> glc;
-  bits<1> slc;
-  bits<1> tfe;
-  bits<8> soffset;
-}
-
-class MUBUF_Real_si <mubuf op, string opName, dag outs, dag ins,
-                     string asm> :
-  MUBUF <outs, ins, asm, []>,
-  MUBUFe <op.SI>,
-  SIMCInstr<opName, SISubtarget.SI> {
-  let lds = 0;
-}
-
-class MUBUF_Real_vi <mubuf op, string opName, dag outs, dag ins,
-                     string asm> :
-  MUBUF <outs, ins, asm, []>,
-  MUBUFe_vi <op.VI>,
-  SIMCInstr<opName, SISubtarget.VI> {
-  let lds = 0;
-}
-
-multiclass MUBUF_m <mubuf op, string opName, dag outs, dag ins, string asm,
-                    list<dag> pattern> {
-
-  def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
-           MUBUFAddr64Table <0>;
-
-  let addr64 = 0, isCodeGenOnly = 0 in {
-    def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
-  }
-
-  def _vi : MUBUF_Real_vi <op, opName, outs, ins, asm>;
-}
-
-multiclass MUBUFAddr64_m <mubuf op, string opName, dag outs,
-                          dag ins, string asm, list<dag> pattern> {
-
-  def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
-           MUBUFAddr64Table <1>;
-
-  let addr64 = 1, isCodeGenOnly = 0 in {
-    def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
-  }
-
-  // There is no VI version. If the pseudo is selected, it should be lowered
-  // for VI appropriately.
-}
-
-multiclass MUBUFAtomicOffset_m <mubuf op, string opName, dag outs, dag ins,
-                                string asm, list<dag> pattern, bit is_return> {
-
-  def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
-           MUBUFAddr64Table <0, !if(is_return, "_RTN", "")>,
-           AtomicNoRet<NAME#"_OFFSET", is_return>;
-
-  let offen = 0, idxen = 0, tfe = 0, vaddr = 0 in {
-    let addr64 = 0 in {
-      def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
-    }
-
-    def _vi : MUBUF_Real_vi <op, opName, outs, ins, asm>;
-  }
-}
-
-multiclass MUBUFAtomicAddr64_m <mubuf op, string opName, dag outs, dag ins,
-                                string asm, list<dag> pattern, bit is_return> {
-
-  def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
-           MUBUFAddr64Table <1, !if(is_return, "_RTN", "")>,
-           AtomicNoRet<NAME#"_ADDR64", is_return>;
-
-  let offen = 0, idxen = 0, addr64 = 1, tfe = 0 in {
-    def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
-  }
-
-  // There is no VI version. If the pseudo is selected, it should be lowered
-  // for VI appropriately.
-}
-
-multiclass MUBUF_Atomic <mubuf op, string name, RegisterClass rc,
-                         ValueType vt, SDPatternOperator atomic> {
-
-  let mayStore = 1, mayLoad = 1, hasPostISelHook = 1 in {
-
-    // No return variants
-    let glc = 0 in {
-
-      defm _ADDR64 : MUBUFAtomicAddr64_m <
-        op, name#"_addr64", (outs),
-        (ins rc:$vdata, SReg_128:$srsrc, VReg_64:$vaddr,
-             SCSrc_32:$soffset, mbuf_offset:$offset, slc:$slc),
-        name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#"$slc", [], 0
-      >;
-
-      defm _OFFSET : MUBUFAtomicOffset_m <
-        op, name#"_offset", (outs),
-        (ins rc:$vdata, SReg_128:$srsrc, SCSrc_32:$soffset, mbuf_offset:$offset,
-             slc:$slc),
-        name#" $vdata, $srsrc, $soffset"#"$offset"#"$slc", [], 0
-      >;
-    } // glc = 0
-
-    // Variants that return values
-    let glc = 1, Constraints = "$vdata = $vdata_in",
-        DisableEncoding = "$vdata_in"  in {
-
-      defm _RTN_ADDR64 : MUBUFAtomicAddr64_m <
-        op, name#"_rtn_addr64", (outs rc:$vdata),
-        (ins rc:$vdata_in, SReg_128:$srsrc, VReg_64:$vaddr,
-             SCSrc_32:$soffset, mbuf_offset:$offset, slc:$slc),
-        name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#" glc"#"$slc",
-        [(set vt:$vdata,
-         (atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i32:$soffset,
-                                    i16:$offset, i1:$slc), vt:$vdata_in))], 1
-      >;
-
-      defm _RTN_OFFSET : MUBUFAtomicOffset_m <
-        op, name#"_rtn_offset", (outs rc:$vdata),
-        (ins rc:$vdata_in, SReg_128:$srsrc, SCSrc_32:$soffset,
-             mbuf_offset:$offset, slc:$slc),
-        name#" $vdata, $srsrc, $soffset"#"$offset"#" glc $slc",
-        [(set vt:$vdata,
-         (atomic (MUBUFOffsetAtomic v4i32:$srsrc, i32:$soffset, i16:$offset,
-                                    i1:$slc), vt:$vdata_in))], 1
-      >;
-
-    } // glc = 1
-
-  } // mayStore = 1, mayLoad = 1, hasPostISelHook = 1
-}
-
-multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
-                              ValueType load_vt = i32,
-                              SDPatternOperator ld = null_frag> {
-
-  let mayLoad = 1, mayStore = 0 in {
-    let offen = 0, idxen = 0, vaddr = 0 in {
-      defm _OFFSET : MUBUF_m <op, name#"_offset", (outs regClass:$vdata),
-                           (ins SReg_128:$srsrc, SCSrc_32:$soffset,
-                           mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
-                           name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
-                           [(set load_vt:$vdata, (ld (MUBUFOffset v4i32:$srsrc,
-                                                     i32:$soffset, i16:$offset,
-                                                     i1:$glc, i1:$slc, i1:$tfe)))]>;
-    }
-
-    let offen = 1, idxen = 0  in {
-      defm _OFFEN  : MUBUF_m <op, name#"_offen", (outs regClass:$vdata),
-                           (ins VGPR_32:$vaddr, SReg_128:$srsrc,
-                           SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc, slc:$slc,
-                           tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
-    }
-
-    let offen = 0, idxen = 1 in {
-      defm _IDXEN  : MUBUF_m <op, name#"_idxen", (outs regClass:$vdata),
-                           (ins VGPR_32:$vaddr, SReg_128:$srsrc,
-                           SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
-                           slc:$slc, tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
-    }
-
-    let offen = 1, idxen = 1 in {
-      defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs regClass:$vdata),
-                           (ins VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
-                           mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset idxen offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
-    }
-
-    let offen = 0, idxen = 0 in {
-      defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs regClass:$vdata),
-                           (ins VReg_64:$vaddr, SReg_128:$srsrc,
-                                SCSrc_32:$soffset, mbuf_offset:$offset,
-                                glc:$glc, slc:$slc, tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#
-                                "$glc"#"$slc"#"$tfe",
-                           [(set load_vt:$vdata, (ld (MUBUFAddr64 v4i32:$srsrc,
-                                                  i64:$vaddr, i32:$soffset,
-                                                  i16:$offset, i1:$glc, i1:$slc,
-                                                  i1:$tfe)))]>;
-    }
-  }
-}
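-
-// Illustrative only (placeholder opcodes/name): a dword buffer load; the
-// helper above expands it into _OFFSET/_OFFEN/_IDXEN/_BOTHEN/_ADDR64
-// variants. The generic "load" frag stands in for a real address-space frag.
-defm BUFFER_LOAD_EXAMPLE_DWORD : MUBUF_Load_Helper <
-  mubuf<0x0c, 0x14>, "buffer_load_example_dword", VGPR_32, i32, load
->;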
-
-multiclass MUBUF_Store_Helper <mubuf op, string name, RegisterClass vdataClass,
-                          ValueType store_vt = i32, SDPatternOperator st = null_frag> {
-  let mayLoad = 0, mayStore = 1 in {
-    defm : MUBUF_m <op, name, (outs),
-                    (ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
-                    mbuf_offset:$offset, offen:$offen, idxen:$idxen, glc:$glc, slc:$slc,
-                    tfe:$tfe),
-                    name#" $vdata, $vaddr, $srsrc, $soffset"#"$offen"#"$idxen"#"$offset"#
-                         "$glc"#"$slc"#"$tfe", []>;
-
-    let offen = 0, idxen = 0, vaddr = 0 in {
-      defm _OFFSET : MUBUF_m <op, name#"_offset",(outs),
-                              (ins vdataClass:$vdata, SReg_128:$srsrc, SCSrc_32:$soffset,
-                              mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
-                              name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
-                              [(st store_vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset,
-                                   i16:$offset, i1:$glc, i1:$slc, i1:$tfe))]>;
-    } // offen = 0, idxen = 0, vaddr = 0
-
-    let offen = 1, idxen = 0  in {
-      defm _OFFEN : MUBUF_m <op, name#"_offen", (outs),
-                             (ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc,
-                              SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
-                              slc:$slc, tfe:$tfe),
-                             name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#
-                             "$glc"#"$slc"#"$tfe", []>;
-    } // End offen = 1, idxen = 0
-
-    let offen = 0, idxen = 1 in {
-      defm _IDXEN  : MUBUF_m <op, name#"_idxen", (outs),
-                           (ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc,
-                           SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
-                           slc:$slc, tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
-    }
-
-    let offen = 1, idxen = 1 in {
-      defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs),
-                           (ins vdataClass:$vdata, VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
-                           mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
-                           name#" $vdata, $vaddr, $srsrc, $soffset idxen offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
-    }
-
-    let offen = 0, idxen = 0 in {
-      defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs),
-                                    (ins vdataClass:$vdata, VReg_64:$vaddr, SReg_128:$srsrc,
-                                         SCSrc_32:$soffset,
-                                         mbuf_offset:$offset, glc:$glc, slc:$slc,
-                                         tfe:$tfe),
-                                    name#" $vdata, $vaddr, $srsrc, $soffset addr64"#
-                                         "$offset"#"$glc"#"$slc"#"$tfe",
-                                    [(st store_vt:$vdata,
-                                      (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr,
-                                                   i32:$soffset, i16:$offset,
-                                                   i1:$glc, i1:$slc, i1:$tfe))]>;
-    }
-  } // End mayLoad = 0, mayStore = 1
-}
-
-class FLAT_Load_Helper <bits<7> op, string asm, RegisterClass regClass> :
-      FLAT <op, (outs regClass:$vdst),
-                (ins VReg_64:$addr, glc_flat:$glc, slc_flat:$slc, tfe_flat:$tfe),
-            asm#" $vdst, $addr"#"$glc"#"$slc"#"$tfe", []> {
-  let data = 0;
-  let mayLoad = 1;
-}
-
-class FLAT_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
-      FLAT <op, (outs), (ins vdataClass:$data, VReg_64:$addr,
-                             glc_flat:$glc, slc_flat:$slc, tfe_flat:$tfe),
-          name#" $data, $addr"#"$glc"#"$slc"#"$tfe",
-         []> {
-
-  let mayLoad = 0;
-  let mayStore = 1;
-
-  // Encoding
-  let vdst = 0;
-}
-
-multiclass FLAT_ATOMIC <bits<7> op, string name, RegisterClass vdst_rc,
-                        RegisterClass data_rc = vdst_rc> {
-
-  let mayLoad = 1, mayStore = 1 in {
-    def "" : FLAT <op, (outs),
-                  (ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
-                       tfe_flat_atomic:$tfe),
-                   name#" $addr, $data"#"$slc"#"$tfe", []>,
-             AtomicNoRet <NAME, 0> {
-      let glc = 0;
-      let vdst = 0;
-    }
-
-    def _RTN : FLAT <op, (outs vdst_rc:$vdst),
-                     (ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
-                          tfe_flat_atomic:$tfe),
-                     name#" $vdst, $addr, $data glc"#"$slc"#"$tfe", []>,
-               AtomicNoRet <NAME, 1> {
-      let glc = 1;
-    }
-  }
-}
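-
-// The AtomicNoRet mixin links the glc = 0 (no return) and glc = 1
-// (returning) forms; the getAtomicRetOp/getAtomicNoRetOp InstrMappings at
-// the end of this file key on it so target code can switch between them.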
-
-class MIMG_Mask <string op, int channels> {
-  string Op = op;
-  int Channels = channels;
-}
-
-class MIMG_NoSampler_Helper <bits<7> op, string asm,
-                             RegisterClass dst_rc,
-                             RegisterClass src_rc> : MIMG <
-  op,
-  (outs dst_rc:$vdata),
-  (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
-       i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
-       SReg_256:$srsrc),
-  asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
-     #" $tfe, $lwe, $slc, $vaddr, $srsrc",
-  []> {
-  let ssamp = 0;
-  let mayLoad = 1;
-  let mayStore = 0;
-  let hasPostISelHook = 1;
-}
-
-multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
-                                      RegisterClass dst_rc,
-                                      int channels> {
-  def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VGPR_32>,
-            MIMG_Mask<asm#"_V1", channels>;
-  def _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>,
-            MIMG_Mask<asm#"_V2", channels>;
-  def _V4 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_128>,
-            MIMG_Mask<asm#"_V4", channels>;
-}
-
-multiclass MIMG_NoSampler <bits<7> op, string asm> {
-  defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VGPR_32, 1>;
-  defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 2>;
-  defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 3>;
-  defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
-}
-
-class MIMG_Sampler_Helper <bits<7> op, string asm,
-                           RegisterClass dst_rc,
-                           RegisterClass src_rc, int wqm> : MIMG <
-  op,
-  (outs dst_rc:$vdata),
-  (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
-       i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
-       SReg_256:$srsrc, SReg_128:$ssamp),
-  asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
-     #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
-  []> {
-  let mayLoad = 1;
-  let mayStore = 0;
-  let hasPostISelHook = 1;
-  let WQM = wqm;
-}
-
-multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
-                                    RegisterClass dst_rc,
-                                    int channels, int wqm> {
-  def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VGPR_32, wqm>,
-            MIMG_Mask<asm#"_V1", channels>;
-  def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64, wqm>,
-            MIMG_Mask<asm#"_V2", channels>;
-  def _V4 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_128, wqm>,
-            MIMG_Mask<asm#"_V4", channels>;
-  def _V8 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_256, wqm>,
-            MIMG_Mask<asm#"_V8", channels>;
-  def _V16 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_512, wqm>,
-            MIMG_Mask<asm#"_V16", channels>;
-}
-
-multiclass MIMG_Sampler <bits<7> op, string asm> {
-  defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 0>;
-  defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 0>;
-  defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 0>;
-  defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 0>;
-}
-
-multiclass MIMG_Sampler_WQM <bits<7> op, string asm> {
-  defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 1>;
-  defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 1>;
-  defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 1>;
-  defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 1>;
-}
-
-class MIMG_Gather_Helper <bits<7> op, string asm,
-                          RegisterClass dst_rc,
-                          RegisterClass src_rc, int wqm> : MIMG <
-  op,
-  (outs dst_rc:$vdata),
-  (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
-       i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
-       SReg_256:$srsrc, SReg_128:$ssamp),
-  asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
-     #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
-  []> {
-  let mayLoad = 1;
-  let mayStore = 0;
-
-  // DMASK was repurposed for GATHER4. 4 components are always
-  // returned and DMASK works like a swizzle - it selects
-  // the component to fetch. The only useful DMASK values are
-  // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
-  // (red,red,red,red) etc.) The ISA document doesn't mention
-  // this.
-  // Therefore, disable all code which updates DMASK by setting these two:
-  let MIMG = 0;
-  let hasPostISelHook = 0;
-  let WQM = wqm;
-}
-
-multiclass MIMG_Gather_Src_Helper <bits<7> op, string asm,
-                                    RegisterClass dst_rc,
-                                    int channels, int wqm> {
-  def _V1 : MIMG_Gather_Helper <op, asm, dst_rc, VGPR_32, wqm>,
-            MIMG_Mask<asm#"_V1", channels>;
-  def _V2 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_64, wqm>,
-            MIMG_Mask<asm#"_V2", channels>;
-  def _V4 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_128, wqm>,
-            MIMG_Mask<asm#"_V4", channels>;
-  def _V8 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_256, wqm>,
-            MIMG_Mask<asm#"_V8", channels>;
-  def _V16 : MIMG_Gather_Helper <op, asm, dst_rc, VReg_512, wqm>,
-            MIMG_Mask<asm#"_V16", channels>;
-}
-
-multiclass MIMG_Gather <bits<7> op, string asm> {
-  defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 0>;
-  defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 0>;
-  defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 0>;
-  defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 0>;
-}
-
-multiclass MIMG_Gather_WQM <bits<7> op, string asm> {
-  defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 1>;
-  defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 1>;
-  defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 1>;
-  defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 1>;
-}
-
-//===----------------------------------------------------------------------===//
-// Vector instruction mappings
-//===----------------------------------------------------------------------===//
-
-// Maps an opcode in e32 form to its e64 equivalent
-def getVOPe64 : InstrMapping {
-  let FilterClass = "VOP";
-  let RowFields = ["OpName"];
-  let ColFields = ["Size"];
-  let KeyCol = ["4"];
-  let ValueCols = [["8"]];
-}
-
-// Maps an opcode in e64 form to its e32 equivalent
-def getVOPe32 : InstrMapping {
-  let FilterClass = "VOP";
-  let RowFields = ["OpName"];
-  let ColFields = ["Size"];
-  let KeyCol = ["8"];
-  let ValueCols = [["4"]];
-}
-
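-// Maps a 4-channel MIMG opcode to its 1-, 2- or 3-channel variant, chosen
-// once the dmask is known (the MIMG_Mask rows above carry the Op/Channels
-// keys).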
-def getMaskedMIMGOp : InstrMapping {
-  let FilterClass = "MIMG_Mask";
-  let RowFields = ["Op"];
-  let ColFields = ["Channels"];
-  let KeyCol = ["4"];
-  let ValueCols = [["1"], ["2"], ["3"]];
-}
-
-// Maps a commuted opcode to its original version
-def getCommuteOrig : InstrMapping {
-  let FilterClass = "VOP2_REV";
-  let RowFields = ["RevOp"];
-  let ColFields = ["IsOrig"];
-  let KeyCol = ["0"];
-  let ValueCols = [["1"]];
-}
-
-// Maps an original opcode to its commuted version
-def getCommuteRev : InstrMapping {
-  let FilterClass = "VOP2_REV";
-  let RowFields = ["RevOp"];
-  let ColFields = ["IsOrig"];
-  let KeyCol = ["1"];
-  let ValueCols = [["0"]];
-}
-
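-// Maps a commuted compare opcode to its original version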
-def getCommuteCmpOrig : InstrMapping {
-  let FilterClass = "VOP2_REV";
-  let RowFields = ["RevOp"];
-  let ColFields = ["IsOrig"];
-  let KeyCol = ["0"];
-  let ValueCols = [["1"]];
-}
-
-// Maps an original compare opcode to its commuted version
-def getCommuteCmpRev : InstrMapping {
-  let FilterClass = "VOP2_REV";
-  let RowFields = ["RevOp"];
-  let ColFields = ["IsOrig"];
-  let KeyCol = ["1"];
-  let ValueCols = [["0"]];
-}
-
-
-def getMCOpcodeGen : InstrMapping {
-  let FilterClass = "SIMCInstr";
-  let RowFields = ["PseudoInstr"];
-  let ColFields = ["Subtarget"];
-  let KeyCol = [!cast<string>(SISubtarget.NONE)];
-  let ValueCols = [[!cast<string>(SISubtarget.SI)], [!cast<string>(SISubtarget.VI)]];
-}
-
-def getAddr64Inst : InstrMapping {
-  let FilterClass = "MUBUFAddr64Table";
-  let RowFields = ["OpName"];
-  let ColFields = ["IsAddr64"];
-  let KeyCol = ["0"];
-  let ValueCols = [["1"]];
-}
-
-// Maps an atomic opcode to its version with a return value.
-def getAtomicRetOp : InstrMapping {
-  let FilterClass = "AtomicNoRet";
-  let RowFields = ["NoRetOp"];
-  let ColFields = ["IsRet"];
-  let KeyCol = ["0"];
-  let ValueCols = [["1"]];
-}
-
-// Maps an atomic opcode to its returnless version.
-def getAtomicNoRetOp : InstrMapping {
-  let FilterClass = "AtomicNoRet";
-  let RowFields = ["NoRetOp"];
-  let ColFields = ["IsRet"];
-  let KeyCol = ["1"];
-  let ValueCols = [["0"]];
-}
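-
-// TableGen lowers each InstrMapping above into a lookup function in the
-// AMDGPU namespace, emitted under GET_INSTRMAP_INFO. A minimal C++ usage
-// sketch (illustrative only; assumes the generated functions return -1
-// when no mapping exists):
-//
-//   int E32Opc = AMDGPU::getVOPe32(MI.getOpcode());
-//   if (E32Opc != -1)
-//     MI.setDesc(TII->get(E32Opc)); // fold the e64 form back to e32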
-
-include "SIInstructions.td"
-include "CIInstructions.td"
-include "VIInstructions.td"

Removed: llvm/trunk/lib/Target/R600/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/SIInstructions.td (removed)
@@ -1,3327 +0,0 @@
-//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This file was originally auto-generated from a GPU register header file and
-// all the instruction definitions were originally commented out.  Instructions
-// that are not yet supported remain commented out.
-//===----------------------------------------------------------------------===//
-
-class InterpSlots {
-  int P0 = 2;
-  int P10 = 0;
-  int P20 = 1;
-}
-def INTERP : InterpSlots;
-
-def InterpSlot : Operand<i32> {
-  let PrintMethod = "printInterpSlot";
-}
-
-def SendMsgImm : Operand<i32> {
-  let PrintMethod = "printSendMsg";
-}
-
-def isGCN : Predicate<"Subtarget->getGeneration() "
-                      ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">,
-            AssemblerPredicate<"FeatureGCN">;
-def isSI : Predicate<"Subtarget->getGeneration() "
-                      "== AMDGPUSubtarget::SOUTHERN_ISLANDS">;
-
-def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
-def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
-
-def SWaitMatchClass : AsmOperandClass {
-  let Name = "SWaitCnt";
-  let RenderMethod = "addImmOperands";
-  let ParserMethod = "parseSWaitCntOps";
-}
-
-def WAIT_FLAG : InstFlag<"printWaitFlag"> {
-  let ParserMatchClass = SWaitMatchClass;
-}
-
-let SubtargetPredicate = isGCN in {
-
-//===----------------------------------------------------------------------===//
-// EXP Instructions
-//===----------------------------------------------------------------------===//
-
-defm EXP : EXP_m;
-
-//===----------------------------------------------------------------------===//
-// SMRD Instructions
-//===----------------------------------------------------------------------===//
-
-let mayLoad = 1 in {
-
-// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
-// SMRD instructions, because the SGPR_32 register class does not include M0
-// and writing to M0 from an SMRD instruction will hang the GPU.
-defm S_LOAD_DWORD : SMRD_Helper <0x00, "s_load_dword", SReg_64, SGPR_32>;
-defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "s_load_dwordx2", SReg_64, SReg_64>;
-defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "s_load_dwordx4", SReg_64, SReg_128>;
-defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "s_load_dwordx8", SReg_64, SReg_256>;
-defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "s_load_dwordx16", SReg_64, SReg_512>;
-
-defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
-  0x08, "s_buffer_load_dword", SReg_128, SGPR_32
->;
-
-defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
-  0x09, "s_buffer_load_dwordx2", SReg_128, SReg_64
->;
-
-defm S_BUFFER_LOAD_DWORDX4 : SMRD_Helper <
-  0x0a, "s_buffer_load_dwordx4", SReg_128, SReg_128
->;
-
-defm S_BUFFER_LOAD_DWORDX8 : SMRD_Helper <
-  0x0b, "s_buffer_load_dwordx8", SReg_128, SReg_256
->;
-
-defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
-  0x0c, "s_buffer_load_dwordx16", SReg_128, SReg_512
->;
-
-} // mayLoad = 1
-
-//def S_MEMTIME : SMRD_ <0x0000001e, "s_memtime", []>;
-//def S_DCACHE_INV : SMRD_ <0x0000001f, "s_dcache_inv", []>;
-
-//===----------------------------------------------------------------------===//
-// SOP1 Instructions
-//===----------------------------------------------------------------------===//
-
-let isMoveImm = 1 in {
-  let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
-    defm S_MOV_B32 : SOP1_32 <sop1<0x03, 0x00>, "s_mov_b32", []>;
-    defm S_MOV_B64 : SOP1_64 <sop1<0x04, 0x01>, "s_mov_b64", []>;
-  } // End isReMaterializable = 1, isAsCheapAsAMove = 1
-
-  let Uses = [SCC] in {
-    defm S_CMOV_B32 : SOP1_32 <sop1<0x05, 0x02>, "s_cmov_b32", []>;
-    defm S_CMOV_B64 : SOP1_64 <sop1<0x06, 0x03>, "s_cmov_b64", []>;
-  } // End Uses = [SCC]
-} // End isMoveImm = 1
-
-let Defs = [SCC] in {
-  defm S_NOT_B32 : SOP1_32 <sop1<0x07, 0x04>, "s_not_b32",
-    [(set i32:$dst, (not i32:$src0))]
-  >;
-
-  defm S_NOT_B64 : SOP1_64 <sop1<0x08, 0x05>, "s_not_b64",
-    [(set i64:$dst, (not i64:$src0))]
-  >;
-  defm S_WQM_B32 : SOP1_32 <sop1<0x09, 0x06>, "s_wqm_b32", []>;
-  defm S_WQM_B64 : SOP1_64 <sop1<0x0a, 0x07>, "s_wqm_b64", []>;
-} // End Defs = [SCC]
-
-
-defm S_BREV_B32 : SOP1_32 <sop1<0x0b, 0x08>, "s_brev_b32",
-  [(set i32:$dst, (AMDGPUbrev i32:$src0))]
->;
-defm S_BREV_B64 : SOP1_64 <sop1<0x0c, 0x09>, "s_brev_b64", []>;
-
-let Defs = [SCC] in {
-  defm S_BCNT0_I32_B32 : SOP1_32 <sop1<0x0d, 0x0a>, "s_bcnt0_i32_b32", []>;
-  defm S_BCNT0_I32_B64 : SOP1_32_64 <sop1<0x0e, 0x0b>, "s_bcnt0_i32_b64", []>;
-  defm S_BCNT1_I32_B32 : SOP1_32 <sop1<0x0f, 0x0c>, "s_bcnt1_i32_b32",
-    [(set i32:$dst, (ctpop i32:$src0))]
-  >;
-  defm S_BCNT1_I32_B64 : SOP1_32_64 <sop1<0x10, 0x0d>, "s_bcnt1_i32_b64", []>;
-} // End Defs = [SCC]
-
-defm S_FF0_I32_B32 : SOP1_32 <sop1<0x11, 0x0e>, "s_ff0_i32_b32", []>;
-defm S_FF0_I32_B64 : SOP1_32_64 <sop1<0x12, 0x0f>, "s_ff0_i32_b64", []>;
-defm S_FF1_I32_B32 : SOP1_32 <sop1<0x13, 0x10>, "s_ff1_i32_b32",
-  [(set i32:$dst, (cttz_zero_undef i32:$src0))]
->;
-defm S_FF1_I32_B64 : SOP1_32_64 <sop1<0x14, 0x11>, "s_ff1_i32_b64", []>;
-
-defm S_FLBIT_I32_B32 : SOP1_32 <sop1<0x15, 0x12>, "s_flbit_i32_b32",
-  [(set i32:$dst, (ctlz_zero_undef i32:$src0))]
->;
-
-defm S_FLBIT_I32_B64 : SOP1_32_64 <sop1<0x16, 0x13>, "s_flbit_i32_b64", []>;
-defm S_FLBIT_I32 : SOP1_32 <sop1<0x17, 0x14>, "s_flbit_i32",
-  [(set i32:$dst, (int_AMDGPU_flbit_i32 i32:$src0))]
->;
-defm S_FLBIT_I32_I64 : SOP1_32_64 <sop1<0x18, 0x15>, "s_flbit_i32_i64", []>;
-defm S_SEXT_I32_I8 : SOP1_32 <sop1<0x19, 0x16>, "s_sext_i32_i8",
-  [(set i32:$dst, (sext_inreg i32:$src0, i8))]
->;
-defm S_SEXT_I32_I16 : SOP1_32 <sop1<0x1a, 0x17>, "s_sext_i32_i16",
-  [(set i32:$dst, (sext_inreg i32:$src0, i16))]
->;
-
-defm S_BITSET0_B32 : SOP1_32 <sop1<0x1b, 0x18>, "s_bitset0_b32", []>;
-defm S_BITSET0_B64 : SOP1_64 <sop1<0x1c, 0x19>, "s_bitset0_b64", []>;
-defm S_BITSET1_B32 : SOP1_32 <sop1<0x1d, 0x1a>, "s_bitset1_b32", []>;
-defm S_BITSET1_B64 : SOP1_64 <sop1<0x1e, 0x1b>, "s_bitset1_b64", []>;
-defm S_GETPC_B64 : SOP1_64_0 <sop1<0x1f, 0x1c>, "s_getpc_b64", []>;
-defm S_SETPC_B64 : SOP1_64 <sop1<0x20, 0x1d>, "s_setpc_b64", []>;
-defm S_SWAPPC_B64 : SOP1_64 <sop1<0x21, 0x1e>, "s_swappc_b64", []>;
-defm S_RFE_B64 : SOP1_64 <sop1<0x22, 0x1f>, "s_rfe_b64", []>;
-
-let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] in {
-
-defm S_AND_SAVEEXEC_B64 : SOP1_64 <sop1<0x24, 0x20>, "s_and_saveexec_b64", []>;
-defm S_OR_SAVEEXEC_B64 : SOP1_64 <sop1<0x25, 0x21>, "s_or_saveexec_b64", []>;
-defm S_XOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x26, 0x22>, "s_xor_saveexec_b64", []>;
-defm S_ANDN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x27, 0x23>, "s_andn2_saveexec_b64", []>;
-defm S_ORN2_SAVEEXEC_B64 : SOP1_64 <sop1<0x28, 0x24>, "s_orn2_saveexec_b64", []>;
-defm S_NAND_SAVEEXEC_B64 : SOP1_64 <sop1<0x29, 0x25>, "s_nand_saveexec_b64", []>;
-defm S_NOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2a, 0x26>, "s_nor_saveexec_b64", []>;
-defm S_XNOR_SAVEEXEC_B64 : SOP1_64 <sop1<0x2b, 0x27>, "s_xnor_saveexec_b64", []>;
-
-} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]
-
-defm S_QUADMASK_B32 : SOP1_32 <sop1<0x2c, 0x28>, "s_quadmask_b32", []>;
-defm S_QUADMASK_B64 : SOP1_64 <sop1<0x2d, 0x29>, "s_quadmask_b64", []>;
-defm S_MOVRELS_B32 : SOP1_32 <sop1<0x2e, 0x2a>, "s_movrels_b32", []>;
-defm S_MOVRELS_B64 : SOP1_64 <sop1<0x2f, 0x2b>, "s_movrels_b64", []>;
-defm S_MOVRELD_B32 : SOP1_32 <sop1<0x30, 0x2c>, "s_movreld_b32", []>;
-defm S_MOVRELD_B64 : SOP1_64 <sop1<0x31, 0x2d>, "s_movreld_b64", []>;
-defm S_CBRANCH_JOIN : SOP1_1 <sop1<0x32, 0x2e>, "s_cbranch_join", []>;
-defm S_MOV_REGRD_B32 : SOP1_32 <sop1<0x33, 0x2f>, "s_mov_regrd_b32", []>;
-let Defs = [SCC] in {
-  defm S_ABS_I32 : SOP1_32 <sop1<0x34, 0x30>, "s_abs_i32", []>;
-} // End Defs = [SCC]
-defm S_MOV_FED_B32 : SOP1_32 <sop1<0x35, 0x31>, "s_mov_fed_b32", []>;
-
-//===----------------------------------------------------------------------===//
-// SOP2 Instructions
-//===----------------------------------------------------------------------===//
-
-let Defs = [SCC] in { // Carry out goes to SCC
-let isCommutable = 1 in {
-defm S_ADD_U32 : SOP2_32 <sop2<0x00>, "s_add_u32", []>;
-defm S_ADD_I32 : SOP2_32 <sop2<0x02>, "s_add_i32",
-  [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
->;
-} // End isCommutable = 1
-
-defm S_SUB_U32 : SOP2_32 <sop2<0x01>, "s_sub_u32", []>;
-defm S_SUB_I32 : SOP2_32 <sop2<0x03>, "s_sub_i32",
-  [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
->;
-
-let Uses = [SCC] in { // Carry in comes from SCC
-let isCommutable = 1 in {
-defm S_ADDC_U32 : SOP2_32 <sop2<0x04>, "s_addc_u32",
-  [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
-} // End isCommutable = 1
-
-defm S_SUBB_U32 : SOP2_32 <sop2<0x05>, "s_subb_u32",
-  [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
-} // End Uses = [SCC]
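-
-// Together these give the standard two-instruction 64-bit add through the
-// SCC carry bit; an illustrative expansion (not a def from this file):
-//   s_add_u32  s0, s2, s4  // low half, carry-out written to SCC
-//   s_addc_u32 s1, s3, s5  // high half, consumes SCC as carry-in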
-
-defm S_MIN_I32 : SOP2_32 <sop2<0x06>, "s_min_i32",
-  [(set i32:$dst, (smin i32:$src0, i32:$src1))]
->;
-defm S_MIN_U32 : SOP2_32 <sop2<0x07>, "s_min_u32",
-  [(set i32:$dst, (umin i32:$src0, i32:$src1))]
->;
-defm S_MAX_I32 : SOP2_32 <sop2<0x08>, "s_max_i32",
-  [(set i32:$dst, (smax i32:$src0, i32:$src1))]
->;
-defm S_MAX_U32 : SOP2_32 <sop2<0x09>, "s_max_u32",
-  [(set i32:$dst, (umax i32:$src0, i32:$src1))]
->;
-} // End Defs = [SCC]
-
-
-let Uses = [SCC] in {
-  defm S_CSELECT_B32 : SOP2_32 <sop2<0x0a>, "s_cselect_b32", []>;
-  defm S_CSELECT_B64 : SOP2_64 <sop2<0x0b>, "s_cselect_b64", []>;
-} // End Uses = [SCC]
-
-let Defs = [SCC] in {
-defm S_AND_B32 : SOP2_32 <sop2<0x0e, 0x0c>, "s_and_b32",
-  [(set i32:$dst, (and i32:$src0, i32:$src1))]
->;
-
-defm S_AND_B64 : SOP2_64 <sop2<0x0f, 0x0d>, "s_and_b64",
-  [(set i64:$dst, (and i64:$src0, i64:$src1))]
->;
-
-defm S_OR_B32 : SOP2_32 <sop2<0x10, 0x0e>, "s_or_b32",
-  [(set i32:$dst, (or i32:$src0, i32:$src1))]
->;
-
-defm S_OR_B64 : SOP2_64 <sop2<0x11, 0x0f>, "s_or_b64",
-  [(set i64:$dst, (or i64:$src0, i64:$src1))]
->;
-
-defm S_XOR_B32 : SOP2_32 <sop2<0x12, 0x10>, "s_xor_b32",
-  [(set i32:$dst, (xor i32:$src0, i32:$src1))]
->;
-
-defm S_XOR_B64 : SOP2_64 <sop2<0x13, 0x11>, "s_xor_b64",
-  [(set i64:$dst, (xor i64:$src0, i64:$src1))]
->;
-defm S_ANDN2_B32 : SOP2_32 <sop2<0x14, 0x12>, "s_andn2_b32", []>;
-defm S_ANDN2_B64 : SOP2_64 <sop2<0x15, 0x13>, "s_andn2_b64", []>;
-defm S_ORN2_B32 : SOP2_32 <sop2<0x16, 0x14>, "s_orn2_b32", []>;
-defm S_ORN2_B64 : SOP2_64 <sop2<0x17, 0x15>, "s_orn2_b64", []>;
-defm S_NAND_B32 : SOP2_32 <sop2<0x18, 0x16>, "s_nand_b32", []>;
-defm S_NAND_B64 : SOP2_64 <sop2<0x19, 0x17>, "s_nand_b64", []>;
-defm S_NOR_B32 : SOP2_32 <sop2<0x1a, 0x18>, "s_nor_b32", []>;
-defm S_NOR_B64 : SOP2_64 <sop2<0x1b, 0x19>, "s_nor_b64", []>;
-defm S_XNOR_B32 : SOP2_32 <sop2<0x1c, 0x1a>, "s_xnor_b32", []>;
-defm S_XNOR_B64 : SOP2_64 <sop2<0x1d, 0x1b>, "s_xnor_b64", []>;
-} // End Defs = [SCC]
-
-// Use added complexity so these patterns are preferred to the VALU patterns.
-let AddedComplexity = 1 in {
-let Defs = [SCC] in {
-
-defm S_LSHL_B32 : SOP2_32 <sop2<0x1e, 0x1c>, "s_lshl_b32",
-  [(set i32:$dst, (shl i32:$src0, i32:$src1))]
->;
-defm S_LSHL_B64 : SOP2_64_32 <sop2<0x1f, 0x1d>, "s_lshl_b64",
-  [(set i64:$dst, (shl i64:$src0, i32:$src1))]
->;
-defm S_LSHR_B32 : SOP2_32 <sop2<0x20, 0x1e>, "s_lshr_b32",
-  [(set i32:$dst, (srl i32:$src0, i32:$src1))]
->;
-defm S_LSHR_B64 : SOP2_64_32 <sop2<0x21, 0x1f>, "s_lshr_b64",
-  [(set i64:$dst, (srl i64:$src0, i32:$src1))]
->;
-defm S_ASHR_I32 : SOP2_32 <sop2<0x22, 0x20>, "s_ashr_i32",
-  [(set i32:$dst, (sra i32:$src0, i32:$src1))]
->;
-defm S_ASHR_I64 : SOP2_64_32 <sop2<0x23, 0x21>, "s_ashr_i64",
-  [(set i64:$dst, (sra i64:$src0, i32:$src1))]
->;
-} // End Defs = [SCC]
-
-defm S_BFM_B32 : SOP2_32 <sop2<0x24, 0x22>, "s_bfm_b32",
-  [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))]>;
-defm S_BFM_B64 : SOP2_64 <sop2<0x25, 0x23>, "s_bfm_b64", []>;
-defm S_MUL_I32 : SOP2_32 <sop2<0x26, 0x24>, "s_mul_i32",
-  [(set i32:$dst, (mul i32:$src0, i32:$src1))]
->;
-
-} // End AddedComplexity = 1
-
-let Defs = [SCC] in {
-defm S_BFE_U32 : SOP2_32 <sop2<0x27, 0x25>, "s_bfe_u32", []>;
-defm S_BFE_I32 : SOP2_32 <sop2<0x28, 0x26>, "s_bfe_i32", []>;
-defm S_BFE_U64 : SOP2_64 <sop2<0x29, 0x27>, "s_bfe_u64", []>;
-defm S_BFE_I64 : SOP2_64_32 <sop2<0x2a, 0x28>, "s_bfe_i64", []>;
-} // End Defs = [SCC]
-
-let sdst = 0 in {
-defm S_CBRANCH_G_FORK : SOP2_m <
-  sop2<0x2b, 0x29>, "s_cbranch_g_fork", (outs),
-  (ins SReg_64:$src0, SReg_64:$src1), "s_cbranch_g_fork $src0, $src1", []
->;
-}
-
-let Defs = [SCC] in {
-defm S_ABSDIFF_I32 : SOP2_32 <sop2<0x2c, 0x2a>, "s_absdiff_i32", []>;
-} // End Defs = [SCC]
-
-//===----------------------------------------------------------------------===//
-// SOPC Instructions
-//===----------------------------------------------------------------------===//
-
-def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "s_cmp_eq_i32">;
-def S_CMP_LG_I32 : SOPC_32 <0x00000001, "s_cmp_lg_i32">;
-def S_CMP_GT_I32 : SOPC_32 <0x00000002, "s_cmp_gt_i32">;
-def S_CMP_GE_I32 : SOPC_32 <0x00000003, "s_cmp_ge_i32">;
-def S_CMP_LT_I32 : SOPC_32 <0x00000004, "s_cmp_lt_i32">;
-def S_CMP_LE_I32 : SOPC_32 <0x00000005, "s_cmp_le_i32">;
-def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "s_cmp_eq_u32">;
-def S_CMP_LG_U32 : SOPC_32 <0x00000007, "s_cmp_lg_u32">;
-def S_CMP_GT_U32 : SOPC_32 <0x00000008, "s_cmp_gt_u32">;
-def S_CMP_GE_U32 : SOPC_32 <0x00000009, "s_cmp_ge_u32">;
-def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "s_cmp_lt_u32">;
-def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "s_cmp_le_u32">;
-////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "s_bitcmp0_b32", []>;
-////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "s_bitcmp1_b32", []>;
-////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "s_bitcmp0_b64", []>;
-////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "s_bitcmp1_b64", []>;
-//def S_SETVSKIP : SOPC_ <0x00000010, "s_setvskip", []>;
-
-//===----------------------------------------------------------------------===//
-// SOPK Instructions
-//===----------------------------------------------------------------------===//
-
-let isReMaterializable = 1 in {
-defm S_MOVK_I32 : SOPK_32 <sopk<0x00>, "s_movk_i32", []>;
-} // End isReMaterializable = 1
-let Uses = [SCC] in {
-  defm S_CMOVK_I32 : SOPK_32 <sopk<0x02, 0x01>, "s_cmovk_i32", []>;
-}
-
-let isCompare = 1 in {
-
-/*
-This instruction is disabled for now until we can figure out how to teach
-the instruction selector to choose correctly between the S_CMP* and V_CMP*
-instructions.
-
-When this instruction is enabled, the code generator sometimes produces this
-invalid sequence (SCC is a single scalar condition bit, so the COPY into the
-64-bit VCC lane mask cannot be selected):
-
-SCC = S_CMPK_EQ_I32 SGPR0, imm
-VCC = COPY SCC
-VGPR0 = V_CNDMASK VCC, VGPR0, VGPR1
-
-defm S_CMPK_EQ_I32 : SOPK_SCC <sopk<0x03, 0x02>, "s_cmpk_eq_i32",
-  [(set i1:$dst, (setcc i32:$src0, imm:$src1, SETEQ))]
->;
-*/
-
-defm S_CMPK_EQ_I32 : SOPK_SCC <sopk<0x03, 0x02>, "s_cmpk_eq_i32", []>;
-defm S_CMPK_LG_I32 : SOPK_SCC <sopk<0x04, 0x03>, "s_cmpk_lg_i32", []>;
-defm S_CMPK_GT_I32 : SOPK_SCC <sopk<0x05, 0x04>, "s_cmpk_gt_i32", []>;
-defm S_CMPK_GE_I32 : SOPK_SCC <sopk<0x06, 0x05>, "s_cmpk_ge_i32", []>;
-defm S_CMPK_LT_I32 : SOPK_SCC <sopk<0x07, 0x06>, "s_cmpk_lt_i32", []>;
-defm S_CMPK_LE_I32 : SOPK_SCC <sopk<0x08, 0x07>, "s_cmpk_le_i32", []>;
-defm S_CMPK_EQ_U32 : SOPK_SCC <sopk<0x09, 0x08>, "s_cmpk_eq_u32", []>;
-defm S_CMPK_LG_U32 : SOPK_SCC <sopk<0x0a, 0x09>, "s_cmpk_lg_u32", []>;
-defm S_CMPK_GT_U32 : SOPK_SCC <sopk<0x0b, 0x0a>, "s_cmpk_gt_u32", []>;
-defm S_CMPK_GE_U32 : SOPK_SCC <sopk<0x0c, 0x0b>, "s_cmpk_ge_u32", []>;
-defm S_CMPK_LT_U32 : SOPK_SCC <sopk<0x0d, 0x0c>, "s_cmpk_lt_u32", []>;
-defm S_CMPK_LE_U32 : SOPK_SCC <sopk<0x0e, 0x0d>, "s_cmpk_le_u32", []>;
-} // End isCompare = 1
-
-let Defs = [SCC], isCommutable = 1, DisableEncoding = "$src0",
-    Constraints = "$sdst = $src0" in {
-  defm S_ADDK_I32 : SOPK_32TIE <sopk<0x0f, 0x0e>, "s_addk_i32", []>;
-  defm S_MULK_I32 : SOPK_32TIE <sopk<0x10, 0x0f>, "s_mulk_i32", []>;
-}
-
-defm S_CBRANCH_I_FORK : SOPK_m <
-  sopk<0x11, 0x10>, "s_cbranch_i_fork", (outs),
-  (ins SReg_64:$sdst, u16imm:$simm16), " $sdst, $simm16"
->;
-defm S_GETREG_B32 : SOPK_32 <sopk<0x12, 0x11>, "s_getreg_b32", []>;
-defm S_SETREG_B32 : SOPK_m <
-  sopk<0x13, 0x12>, "s_setreg_b32", (outs),
-  (ins SReg_32:$sdst, u16imm:$simm16), " $sdst, $simm16"
->;
-// FIXME: Not on SI?
-//defm S_GETREG_REGRD_B32 : SOPK_32 <sopk<0x14, 0x13>, "s_getreg_regrd_b32", []>;
-defm S_SETREG_IMM32_B32 : SOPK_IMM32 <
-  sopk<0x15, 0x14>, "s_setreg_imm32_b32", (outs),
-  (ins i32imm:$imm, u16imm:$simm16), " $imm, $simm16"
->;
-
-//===----------------------------------------------------------------------===//
-// SOPP Instructions
-//===----------------------------------------------------------------------===//
-
-def S_NOP : SOPP <0x00000000, (ins i16imm:$simm16), "s_nop $simm16">;
-
-let isTerminator = 1 in {
-
-def S_ENDPGM : SOPP <0x00000001, (ins), "s_endpgm",
-  [(IL_retflag)]> {
-  let simm16 = 0;
-  let isBarrier = 1;
-  let hasCtrlDep = 1;
-}
-
-let isBranch = 1 in {
-def S_BRANCH : SOPP <
-  0x00000002, (ins sopp_brtarget:$simm16), "s_branch $simm16",
-  [(br bb:$simm16)]> {
-  let isBarrier = 1;
-}
-
-let DisableEncoding = "$scc" in {
-def S_CBRANCH_SCC0 : SOPP <
-  0x00000004, (ins sopp_brtarget:$simm16, SCCReg:$scc),
-  "s_cbranch_scc0 $simm16"
->;
-def S_CBRANCH_SCC1 : SOPP <
-  0x00000005, (ins sopp_brtarget:$simm16, SCCReg:$scc),
-  "s_cbranch_scc1 $simm16"
->;
-} // End DisableEncoding = "$scc"
-
-def S_CBRANCH_VCCZ : SOPP <
-  0x00000006, (ins sopp_brtarget:$simm16, VCCReg:$vcc),
-  "s_cbranch_vccz $simm16"
->;
-def S_CBRANCH_VCCNZ : SOPP <
-  0x00000007, (ins sopp_brtarget:$simm16, VCCReg:$vcc),
-  "s_cbranch_vccnz $simm16"
->;
-
-let DisableEncoding = "$exec" in {
-def S_CBRANCH_EXECZ : SOPP <
-  0x00000008, (ins sopp_brtarget:$simm16, EXECReg:$exec),
-  "s_cbranch_execz $simm16"
->;
-def S_CBRANCH_EXECNZ : SOPP <
-  0x00000009, (ins sopp_brtarget:$simm16, EXECReg:$exec),
-  "s_cbranch_execnz $simm16"
->;
-} // End DisableEncoding = "$exec"
-
-
-} // End isBranch = 1
-} // End isTerminator = 1
-
-let hasSideEffects = 1 in {
-def S_BARRIER : SOPP <0x0000000a, (ins), "s_barrier",
-  [(int_AMDGPU_barrier_local)]
-> {
-  let simm16 = 0;
-  let isBarrier = 1;
-  let hasCtrlDep = 1;
-  let mayLoad = 1;
-  let mayStore = 1;
-}
-
-def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "s_waitcnt $simm16">;
-def S_SETHALT : SOPP <0x0000000d, (ins i16imm:$simm16), "s_sethalt $simm16">;
-def S_SLEEP : SOPP <0x0000000e, (ins i16imm:$simm16), "s_sleep $simm16">;
-def S_SETPRIO : SOPP <0x0000000f, (ins i16imm:$simm16), "s_setprio $simm16">;
-
-let Uses = [EXEC, M0] in {
-  def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16), "s_sendmsg $simm16",
-      [(AMDGPUsendmsg (i32 imm:$simm16))]
-  >;
-} // End Uses = [EXEC, M0]
-
-def S_SENDMSGHALT : SOPP <0x00000011, (ins i16imm:$simm16), "s_sendmsghalt $simm16">;
-def S_TRAP : SOPP <0x00000012, (ins i16imm:$simm16), "s_trap $simm16">;
-def S_ICACHE_INV : SOPP <0x00000013, (ins), "s_icache_inv"> {
-  let simm16 = 0;
-}
-def S_INCPERFLEVEL : SOPP <0x00000014, (ins i16imm:$simm16), "s_incperflevel $simm16">;
-def S_DECPERFLEVEL : SOPP <0x00000015, (ins i16imm:$simm16), "s_decperflevel $simm16">;
-def S_TTRACEDATA : SOPP <0x00000016, (ins), "s_ttracedata"> {
-  let simm16 = 0;
-}
-} // End hasSideEffects = 1
-
-//===----------------------------------------------------------------------===//
-// VOPC Instructions
-//===----------------------------------------------------------------------===//
-
-let isCompare = 1, isCommutable = 1 in {
-
-defm V_CMP_F_F32 : VOPC_F32 <vopc<0x0, 0x40>, "v_cmp_f_f32">;
-defm V_CMP_LT_F32 : VOPC_F32 <vopc<0x1, 0x41>, "v_cmp_lt_f32", COND_OLT, "v_cmp_gt_f32">;
-defm V_CMP_EQ_F32 : VOPC_F32 <vopc<0x2, 0x42>, "v_cmp_eq_f32", COND_OEQ>;
-defm V_CMP_LE_F32 : VOPC_F32 <vopc<0x3, 0x43>, "v_cmp_le_f32", COND_OLE, "v_cmp_ge_f32">;
-defm V_CMP_GT_F32 : VOPC_F32 <vopc<0x4, 0x44>, "v_cmp_gt_f32", COND_OGT>;
-defm V_CMP_LG_F32 : VOPC_F32 <vopc<0x5, 0x45>, "v_cmp_lg_f32", COND_ONE>;
-defm V_CMP_GE_F32 : VOPC_F32 <vopc<0x6, 0x46>, "v_cmp_ge_f32", COND_OGE>;
-defm V_CMP_O_F32 : VOPC_F32 <vopc<0x7, 0x47>, "v_cmp_o_f32", COND_O>;
-defm V_CMP_U_F32 : VOPC_F32 <vopc<0x8, 0x48>, "v_cmp_u_f32", COND_UO>;
-defm V_CMP_NGE_F32 : VOPC_F32 <vopc<0x9, 0x49>, "v_cmp_nge_f32",  COND_ULT, "v_cmp_nle_f32">;
-defm V_CMP_NLG_F32 : VOPC_F32 <vopc<0xa, 0x4a>, "v_cmp_nlg_f32", COND_UEQ>;
-defm V_CMP_NGT_F32 : VOPC_F32 <vopc<0xb, 0x4b>, "v_cmp_ngt_f32", COND_ULE, "v_cmp_nlt_f32">;
-defm V_CMP_NLE_F32 : VOPC_F32 <vopc<0xc, 0x4c>, "v_cmp_nle_f32", COND_UGT>;
-defm V_CMP_NEQ_F32 : VOPC_F32 <vopc<0xd, 0x4d>, "v_cmp_neq_f32", COND_UNE>;
-defm V_CMP_NLT_F32 : VOPC_F32 <vopc<0xe, 0x4e>, "v_cmp_nlt_f32", COND_UGE>;
-defm V_CMP_TRU_F32 : VOPC_F32 <vopc<0xf, 0x4f>, "v_cmp_tru_f32">;
-
-
-defm V_CMPX_F_F32 : VOPCX_F32 <vopc<0x10, 0x50>, "v_cmpx_f_f32">;
-defm V_CMPX_LT_F32 : VOPCX_F32 <vopc<0x11, 0x51>, "v_cmpx_lt_f32", "v_cmpx_gt_f32">;
-defm V_CMPX_EQ_F32 : VOPCX_F32 <vopc<0x12, 0x52>, "v_cmpx_eq_f32">;
-defm V_CMPX_LE_F32 : VOPCX_F32 <vopc<0x13, 0x53>, "v_cmpx_le_f32", "v_cmpx_ge_f32">;
-defm V_CMPX_GT_F32 : VOPCX_F32 <vopc<0x14, 0x54>, "v_cmpx_gt_f32">;
-defm V_CMPX_LG_F32 : VOPCX_F32 <vopc<0x15, 0x55>, "v_cmpx_lg_f32">;
-defm V_CMPX_GE_F32 : VOPCX_F32 <vopc<0x16, 0x56>, "v_cmpx_ge_f32">;
-defm V_CMPX_O_F32 : VOPCX_F32 <vopc<0x17, 0x57>, "v_cmpx_o_f32">;
-defm V_CMPX_U_F32 : VOPCX_F32 <vopc<0x18, 0x58>, "v_cmpx_u_f32">;
-defm V_CMPX_NGE_F32 : VOPCX_F32 <vopc<0x19, 0x59>, "v_cmpx_nge_f32">;
-defm V_CMPX_NLG_F32 : VOPCX_F32 <vopc<0x1a, 0x5a>, "v_cmpx_nlg_f32">;
-defm V_CMPX_NGT_F32 : VOPCX_F32 <vopc<0x1b, 0x5b>, "v_cmpx_ngt_f32">;
-defm V_CMPX_NLE_F32 : VOPCX_F32 <vopc<0x1c, 0x5c>, "v_cmpx_nle_f32">;
-defm V_CMPX_NEQ_F32 : VOPCX_F32 <vopc<0x1d, 0x5d>, "v_cmpx_neq_f32">;
-defm V_CMPX_NLT_F32 : VOPCX_F32 <vopc<0x1e, 0x5e>, "v_cmpx_nlt_f32">;
-defm V_CMPX_TRU_F32 : VOPCX_F32 <vopc<0x1f, 0x5f>, "v_cmpx_tru_f32">;
-
-
-defm V_CMP_F_F64 : VOPC_F64 <vopc<0x20, 0x60>, "v_cmp_f_f64">;
-defm V_CMP_LT_F64 : VOPC_F64 <vopc<0x21, 0x61>, "v_cmp_lt_f64", COND_OLT, "v_cmp_gt_f64">;
-defm V_CMP_EQ_F64 : VOPC_F64 <vopc<0x22, 0x62>, "v_cmp_eq_f64", COND_OEQ>;
-defm V_CMP_LE_F64 : VOPC_F64 <vopc<0x23, 0x63>, "v_cmp_le_f64", COND_OLE, "v_cmp_ge_f64">;
-defm V_CMP_GT_F64 : VOPC_F64 <vopc<0x24, 0x64>, "v_cmp_gt_f64", COND_OGT>;
-defm V_CMP_LG_F64 : VOPC_F64 <vopc<0x25, 0x65>, "v_cmp_lg_f64", COND_ONE>;
-defm V_CMP_GE_F64 : VOPC_F64 <vopc<0x26, 0x66>, "v_cmp_ge_f64", COND_OGE>;
-defm V_CMP_O_F64 : VOPC_F64 <vopc<0x27, 0x67>, "v_cmp_o_f64", COND_O>;
-defm V_CMP_U_F64 : VOPC_F64 <vopc<0x28, 0x68>, "v_cmp_u_f64", COND_UO>;
-defm V_CMP_NGE_F64 : VOPC_F64 <vopc<0x29, 0x69>, "v_cmp_nge_f64", COND_ULT, "v_cmp_nle_f64">;
-defm V_CMP_NLG_F64 : VOPC_F64 <vopc<0x2a, 0x6a>, "v_cmp_nlg_f64", COND_UEQ>;
-defm V_CMP_NGT_F64 : VOPC_F64 <vopc<0x2b, 0x6b>, "v_cmp_ngt_f64", COND_ULE, "v_cmp_nlt_f64">;
-defm V_CMP_NLE_F64 : VOPC_F64 <vopc<0x2c, 0x6c>, "v_cmp_nle_f64", COND_UGT>;
-defm V_CMP_NEQ_F64 : VOPC_F64 <vopc<0x2d, 0x6d>, "v_cmp_neq_f64", COND_UNE>;
-defm V_CMP_NLT_F64 : VOPC_F64 <vopc<0x2e, 0x6e>, "v_cmp_nlt_f64", COND_UGE>;
-defm V_CMP_TRU_F64 : VOPC_F64 <vopc<0x2f, 0x6f>, "v_cmp_tru_f64">;
-
-
-defm V_CMPX_F_F64 : VOPCX_F64 <vopc<0x30, 0x70>, "v_cmpx_f_f64">;
-defm V_CMPX_LT_F64 : VOPCX_F64 <vopc<0x31, 0x71>, "v_cmpx_lt_f64", "v_cmpx_gt_f64">;
-defm V_CMPX_EQ_F64 : VOPCX_F64 <vopc<0x32, 0x72>, "v_cmpx_eq_f64">;
-defm V_CMPX_LE_F64 : VOPCX_F64 <vopc<0x33, 0x73>, "v_cmpx_le_f64", "v_cmpx_ge_f64">;
-defm V_CMPX_GT_F64 : VOPCX_F64 <vopc<0x34, 0x74>, "v_cmpx_gt_f64">;
-defm V_CMPX_LG_F64 : VOPCX_F64 <vopc<0x35, 0x75>, "v_cmpx_lg_f64">;
-defm V_CMPX_GE_F64 : VOPCX_F64 <vopc<0x36, 0x76>, "v_cmpx_ge_f64">;
-defm V_CMPX_O_F64 : VOPCX_F64 <vopc<0x37, 0x77>, "v_cmpx_o_f64">;
-defm V_CMPX_U_F64 : VOPCX_F64 <vopc<0x38, 0x78>, "v_cmpx_u_f64">;
-defm V_CMPX_NGE_F64 : VOPCX_F64 <vopc<0x39, 0x79>, "v_cmpx_nge_f64", "v_cmpx_nle_f64">;
-defm V_CMPX_NLG_F64 : VOPCX_F64 <vopc<0x3a, 0x7a>, "v_cmpx_nlg_f64">;
-defm V_CMPX_NGT_F64 : VOPCX_F64 <vopc<0x3b, 0x7b>, "v_cmpx_ngt_f64", "v_cmpx_nlt_f64">;
-defm V_CMPX_NLE_F64 : VOPCX_F64 <vopc<0x3c, 0x7c>, "v_cmpx_nle_f64">;
-defm V_CMPX_NEQ_F64 : VOPCX_F64 <vopc<0x3d, 0x7d>, "v_cmpx_neq_f64">;
-defm V_CMPX_NLT_F64 : VOPCX_F64 <vopc<0x3e, 0x7e>, "v_cmpx_nlt_f64">;
-defm V_CMPX_TRU_F64 : VOPCX_F64 <vopc<0x3f, 0x7f>, "v_cmpx_tru_f64">;
-
-
-let SubtargetPredicate = isSICI in {
-
-defm V_CMPS_F_F32 : VOPC_F32 <vopc<0x40>, "v_cmps_f_f32">;
-defm V_CMPS_LT_F32 : VOPC_F32 <vopc<0x41>, "v_cmps_lt_f32", COND_NULL, "v_cmps_gt_f32">;
-defm V_CMPS_EQ_F32 : VOPC_F32 <vopc<0x42>, "v_cmps_eq_f32">;
-defm V_CMPS_LE_F32 : VOPC_F32 <vopc<0x43>, "v_cmps_le_f32", COND_NULL, "v_cmps_ge_f32">;
-defm V_CMPS_GT_F32 : VOPC_F32 <vopc<0x44>, "v_cmps_gt_f32">;
-defm V_CMPS_LG_F32 : VOPC_F32 <vopc<0x45>, "v_cmps_lg_f32">;
-defm V_CMPS_GE_F32 : VOPC_F32 <vopc<0x46>, "v_cmps_ge_f32">;
-defm V_CMPS_O_F32 : VOPC_F32 <vopc<0x47>, "v_cmps_o_f32">;
-defm V_CMPS_U_F32 : VOPC_F32 <vopc<0x48>, "v_cmps_u_f32">;
-defm V_CMPS_NGE_F32 : VOPC_F32 <vopc<0x49>, "v_cmps_nge_f32", COND_NULL, "v_cmps_nle_f32">;
-defm V_CMPS_NLG_F32 : VOPC_F32 <vopc<0x4a>, "v_cmps_nlg_f32">;
-defm V_CMPS_NGT_F32 : VOPC_F32 <vopc<0x4b>, "v_cmps_ngt_f32", COND_NULL, "v_cmps_nlt_f32">;
-defm V_CMPS_NLE_F32 : VOPC_F32 <vopc<0x4c>, "v_cmps_nle_f32">;
-defm V_CMPS_NEQ_F32 : VOPC_F32 <vopc<0x4d>, "v_cmps_neq_f32">;
-defm V_CMPS_NLT_F32 : VOPC_F32 <vopc<0x4e>, "v_cmps_nlt_f32">;
-defm V_CMPS_TRU_F32 : VOPC_F32 <vopc<0x4f>, "v_cmps_tru_f32">;
-
-
-defm V_CMPSX_F_F32 : VOPCX_F32 <vopc<0x50>, "v_cmpsx_f_f32">;
-defm V_CMPSX_LT_F32 : VOPCX_F32 <vopc<0x51>, "v_cmpsx_lt_f32", "v_cmpsx_gt_f32">;
-defm V_CMPSX_EQ_F32 : VOPCX_F32 <vopc<0x52>, "v_cmpsx_eq_f32">;
-defm V_CMPSX_LE_F32 : VOPCX_F32 <vopc<0x53>, "v_cmpsx_le_f32", "v_cmpsx_ge_f32">;
-defm V_CMPSX_GT_F32 : VOPCX_F32 <vopc<0x54>, "v_cmpsx_gt_f32">;
-defm V_CMPSX_LG_F32 : VOPCX_F32 <vopc<0x55>, "v_cmpsx_lg_f32">;
-defm V_CMPSX_GE_F32 : VOPCX_F32 <vopc<0x56>, "v_cmpsx_ge_f32">;
-defm V_CMPSX_O_F32 : VOPCX_F32 <vopc<0x57>, "v_cmpsx_o_f32">;
-defm V_CMPSX_U_F32 : VOPCX_F32 <vopc<0x58>, "v_cmpsx_u_f32">;
-defm V_CMPSX_NGE_F32 : VOPCX_F32 <vopc<0x59>, "v_cmpsx_nge_f32", "v_cmpsx_nle_f32">;
-defm V_CMPSX_NLG_F32 : VOPCX_F32 <vopc<0x5a>, "v_cmpsx_nlg_f32">;
-defm V_CMPSX_NGT_F32 : VOPCX_F32 <vopc<0x5b>, "v_cmpsx_ngt_f32", "v_cmpsx_nlt_f32">;
-defm V_CMPSX_NLE_F32 : VOPCX_F32 <vopc<0x5c>, "v_cmpsx_nle_f32">;
-defm V_CMPSX_NEQ_F32 : VOPCX_F32 <vopc<0x5d>, "v_cmpsx_neq_f32">;
-defm V_CMPSX_NLT_F32 : VOPCX_F32 <vopc<0x5e>, "v_cmpsx_nlt_f32">;
-defm V_CMPSX_TRU_F32 : VOPCX_F32 <vopc<0x5f>, "v_cmpsx_tru_f32">;
-
-
-defm V_CMPS_F_F64 : VOPC_F64 <vopc<0x60>, "v_cmps_f_f64">;
-defm V_CMPS_LT_F64 : VOPC_F64 <vopc<0x61>, "v_cmps_lt_f64", COND_NULL, "v_cmps_gt_f64">;
-defm V_CMPS_EQ_F64 : VOPC_F64 <vopc<0x62>, "v_cmps_eq_f64">;
-defm V_CMPS_LE_F64 : VOPC_F64 <vopc<0x63>, "v_cmps_le_f64", COND_NULL, "v_cmps_ge_f64">;
-defm V_CMPS_GT_F64 : VOPC_F64 <vopc<0x64>, "v_cmps_gt_f64">;
-defm V_CMPS_LG_F64 : VOPC_F64 <vopc<0x65>, "v_cmps_lg_f64">;
-defm V_CMPS_GE_F64 : VOPC_F64 <vopc<0x66>, "v_cmps_ge_f64">;
-defm V_CMPS_O_F64 : VOPC_F64 <vopc<0x67>, "v_cmps_o_f64">;
-defm V_CMPS_U_F64 : VOPC_F64 <vopc<0x68>, "v_cmps_u_f64">;
-defm V_CMPS_NGE_F64 : VOPC_F64 <vopc<0x69>, "v_cmps_nge_f64", COND_NULL, "v_cmps_nle_f64">;
-defm V_CMPS_NLG_F64 : VOPC_F64 <vopc<0x6a>, "v_cmps_nlg_f64">;
-defm V_CMPS_NGT_F64 : VOPC_F64 <vopc<0x6b>, "v_cmps_ngt_f64", COND_NULL, "v_cmps_nlt_f64">;
-defm V_CMPS_NLE_F64 : VOPC_F64 <vopc<0x6c>, "v_cmps_nle_f64">;
-defm V_CMPS_NEQ_F64 : VOPC_F64 <vopc<0x6d>, "v_cmps_neq_f64">;
-defm V_CMPS_NLT_F64 : VOPC_F64 <vopc<0x6e>, "v_cmps_nlt_f64">;
-defm V_CMPS_TRU_F64 : VOPC_F64 <vopc<0x6f>, "v_cmps_tru_f64">;
-
-
-defm V_CMPSX_F_F64 : VOPCX_F64 <vopc<0x70>, "v_cmpsx_f_f64">;
-defm V_CMPSX_LT_F64 : VOPCX_F64 <vopc<0x71>, "v_cmpsx_lt_f64", "v_cmpsx_gt_f64">;
-defm V_CMPSX_EQ_F64 : VOPCX_F64 <vopc<0x72>, "v_cmpsx_eq_f64">;
-defm V_CMPSX_LE_F64 : VOPCX_F64 <vopc<0x73>, "v_cmpsx_le_f64", "v_cmpsx_ge_f64">;
-defm V_CMPSX_GT_F64 : VOPCX_F64 <vopc<0x74>, "v_cmpsx_gt_f64">;
-defm V_CMPSX_LG_F64 : VOPCX_F64 <vopc<0x75>, "v_cmpsx_lg_f64">;
-defm V_CMPSX_GE_F64 : VOPCX_F64 <vopc<0x76>, "v_cmpsx_ge_f64">;
-defm V_CMPSX_O_F64 : VOPCX_F64 <vopc<0x77>, "v_cmpsx_o_f64">;
-defm V_CMPSX_U_F64 : VOPCX_F64 <vopc<0x78>, "v_cmpsx_u_f64">;
-defm V_CMPSX_NGE_F64 : VOPCX_F64 <vopc<0x79>, "v_cmpsx_nge_f64", "v_cmpsx_nle_f64">;
-defm V_CMPSX_NLG_F64 : VOPCX_F64 <vopc<0x7a>, "v_cmpsx_nlg_f64">;
-defm V_CMPSX_NGT_F64 : VOPCX_F64 <vopc<0x7b>, "v_cmpsx_ngt_f64", "v_cmpsx_nlt_f64">;
-defm V_CMPSX_NLE_F64 : VOPCX_F64 <vopc<0x7c>, "v_cmpsx_nle_f64">;
-defm V_CMPSX_NEQ_F64 : VOPCX_F64 <vopc<0x7d>, "v_cmpsx_neq_f64">;
-defm V_CMPSX_NLT_F64 : VOPCX_F64 <vopc<0x7e>, "v_cmpsx_nlt_f64">;
-defm V_CMPSX_TRU_F64 : VOPCX_F64 <vopc<0x7f>, "v_cmpsx_tru_f64">;
-
-} // End SubtargetPredicate = isSICI
-
-defm V_CMP_F_I32 : VOPC_I32 <vopc<0x80, 0xc0>, "v_cmp_f_i32">;
-defm V_CMP_LT_I32 : VOPC_I32 <vopc<0x81, 0xc1>, "v_cmp_lt_i32", COND_SLT, "v_cmp_gt_i32">;
-defm V_CMP_EQ_I32 : VOPC_I32 <vopc<0x82, 0xc2>, "v_cmp_eq_i32", COND_EQ>;
-defm V_CMP_LE_I32 : VOPC_I32 <vopc<0x83, 0xc3>, "v_cmp_le_i32", COND_SLE, "v_cmp_ge_i32">;
-defm V_CMP_GT_I32 : VOPC_I32 <vopc<0x84, 0xc4>, "v_cmp_gt_i32", COND_SGT>;
-defm V_CMP_NE_I32 : VOPC_I32 <vopc<0x85, 0xc5>, "v_cmp_ne_i32", COND_NE>;
-defm V_CMP_GE_I32 : VOPC_I32 <vopc<0x86, 0xc6>, "v_cmp_ge_i32", COND_SGE>;
-defm V_CMP_T_I32 : VOPC_I32 <vopc<0x87, 0xc7>, "v_cmp_t_i32">;
-
-
-defm V_CMPX_F_I32 : VOPCX_I32 <vopc<0x90, 0xd0>, "v_cmpx_f_i32">;
-defm V_CMPX_LT_I32 : VOPCX_I32 <vopc<0x91, 0xd1>, "v_cmpx_lt_i32", "v_cmpx_gt_i32">;
-defm V_CMPX_EQ_I32 : VOPCX_I32 <vopc<0x92, 0xd2>, "v_cmpx_eq_i32">;
-defm V_CMPX_LE_I32 : VOPCX_I32 <vopc<0x93, 0xd3>, "v_cmpx_le_i32", "v_cmpx_ge_i32">;
-defm V_CMPX_GT_I32 : VOPCX_I32 <vopc<0x94, 0xd4>, "v_cmpx_gt_i32">;
-defm V_CMPX_NE_I32 : VOPCX_I32 <vopc<0x95, 0xd5>, "v_cmpx_ne_i32">;
-defm V_CMPX_GE_I32 : VOPCX_I32 <vopc<0x96, 0xd6>, "v_cmpx_ge_i32">;
-defm V_CMPX_T_I32 : VOPCX_I32 <vopc<0x97, 0xd7>, "v_cmpx_t_i32">;
-
-
-defm V_CMP_F_I64 : VOPC_I64 <vopc<0xa0, 0xe0>, "v_cmp_f_i64">;
-defm V_CMP_LT_I64 : VOPC_I64 <vopc<0xa1, 0xe1>, "v_cmp_lt_i64", COND_SLT, "v_cmp_gt_i64">;
-defm V_CMP_EQ_I64 : VOPC_I64 <vopc<0xa2, 0xe2>, "v_cmp_eq_i64", COND_EQ>;
-defm V_CMP_LE_I64 : VOPC_I64 <vopc<0xa3, 0xe3>, "v_cmp_le_i64", COND_SLE, "v_cmp_ge_i64">;
-defm V_CMP_GT_I64 : VOPC_I64 <vopc<0xa4, 0xe4>, "v_cmp_gt_i64", COND_SGT>;
-defm V_CMP_NE_I64 : VOPC_I64 <vopc<0xa5, 0xe5>, "v_cmp_ne_i64", COND_NE>;
-defm V_CMP_GE_I64 : VOPC_I64 <vopc<0xa6, 0xe6>, "v_cmp_ge_i64", COND_SGE>;
-defm V_CMP_T_I64 : VOPC_I64 <vopc<0xa7, 0xe7>, "v_cmp_t_i64">;
-
-
-defm V_CMPX_F_I64 : VOPCX_I64 <vopc<0xb0, 0xf0>, "v_cmpx_f_i64">;
-defm V_CMPX_LT_I64 : VOPCX_I64 <vopc<0xb1, 0xf1>, "v_cmpx_lt_i64", "v_cmpx_gt_i64">;
-defm V_CMPX_EQ_I64 : VOPCX_I64 <vopc<0xb2, 0xf2>, "v_cmpx_eq_i64">;
-defm V_CMPX_LE_I64 : VOPCX_I64 <vopc<0xb3, 0xf3>, "v_cmpx_le_i64", "v_cmpx_ge_i64">;
-defm V_CMPX_GT_I64 : VOPCX_I64 <vopc<0xb4, 0xf4>, "v_cmpx_gt_i64">;
-defm V_CMPX_NE_I64 : VOPCX_I64 <vopc<0xb5, 0xf5>, "v_cmpx_ne_i64">;
-defm V_CMPX_GE_I64 : VOPCX_I64 <vopc<0xb6, 0xf6>, "v_cmpx_ge_i64">;
-defm V_CMPX_T_I64 : VOPCX_I64 <vopc<0xb7, 0xf7>, "v_cmpx_t_i64">;
-
-
-defm V_CMP_F_U32 : VOPC_I32 <vopc<0xc0, 0xc8>, "v_cmp_f_u32">;
-defm V_CMP_LT_U32 : VOPC_I32 <vopc<0xc1, 0xc9>, "v_cmp_lt_u32", COND_ULT, "v_cmp_gt_u32">;
-defm V_CMP_EQ_U32 : VOPC_I32 <vopc<0xc2, 0xca>, "v_cmp_eq_u32", COND_EQ>;
-defm V_CMP_LE_U32 : VOPC_I32 <vopc<0xc3, 0xcb>, "v_cmp_le_u32", COND_ULE, "v_cmp_ge_u32">;
-defm V_CMP_GT_U32 : VOPC_I32 <vopc<0xc4, 0xcc>, "v_cmp_gt_u32", COND_UGT>;
-defm V_CMP_NE_U32 : VOPC_I32 <vopc<0xc5, 0xcd>, "v_cmp_ne_u32", COND_NE>;
-defm V_CMP_GE_U32 : VOPC_I32 <vopc<0xc6, 0xce>, "v_cmp_ge_u32", COND_UGE>;
-defm V_CMP_T_U32 : VOPC_I32 <vopc<0xc7, 0xcf>, "v_cmp_t_u32">;
-
-
-defm V_CMPX_F_U32 : VOPCX_I32 <vopc<0xd0, 0xd8>, "v_cmpx_f_u32">;
-defm V_CMPX_LT_U32 : VOPCX_I32 <vopc<0xd1, 0xd9>, "v_cmpx_lt_u32", "v_cmpx_gt_u32">;
-defm V_CMPX_EQ_U32 : VOPCX_I32 <vopc<0xd2, 0xda>, "v_cmpx_eq_u32">;
-defm V_CMPX_LE_U32 : VOPCX_I32 <vopc<0xd3, 0xdb>, "v_cmpx_le_u32", "v_cmpx_ge_u32">;
-defm V_CMPX_GT_U32 : VOPCX_I32 <vopc<0xd4, 0xdc>, "v_cmpx_gt_u32">;
-defm V_CMPX_NE_U32 : VOPCX_I32 <vopc<0xd5, 0xdd>, "v_cmpx_ne_u32">;
-defm V_CMPX_GE_U32 : VOPCX_I32 <vopc<0xd6, 0xde>, "v_cmpx_ge_u32">;
-defm V_CMPX_T_U32 : VOPCX_I32 <vopc<0xd7, 0xdf>, "v_cmpx_t_u32">;
-
-
-defm V_CMP_F_U64 : VOPC_I64 <vopc<0xe0, 0xe8>, "v_cmp_f_u64">;
-defm V_CMP_LT_U64 : VOPC_I64 <vopc<0xe1, 0xe9>, "v_cmp_lt_u64", COND_ULT, "v_cmp_gt_u64">;
-defm V_CMP_EQ_U64 : VOPC_I64 <vopc<0xe2, 0xea>, "v_cmp_eq_u64", COND_EQ>;
-defm V_CMP_LE_U64 : VOPC_I64 <vopc<0xe3, 0xeb>, "v_cmp_le_u64", COND_ULE, "v_cmp_ge_u64">;
-defm V_CMP_GT_U64 : VOPC_I64 <vopc<0xe4, 0xec>, "v_cmp_gt_u64", COND_UGT>;
-defm V_CMP_NE_U64 : VOPC_I64 <vopc<0xe5, 0xed>, "v_cmp_ne_u64", COND_NE>;
-defm V_CMP_GE_U64 : VOPC_I64 <vopc<0xe6, 0xee>, "v_cmp_ge_u64", COND_UGE>;
-defm V_CMP_T_U64 : VOPC_I64 <vopc<0xe7, 0xef>, "v_cmp_t_u64">;
-
-defm V_CMPX_F_U64 : VOPCX_I64 <vopc<0xf0, 0xf8>, "v_cmpx_f_u64">;
-defm V_CMPX_LT_U64 : VOPCX_I64 <vopc<0xf1, 0xf9>, "v_cmpx_lt_u64", "v_cmpx_gt_u64">;
-defm V_CMPX_EQ_U64 : VOPCX_I64 <vopc<0xf2, 0xfa>, "v_cmpx_eq_u64">;
-defm V_CMPX_LE_U64 : VOPCX_I64 <vopc<0xf3, 0xfb>, "v_cmpx_le_u64", "v_cmpx_ge_u64">;
-defm V_CMPX_GT_U64 : VOPCX_I64 <vopc<0xf4, 0xfc>, "v_cmpx_gt_u64">;
-defm V_CMPX_NE_U64 : VOPCX_I64 <vopc<0xf5, 0xfd>, "v_cmpx_ne_u64">;
-defm V_CMPX_GE_U64 : VOPCX_I64 <vopc<0xf6, 0xfe>, "v_cmpx_ge_u64">;
-defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7, 0xff>, "v_cmpx_t_u64">;
-
-} // End isCompare = 1, isCommutable = 1
-
-defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
-defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <vopc<0x98, 0x11>, "v_cmpx_class_f32">;
-defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <vopc<0xa8, 0x12>, "v_cmp_class_f64">;
-defm V_CMPX_CLASS_F64 : VOPCX_CLASS_F64 <vopc<0xb8, 0x13>, "v_cmpx_class_f64">;
-
-//===----------------------------------------------------------------------===//
-// DS Instructions
-//===----------------------------------------------------------------------===//
-
-defm DS_ADD_U32 : DS_1A1D_NORET <0x0, "ds_add_u32", VGPR_32>;
-defm DS_SUB_U32 : DS_1A1D_NORET <0x1, "ds_sub_u32", VGPR_32>;
-defm DS_RSUB_U32 : DS_1A1D_NORET <0x2, "ds_rsub_u32", VGPR_32>;
-defm DS_INC_U32 : DS_1A1D_NORET <0x3, "ds_inc_u32", VGPR_32>;
-defm DS_DEC_U32 : DS_1A1D_NORET <0x4, "ds_dec_u32", VGPR_32>;
-defm DS_MIN_I32 : DS_1A1D_NORET <0x5, "ds_min_i32", VGPR_32>;
-defm DS_MAX_I32 : DS_1A1D_NORET <0x6, "ds_max_i32", VGPR_32>;
-defm DS_MIN_U32 : DS_1A1D_NORET <0x7, "ds_min_u32", VGPR_32>;
-defm DS_MAX_U32 : DS_1A1D_NORET <0x8, "ds_max_u32", VGPR_32>;
-defm DS_AND_B32 : DS_1A1D_NORET <0x9, "ds_and_b32", VGPR_32>;
-defm DS_OR_B32 : DS_1A1D_NORET <0xa, "ds_or_b32", VGPR_32>;
-defm DS_XOR_B32 : DS_1A1D_NORET <0xb, "ds_xor_b32", VGPR_32>;
-defm DS_MSKOR_B32 : DS_1A2D_NORET <0xc, "ds_mskor_b32", VGPR_32>;
-let mayLoad = 0 in {
-defm DS_WRITE_B32 : DS_1A1D_NORET <0xd, "ds_write_b32", VGPR_32>;
-defm DS_WRITE2_B32 : DS_1A1D_Off8_NORET <0xe, "ds_write2_b32", VGPR_32>;
-defm DS_WRITE2ST64_B32 : DS_1A1D_Off8_NORET <0xf, "ds_write2st64_b32", VGPR_32>;
-}
-defm DS_CMPST_B32 : DS_1A2D_NORET <0x10, "ds_cmpst_b32", VGPR_32>;
-defm DS_CMPST_F32 : DS_1A2D_NORET <0x11, "ds_cmpst_f32", VGPR_32>;
-defm DS_MIN_F32 : DS_1A2D_NORET <0x12, "ds_min_f32", VGPR_32>;
-defm DS_MAX_F32 : DS_1A2D_NORET <0x13, "ds_max_f32", VGPR_32>;
-
-defm DS_GWS_INIT : DS_1A_GDS <0x19, "ds_gws_init">;
-defm DS_GWS_SEMA_V : DS_1A_GDS <0x1a, "ds_gws_sema_v">;
-defm DS_GWS_SEMA_BR : DS_1A_GDS <0x1b, "ds_gws_sema_br">;
-defm DS_GWS_SEMA_P : DS_1A_GDS <0x1c, "ds_gws_sema_p">;
-defm DS_GWS_BARRIER : DS_1A_GDS <0x1d, "ds_gws_barrier">;
-let mayLoad = 0 in {
-defm DS_WRITE_B8 : DS_1A1D_NORET <0x1e, "ds_write_b8", VGPR_32>;
-defm DS_WRITE_B16 : DS_1A1D_NORET <0x1f, "ds_write_b16", VGPR_32>;
-}
-defm DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "ds_add_rtn_u32", VGPR_32, "ds_add_u32">;
-defm DS_SUB_RTN_U32 : DS_1A1D_RET <0x21, "ds_sub_rtn_u32", VGPR_32, "ds_sub_u32">;
-defm DS_RSUB_RTN_U32 : DS_1A1D_RET <0x22, "ds_rsub_rtn_u32", VGPR_32, "ds_rsub_u32">;
-defm DS_INC_RTN_U32 : DS_1A1D_RET <0x23, "ds_inc_rtn_u32", VGPR_32, "ds_inc_u32">;
-defm DS_DEC_RTN_U32 : DS_1A1D_RET <0x24, "ds_dec_rtn_u32", VGPR_32, "ds_dec_u32">;
-defm DS_MIN_RTN_I32 : DS_1A1D_RET <0x25, "ds_min_rtn_i32", VGPR_32, "ds_min_i32">;
-defm DS_MAX_RTN_I32 : DS_1A1D_RET <0x26, "ds_max_rtn_i32", VGPR_32, "ds_max_i32">;
-defm DS_MIN_RTN_U32 : DS_1A1D_RET <0x27, "ds_min_rtn_u32", VGPR_32, "ds_min_u32">;
-defm DS_MAX_RTN_U32 : DS_1A1D_RET <0x28, "ds_max_rtn_u32", VGPR_32, "ds_max_u32">;
-defm DS_AND_RTN_B32 : DS_1A1D_RET <0x29, "ds_and_rtn_b32", VGPR_32, "ds_and_b32">;
-defm DS_OR_RTN_B32 : DS_1A1D_RET <0x2a, "ds_or_rtn_b32", VGPR_32, "ds_or_b32">;
-defm DS_XOR_RTN_B32 : DS_1A1D_RET <0x2b, "ds_xor_rtn_b32", VGPR_32, "ds_xor_b32">;
-defm DS_MSKOR_RTN_B32 : DS_1A2D_RET <0x2c, "ds_mskor_rtn_b32", VGPR_32, "ds_mskor_b32">;
-defm DS_WRXCHG_RTN_B32 : DS_1A1D_RET <0x2d, "ds_wrxchg_rtn_b32", VGPR_32>;
-defm DS_WRXCHG2_RTN_B32 : DS_1A2D_RET <
-  0x2e, "ds_wrxchg2_rtn_b32", VReg_64, "", VGPR_32
->;
-defm DS_WRXCHG2ST64_RTN_B32 : DS_1A2D_RET <
-  0x2f, "ds_wrxchg2st64_rtn_b32", VReg_64, "", VGPR_32
->;
-defm DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "ds_cmpst_rtn_b32", VGPR_32, "ds_cmpst_b32">;
-defm DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "ds_cmpst_rtn_f32", VGPR_32, "ds_cmpst_f32">;
-defm DS_MIN_RTN_F32 : DS_1A2D_RET <0x32, "ds_min_rtn_f32", VGPR_32, "ds_min_f32">;
-defm DS_MAX_RTN_F32 : DS_1A2D_RET <0x33, "ds_max_rtn_f32", VGPR_32, "ds_max_f32">;
-let SubtargetPredicate = isCI in {
-defm DS_WRAP_RTN_F32 : DS_1A1D_RET <0x34, "ds_wrap_rtn_f32", VGPR_32, "ds_wrap_f32">;
-} // End isCI
-defm DS_SWIZZLE_B32 : DS_1A_RET <0x35, "ds_swizzle_b32", VGPR_32>;
-let mayStore = 0 in {
-defm DS_READ_B32 : DS_1A_RET <0x36, "ds_read_b32", VGPR_32>;
-defm DS_READ2_B32 : DS_1A_Off8_RET <0x37, "ds_read2_b32", VReg_64>;
-defm DS_READ2ST64_B32 : DS_1A_Off8_RET <0x38, "ds_read2st64_b32", VReg_64>;
-defm DS_READ_I8 : DS_1A_RET <0x39, "ds_read_i8", VGPR_32>;
-defm DS_READ_U8 : DS_1A_RET <0x3a, "ds_read_u8", VGPR_32>;
-defm DS_READ_I16 : DS_1A_RET <0x3b, "ds_read_i16", VGPR_32>;
-defm DS_READ_U16 : DS_1A_RET <0x3c, "ds_read_u16", VGPR_32>;
-}
-defm DS_CONSUME : DS_0A_RET <0x3d, "ds_consume">;
-defm DS_APPEND : DS_0A_RET <0x3e, "ds_append">;
-defm DS_ORDERED_COUNT : DS_1A_RET_GDS <0x3f, "ds_ordered_count">;
-defm DS_ADD_U64 : DS_1A1D_NORET <0x40, "ds_add_u64", VReg_64>;
-defm DS_SUB_U64 : DS_1A1D_NORET <0x41, "ds_sub_u64", VReg_64>;
-defm DS_RSUB_U64 : DS_1A1D_NORET <0x42, "ds_rsub_u64", VReg_64>;
-defm DS_INC_U64 : DS_1A1D_NORET <0x43, "ds_inc_u64", VReg_64>;
-defm DS_DEC_U64 : DS_1A1D_NORET <0x44, "ds_dec_u64", VReg_64>;
-defm DS_MIN_I64 : DS_1A1D_NORET <0x45, "ds_min_i64", VReg_64>;
-defm DS_MAX_I64 : DS_1A1D_NORET <0x46, "ds_max_i64", VReg_64>;
-defm DS_MIN_U64 : DS_1A1D_NORET <0x47, "ds_min_u64", VReg_64>;
-defm DS_MAX_U64 : DS_1A1D_NORET <0x48, "ds_max_u64", VReg_64>;
-defm DS_AND_B64 : DS_1A1D_NORET <0x49, "ds_and_b64", VReg_64>;
-defm DS_OR_B64 : DS_1A1D_NORET <0x4a, "ds_or_b64", VReg_64>;
-defm DS_XOR_B64 : DS_1A1D_NORET <0x4b, "ds_xor_b64", VReg_64>;
-defm DS_MSKOR_B64 : DS_1A2D_NORET <0x4c, "ds_mskor_b64", VReg_64>;
-let mayLoad = 0 in {
-defm DS_WRITE_B64 : DS_1A1D_NORET <0x4d, "ds_write_b64", VReg_64>;
-defm DS_WRITE2_B64 : DS_1A1D_Off8_NORET <0x4E, "ds_write2_b64", VReg_64>;
-defm DS_WRITE2ST64_B64 : DS_1A1D_Off8_NORET <0x4f, "ds_write2st64_b64", VReg_64>;
-}
-defm DS_CMPST_B64 : DS_1A2D_NORET <0x50, "ds_cmpst_b64", VReg_64>;
-defm DS_CMPST_F64 : DS_1A2D_NORET <0x51, "ds_cmpst_f64", VReg_64>;
-defm DS_MIN_F64 : DS_1A1D_NORET <0x52, "ds_min_f64", VReg_64>;
-defm DS_MAX_F64 : DS_1A1D_NORET <0x53, "ds_max_f64", VReg_64>;
-
-defm DS_ADD_RTN_U64 : DS_1A1D_RET <0x60, "ds_add_rtn_u64", VReg_64, "ds_add_u64">;
-defm DS_SUB_RTN_U64 : DS_1A1D_RET <0x61, "ds_sub_rtn_u64", VReg_64, "ds_sub_u64">;
-defm DS_RSUB_RTN_U64 : DS_1A1D_RET <0x62, "ds_rsub_rtn_u64", VReg_64, "ds_rsub_u64">;
-defm DS_INC_RTN_U64 : DS_1A1D_RET <0x63, "ds_inc_rtn_u64", VReg_64, "ds_inc_u64">;
-defm DS_DEC_RTN_U64 : DS_1A1D_RET <0x64, "ds_dec_rtn_u64", VReg_64, "ds_dec_u64">;
-defm DS_MIN_RTN_I64 : DS_1A1D_RET <0x65, "ds_min_rtn_i64", VReg_64, "ds_min_i64">;
-defm DS_MAX_RTN_I64 : DS_1A1D_RET <0x66, "ds_max_rtn_i64", VReg_64, "ds_max_i64">;
-defm DS_MIN_RTN_U64 : DS_1A1D_RET <0x67, "ds_min_rtn_u64", VReg_64, "ds_min_u64">;
-defm DS_MAX_RTN_U64 : DS_1A1D_RET <0x68, "ds_max_rtn_u64", VReg_64, "ds_max_u64">;
-defm DS_AND_RTN_B64 : DS_1A1D_RET <0x69, "ds_and_rtn_b64", VReg_64, "ds_and_b64">;
-defm DS_OR_RTN_B64 : DS_1A1D_RET <0x6a, "ds_or_rtn_b64", VReg_64, "ds_or_b64">;
-defm DS_XOR_RTN_B64 : DS_1A1D_RET <0x6b, "ds_xor_rtn_b64", VReg_64, "ds_xor_b64">;
-defm DS_MSKOR_RTN_B64 : DS_1A2D_RET <0x6c, "ds_mskor_rtn_b64", VReg_64, "ds_mskor_b64">;
-defm DS_WRXCHG_RTN_B64 : DS_1A1D_RET <0x6d, "ds_wrxchg_rtn_b64", VReg_64, "ds_wrxchg_b64">;
-defm DS_WRXCHG2_RTN_B64 : DS_1A2D_RET <0x6e, "ds_wrxchg2_rtn_b64", VReg_128, "ds_wrxchg2_b64", VReg_64>;
-defm DS_WRXCHG2ST64_RTN_B64 : DS_1A2D_RET <0x6f, "ds_wrxchg2st64_rtn_b64", VReg_128, "ds_wrxchg2st64_b64", VReg_64>;
-defm DS_CMPST_RTN_B64 : DS_1A2D_RET <0x70, "ds_cmpst_rtn_b64", VReg_64, "ds_cmpst_b64">;
-defm DS_CMPST_RTN_F64 : DS_1A2D_RET <0x71, "ds_cmpst_rtn_f64", VReg_64, "ds_cmpst_f64">;
-defm DS_MIN_RTN_F64 : DS_1A1D_RET <0x72, "ds_min_rtn_f64", VReg_64, "ds_min_f64">;
-defm DS_MAX_RTN_F64 : DS_1A1D_RET <0x73, "ds_max_rtn_f64", VReg_64, "ds_max_f64">;
-
-let mayStore = 0 in {
-defm DS_READ_B64 : DS_1A_RET <0x76, "ds_read_b64", VReg_64>;
-defm DS_READ2_B64 : DS_1A_Off8_RET <0x77, "ds_read2_b64", VReg_128>;
-defm DS_READ2ST64_B64 : DS_1A_Off8_RET <0x78, "ds_read2st64_b64", VReg_128>;
-}
-
-defm DS_ADD_SRC2_U32 : DS_1A <0x80, "ds_add_src2_u32">;
-defm DS_SUB_SRC2_U32 : DS_1A <0x81, "ds_sub_src2_u32">;
-defm DS_RSUB_SRC2_U32 : DS_1A <0x82, "ds_rsub_src2_u32">;
-defm DS_INC_SRC2_U32 : DS_1A <0x83, "ds_inc_src2_u32">;
-defm DS_DEC_SRC2_U32 : DS_1A <0x84, "ds_dec_src2_u32">;
-defm DS_MIN_SRC2_I32 : DS_1A <0x85, "ds_min_src2_i32">;
-defm DS_MAX_SRC2_I32 : DS_1A <0x86, "ds_max_src2_i32">;
-defm DS_MIN_SRC2_U32 : DS_1A <0x87, "ds_min_src2_u32">;
-defm DS_MAX_SRC2_U32 : DS_1A <0x88, "ds_max_src2_u32">;
-defm DS_AND_SRC2_B32 : DS_1A <0x89, "ds_and_src2_b32">;
-defm DS_OR_SRC2_B32 : DS_1A <0x8a, "ds_or_src2_b32">;
-defm DS_XOR_SRC2_B32 : DS_1A <0x8b, "ds_xor_src2_b32">;
-defm DS_WRITE_SRC2_B32 : DS_1A <0x8c, "ds_write_src2_b32">;
-
-defm DS_MIN_SRC2_F32 : DS_1A <0x92, "ds_min_src2_f32">;
-defm DS_MAX_SRC2_F32 : DS_1A <0x93, "ds_max_src2_f32">;
-
-defm DS_ADD_SRC2_U64 : DS_1A <0xc0, "ds_add_src2_u64">;
-defm DS_SUB_SRC2_U64 : DS_1A <0xc1, "ds_sub_src2_u64">;
-defm DS_RSUB_SRC2_U64 : DS_1A <0xc2, "ds_rsub_src2_u64">;
-defm DS_INC_SRC2_U64 : DS_1A <0xc3, "ds_inc_src2_u64">;
-defm DS_DEC_SRC2_U64 : DS_1A <0xc4, "ds_dec_src2_u64">;
-defm DS_MIN_SRC2_I64 : DS_1A <0xc5, "ds_min_src2_i64">;
-defm DS_MAX_SRC2_I64 : DS_1A <0xc6, "ds_max_src2_i64">;
-defm DS_MIN_SRC2_U64 : DS_1A <0xc7, "ds_min_src2_u64">;
-defm DS_MAX_SRC2_U64 : DS_1A <0xc8, "ds_max_src2_u64">;
-defm DS_AND_SRC2_B64 : DS_1A <0xc9, "ds_and_src2_b64">;
-defm DS_OR_SRC2_B64 : DS_1A <0xca, "ds_or_src2_b64">;
-defm DS_XOR_SRC2_B64 : DS_1A <0xcb, "ds_xor_src2_b64">;
-defm DS_WRITE_SRC2_B64 : DS_1A <0xcc, "ds_write_src2_b64">;
-
-defm DS_MIN_SRC2_F64 : DS_1A <0xd2, "ds_min_src2_f64">;
-defm DS_MAX_SRC2_F64 : DS_1A <0xd3, "ds_max_src2_f64">;
-
-//let SubtargetPredicate = isCI in {
-// DS_CONDXCHG32_RTN_B64
-// DS_CONDXCHG32_RTN_B128
-//} // End isCI
-
-//===----------------------------------------------------------------------===//
-// MUBUF Instructions
-//===----------------------------------------------------------------------===//
-
-defm BUFFER_LOAD_FORMAT_X : MUBUF_Load_Helper <
-  mubuf<0x00>, "buffer_load_format_x", VGPR_32
->;
-defm BUFFER_LOAD_FORMAT_XY : MUBUF_Load_Helper <
-  mubuf<0x01>, "buffer_load_format_xy", VReg_64
->;
-defm BUFFER_LOAD_FORMAT_XYZ : MUBUF_Load_Helper <
-  mubuf<0x02>, "buffer_load_format_xyz", VReg_96
->;
-defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <
-  mubuf<0x03>, "buffer_load_format_xyzw", VReg_128
->;
-defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
-  mubuf<0x04>, "buffer_store_format_x", VGPR_32
->;
-defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
-  mubuf<0x05>, "buffer_store_format_xy", VReg_64
->;
-defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
-  mubuf<0x06>, "buffer_store_format_xyz", VReg_96
->;
-defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
-  mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
->;
-defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
-  mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
->;
-defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <
-  mubuf<0x09, 0x11>, "buffer_load_sbyte", VGPR_32, i32, sextloadi8_global
->;
-defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <
-  mubuf<0x0a, 0x12>, "buffer_load_ushort", VGPR_32, i32, az_extloadi16_global
->;
-defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <
-  mubuf<0x0b, 0x13>, "buffer_load_sshort", VGPR_32, i32, sextloadi16_global
->;
-defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <
-  mubuf<0x0c, 0x14>, "buffer_load_dword", VGPR_32, i32, global_load
->;
-defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <
-  mubuf<0x0d, 0x15>, "buffer_load_dwordx2", VReg_64, v2i32, global_load
->;
-defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <
-  mubuf<0x0e, 0x17>, "buffer_load_dwordx4", VReg_128, v4i32, global_load
->;
-
-defm BUFFER_STORE_BYTE : MUBUF_Store_Helper <
-  mubuf<0x18>, "buffer_store_byte", VGPR_32, i32, truncstorei8_global
->;
-
-defm BUFFER_STORE_SHORT : MUBUF_Store_Helper <
-  mubuf<0x1a>, "buffer_store_short", VGPR_32, i32, truncstorei16_global
->;
-
-defm BUFFER_STORE_DWORD : MUBUF_Store_Helper <
-  mubuf<0x1c>, "buffer_store_dword", VGPR_32, i32, global_store
->;
-
-defm BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
-  mubuf<0x1d>, "buffer_store_dwordx2", VReg_64, v2i32, global_store
->;
-
-defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
-  mubuf<0x1e, 0x1f>, "buffer_store_dwordx4", VReg_128, v4i32, global_store
->;
-
-defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic <
-  mubuf<0x30, 0x40>, "buffer_atomic_swap", VGPR_32, i32, atomic_swap_global
->;
-//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <mubuf<0x31, 0x41>, "buffer_atomic_cmpswap", []>;
-defm BUFFER_ATOMIC_ADD : MUBUF_Atomic <
-  mubuf<0x32, 0x42>, "buffer_atomic_add", VGPR_32, i32, atomic_add_global
->;
-defm BUFFER_ATOMIC_SUB : MUBUF_Atomic <
-  mubuf<0x33, 0x43>, "buffer_atomic_sub", VGPR_32, i32, atomic_sub_global
->;
-//def BUFFER_ATOMIC_RSUB : MUBUF_ <mubuf<0x34>, "buffer_atomic_rsub", []>; // isn't on CI & VI
-defm BUFFER_ATOMIC_SMIN : MUBUF_Atomic <
-  mubuf<0x35, 0x44>, "buffer_atomic_smin", VGPR_32, i32, atomic_min_global
->;
-defm BUFFER_ATOMIC_UMIN : MUBUF_Atomic <
-  mubuf<0x36, 0x45>, "buffer_atomic_umin", VGPR_32, i32, atomic_umin_global
->;
-defm BUFFER_ATOMIC_SMAX : MUBUF_Atomic <
-  mubuf<0x37, 0x46>, "buffer_atomic_smax", VGPR_32, i32, atomic_max_global
->;
-defm BUFFER_ATOMIC_UMAX : MUBUF_Atomic <
-  mubuf<0x38, 0x47>, "buffer_atomic_umax", VGPR_32, i32, atomic_umax_global
->;
-defm BUFFER_ATOMIC_AND : MUBUF_Atomic <
-  mubuf<0x39, 0x48>, "buffer_atomic_and", VGPR_32, i32, atomic_and_global
->;
-defm BUFFER_ATOMIC_OR : MUBUF_Atomic <
-  mubuf<0x3a, 0x49>, "buffer_atomic_or", VGPR_32, i32, atomic_or_global
->;
-defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
-  mubuf<0x3b, 0x4a>, "buffer_atomic_xor", VGPR_32, i32, atomic_xor_global
->;
-//def BUFFER_ATOMIC_INC : MUBUF_ <mubuf<0x3c, 0x4b>, "buffer_atomic_inc", []>;
-//def BUFFER_ATOMIC_DEC : MUBUF_ <mubuf<0x3d, 0x4c>, "buffer_atomic_dec", []>;
-//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <mubuf<0x3e>, "buffer_atomic_fcmpswap", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMIN : MUBUF_ <mubuf<0x3f>, "buffer_atomic_fmin", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMAX : MUBUF_ <mubuf<0x40>, "buffer_atomic_fmax", []>; // isn't on VI
-//def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <mubuf<0x50, 0x60>, "buffer_atomic_swap_x2", []>;
-//def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <mubuf<0x51, 0x61>, "buffer_atomic_cmpswap_x2", []>;
-//def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <mubuf<0x52, 0x62>, "buffer_atomic_add_x2", []>;
-//def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <mubuf<0x53, 0x63>, "buffer_atomic_sub_x2", []>;
-//def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <mubuf<0x54>, "buffer_atomic_rsub_x2", []>; // isn't on CI & VI
-//def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <mubuf<0x55, 0x64>, "buffer_atomic_smin_x2", []>;
-//def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <mubuf<0x56, 0x65>, "buffer_atomic_umin_x2", []>;
-//def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <mubuf<0x57, 0x66>, "buffer_atomic_smax_x2", []>;
-//def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <mubuf<0x58, 0x67>, "buffer_atomic_umax_x2", []>;
-//def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <mubuf<0x59, 0x68>, "buffer_atomic_and_x2", []>;
-//def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <mubuf<0x5a, 0x69>, "buffer_atomic_or_x2", []>;
-//def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <mubuf<0x5b, 0x6a>, "buffer_atomic_xor_x2", []>;
-//def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <mubuf<0x5c, 0x6b>, "buffer_atomic_inc_x2", []>;
-//def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <mubuf<0x5d, 0x6c>, "buffer_atomic_dec_x2", []>;
-//def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <mubuf<0x5e>, "buffer_atomic_fcmpswap_x2", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <mubuf<0x5f>, "buffer_atomic_fmin_x2", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <mubuf<0x60>, "buffer_atomic_fmax_x2", []>; // isn't on VI
-//def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <mubuf<0x70>, "buffer_wbinvl1_sc", []>; // isn't on CI & VI
-//def BUFFER_WBINVL1_VOL : MUBUF_WBINVL1 <mubuf<0x70, 0x3f>, "buffer_wbinvl1_vol", []>; // isn't on SI
-//def BUFFER_WBINVL1 : MUBUF_WBINVL1 <mubuf<0x71, 0x3e>, "buffer_wbinvl1", []>;
-
-//===----------------------------------------------------------------------===//
-// MTBUF Instructions
-//===----------------------------------------------------------------------===//
-
-//def TBUFFER_LOAD_FORMAT_X : MTBUF_ <0x00000000, "tbuffer_load_format_x", []>;
-//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "tbuffer_load_format_xy", []>;
-//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "tbuffer_load_format_xyz", []>;
-defm TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "tbuffer_load_format_xyzw", VReg_128>;
-defm TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "tbuffer_store_format_x", VGPR_32>;
-defm TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "tbuffer_store_format_xy", VReg_64>;
-defm TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "tbuffer_store_format_xyz", VReg_128>;
-defm TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "tbuffer_store_format_xyzw", VReg_128>;
-
-//===----------------------------------------------------------------------===//
-// MIMG Instructions
-//===----------------------------------------------------------------------===//
-
-defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "image_load">;
-defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "image_load_mip">;
-//def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"image_load_pck", 0x00000002>;
-//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"image_load_pck_sgn", 0x00000003>;
-//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"image_load_mip_pck", 0x00000004>;
-//def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"image_load_mip_pck_sgn", 0x00000005>;
-//def IMAGE_STORE : MIMG_NoPattern_ <"image_store", 0x00000008>;
-//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"image_store_mip", 0x00000009>;
-//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"image_store_pck", 0x0000000a>;
-//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>;
-defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
-//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"image_atomic_swap", 0x0000000f>;
-//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"image_atomic_cmpswap", 0x00000010>;
-//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"image_atomic_add", 0x00000011>;
-//def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"image_atomic_sub", 0x00000012>;
-//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>;
-//def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"image_atomic_smin", 0x00000014>;
-//def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"image_atomic_umin", 0x00000015>;
-//def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"image_atomic_smax", 0x00000016>;
-//def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"image_atomic_umax", 0x00000017>;
-//def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"image_atomic_and", 0x00000018>;
-//def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"image_atomic_or", 0x00000019>;
-//def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"image_atomic_xor", 0x0000001a>;
-//def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"image_atomic_inc", 0x0000001b>;
-//def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"image_atomic_dec", 0x0000001c>;
-//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>;
-//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>;
-//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>;
-defm IMAGE_SAMPLE           : MIMG_Sampler_WQM <0x00000020, "image_sample">;
-defm IMAGE_SAMPLE_CL        : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">;
-defm IMAGE_SAMPLE_D         : MIMG_Sampler <0x00000022, "image_sample_d">;
-defm IMAGE_SAMPLE_D_CL      : MIMG_Sampler <0x00000023, "image_sample_d_cl">;
-defm IMAGE_SAMPLE_L         : MIMG_Sampler <0x00000024, "image_sample_l">;
-defm IMAGE_SAMPLE_B         : MIMG_Sampler_WQM <0x00000025, "image_sample_b">;
-defm IMAGE_SAMPLE_B_CL      : MIMG_Sampler_WQM <0x00000026, "image_sample_b_cl">;
-defm IMAGE_SAMPLE_LZ        : MIMG_Sampler <0x00000027, "image_sample_lz">;
-defm IMAGE_SAMPLE_C         : MIMG_Sampler_WQM <0x00000028, "image_sample_c">;
-defm IMAGE_SAMPLE_C_CL      : MIMG_Sampler_WQM <0x00000029, "image_sample_c_cl">;
-defm IMAGE_SAMPLE_C_D       : MIMG_Sampler <0x0000002a, "image_sample_c_d">;
-defm IMAGE_SAMPLE_C_D_CL    : MIMG_Sampler <0x0000002b, "image_sample_c_d_cl">;
-defm IMAGE_SAMPLE_C_L       : MIMG_Sampler <0x0000002c, "image_sample_c_l">;
-defm IMAGE_SAMPLE_C_B       : MIMG_Sampler_WQM <0x0000002d, "image_sample_c_b">;
-defm IMAGE_SAMPLE_C_B_CL    : MIMG_Sampler_WQM <0x0000002e, "image_sample_c_b_cl">;
-defm IMAGE_SAMPLE_C_LZ      : MIMG_Sampler <0x0000002f, "image_sample_c_lz">;
-defm IMAGE_SAMPLE_O         : MIMG_Sampler_WQM <0x00000030, "image_sample_o">;
-defm IMAGE_SAMPLE_CL_O      : MIMG_Sampler_WQM <0x00000031, "image_sample_cl_o">;
-defm IMAGE_SAMPLE_D_O       : MIMG_Sampler <0x00000032, "image_sample_d_o">;
-defm IMAGE_SAMPLE_D_CL_O    : MIMG_Sampler <0x00000033, "image_sample_d_cl_o">;
-defm IMAGE_SAMPLE_L_O       : MIMG_Sampler <0x00000034, "image_sample_l_o">;
-defm IMAGE_SAMPLE_B_O       : MIMG_Sampler_WQM <0x00000035, "image_sample_b_o">;
-defm IMAGE_SAMPLE_B_CL_O    : MIMG_Sampler_WQM <0x00000036, "image_sample_b_cl_o">;
-defm IMAGE_SAMPLE_LZ_O      : MIMG_Sampler <0x00000037, "image_sample_lz_o">;
-defm IMAGE_SAMPLE_C_O       : MIMG_Sampler_WQM <0x00000038, "image_sample_c_o">;
-defm IMAGE_SAMPLE_C_CL_O    : MIMG_Sampler_WQM <0x00000039, "image_sample_c_cl_o">;
-defm IMAGE_SAMPLE_C_D_O     : MIMG_Sampler <0x0000003a, "image_sample_c_d_o">;
-defm IMAGE_SAMPLE_C_D_CL_O  : MIMG_Sampler <0x0000003b, "image_sample_c_d_cl_o">;
-defm IMAGE_SAMPLE_C_L_O     : MIMG_Sampler <0x0000003c, "image_sample_c_l_o">;
-defm IMAGE_SAMPLE_C_B_O     : MIMG_Sampler_WQM <0x0000003d, "image_sample_c_b_o">;
-defm IMAGE_SAMPLE_C_B_CL_O  : MIMG_Sampler_WQM <0x0000003e, "image_sample_c_b_cl_o">;
-defm IMAGE_SAMPLE_C_LZ_O    : MIMG_Sampler <0x0000003f, "image_sample_c_lz_o">;
-defm IMAGE_GATHER4          : MIMG_Gather_WQM <0x00000040, "image_gather4">;
-defm IMAGE_GATHER4_CL       : MIMG_Gather_WQM <0x00000041, "image_gather4_cl">;
-defm IMAGE_GATHER4_L        : MIMG_Gather <0x00000044, "image_gather4_l">;
-defm IMAGE_GATHER4_B        : MIMG_Gather_WQM <0x00000045, "image_gather4_b">;
-defm IMAGE_GATHER4_B_CL     : MIMG_Gather_WQM <0x00000046, "image_gather4_b_cl">;
-defm IMAGE_GATHER4_LZ       : MIMG_Gather <0x00000047, "image_gather4_lz">;
-defm IMAGE_GATHER4_C        : MIMG_Gather_WQM <0x00000048, "image_gather4_c">;
-defm IMAGE_GATHER4_C_CL     : MIMG_Gather_WQM <0x00000049, "image_gather4_c_cl">;
-defm IMAGE_GATHER4_C_L      : MIMG_Gather <0x0000004c, "image_gather4_c_l">;
-defm IMAGE_GATHER4_C_B      : MIMG_Gather_WQM <0x0000004d, "image_gather4_c_b">;
-defm IMAGE_GATHER4_C_B_CL   : MIMG_Gather_WQM <0x0000004e, "image_gather4_c_b_cl">;
-defm IMAGE_GATHER4_C_LZ     : MIMG_Gather <0x0000004f, "image_gather4_c_lz">;
-defm IMAGE_GATHER4_O        : MIMG_Gather_WQM <0x00000050, "image_gather4_o">;
-defm IMAGE_GATHER4_CL_O     : MIMG_Gather_WQM <0x00000051, "image_gather4_cl_o">;
-defm IMAGE_GATHER4_L_O      : MIMG_Gather <0x00000054, "image_gather4_l_o">;
-defm IMAGE_GATHER4_B_O      : MIMG_Gather_WQM <0x00000055, "image_gather4_b_o">;
-defm IMAGE_GATHER4_B_CL_O   : MIMG_Gather <0x00000056, "image_gather4_b_cl_o">;
-defm IMAGE_GATHER4_LZ_O     : MIMG_Gather <0x00000057, "image_gather4_lz_o">;
-defm IMAGE_GATHER4_C_O      : MIMG_Gather_WQM <0x00000058, "image_gather4_c_o">;
-defm IMAGE_GATHER4_C_CL_O   : MIMG_Gather_WQM <0x00000059, "image_gather4_c_cl_o">;
-defm IMAGE_GATHER4_C_L_O    : MIMG_Gather <0x0000005c, "image_gather4_c_l_o">;
-defm IMAGE_GATHER4_C_B_O    : MIMG_Gather_WQM <0x0000005d, "image_gather4_c_b_o">;
-defm IMAGE_GATHER4_C_B_CL_O : MIMG_Gather_WQM <0x0000005e, "image_gather4_c_b_cl_o">;
-defm IMAGE_GATHER4_C_LZ_O   : MIMG_Gather <0x0000005f, "image_gather4_c_lz_o">;
-defm IMAGE_GET_LOD          : MIMG_Sampler_WQM <0x00000060, "image_get_lod">;
-defm IMAGE_SAMPLE_CD        : MIMG_Sampler <0x00000068, "image_sample_cd">;
-defm IMAGE_SAMPLE_CD_CL     : MIMG_Sampler <0x00000069, "image_sample_cd_cl">;
-defm IMAGE_SAMPLE_C_CD      : MIMG_Sampler <0x0000006a, "image_sample_c_cd">;
-defm IMAGE_SAMPLE_C_CD_CL   : MIMG_Sampler <0x0000006b, "image_sample_c_cd_cl">;
-defm IMAGE_SAMPLE_CD_O      : MIMG_Sampler <0x0000006c, "image_sample_cd_o">;
-defm IMAGE_SAMPLE_CD_CL_O   : MIMG_Sampler <0x0000006d, "image_sample_cd_cl_o">;
-defm IMAGE_SAMPLE_C_CD_O    : MIMG_Sampler <0x0000006e, "image_sample_c_cd_o">;
-defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "image_sample_c_cd_cl_o">;
-//def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"image_rsrc256", 0x0000007e>;
-//def IMAGE_SAMPLER : MIMG_NoPattern_ <"image_sampler", 0x0000007f>;
-
-//===----------------------------------------------------------------------===//
-// VOP1 Instructions
-//===----------------------------------------------------------------------===//
-
-let vdst = 0, src0 = 0 in {
-defm V_NOP : VOP1_m <vop1<0x0>, (outs), (ins), "v_nop", [], "v_nop">;
-}
-
-let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
-defm V_MOV_B32 : VOP1Inst <vop1<0x1>, "v_mov_b32", VOP_I32_I32>;
-} // End isMoveImm = 1
-
-let Uses = [EXEC] in {
-
-// FIXME: Specify SchedRW for READFIRSTLANE_B32
-
-def V_READFIRSTLANE_B32 : VOP1 <
-  0x00000002,
-  (outs SReg_32:$vdst),
-  (ins VGPR_32:$src0),
-  "v_readfirstlane_b32 $vdst, $src0",
-  []
->;
-
-}
-
-let SchedRW = [WriteQuarterRate32] in {
-
-defm V_CVT_I32_F64 : VOP1Inst <vop1<0x3>, "v_cvt_i32_f64",
-  VOP_I32_F64, fp_to_sint
->;
-defm V_CVT_F64_I32 : VOP1Inst <vop1<0x4>, "v_cvt_f64_i32",
-  VOP_F64_I32, sint_to_fp
->;
-defm V_CVT_F32_I32 : VOP1Inst <vop1<0x5>, "v_cvt_f32_i32",
-  VOP_F32_I32, sint_to_fp
->;
-defm V_CVT_F32_U32 : VOP1Inst <vop1<0x6>, "v_cvt_f32_u32",
-  VOP_F32_I32, uint_to_fp
->;
-defm V_CVT_U32_F32 : VOP1Inst <vop1<0x7>, "v_cvt_u32_f32",
-  VOP_I32_F32, fp_to_uint
->;
-defm V_CVT_I32_F32 : VOP1Inst <vop1<0x8>, "v_cvt_i32_f32",
-  VOP_I32_F32, fp_to_sint
->;
-defm V_CVT_F16_F32 : VOP1Inst <vop1<0xa>, "v_cvt_f16_f32",
-  VOP_I32_F32, fp_to_f16
->;
-defm V_CVT_F32_F16 : VOP1Inst <vop1<0xb>, "v_cvt_f32_f16",
-  VOP_F32_I32, f16_to_fp
->;
-defm V_CVT_RPI_I32_F32 : VOP1Inst <vop1<0xc>, "v_cvt_rpi_i32_f32",
-  VOP_I32_F32, cvt_rpi_i32_f32>;
-defm V_CVT_FLR_I32_F32 : VOP1Inst <vop1<0xd>, "v_cvt_flr_i32_f32",
-  VOP_I32_F32, cvt_flr_i32_f32>;
-defm V_CVT_OFF_F32_I4 : VOP1Inst  <vop1<0x0e>, "v_cvt_off_f32_i4", VOP_F32_I32>;
-defm V_CVT_F32_F64 : VOP1Inst <vop1<0xf>, "v_cvt_f32_f64",
-  VOP_F32_F64, fround
->;
-defm V_CVT_F64_F32 : VOP1Inst <vop1<0x10>, "v_cvt_f64_f32",
-  VOP_F64_F32, fextend
->;
-defm V_CVT_F32_UBYTE0 : VOP1Inst <vop1<0x11>, "v_cvt_f32_ubyte0",
-  VOP_F32_I32, AMDGPUcvt_f32_ubyte0
->;
-defm V_CVT_F32_UBYTE1 : VOP1Inst <vop1<0x12>, "v_cvt_f32_ubyte1",
-  VOP_F32_I32, AMDGPUcvt_f32_ubyte1
->;
-defm V_CVT_F32_UBYTE2 : VOP1Inst <vop1<0x13>, "v_cvt_f32_ubyte2",
-  VOP_F32_I32, AMDGPUcvt_f32_ubyte2
->;
-defm V_CVT_F32_UBYTE3 : VOP1Inst <vop1<0x14>, "v_cvt_f32_ubyte3",
-  VOP_F32_I32, AMDGPUcvt_f32_ubyte3
->;
-defm V_CVT_U32_F64 : VOP1Inst <vop1<0x15>, "v_cvt_u32_f64",
-  VOP_I32_F64, fp_to_uint
->;
-defm V_CVT_F64_U32 : VOP1Inst <vop1<0x16>, "v_cvt_f64_u32",
-  VOP_F64_I32, uint_to_fp
->;
-
-} // let SchedRW = [WriteQuarterRate32]
-
-defm V_FRACT_F32 : VOP1Inst <vop1<0x20, 0x1b>, "v_fract_f32",
-  VOP_F32_F32, AMDGPUfract
->;
-defm V_TRUNC_F32 : VOP1Inst <vop1<0x21, 0x1c>, "v_trunc_f32",
-  VOP_F32_F32, ftrunc
->;
-defm V_CEIL_F32 : VOP1Inst <vop1<0x22, 0x1d>, "v_ceil_f32",
-  VOP_F32_F32, fceil
->;
-defm V_RNDNE_F32 : VOP1Inst <vop1<0x23, 0x1e>, "v_rndne_f32",
-  VOP_F32_F32, frint
->;
-defm V_FLOOR_F32 : VOP1Inst <vop1<0x24, 0x1f>, "v_floor_f32",
-  VOP_F32_F32, ffloor
->;
-defm V_EXP_F32 : VOP1Inst <vop1<0x25, 0x20>, "v_exp_f32",
-  VOP_F32_F32, fexp2
->;
-
-let SchedRW = [WriteQuarterRate32] in {
-
-defm V_LOG_F32 : VOP1Inst <vop1<0x27, 0x21>, "v_log_f32",
-  VOP_F32_F32, flog2
->;
-defm V_RCP_F32 : VOP1Inst <vop1<0x2a, 0x22>, "v_rcp_f32",
-  VOP_F32_F32, AMDGPUrcp
->;
-defm V_RCP_IFLAG_F32 : VOP1Inst <vop1<0x2b, 0x23>, "v_rcp_iflag_f32",
-  VOP_F32_F32
->;
-defm V_RSQ_F32 : VOP1Inst <vop1<0x2e, 0x24>, "v_rsq_f32",
-  VOP_F32_F32, AMDGPUrsq
->;
-
-} // let SchedRW = [WriteQuarterRate32]
-
-let SchedRW = [WriteDouble] in {
-
-defm V_RCP_F64 : VOP1Inst <vop1<0x2f, 0x25>, "v_rcp_f64",
-  VOP_F64_F64, AMDGPUrcp
->;
-defm V_RSQ_F64 : VOP1Inst <vop1<0x31, 0x26>, "v_rsq_f64",
-  VOP_F64_F64, AMDGPUrsq
->;
-
-} // let SchedRW = [WriteDouble]
-
-defm V_SQRT_F32 : VOP1Inst <vop1<0x33, 0x27>, "v_sqrt_f32",
-  VOP_F32_F32, fsqrt
->;
-
-let SchedRW = [WriteDouble] in {
-
-defm V_SQRT_F64 : VOP1Inst <vop1<0x34, 0x28>, "v_sqrt_f64",
-  VOP_F64_F64, fsqrt
->;
-
-} // let SchedRW = [WriteDouble]
-
-defm V_SIN_F32 : VOP1Inst <vop1<0x35, 0x29>, "v_sin_f32",
-  VOP_F32_F32, AMDGPUsin
->;
-defm V_COS_F32 : VOP1Inst <vop1<0x36, 0x2a>, "v_cos_f32",
-  VOP_F32_F32, AMDGPUcos
->;
-defm V_NOT_B32 : VOP1Inst <vop1<0x37, 0x2b>, "v_not_b32", VOP_I32_I32>;
-defm V_BFREV_B32 : VOP1Inst <vop1<0x38, 0x2c>, "v_bfrev_b32", VOP_I32_I32>;
-defm V_FFBH_U32 : VOP1Inst <vop1<0x39, 0x2d>, "v_ffbh_u32", VOP_I32_I32>;
-defm V_FFBL_B32 : VOP1Inst <vop1<0x3a, 0x2e>, "v_ffbl_b32", VOP_I32_I32>;
-defm V_FFBH_I32 : VOP1Inst <vop1<0x3b, 0x2f>, "v_ffbh_i32", VOP_I32_I32>;
-defm V_FREXP_EXP_I32_F64 : VOP1Inst <vop1<0x3c,0x30>, "v_frexp_exp_i32_f64",
-  VOP_I32_F64
->;
-defm V_FREXP_MANT_F64 : VOP1Inst <vop1<0x3d, 0x31>, "v_frexp_mant_f64",
-  VOP_F64_F64
->;
-defm V_FRACT_F64 : VOP1Inst <vop1<0x3e, 0x32>, "v_fract_f64", VOP_F64_F64>;
-defm V_FREXP_EXP_I32_F32 : VOP1Inst <vop1<0x3f, 0x33>, "v_frexp_exp_i32_f32",
-  VOP_I32_F32
->;
-defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40, 0x34>, "v_frexp_mant_f32",
-  VOP_F32_F32
->;
-let vdst = 0, src0 = 0 in {
-defm V_CLREXCP : VOP1_m <vop1<0x41,0x35>, (outs), (ins), "v_clrexcp", [],
-  "v_clrexcp"
->;
-}
-defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_I32_I32>;
-defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_I32_I32>;
-defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44, 0x38>, "v_movrelsd_b32", VOP_I32_I32>;
-
-// These instructions only exist on SI and CI
-let SubtargetPredicate = isSICI in {
-
-let SchedRW = [WriteQuarterRate32] in {
-
-defm V_MOV_FED_B32 : VOP1InstSI <vop1<0x9>, "v_mov_fed_b32", VOP_I32_I32>;
-defm V_LOG_CLAMP_F32 : VOP1InstSI <vop1<0x26>, "v_log_clamp_f32", VOP_F32_F32>;
-defm V_RCP_CLAMP_F32 : VOP1InstSI <vop1<0x28>, "v_rcp_clamp_f32", VOP_F32_F32>;
-defm V_RCP_LEGACY_F32 : VOP1InstSI <vop1<0x29>, "v_rcp_legacy_f32", VOP_F32_F32>;
-defm V_RSQ_CLAMP_F32 : VOP1InstSI <vop1<0x2c>, "v_rsq_clamp_f32",
-  VOP_F32_F32, AMDGPUrsq_clamped
->;
-defm V_RSQ_LEGACY_F32 : VOP1InstSI <vop1<0x2d>, "v_rsq_legacy_f32",
-  VOP_F32_F32, AMDGPUrsq_legacy
->;
-
-} // End let SchedRW = [WriteQuarterRate32]
-
-let SchedRW = [WriteDouble] in {
-
-defm V_RCP_CLAMP_F64 : VOP1InstSI <vop1<0x30>, "v_rcp_clamp_f64", VOP_F64_F64>;
-defm V_RSQ_CLAMP_F64 : VOP1InstSI <vop1<0x32>, "v_rsq_clamp_f64",
-  VOP_F64_F64, AMDGPUrsq_clamped
->;
-
-} // End SchedRW = [WriteDouble]
-
-} // End SubtargetPredicate = isSICI
-
-//===----------------------------------------------------------------------===//
-// VINTRP Instructions
-//===----------------------------------------------------------------------===//
-
-let Uses = [M0] in {
-
-// FIXME: Specify SchedRW for VINTRP instructions.
-
-multiclass V_INTERP_P1_F32_m : VINTRP_m <
-  0x00000000,
-  (outs VGPR_32:$dst),
-  (ins VGPR_32:$i, i32imm:$attr_chan, i32imm:$attr),
-  "v_interp_p1_f32 $dst, $i, $attr_chan, $attr, [m0]",
-  [(set f32:$dst, (AMDGPUinterp_p1 i32:$i, (i32 imm:$attr_chan),
-                                           (i32 imm:$attr)))]
->;
-
-let OtherPredicates = [has32BankLDS] in {
-
-defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;
-
-} // End OtherPredicates = [has32BankLDS]
-
-let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst" in {
-
-defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;
-
-} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst"
-
-let DisableEncoding = "$src0", Constraints = "$src0 = $dst" in {
-
-defm V_INTERP_P2_F32 : VINTRP_m <
-  0x00000001,
-  (outs VGPR_32:$dst),
-  (ins VGPR_32:$src0, VGPR_32:$j, i32imm:$attr_chan, i32imm:$attr),
-  "v_interp_p2_f32 $dst, [$src0], $j, $attr_chan, $attr, [m0]",
-  [(set f32:$dst, (AMDGPUinterp_p2 f32:$src0, i32:$j, (i32 imm:$attr_chan),
-                                                     (i32 imm:$attr)))]>;
-
-} // End DisableEncoding = "$src0", Constraints = "$src0 = $dst"
-
-defm V_INTERP_MOV_F32 : VINTRP_m <
-  0x00000002,
-  (outs VGPR_32:$dst),
-  (ins InterpSlot:$src0, i32imm:$attr_chan, i32imm:$attr),
-  "v_interp_mov_f32 $dst, $src0, $attr_chan, $attr, [m0]",
-  [(set f32:$dst, (AMDGPUinterp_mov (i32 imm:$src0), (i32 imm:$attr_chan),
-                                    (i32 imm:$attr)))]>;
-
-} // End Uses = [M0]
-
-//===----------------------------------------------------------------------===//
-// VOP2 Instructions
-//===----------------------------------------------------------------------===//
-
-multiclass V_CNDMASK <vop2 op, string name> {
-  defm _e32 : VOP2_m <
-      op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins32, VOP_CNDMASK.Asm32, [],
-      name, name>;
-
-  defm _e64  : VOP3_m <
-      op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins64,
-      name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3>;
-}
-
-defm V_CNDMASK_B32 : V_CNDMASK<vop2<0x0>, "v_cndmask_b32">;
-
-let isCommutable = 1 in {
-defm V_ADD_F32 : VOP2Inst <vop2<0x3, 0x1>, "v_add_f32",
-  VOP_F32_F32_F32, fadd
->;
-
-defm V_SUB_F32 : VOP2Inst <vop2<0x4, 0x2>, "v_sub_f32", VOP_F32_F32_F32, fsub>;
-defm V_SUBREV_F32 : VOP2Inst <vop2<0x5, 0x3>, "v_subrev_f32",
-  VOP_F32_F32_F32, null_frag, "v_sub_f32"
->;
-} // End isCommutable = 1
-
-let isCommutable = 1 in {
-
-defm V_MUL_LEGACY_F32 : VOP2Inst <vop2<0x7, 0x4>, "v_mul_legacy_f32",
-  VOP_F32_F32_F32, int_AMDGPU_mul
->;
-
-defm V_MUL_F32 : VOP2Inst <vop2<0x8, 0x5>, "v_mul_f32",
-  VOP_F32_F32_F32, fmul
->;
-
-defm V_MUL_I32_I24 : VOP2Inst <vop2<0x9, 0x6>, "v_mul_i32_i24",
-  VOP_I32_I32_I32, AMDGPUmul_i24
->;
-
-defm V_MUL_HI_I32_I24 : VOP2Inst <vop2<0xa,0x7>, "v_mul_hi_i32_i24",
-  VOP_I32_I32_I32
->;
-
-defm V_MUL_U32_U24 : VOP2Inst <vop2<0xb, 0x8>, "v_mul_u32_u24",
-  VOP_I32_I32_I32, AMDGPUmul_u24
->;
-
-defm V_MUL_HI_U32_U24 : VOP2Inst <vop2<0xc,0x9>, "v_mul_hi_u32_u24",
- VOP_I32_I32_I32
->;
-
-defm V_MIN_F32 : VOP2Inst <vop2<0xf, 0xa>, "v_min_f32", VOP_F32_F32_F32,
-  fminnum>;
-defm V_MAX_F32 : VOP2Inst <vop2<0x10, 0xb>, "v_max_f32", VOP_F32_F32_F32,
-  fmaxnum>;
-defm V_MIN_I32 : VOP2Inst <vop2<0x11, 0xc>, "v_min_i32", VOP_I32_I32_I32>;
-defm V_MAX_I32 : VOP2Inst <vop2<0x12, 0xd>, "v_max_i32", VOP_I32_I32_I32>;
-defm V_MIN_U32 : VOP2Inst <vop2<0x13, 0xe>, "v_min_u32", VOP_I32_I32_I32>;
-defm V_MAX_U32 : VOP2Inst <vop2<0x14, 0xf>, "v_max_u32", VOP_I32_I32_I32>;
-
-defm V_LSHRREV_B32 : VOP2Inst <
-  vop2<0x16, 0x10>, "v_lshrrev_b32", VOP_I32_I32_I32, null_frag,
-    "v_lshr_b32"
->;
-
-defm V_ASHRREV_I32 : VOP2Inst <
-  vop2<0x18, 0x11>, "v_ashrrev_i32", VOP_I32_I32_I32, null_frag,
-    "v_ashr_i32"
->;
-
-defm V_LSHLREV_B32 : VOP2Inst <
-  vop2<0x1a, 0x12>, "v_lshlrev_b32", VOP_I32_I32_I32, null_frag,
-    "v_lshl_b32"
->;
-
-defm V_AND_B32 : VOP2Inst <vop2<0x1b, 0x13>, "v_and_b32", VOP_I32_I32_I32>;
-defm V_OR_B32 : VOP2Inst <vop2<0x1c, 0x14>, "v_or_b32", VOP_I32_I32_I32>;
-defm V_XOR_B32 : VOP2Inst <vop2<0x1d, 0x15>, "v_xor_b32", VOP_I32_I32_I32>;
-
-defm V_MAC_F32 : VOP2Inst <vop2<0x1f, 0x16>, "v_mac_f32", VOP_F32_F32_F32>;
-} // End isCommutable = 1
-
-defm V_MADMK_F32 : VOP2MADK <vop2<0x20, 0x17>, "v_madmk_f32">;
-
-let isCommutable = 1 in {
-defm V_MADAK_F32 : VOP2MADK <vop2<0x21, 0x18>, "v_madak_f32">;
-} // End isCommutable = 1
-
-let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
-// No patterns so that the scalar instructions are always selected.
-// The scalar versions will be replaced with the vector versions when needed
-// later (see the illustrative carry chain after this block).
-
-// V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 were renamed to *_U32 in VI,
-// but the VI instructions behave the same as the SI versions.
-defm V_ADD_I32 : VOP2bInst <vop2<0x25, 0x19>, "v_add_i32",
-  VOP_I32_I32_I32, add
->;
-defm V_SUB_I32 : VOP2bInst <vop2<0x26, 0x1a>, "v_sub_i32", VOP_I32_I32_I32>;
-
-defm V_SUBREV_I32 : VOP2bInst <vop2<0x27, 0x1b>, "v_subrev_i32",
-  VOP_I32_I32_I32, null_frag, "v_sub_i32"
->;
-
-let Uses = [VCC] in { // Carry-in comes from VCC
-defm V_ADDC_U32 : VOP2bInst <vop2<0x28, 0x1c>, "v_addc_u32",
-  VOP_I32_I32_I32_VCC
->;
-defm V_SUBB_U32 : VOP2bInst <vop2<0x29, 0x1d>, "v_subb_u32",
-  VOP_I32_I32_I32_VCC
->;
-defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a, 0x1e>, "v_subbrev_u32",
-  VOP_I32_I32_I32_VCC, null_frag, "v_subb_u32"
->;
-
-} // End Uses = [VCC]
-} // End isCommutable = 1, Defs = [VCC]
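-
-// Hedged illustration (not part of this file): the carry mechanics above are
-// what a 64-bit vector add expands to, with VCC linking the two halves:
-//   v_add_i32  vLo, vcc, vAlo, vBlo        ; carry-out written to VCC
-//   v_addc_u32 vHi, vcc, vAhi, vBhi, vcc   ; carry-in consumed from VCC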
-
-defm V_READLANE_B32 : VOP2SI_3VI_m <
-  vop3 <0x001, 0x289>,
-  "v_readlane_b32",
-  (outs SReg_32:$vdst),
-  (ins VGPR_32:$src0, SCSrc_32:$src1),
-  "v_readlane_b32 $vdst, $src0, $src1"
->;
-
-defm V_WRITELANE_B32 : VOP2SI_3VI_m <
-  vop3 <0x002, 0x28a>,
-  "v_writelane_b32",
-  (outs VGPR_32:$vdst),
-  (ins SReg_32:$src0, SCSrc_32:$src1),
-  "v_writelane_b32 $vdst, $src0, $src1"
->;
-
-// These instructions only exist on SI and CI
-let SubtargetPredicate = isSICI in {
-
-defm V_MIN_LEGACY_F32 : VOP2InstSI <vop2<0xd>, "v_min_legacy_f32",
-  VOP_F32_F32_F32, AMDGPUfmin_legacy
->;
-defm V_MAX_LEGACY_F32 : VOP2InstSI <vop2<0xe>, "v_max_legacy_f32",
-  VOP_F32_F32_F32, AMDGPUfmax_legacy
->;
-
-let isCommutable = 1 in {
-defm V_LSHR_B32 : VOP2InstSI <vop2<0x15>, "v_lshr_b32", VOP_I32_I32_I32>;
-defm V_ASHR_I32 : VOP2InstSI <vop2<0x17>, "v_ashr_i32", VOP_I32_I32_I32>;
-defm V_LSHL_B32 : VOP2InstSI <vop2<0x19>, "v_lshl_b32", VOP_I32_I32_I32>;
-} // End isCommutable = 1
-} // End SubtargetPredicate = isSICI
-
-let isCommutable = 1 in {
-defm V_MAC_LEGACY_F32 : VOP2_VI3_Inst <vop23<0x6, 0x28e>, "v_mac_legacy_f32",
-  VOP_F32_F32_F32
->;
-} // End isCommutable = 1
-
-defm V_BFM_B32 : VOP2_VI3_Inst <vop23<0x1e, 0x293>, "v_bfm_b32",
-  VOP_I32_I32_I32
->;
-defm V_BCNT_U32_B32 : VOP2_VI3_Inst <vop23<0x22, 0x28b>, "v_bcnt_u32_b32",
-  VOP_I32_I32_I32
->;
-defm V_MBCNT_LO_U32_B32 : VOP2_VI3_Inst <vop23<0x23, 0x28c>, "v_mbcnt_lo_u32_b32",
-  VOP_I32_I32_I32
->;
-defm V_MBCNT_HI_U32_B32 : VOP2_VI3_Inst <vop23<0x24, 0x28d>, "v_mbcnt_hi_u32_b32",
-  VOP_I32_I32_I32
->;
-defm V_LDEXP_F32 : VOP2_VI3_Inst <vop23<0x2b, 0x288>, "v_ldexp_f32",
-  VOP_F32_F32_I32, AMDGPUldexp
->;
-
-defm V_CVT_PKACCUM_U8_F32 : VOP2_VI3_Inst <vop23<0x2c, 0x1f0>, "v_cvt_pkaccum_u8_f32",
-  VOP_I32_F32_I32>; // TODO: set "Uses = dst"
-
-defm V_CVT_PKNORM_I16_F32 : VOP2_VI3_Inst <vop23<0x2d, 0x294>, "v_cvt_pknorm_i16_f32",
-  VOP_I32_F32_F32
->;
-defm V_CVT_PKNORM_U16_F32 : VOP2_VI3_Inst <vop23<0x2e, 0x295>, "v_cvt_pknorm_u16_f32",
-  VOP_I32_F32_F32
->;
-defm V_CVT_PKRTZ_F16_F32 : VOP2_VI3_Inst <vop23<0x2f, 0x296>, "v_cvt_pkrtz_f16_f32",
-  VOP_I32_F32_F32, int_SI_packf16
->;
-defm V_CVT_PK_U16_U32 : VOP2_VI3_Inst <vop23<0x30, 0x297>, "v_cvt_pk_u16_u32",
-  VOP_I32_I32_I32
->;
-defm V_CVT_PK_I16_I32 : VOP2_VI3_Inst <vop23<0x31, 0x298>, "v_cvt_pk_i16_i32",
-  VOP_I32_I32_I32
->;
-
-//===----------------------------------------------------------------------===//
-// VOP3 Instructions
-//===----------------------------------------------------------------------===//
-
-let isCommutable = 1 in {
-defm V_MAD_LEGACY_F32 : VOP3Inst <vop3<0x140, 0x1c0>, "v_mad_legacy_f32",
-  VOP_F32_F32_F32_F32
->;
-
-defm V_MAD_F32 : VOP3Inst <vop3<0x141, 0x1c1>, "v_mad_f32",
-  VOP_F32_F32_F32_F32, fmad
->;
-
-defm V_MAD_I32_I24 : VOP3Inst <vop3<0x142, 0x1c2>, "v_mad_i32_i24",
-  VOP_I32_I32_I32_I32, AMDGPUmad_i24
->;
-defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143, 0x1c3>, "v_mad_u32_u24",
-  VOP_I32_I32_I32_I32, AMDGPUmad_u24
->;
-} // End isCommutable = 1
-
-defm V_CUBEID_F32 : VOP3Inst <vop3<0x144, 0x1c4>, "v_cubeid_f32",
-  VOP_F32_F32_F32_F32
->;
-defm V_CUBESC_F32 : VOP3Inst <vop3<0x145, 0x1c5>, "v_cubesc_f32",
-  VOP_F32_F32_F32_F32
->;
-defm V_CUBETC_F32 : VOP3Inst <vop3<0x146, 0x1c6>, "v_cubetc_f32",
-  VOP_F32_F32_F32_F32
->;
-defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147, 0x1c7>, "v_cubema_f32",
-  VOP_F32_F32_F32_F32
->;
-
-defm V_BFE_U32 : VOP3Inst <vop3<0x148, 0x1c8>, "v_bfe_u32",
-  VOP_I32_I32_I32_I32, AMDGPUbfe_u32
->;
-defm V_BFE_I32 : VOP3Inst <vop3<0x149, 0x1c9>, "v_bfe_i32",
-  VOP_I32_I32_I32_I32, AMDGPUbfe_i32
->;
-
-defm V_BFI_B32 : VOP3Inst <vop3<0x14a, 0x1ca>, "v_bfi_b32",
-  VOP_I32_I32_I32_I32, AMDGPUbfi
->;
-
-let isCommutable = 1 in {
-defm V_FMA_F32 : VOP3Inst <vop3<0x14b, 0x1cb>, "v_fma_f32",
-  VOP_F32_F32_F32_F32, fma
->;
-defm V_FMA_F64 : VOP3Inst <vop3<0x14c, 0x1cc>, "v_fma_f64",
-  VOP_F64_F64_F64_F64, fma
->;
-} // End isCommutable = 1
-
-//def V_LERP_U8 : VOP3_U8 <0x0000014d, "v_lerp_u8", []>;
-defm V_ALIGNBIT_B32 : VOP3Inst <vop3<0x14e, 0x1ce>, "v_alignbit_b32",
-  VOP_I32_I32_I32_I32
->;
-defm V_ALIGNBYTE_B32 : VOP3Inst <vop3<0x14f, 0x1cf>, "v_alignbyte_b32",
-  VOP_I32_I32_I32_I32
->;
-
-defm V_MIN3_F32 : VOP3Inst <vop3<0x151, 0x1d0>, "v_min3_f32",
-  VOP_F32_F32_F32_F32, AMDGPUfmin3>;
-
-defm V_MIN3_I32 : VOP3Inst <vop3<0x152, 0x1d1>, "v_min3_i32",
-  VOP_I32_I32_I32_I32, AMDGPUsmin3
->;
-defm V_MIN3_U32 : VOP3Inst <vop3<0x153, 0x1d2>, "v_min3_u32",
-  VOP_I32_I32_I32_I32, AMDGPUumin3
->;
-defm V_MAX3_F32 : VOP3Inst <vop3<0x154, 0x1d3>, "v_max3_f32",
-  VOP_F32_F32_F32_F32, AMDGPUfmax3
->;
-defm V_MAX3_I32 : VOP3Inst <vop3<0x155, 0x1d4>, "v_max3_i32",
-  VOP_I32_I32_I32_I32, AMDGPUsmax3
->;
-defm V_MAX3_U32 : VOP3Inst <vop3<0x156, 0x1d5>, "v_max3_u32",
-  VOP_I32_I32_I32_I32, AMDGPUumax3
->;
-defm V_MED3_F32 : VOP3Inst <vop3<0x157, 0x1d6>, "v_med3_f32",
-  VOP_F32_F32_F32_F32
->;
-defm V_MED3_I32 : VOP3Inst <vop3<0x158, 0x1d7>, "v_med3_i32",
-  VOP_I32_I32_I32_I32
->;
-defm V_MED3_U32 : VOP3Inst <vop3<0x159, 0x1d8>, "v_med3_u32",
-  VOP_I32_I32_I32_I32
->;
-
-//def V_SAD_U8 : VOP3_U8 <0x0000015a, "v_sad_u8", []>;
-//def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "v_sad_hi_u8", []>;
-//def V_SAD_U16 : VOP3_U16 <0x0000015c, "v_sad_u16", []>;
-defm V_SAD_U32 : VOP3Inst <vop3<0x15d, 0x1dc>, "v_sad_u32",
-  VOP_I32_I32_I32_I32
->;
-////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "v_cvt_pk_u8_f32", []>;
-defm V_DIV_FIXUP_F32 : VOP3Inst <
-  vop3<0x15f, 0x1de>, "v_div_fixup_f32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
->;
-
-let SchedRW = [WriteDouble] in {
-
-defm V_DIV_FIXUP_F64 : VOP3Inst <
-  vop3<0x160, 0x1df>, "v_div_fixup_f64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup
->;
-
-} // let SchedRW = [WriteDouble]
-
-let SchedRW = [WriteDouble] in {
-let isCommutable = 1 in {
-
-defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
-  VOP_F64_F64_F64, fadd
->;
-defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
-  VOP_F64_F64_F64, fmul
->;
-
-defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
-  VOP_F64_F64_F64, fminnum
->;
-defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
-  VOP_F64_F64_F64, fmaxnum
->;
-
-} // isCommutable = 1
-
-defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
-  VOP_F64_F64_I32, AMDGPUldexp
->;
-
-} // let SchedRW = [WriteDouble]
-
-let isCommutable = 1, SchedRW = [WriteQuarterRate32] in {
-
-defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169, 0x285>, "v_mul_lo_u32",
-  VOP_I32_I32_I32
->;
-defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a, 0x286>, "v_mul_hi_u32",
-  VOP_I32_I32_I32
->;
-
-defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b, 0x285>, "v_mul_lo_i32",
-  VOP_I32_I32_I32
->;
-defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c, 0x287>, "v_mul_hi_i32",
-  VOP_I32_I32_I32
->;
-
-} // isCommutable = 1, SchedRW = [WriteQuarterRate32]
-
-let SchedRW = [WriteFloatFMA, WriteSALU] in {
-defm V_DIV_SCALE_F32 : VOP3b_32 <vop3<0x16d, 0x1e0>, "v_div_scale_f32", []>;
-}
-
-let SchedRW = [WriteDouble, WriteSALU] in {
-// Double precision division pre-scale.
-defm V_DIV_SCALE_F64 : VOP3b_64 <vop3<0x16e, 0x1e1>, "v_div_scale_f64", []>;
-} // End SchedRW = [WriteDouble, WriteSALU]
-
-let isCommutable = 1, Uses = [VCC] in {
-
-// v_div_fmas_f32:
-//   result = src0 * src1 + src2
-//   if (vcc)
-//     result *= 2^32
-//
-defm V_DIV_FMAS_F32 : VOP3_VCC_Inst <vop3<0x16f, 0x1e2>, "v_div_fmas_f32",
-  VOP_F32_F32_F32_F32, AMDGPUdiv_fmas
->;
-
-let SchedRW = [WriteDouble] in {
-// v_div_fmas_f64:
-//   result = src0 * src1 + src2
-//   if (vcc)
-//     result *= 2^64
-//
-defm V_DIV_FMAS_F64 : VOP3_VCC_Inst <vop3<0x170, 0x1e3>, "v_div_fmas_f64",
-  VOP_F64_F64_F64_F64, AMDGPUdiv_fmas
->;
-
-} // End SchedRW = [WriteDouble]
-} // End isCommutable = 1
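-
-// Hedged sketch of how these fit the division expansion (which lives in the
-// DAG lowering, not in this file): v_div_scale_* produces the pre-scaled
-// operands and sets VCC when the conditional post-scale above (*= 2^32 or
-// 2^64) is needed, and v_div_fixup_* then assembles the final quotient.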
-
-//def V_MSAD_U8 : VOP3_U8 <0x00000171, "v_msad_u8", []>;
-//def V_QSAD_U8 : VOP3_U8 <0x00000172, "v_qsad_u8", []>;
-//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "v_mqsad_u8", []>;
-
-let SchedRW = [WriteDouble] in {
-defm V_TRIG_PREOP_F64 : VOP3Inst <
-  vop3<0x174, 0x292>, "v_trig_preop_f64", VOP_F64_F64_I32, AMDGPUtrig_preop
->;
-
-} // let SchedRW = [WriteDouble]
-
-// These instructions only exist on SI and CI
-let SubtargetPredicate = isSICI in {
-
-defm V_LSHL_B64 : VOP3Inst <vop3<0x161>, "v_lshl_b64", VOP_I64_I64_I32>;
-defm V_LSHR_B64 : VOP3Inst <vop3<0x162>, "v_lshr_b64", VOP_I64_I64_I32>;
-defm V_ASHR_I64 : VOP3Inst <vop3<0x163>, "v_ashr_i64", VOP_I64_I64_I32>;
-
-defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "v_mullit_f32",
-  VOP_F32_F32_F32_F32>;
-
-} // End SubtargetPredicate = isSICI
-
-let SubtargetPredicate = isVI in {
-
-defm V_LSHLREV_B64 : VOP3Inst <vop3<0, 0x28f>, "v_lshlrev_b64",
-  VOP_I64_I32_I64
->;
-defm V_LSHRREV_B64 : VOP3Inst <vop3<0, 0x290>, "v_lshrrev_b64",
-  VOP_I64_I32_I64
->;
-defm V_ASHRREV_I64 : VOP3Inst <vop3<0, 0x291>, "v_ashrrev_i64",
-  VOP_I64_I32_I64
->;
-
-} // End SubtargetPredicate = isVI
-
-//===----------------------------------------------------------------------===//
-// Pseudo Instructions
-//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, isPseudo = 1 in {
-
-// For use in patterns
-def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$dst),
-  (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []
->;
-
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
-// 64-bit vector move instruction.  This is mainly used by the SIFoldOperands
-// pass to enable folding of inline immediates.
-def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
-} // end let hasSideEffects = 0, mayLoad = 0, mayStore = 0
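-
-// Hedged, illustrative example of the folding this enables: given
-//   %x = V_MOV_B64_PSEUDO 0
-// SIFoldOperands can rewrite users of %x to take the inline immediate 0
-// directly and drop the move.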
-
-let hasSideEffects = 1 in {
-def SGPR_USE : InstSI <(outs),(ins), "", []>;
-}
-
-// SI pseudo instructions. These are used by the CFG structurizer pass
-// and should be lowered to ISA instructions prior to codegen.
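-//
-// Hedged sketch of the eventual lowering (done by the control-flow lowering
-// pass, not in this file): SI_IF typically becomes an exec-mask save/update
-// plus a branch, roughly:
-//   s_and_saveexec_b64 sDst, vcc   ; save exec, mask off inactive lanes
-//   s_xor_b64 sDst, exec, sDst     ; sDst = lanes that take the else side
-//   s_cbranch_execz <target>       ; skip the region if no lanes remain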
-
-let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in {
-let Uses = [EXEC], Defs = [EXEC] in {
-
-let isBranch = 1, isTerminator = 1 in {
-
-def SI_IF: InstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$vcc, brtarget:$target),
-  "",
-  [(set i64:$dst, (int_SI_if i1:$vcc, bb:$target))]
->;
-
-def SI_ELSE : InstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$src, brtarget:$target),
-  "",
-  [(set i64:$dst, (int_SI_else i64:$src, bb:$target))]
-> {
-  let Constraints = "$src = $dst";
-}
-
-def SI_LOOP : InstSI <
-  (outs),
-  (ins SReg_64:$saved, brtarget:$target),
-  "si_loop $saved, $target",
-  [(int_SI_loop i64:$saved, bb:$target)]
->;
-
-} // end isBranch = 1, isTerminator = 1
-
-def SI_BREAK : InstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$src),
-  "si_else $dst, $src",
-  [(set i64:$dst, (int_SI_break i64:$src))]
->;
-
-def SI_IF_BREAK : InstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$vcc, SReg_64:$src),
-  "si_if_break $dst, $vcc, $src",
-  [(set i64:$dst, (int_SI_if_break i1:$vcc, i64:$src))]
->;
-
-def SI_ELSE_BREAK : InstSI <
-  (outs SReg_64:$dst),
-  (ins SReg_64:$src0, SReg_64:$src1),
-  "si_else_break $dst, $src0, $src1",
-  [(set i64:$dst, (int_SI_else_break i64:$src0, i64:$src1))]
->;
-
-def SI_END_CF : InstSI <
-  (outs),
-  (ins SReg_64:$saved),
-  "si_end_cf $saved",
-  [(int_SI_end_cf i64:$saved)]
->;
-
-} // End Uses = [EXEC], Defs = [EXEC]
-
-let Uses = [EXEC], Defs = [EXEC,VCC] in {
-def SI_KILL : InstSI <
-  (outs),
-  (ins VSrc_32:$src),
-  "si_kill $src",
-  [(int_AMDGPU_kill f32:$src)]
->;
-} // End Uses = [EXEC], Defs = [EXEC,VCC]
-
-} // end mayLoad = 1, mayStore = 1, hasSideEffects = 1
-
-let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
-
-//defm SI_ : RegisterLoadStore <VGPR_32, FRAMEri, ADDRIndirect>;
-
-let UseNamedOperandTable = 1 in {
-
-def SI_RegisterLoad : InstSI <
-  (outs VGPR_32:$dst, SReg_64:$temp),
-  (ins FRAMEri32:$addr, i32imm:$chan),
-  "", []
-> {
-  let isRegisterLoad = 1;
-  let mayLoad = 1;
-}
-
-class SIRegStore<dag outs> : InstSI <
-  outs,
-  (ins VGPR_32:$val, FRAMEri32:$addr, i32imm:$chan),
-  "", []
-> {
-  let isRegisterStore = 1;
-  let mayStore = 1;
-}
-
-let usesCustomInserter = 1 in {
-def SI_RegisterStorePseudo : SIRegStore<(outs)>;
-} // End usesCustomInserter = 1
-def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
-
-} // End UseNamedOperandTable = 1
-
-def SI_INDIRECT_SRC : InstSI <
-  (outs VGPR_32:$dst, SReg_64:$temp),
-  (ins unknown:$src, VSrc_32:$idx, i32imm:$off),
-  "si_indirect_src $dst, $temp, $src, $idx, $off",
-  []
->;
-
-class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
-  (outs rc:$dst, SReg_64:$temp),
-  (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VGPR_32:$val),
-  "si_indirect_dst $dst, $temp, $src, $idx, $off, $val",
-  []
-> {
-  let Constraints = "$src = $dst";
-}
-
-def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
-def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
-def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
-def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
-def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
-
-} // End Uses = [EXEC], Defs = [EXEC,VCC,M0]
-
-multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
-
-  let UseNamedOperandTable = 1 in {
-    def _SAVE : InstSI <
-      (outs),
-      (ins sgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
-           SReg_32:$scratch_offset),
-      "", []
-    >;
-
-    def _RESTORE : InstSI <
-      (outs sgpr_class:$dst),
-      (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset),
-      "", []
-    >;
-  } // End UseNamedOperandTable = 1
-}
-
-// It's unclear whether you can use M0 as the output of v_readlane_b32
-// instructions, so use the SGPR_32 register class for spills to prevent this
-// from happening; a sketch of the expansion follows the defm list below.
-defm SI_SPILL_S32  : SI_SPILL_SGPR <SGPR_32>;
-defm SI_SPILL_S64  : SI_SPILL_SGPR <SReg_64>;
-defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
-defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
-defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;
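-
-// Hedged sketch of what the _SAVE/_RESTORE pairs expand to (see
-// SIRegisterInfo): each SGPR is parked in a VGPR lane and recovered with the
-// lane-access instructions, roughly:
-//   v_writelane_b32 vSpill, sReg, <lane>   ; _SAVE
-//   v_readlane_b32  sReg, vSpill, <lane>   ; _RESTORE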
-
-multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
-  let UseNamedOperandTable = 1, VGPRSpill = 1 in {
-    def _SAVE : InstSI <
-      (outs),
-      (ins vgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
-           SReg_32:$scratch_offset),
-      "", []
-    >;
-
-    def _RESTORE : InstSI <
-      (outs vgpr_class:$dst),
-      (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset),
-      "", []
-    >;
-  } // End UseNamedOperandTable = 1, VGPRSpill = 1
-}
-
-defm SI_SPILL_V32  : SI_SPILL_VGPR <VGPR_32>;
-defm SI_SPILL_V64  : SI_SPILL_VGPR <VReg_64>;
-defm SI_SPILL_V96  : SI_SPILL_VGPR <VReg_96>;
-defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
-defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
-defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;
-
-let Defs = [SCC] in {
-
-def SI_CONSTDATA_PTR : InstSI <
-  (outs SReg_64:$dst),
-  (ins),
-  "", [(set SReg_64:$dst, (i64 SIconstdata_ptr))]
->;
-
-} // End Defs = [SCC]
-
-} // end isCodeGenOnly = 1, isPseudo = 1
-
-} // end SubtargetPredicate = isGCN
-
-let Predicates = [isGCN] in {
-
-def : Pat<
-  (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
-  (V_CNDMASK_B32_e64 $src2, $src1,
-                     (V_CMP_GT_F32_e64 SRCMODS.NONE, 0, SRCMODS.NONE, $src0,
-                                       DSTCLAMP.NONE, DSTOMOD.NONE))
->;
-
-def : Pat <
-  (int_AMDGPU_kilp),
-  (SI_KILL 0xbf800000)
->;
-
-/* int_SI_vs_load_input */
-def : Pat<
-  (SIload_input v4i32:$tlst, imm:$attr_offset, i32:$buf_idx_vgpr),
-  (BUFFER_LOAD_FORMAT_XYZW_IDXEN $buf_idx_vgpr, $tlst, 0, imm:$attr_offset, 0, 0, 0)
->;
-
-/* int_SI_export */
-def : Pat <
-  (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr,
-                 f32:$src0, f32:$src1, f32:$src2, f32:$src3),
-  (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm,
-       $src0, $src1, $src2, $src3)
->;
-
-//===----------------------------------------------------------------------===//
-// SMRD Patterns
-//===----------------------------------------------------------------------===//
-
-multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
-
-  // 1. SI-CI: Offset as an 8-bit DWORD immediate
-  def : Pat <
-    (constant_load (add i64:$sbase, (i64 IMM8bitDWORD:$offset))),
-    (vt (Instr_IMM $sbase, (as_dword_i32imm $offset)))
-  >;
-
-  // 2. Offset loaded in a 32-bit SGPR
-  def : Pat <
-    (constant_load (add i64:$sbase, (i64 IMM32bit:$offset))),
-    (vt (Instr_SGPR $sbase, (S_MOV_B32 (i32 (as_i32imm $offset)))))
-  >;
-
-  // 3. No offset at all
-  def : Pat <
-    (constant_load i64:$sbase),
-    (vt (Instr_IMM $sbase, 0))
-  >;
-}
-
-multiclass SMRD_Pattern_vi <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
-
-  // 1. VI: Offset as a 20-bit immediate in bytes
-  def : Pat <
-    (constant_load (add i64:$sbase, (i64 IMM20bit:$offset))),
-    (vt (Instr_IMM $sbase, (as_i32imm $offset)))
-  >;
-
-  // 2. Offset loaded in a 32-bit SGPR
-  def : Pat <
-    (constant_load (add i64:$sbase, (i64 IMM32bit:$offset))),
-    (vt (Instr_SGPR $sbase, (S_MOV_B32 (i32 (as_i32imm $offset)))))
-  >;
-
-  // 3. No offset at all
-  def : Pat <
-    (constant_load i64:$sbase),
-    (vt (Instr_IMM $sbase, 0))
-  >;
-}
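-
-// Hedged worked example of the two immediate encodings above: for a constant
-// load at byte offset 16 from $sbase, SI/CI encode the offset in dwords
-// (as_dword_i32imm yields 4), while VI encodes it in bytes (16).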
-
-let Predicates = [isSICI] in {
-defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
-defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
-defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
-} // End Predicates = [isSICI]
-
-let Predicates = [isVI] in {
-defm : SMRD_Pattern_vi <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
-defm : SMRD_Pattern_vi <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
-} // End Predicates = [isVI]
-
-let Predicates = [isSICI] in {
-
-// 1. Offset as an 8-bit DWORD immediate
-def : Pat <
-  (SIload_constant v4i32:$sbase, IMM8bitDWORD:$offset),
-  (S_BUFFER_LOAD_DWORD_IMM $sbase, (as_dword_i32imm $offset))
->;
-
-} // End Predicates = [isSICI]
-
-// 2. Offset loaded in a 32-bit SGPR
-def : Pat <
-  (SIload_constant v4i32:$sbase, imm:$offset),
-  (S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
->;
-
-//===----------------------------------------------------------------------===//
-// SOP1 Patterns
-//===----------------------------------------------------------------------===//
-
-def : Pat <
-  (i64 (ctpop i64:$src)),
-    (i64 (REG_SEQUENCE SReg_64,
-     (S_BCNT1_I32_B64 $src), sub0,
-     (S_MOV_B32 0), sub1))
->;
-
-//===----------------------------------------------------------------------===//
-// SOP2 Patterns
-//===----------------------------------------------------------------------===//
-
-// V_ADD_I32_e32/S_ADD_U32 produces carry in VCC/SCC. For the vector
-// case, the sgpr-copies pass will fix this to use the vector version.
-def : Pat <
-  (i32 (addc i32:$src0, i32:$src1)),
-  (S_ADD_U32 $src0, $src1)
->;
-
-//===----------------------------------------------------------------------===//
-// SOPP Patterns
-//===----------------------------------------------------------------------===//
-
-def : Pat <
-  (int_AMDGPU_barrier_global),
-  (S_BARRIER)
->;
-
-//===----------------------------------------------------------------------===//
-// VOP1 Patterns
-//===----------------------------------------------------------------------===//
-
-let Predicates = [UnsafeFPMath] in {
-
-//def : RcpPat<V_RCP_F64_e32, f64>;
-//defm : RsqPat<V_RSQ_F64_e32, f64>;
-//defm : RsqPat<V_RSQ_F32_e32, f32>;
-
-def : RsqPat<V_RSQ_F32_e32, f32>;
-def : RsqPat<V_RSQ_F64_e32, f64>;
-} // End Predicates = [UnsafeFPMath]
-
-//===----------------------------------------------------------------------===//
-// VOP2 Patterns
-//===----------------------------------------------------------------------===//
-
-def : Pat <
-  (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
-  (V_BCNT_U32_B32_e64 $popcnt, $val)
->;
-
-def : Pat <
-  (i32 (select i1:$src0, i32:$src1, i32:$src2)),
-  (V_CNDMASK_B32_e64 $src2, $src1, $src0)
->;
-
-/********** ======================= **********/
-/********** Image sampling patterns **********/
-/********** ======================= **********/
-
-// Image + sampler
-class SampleRawPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
-  (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, i32:$dmask, i32:$unorm,
-        i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
-  (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
-          (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
-          $addr, $rsrc, $sampler)
->;
-
-multiclass SampleRawPatterns<SDPatternOperator name, string opcode> {
-  def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
-  def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
-  def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
-  def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V8), v8i32>;
-  def : SampleRawPattern<name, !cast<MIMG>(opcode # _V4_V16), v16i32>;
-}
-
-// Image only
-class ImagePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
-  (name vt:$addr, v8i32:$rsrc, i32:$dmask, i32:$unorm,
-        i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
-  (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
-          (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
-          $addr, $rsrc)
->;
-
-multiclass ImagePatterns<SDPatternOperator name, string opcode> {
-  def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
-  def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
-  def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
-}
-
-// Basic sample
-defm : SampleRawPatterns<int_SI_image_sample,           "IMAGE_SAMPLE">;
-defm : SampleRawPatterns<int_SI_image_sample_cl,        "IMAGE_SAMPLE_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_d,         "IMAGE_SAMPLE_D">;
-defm : SampleRawPatterns<int_SI_image_sample_d_cl,      "IMAGE_SAMPLE_D_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_l,         "IMAGE_SAMPLE_L">;
-defm : SampleRawPatterns<int_SI_image_sample_b,         "IMAGE_SAMPLE_B">;
-defm : SampleRawPatterns<int_SI_image_sample_b_cl,      "IMAGE_SAMPLE_B_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_lz,        "IMAGE_SAMPLE_LZ">;
-defm : SampleRawPatterns<int_SI_image_sample_cd,        "IMAGE_SAMPLE_CD">;
-defm : SampleRawPatterns<int_SI_image_sample_cd_cl,     "IMAGE_SAMPLE_CD_CL">;
-
-// Sample with comparison
-defm : SampleRawPatterns<int_SI_image_sample_c,         "IMAGE_SAMPLE_C">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cl,      "IMAGE_SAMPLE_C_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_c_d,       "IMAGE_SAMPLE_C_D">;
-defm : SampleRawPatterns<int_SI_image_sample_c_d_cl,    "IMAGE_SAMPLE_C_D_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_c_l,       "IMAGE_SAMPLE_C_L">;
-defm : SampleRawPatterns<int_SI_image_sample_c_b,       "IMAGE_SAMPLE_C_B">;
-defm : SampleRawPatterns<int_SI_image_sample_c_b_cl,    "IMAGE_SAMPLE_C_B_CL">;
-defm : SampleRawPatterns<int_SI_image_sample_c_lz,      "IMAGE_SAMPLE_C_LZ">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cd,      "IMAGE_SAMPLE_C_CD">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl,   "IMAGE_SAMPLE_C_CD_CL">;
-
-// Sample with offsets
-defm : SampleRawPatterns<int_SI_image_sample_o,         "IMAGE_SAMPLE_O">;
-defm : SampleRawPatterns<int_SI_image_sample_cl_o,      "IMAGE_SAMPLE_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_d_o,       "IMAGE_SAMPLE_D_O">;
-defm : SampleRawPatterns<int_SI_image_sample_d_cl_o,    "IMAGE_SAMPLE_D_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_l_o,       "IMAGE_SAMPLE_L_O">;
-defm : SampleRawPatterns<int_SI_image_sample_b_o,       "IMAGE_SAMPLE_B_O">;
-defm : SampleRawPatterns<int_SI_image_sample_b_cl_o,    "IMAGE_SAMPLE_B_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_lz_o,      "IMAGE_SAMPLE_LZ_O">;
-defm : SampleRawPatterns<int_SI_image_sample_cd_o,      "IMAGE_SAMPLE_CD_O">;
-defm : SampleRawPatterns<int_SI_image_sample_cd_cl_o,   "IMAGE_SAMPLE_CD_CL_O">;
-
-// Sample with comparison and offsets
-defm : SampleRawPatterns<int_SI_image_sample_c_o,       "IMAGE_SAMPLE_C_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cl_o,    "IMAGE_SAMPLE_C_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_d_o,     "IMAGE_SAMPLE_C_D_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_d_cl_o,  "IMAGE_SAMPLE_C_D_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_l_o,     "IMAGE_SAMPLE_C_L_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_b_o,     "IMAGE_SAMPLE_C_B_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_b_cl_o,  "IMAGE_SAMPLE_C_B_CL_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_lz_o,    "IMAGE_SAMPLE_C_LZ_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cd_o,    "IMAGE_SAMPLE_C_CD_O">;
-defm : SampleRawPatterns<int_SI_image_sample_c_cd_cl_o, "IMAGE_SAMPLE_C_CD_CL_O">;
-
-// Gather opcodes
-// Only the variants which make sense are defined.
-def : SampleRawPattern<int_SI_gather4,           IMAGE_GATHER4_V4_V2,        v2i32>;
-def : SampleRawPattern<int_SI_gather4,           IMAGE_GATHER4_V4_V4,        v4i32>;
-def : SampleRawPattern<int_SI_gather4_cl,        IMAGE_GATHER4_CL_V4_V4,     v4i32>;
-def : SampleRawPattern<int_SI_gather4_l,         IMAGE_GATHER4_L_V4_V4,      v4i32>;
-def : SampleRawPattern<int_SI_gather4_b,         IMAGE_GATHER4_B_V4_V4,      v4i32>;
-def : SampleRawPattern<int_SI_gather4_b_cl,      IMAGE_GATHER4_B_CL_V4_V4,   v4i32>;
-def : SampleRawPattern<int_SI_gather4_b_cl,      IMAGE_GATHER4_B_CL_V4_V8,   v8i32>;
-def : SampleRawPattern<int_SI_gather4_lz,        IMAGE_GATHER4_LZ_V4_V2,     v2i32>;
-def : SampleRawPattern<int_SI_gather4_lz,        IMAGE_GATHER4_LZ_V4_V4,     v4i32>;
-
-def : SampleRawPattern<int_SI_gather4_c,         IMAGE_GATHER4_C_V4_V4,      v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_cl,      IMAGE_GATHER4_C_CL_V4_V4,   v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_cl,      IMAGE_GATHER4_C_CL_V4_V8,   v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_l,       IMAGE_GATHER4_C_L_V4_V4,    v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_l,       IMAGE_GATHER4_C_L_V4_V8,    v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_b,       IMAGE_GATHER4_C_B_V4_V4,    v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_b,       IMAGE_GATHER4_C_B_V4_V8,    v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_b_cl,    IMAGE_GATHER4_C_B_CL_V4_V8, v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_lz,      IMAGE_GATHER4_C_LZ_V4_V4,   v4i32>;
-
-def : SampleRawPattern<int_SI_gather4_o,         IMAGE_GATHER4_O_V4_V4,      v4i32>;
-def : SampleRawPattern<int_SI_gather4_cl_o,      IMAGE_GATHER4_CL_O_V4_V4,   v4i32>;
-def : SampleRawPattern<int_SI_gather4_cl_o,      IMAGE_GATHER4_CL_O_V4_V8,   v8i32>;
-def : SampleRawPattern<int_SI_gather4_l_o,       IMAGE_GATHER4_L_O_V4_V4,    v4i32>;
-def : SampleRawPattern<int_SI_gather4_l_o,       IMAGE_GATHER4_L_O_V4_V8,    v8i32>;
-def : SampleRawPattern<int_SI_gather4_b_o,       IMAGE_GATHER4_B_O_V4_V4,    v4i32>;
-def : SampleRawPattern<int_SI_gather4_b_o,       IMAGE_GATHER4_B_O_V4_V8,    v8i32>;
-def : SampleRawPattern<int_SI_gather4_b_cl_o,    IMAGE_GATHER4_B_CL_O_V4_V8, v8i32>;
-def : SampleRawPattern<int_SI_gather4_lz_o,      IMAGE_GATHER4_LZ_O_V4_V4,   v4i32>;
-
-def : SampleRawPattern<int_SI_gather4_c_o,       IMAGE_GATHER4_C_O_V4_V4,    v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_o,       IMAGE_GATHER4_C_O_V4_V8,    v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_cl_o,    IMAGE_GATHER4_C_CL_O_V4_V8, v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_l_o,     IMAGE_GATHER4_C_L_O_V4_V8,  v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_b_o,     IMAGE_GATHER4_C_B_O_V4_V8,  v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_b_cl_o,  IMAGE_GATHER4_C_B_CL_O_V4_V8, v8i32>;
-def : SampleRawPattern<int_SI_gather4_c_lz_o,    IMAGE_GATHER4_C_LZ_O_V4_V4, v4i32>;
-def : SampleRawPattern<int_SI_gather4_c_lz_o,    IMAGE_GATHER4_C_LZ_O_V4_V8, v8i32>;
-
-def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V1, i32>;
-def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V2, v2i32>;
-def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>;
-
-def : ImagePattern<int_SI_getresinfo, IMAGE_GET_RESINFO_V4_V1, i32>;
-defm : ImagePatterns<int_SI_image_load, "IMAGE_LOAD">;
-defm : ImagePatterns<int_SI_image_load_mip, "IMAGE_LOAD_MIP">;
-
-/* SIsample for simple 1D texture lookup */
-def : Pat <
-  (SIsample i32:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
-  (IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
-    (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
-    (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
-    (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_RECT),
-    (opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
-    (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_ARRAY),
-    (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-class SampleShadowPattern<SDNode name, MIMG opcode,
-                          ValueType vt> : Pat <
-    (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW),
-    (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-class SampleShadowArrayPattern<SDNode name, MIMG opcode,
-                               ValueType vt> : Pat <
-    (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY),
-    (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
->;
-
-/* SIsample* for texture lookups consuming more address parameters */
-multiclass SamplePatterns<MIMG sample, MIMG sample_c, MIMG sample_l,
-                          MIMG sample_c_l, MIMG sample_b, MIMG sample_c_b,
-                          MIMG sample_d, MIMG sample_c_d, ValueType addr_type> {
-  def : SamplePattern <SIsample, sample, addr_type>;
-  def : SampleRectPattern <SIsample, sample, addr_type>;
-  def : SampleArrayPattern <SIsample, sample, addr_type>;
-  def : SampleShadowPattern <SIsample, sample_c, addr_type>;
-  def : SampleShadowArrayPattern <SIsample, sample_c, addr_type>;
-
-  def : SamplePattern <SIsamplel, sample_l, addr_type>;
-  def : SampleArrayPattern <SIsamplel, sample_l, addr_type>;
-  def : SampleShadowPattern <SIsamplel, sample_c_l, addr_type>;
-  def : SampleShadowArrayPattern <SIsamplel, sample_c_l, addr_type>;
-
-  def : SamplePattern <SIsampleb, sample_b, addr_type>;
-  def : SampleArrayPattern <SIsampleb, sample_b, addr_type>;
-  def : SampleShadowPattern <SIsampleb, sample_c_b, addr_type>;
-  def : SampleShadowArrayPattern <SIsampleb, sample_c_b, addr_type>;
-
-  def : SamplePattern <SIsampled, sample_d, addr_type>;
-  def : SampleArrayPattern <SIsampled, sample_d, addr_type>;
-  def : SampleShadowPattern <SIsampled, sample_c_d, addr_type>;
-  def : SampleShadowArrayPattern <SIsampled, sample_c_d, addr_type>;
-}
-
-defm : SamplePatterns<IMAGE_SAMPLE_V4_V2, IMAGE_SAMPLE_C_V4_V2,
-                      IMAGE_SAMPLE_L_V4_V2, IMAGE_SAMPLE_C_L_V4_V2,
-                      IMAGE_SAMPLE_B_V4_V2, IMAGE_SAMPLE_C_B_V4_V2,
-                      IMAGE_SAMPLE_D_V4_V2, IMAGE_SAMPLE_C_D_V4_V2,
-                      v2i32>;
-defm : SamplePatterns<IMAGE_SAMPLE_V4_V4, IMAGE_SAMPLE_C_V4_V4,
-                      IMAGE_SAMPLE_L_V4_V4, IMAGE_SAMPLE_C_L_V4_V4,
-                      IMAGE_SAMPLE_B_V4_V4, IMAGE_SAMPLE_C_B_V4_V4,
-                      IMAGE_SAMPLE_D_V4_V4, IMAGE_SAMPLE_C_D_V4_V4,
-                      v4i32>;
-defm : SamplePatterns<IMAGE_SAMPLE_V4_V8, IMAGE_SAMPLE_C_V4_V8,
-                      IMAGE_SAMPLE_L_V4_V8, IMAGE_SAMPLE_C_L_V4_V8,
-                      IMAGE_SAMPLE_B_V4_V8, IMAGE_SAMPLE_C_B_V4_V8,
-                      IMAGE_SAMPLE_D_V4_V8, IMAGE_SAMPLE_C_D_V4_V8,
-                      v8i32>;
-defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
-                      IMAGE_SAMPLE_L_V4_V16, IMAGE_SAMPLE_C_L_V4_V16,
-                      IMAGE_SAMPLE_B_V4_V16, IMAGE_SAMPLE_C_B_V4_V16,
-                      IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
-                      v16i32>;
-
-/* int_SI_imageload for texture fetches consuming varying address parameters */
-class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
-    (name addr_type:$addr, v32i8:$rsrc, imm),
-    (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
-    (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY),
-    (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
-    (name addr_type:$addr, v32i8:$rsrc, TEX_MSAA),
-    (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
-    (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY_MSAA),
-    (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-multiclass ImageLoadPatterns<MIMG opcode, ValueType addr_type> {
-  def : ImageLoadPattern <int_SI_imageload, opcode, addr_type>;
-  def : ImageLoadArrayPattern <int_SI_imageload, opcode, addr_type>;
-}
-
-multiclass ImageLoadMSAAPatterns<MIMG opcode, ValueType addr_type> {
-  def : ImageLoadMSAAPattern <int_SI_imageload, opcode, addr_type>;
-  def : ImageLoadArrayMSAAPattern <int_SI_imageload, opcode, addr_type>;
-}
-
-defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V2, v2i32>;
-defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V4, v4i32>;
-
-defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V2, v2i32>;
-defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V4, v4i32>;
-
-/* Image resource information */
-def : Pat <
-  (int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm),
-  (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
-def : Pat <
-  (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY),
-  (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
-def : Pat <
-  (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY_MSAA),
-  (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
-/********** ============================================ **********/
-/********** Extraction, Insertion, Building and Casting  **********/
-/********** ============================================ **********/
-
-foreach Index = 0-2 in {
-  def Extract_Element_v2i32_#Index : Extract_Element <
-    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v2i32_#Index : Insert_Element <
-    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-
-  def Extract_Element_v2f32_#Index : Extract_Element <
-    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v2f32_#Index : Insert_Element <
-    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-}
-
-foreach Index = 0-3 in {
-  def Extract_Element_v4i32_#Index : Extract_Element <
-    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v4i32_#Index : Insert_Element <
-    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-
-  def Extract_Element_v4f32_#Index : Extract_Element <
-    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v4f32_#Index : Insert_Element <
-    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-}
-
-foreach Index = 0-7 in {
-  def Extract_Element_v8i32_#Index : Extract_Element <
-    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v8i32_#Index : Insert_Element <
-    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-
-  def Extract_Element_v8f32_#Index : Extract_Element <
-    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v8f32_#Index : Insert_Element <
-    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-}
-
-foreach Index = 0-15 in {
-  def Extract_Element_v16i32_#Index : Extract_Element <
-    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v16i32_#Index : Insert_Element <
-    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-
-  def Extract_Element_v16f32_#Index : Extract_Element <
-    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-  def Insert_Element_v16f32_#Index : Insert_Element <
-    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
-  >;
-}
-
-def : BitConvert <i32, f32, SReg_32>;
-def : BitConvert <i32, f32, VGPR_32>;
-
-def : BitConvert <f32, i32, SReg_32>;
-def : BitConvert <f32, i32, VGPR_32>;
-
-def : BitConvert <i64, f64, VReg_64>;
-
-def : BitConvert <f64, i64, VReg_64>;
-
-def : BitConvert <v2f32, v2i32, VReg_64>;
-def : BitConvert <v2i32, v2f32, VReg_64>;
-def : BitConvert <v2i32, i64, VReg_64>;
-def : BitConvert <i64, v2i32, VReg_64>;
-def : BitConvert <v2f32, i64, VReg_64>;
-def : BitConvert <i64, v2f32, VReg_64>;
-def : BitConvert <v2i32, f64, VReg_64>;
-def : BitConvert <f64, v2i32, VReg_64>;
-def : BitConvert <v4f32, v4i32, VReg_128>;
-def : BitConvert <v4i32, v4f32, VReg_128>;
-
-def : BitConvert <v8f32, v8i32, SReg_256>;
-def : BitConvert <v8i32, v8f32, SReg_256>;
-def : BitConvert <v8i32, v32i8, SReg_256>;
-def : BitConvert <v32i8, v8i32, SReg_256>;
-def : BitConvert <v8i32, v32i8, VReg_256>;
-def : BitConvert <v8i32, v8f32, VReg_256>;
-def : BitConvert <v8f32, v8i32, VReg_256>;
-def : BitConvert <v32i8, v8i32, VReg_256>;
-
-def : BitConvert <v16i32, v16f32, VReg_512>;
-def : BitConvert <v16f32, v16i32, VReg_512>;
-
-/********** =================== **********/
-/********** Src & Dst modifiers **********/
-/********** =================== **********/
-
-def : Pat <
-  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
-               (f32 FP_ZERO), (f32 FP_ONE)),
-  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, 0, 1, $omod)
->;
-
-/********** ================================ **********/
-/********** Floating point absolute/negative **********/
-/********** ================================ **********/
-
-// Prevent expanding both fneg and fabs.
-
-// FIXME: Should use S_OR_B32
-def : Pat <
-  (fneg (fabs f32:$src)),
-  (V_OR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Set sign bit */
->;
-
-// FIXME: Should use S_OR_B32
-def : Pat <
-  (fneg (fabs f64:$src)),
-  (REG_SEQUENCE VReg_64,
-    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
-    sub0,
-    (V_OR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
-                  (V_MOV_B32_e32 0x80000000)), // Set sign bit.
-    sub1)
->;
-
-def : Pat <
-  (fabs f32:$src),
-  (V_AND_B32_e32 $src, (V_MOV_B32_e32 0x7fffffff))
->;
-
-def : Pat <
-  (fneg f32:$src),
-  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 0x80000000))
->;
-
-def : Pat <
-  (fabs f64:$src),
-  (REG_SEQUENCE VReg_64,
-    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
-    sub0,
-    (V_AND_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
-                   (V_MOV_B32_e32 0x7fffffff)), // Clear sign bit.
-    sub1)
->;
-
-def : Pat <
-  (fneg f64:$src),
-  (REG_SEQUENCE VReg_64,
-    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
-    sub0,
-    (V_XOR_B32_e32 (EXTRACT_SUBREG f64:$src, sub1),
-                   (V_MOV_B32_e32 0x80000000)),
-    sub1)
->;
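
As a quick scalar sketch of what the f32 patterns above compute (standalone
C++, not part of the patch; the masks are the ones in the V_MOV_B32 operands):

    // fabs clears the IEEE-754 sign bit (AND), fneg flips it (XOR), and
    // fneg(fabs x) forces it on (OR) -- matching the V_AND/V_XOR/V_OR above.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t bits(float f) { uint32_t u; std::memcpy(&u, &f, 4); return u; }
    static float fromBits(uint32_t u) { float f; std::memcpy(&f, &u, 4); return f; }

    int main() {
      float x = -3.5f;
      std::printf("fabs=%g fneg=%g -|x|=%g\n",
                  fromBits(bits(x) & 0x7fffffffu),   // V_AND_B32
                  fromBits(bits(x) ^ 0x80000000u),   // V_XOR_B32
                  fromBits(bits(x) | 0x80000000u));  // V_OR_B32
    }
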
-
-/********** ================== **********/
-/********** Immediate Patterns **********/
-/********** ================== **********/
-
-def : Pat <
-  (SGPRImm<(i32 imm)>:$imm),
-  (S_MOV_B32 imm:$imm)
->;
-
-def : Pat <
-  (SGPRImm<(f32 fpimm)>:$imm),
-  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
->;
-
-def : Pat <
-  (i32 imm:$imm),
-  (V_MOV_B32_e32 imm:$imm)
->;
-
-def : Pat <
-  (f32 fpimm:$imm),
-  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
->;
-
-def : Pat <
-  (i64 InlineImm<i64>:$imm),
-  (S_MOV_B64 InlineImm<i64>:$imm)
->;
-
-// XXX - Should this use a s_cmp to set SCC?
-
-// Set to sign-extended 64-bit value (true = -1, false = 0)
-def : Pat <
-  (i1 imm:$imm),
-  (S_MOV_B64 (i64 (as_i64imm $imm)))
->;
-
-def : Pat <
-  (f64 InlineFPImm<f64>:$imm),
-  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
->;
-
-/********** ================== **********/
-/********** Intrinsic Patterns **********/
-/********** ================== **********/
-
-/* llvm.AMDGPU.pow */
-def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;
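
A scalar model of that POW_Common expansion, assuming (as GCN defines these
opcodes) that V_LOG_F32 and V_EXP_F32 are base-2:

    #include <cmath>
    #include <cstdio>

    // pow(x, y) lowered as exp2(y * log2(x)); V_MUL_LEGACY_F32's
    // 0 * inf == 0 behavior is not modeled here.
    int main() {
      float x = 2.0f, y = 10.0f;
      std::printf("%f\n", std::exp2f(y * std::log2f(x))); // ~1024
    }
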
-
-def : Pat <
-  (int_AMDGPU_div f32:$src0, f32:$src1),
-  (V_MUL_LEGACY_F32_e32 $src0, (V_RCP_LEGACY_F32_e32 $src1))
->;
-
-def : Pat <
-  (int_AMDGPU_cube v4f32:$src),
-  (REG_SEQUENCE VReg_128,
-    (V_CUBETC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
-                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
-                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
-                  0 /* clamp */, 0 /* omod */), sub0,
-    (V_CUBESC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
-                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
-                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
-                  0 /* clamp */, 0 /* omod */), sub1,
-    (V_CUBEMA_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
-                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
-                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
-                  0 /* clamp */, 0 /* omod */), sub2,
-    (V_CUBEID_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
-                  0 /* src1_modifiers */, (EXTRACT_SUBREG $src, sub1),
-                  0 /* src2_modifiers */, (EXTRACT_SUBREG $src, sub2),
-                  0 /* clamp */, 0 /* omod */), sub3)
->;
-
-def : Pat <
-  (i32 (sext i1:$src0)),
-  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
->;
-
-class Ext32Pat <SDNode ext> : Pat <
-  (i32 (ext i1:$src0)),
-  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
->;
-
-def : Ext32Pat <zext>;
-def : Ext32Pat <anyext>;
-
-// Offset in a 32-bit VGPR
-def : Pat <
-  (SIload_constant v4i32:$sbase, i32:$voff),
-  (BUFFER_LOAD_DWORD_OFFEN $voff, $sbase, 0, 0, 0, 0, 0)
->;
-
-// The multiplication scales from [0,1] to the unsigned integer range
-def : Pat <
-  (AMDGPUurecip i32:$src0),
-  (V_CVT_U32_F32_e32
-    (V_MUL_F32_e32 CONST.FP_UINT_MAX_PLUS_1,
-                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
->;
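
A rough scalar reading of the pattern (assuming CONST.FP_UINT_MAX_PLUS_1 is
2^32 as an f32), ignoring the hardware's exact rounding:

    #include <cstdint>
    #include <cstdio>

    // ~(2^32 / x), computed through f32 like the V_CVT/V_RCP/V_MUL chain
    // above. Only a sketch: valid for x >= 2 (x == 1 would overflow the
    // float-to-uint conversion in plain C++).
    static uint32_t urecip(uint32_t x) {
      float rcp = 1.0f / static_cast<float>(x);          // V_RCP_IFLAG_F32
      return static_cast<uint32_t>(4294967296.0f * rcp); // V_MUL + V_CVT_U32
    }

    int main() { std::printf("%u\n", urecip(3)); } // ~0x55555555
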
-
-def : Pat <
-  (int_SI_tid),
-  (V_MBCNT_HI_U32_B32_e64 0xffffffff,
-                          (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0))
->;
-
-//===----------------------------------------------------------------------===//
-// VOP3 Patterns
-//===----------------------------------------------------------------------===//
-
-def : IMad24Pat<V_MAD_I32_I24>;
-def : UMad24Pat<V_MAD_U32_U24>;
-
-def : Pat <
-  (mulhu i32:$src0, i32:$src1),
-  (V_MUL_HI_U32 $src0, $src1)
->;
-
-def : Pat <
-  (mulhs i32:$src0, i32:$src1),
-  (V_MUL_HI_I32 $src0, $src1)
->;
-
-defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
-def : ROTRPattern <V_ALIGNBIT_B32>;
-
-/********** ======================= **********/
-/**********   Load/Store Patterns   **********/
-/********** ======================= **********/
-
-class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
-  (vt (frag (DS1Addr1Offset i32:$ptr, i32:$offset))),
-  (inst $ptr, (as_i16imm $offset), (i1 0))
->;
-
-def : DSReadPat <DS_READ_I8,  i32, si_sextload_local_i8>;
-def : DSReadPat <DS_READ_U8,  i32, si_az_extload_local_i8>;
-def : DSReadPat <DS_READ_I16, i32, si_sextload_local_i16>;
-def : DSReadPat <DS_READ_U16, i32, si_az_extload_local_i16>;
-def : DSReadPat <DS_READ_B32, i32, si_load_local>;
-
-let AddedComplexity = 100 in {
-
-def : DSReadPat <DS_READ_B64, v2i32, si_load_local_align8>;
-
-} // End AddedComplexity = 100
-
-def : Pat <
-  (v2i32 (si_load_local (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
-                                                    i8:$offset1))),
-  (DS_READ2_B32 $ptr, $offset0, $offset1, (i1 0))
->;
-
-class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
-  (frag vt:$value, (DS1Addr1Offset i32:$ptr, i32:$offset)),
-  (inst $ptr, $value, (as_i16imm $offset), (i1 0))
->;
-
-def : DSWritePat <DS_WRITE_B8, i32, si_truncstore_local_i8>;
-def : DSWritePat <DS_WRITE_B16, i32, si_truncstore_local_i16>;
-def : DSWritePat <DS_WRITE_B32, i32, si_store_local>;
-
-let AddedComplexity = 100 in {
-
-def : DSWritePat <DS_WRITE_B64, v2i32, si_store_local_align8>;
-} // End AddedComplexity = 100
-
-def : Pat <
-  (si_store_local v2i32:$value, (DS64Bit4ByteAligned i32:$ptr, i8:$offset0,
-                                                               i8:$offset1)),
-  (DS_WRITE2_B32 $ptr, (EXTRACT_SUBREG $value, sub0),
-                       (EXTRACT_SUBREG $value, sub1), $offset0, $offset1,
-                       (i1 0))
->;
-
-class DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> : Pat <
-  (frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$value),
-  (inst $ptr, $value, (as_i16imm $offset), (i1 0))
->;
-
-// Special case of DSAtomicRetPat for add / sub 1 -> inc / dec
-//
-// We need to use something for the data0, so we set a register to
-// -1. For the non-rtn variants, the manual says it does
-// DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1, and setting D0 to uint_max
-// will always do the increment so I'm assuming it's the same.
-//
-// We also load this -1 with s_mov_b32 / s_mov_b64 even though this
-// needs to be a VGPR. The SGPR copy pass will fix this, and it's
-// easier since there is no v_mov_b64.
-class DSAtomicIncRetPat<DS inst, ValueType vt,
-                        Instruction LoadImm, PatFrag frag> : Pat <
-  (frag (DS1Addr1Offset i32:$ptr, i32:$offset), (vt 1)),
-  (inst $ptr, (LoadImm (vt -1)), (as_i16imm $offset), (i1 0))
->;
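
A standalone restatement of the manual's formula from the comment, showing why
D0 = uint_max makes the inc behave like an unconditional +1 with wraparound:

    #include <cstdint>
    #include <cassert>

    // DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1 -- with D0 == UINT32_MAX the
    // wrap-to-0 branch fires exactly when DS[A] + 1 would wrap anyway.
    static uint32_t ds_inc(uint32_t mem, uint32_t d0) {
      return (mem >= d0) ? 0 : mem + 1;
    }

    int main() {
      for (uint32_t v : {0u, 41u, UINT32_MAX - 1, UINT32_MAX})
        assert(ds_inc(v, UINT32_MAX) == v + 1); // v + 1 wraps at UINT32_MAX
    }
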
-
-
-class DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> : Pat <
-  (frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$cmp, vt:$swap),
-  (inst $ptr, $cmp, $swap, (as_i16imm $offset), (i1 0))
->;
-
-
-// 32-bit atomics.
-def : DSAtomicIncRetPat<DS_INC_RTN_U32, i32,
-                        S_MOV_B32, si_atomic_load_add_local>;
-def : DSAtomicIncRetPat<DS_DEC_RTN_U32, i32,
-                        S_MOV_B32, si_atomic_load_sub_local>;
-
-def : DSAtomicRetPat<DS_WRXCHG_RTN_B32, i32, si_atomic_swap_local>;
-def : DSAtomicRetPat<DS_ADD_RTN_U32, i32, si_atomic_load_add_local>;
-def : DSAtomicRetPat<DS_SUB_RTN_U32, i32, si_atomic_load_sub_local>;
-def : DSAtomicRetPat<DS_AND_RTN_B32, i32, si_atomic_load_and_local>;
-def : DSAtomicRetPat<DS_OR_RTN_B32, i32, si_atomic_load_or_local>;
-def : DSAtomicRetPat<DS_XOR_RTN_B32, i32, si_atomic_load_xor_local>;
-def : DSAtomicRetPat<DS_MIN_RTN_I32, i32, si_atomic_load_min_local>;
-def : DSAtomicRetPat<DS_MAX_RTN_I32, i32, si_atomic_load_max_local>;
-def : DSAtomicRetPat<DS_MIN_RTN_U32, i32, si_atomic_load_umin_local>;
-def : DSAtomicRetPat<DS_MAX_RTN_U32, i32, si_atomic_load_umax_local>;
-
-def : DSAtomicCmpXChg<DS_CMPST_RTN_B32, i32, si_atomic_cmp_swap_32_local>;
-
-// 64-bit atomics.
-def : DSAtomicIncRetPat<DS_INC_RTN_U64, i64,
-                        S_MOV_B64, si_atomic_load_add_local>;
-def : DSAtomicIncRetPat<DS_DEC_RTN_U64, i64,
-                        S_MOV_B64, si_atomic_load_sub_local>;
-
-def : DSAtomicRetPat<DS_WRXCHG_RTN_B64, i64, si_atomic_swap_local>;
-def : DSAtomicRetPat<DS_ADD_RTN_U64, i64, si_atomic_load_add_local>;
-def : DSAtomicRetPat<DS_SUB_RTN_U64, i64, si_atomic_load_sub_local>;
-def : DSAtomicRetPat<DS_AND_RTN_B64, i64, si_atomic_load_and_local>;
-def : DSAtomicRetPat<DS_OR_RTN_B64, i64, si_atomic_load_or_local>;
-def : DSAtomicRetPat<DS_XOR_RTN_B64, i64, si_atomic_load_xor_local>;
-def : DSAtomicRetPat<DS_MIN_RTN_I64, i64, si_atomic_load_min_local>;
-def : DSAtomicRetPat<DS_MAX_RTN_I64, i64, si_atomic_load_max_local>;
-def : DSAtomicRetPat<DS_MIN_RTN_U64, i64, si_atomic_load_umin_local>;
-def : DSAtomicRetPat<DS_MAX_RTN_U64, i64, si_atomic_load_umax_local>;
-
-def : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, si_atomic_cmp_swap_64_local>;
-
-
-//===----------------------------------------------------------------------===//
-// MUBUF Patterns
-//===----------------------------------------------------------------------===//
-
-multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
-                              PatFrag constant_ld> {
-  def : Pat <
-     (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
-                                   i16:$offset, i1:$glc, i1:$slc, i1:$tfe))),
-     (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, $glc, $slc, $tfe)
-  >;
-}
-
-let Predicates = [isSICI] in {
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32, constant_load>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32, constant_load>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32, constant_load>;
-} // End Predicates = [isSICI]
-
-class MUBUFScratchLoadPat <MUBUF Instr, ValueType vt, PatFrag ld> : Pat <
-  (vt (ld (MUBUFScratch v4i32:$srsrc, i32:$vaddr,
-                        i32:$soffset, u16imm:$offset))),
-  (Instr $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
->;
-
-def : MUBUFScratchLoadPat <BUFFER_LOAD_SBYTE_OFFEN, i32, sextloadi8_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_UBYTE_OFFEN, i32, extloadi8_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_SSHORT_OFFEN, i32, sextloadi16_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_USHORT_OFFEN, i32, extloadi16_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORD_OFFEN, i32, load_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX2_OFFEN, v2i32, load_private>;
-def : MUBUFScratchLoadPat <BUFFER_LOAD_DWORDX4_OFFEN, v4i32, load_private>;
-
-// BUFFER_LOAD_DWORD*, addr64=0
-multiclass MUBUF_Load_Dword <ValueType vt, MUBUF offset, MUBUF offen, MUBUF idxen,
-                             MUBUF bothen> {
-
-  def : Pat <
-    (vt (int_SI_buffer_load_dword v4i32:$rsrc, (i32 imm), i32:$soffset,
-                                  imm:$offset, 0, 0, imm:$glc, imm:$slc,
-                                  imm:$tfe)),
-    (offset $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
-            (as_i1imm $slc), (as_i1imm $tfe))
-  >;
-
-  def : Pat <
-    (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
-                                  imm:$offset, 1, 0, imm:$glc, imm:$slc,
-                                  imm:$tfe)),
-    (offen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc),
-           (as_i1imm $tfe))
-  >;
-
-  def : Pat <
-    (vt (int_SI_buffer_load_dword v4i32:$rsrc, i32:$vaddr, i32:$soffset,
-                                  imm:$offset, 0, 1, imm:$glc, imm:$slc,
-                                  imm:$tfe)),
-    (idxen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc),
-           (as_i1imm $slc), (as_i1imm $tfe))
-  >;
-
-  def : Pat <
-    (vt (int_SI_buffer_load_dword v4i32:$rsrc, v2i32:$vaddr, i32:$soffset,
-                                  imm:$offset, 1, 1, imm:$glc, imm:$slc,
-                                  imm:$tfe)),
-    (bothen $vaddr, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc),
-            (as_i1imm $tfe))
-  >;
-}
-
-defm : MUBUF_Load_Dword <i32, BUFFER_LOAD_DWORD_OFFSET, BUFFER_LOAD_DWORD_OFFEN,
-                         BUFFER_LOAD_DWORD_IDXEN, BUFFER_LOAD_DWORD_BOTHEN>;
-defm : MUBUF_Load_Dword <v2i32, BUFFER_LOAD_DWORDX2_OFFSET, BUFFER_LOAD_DWORDX2_OFFEN,
-                         BUFFER_LOAD_DWORDX2_IDXEN, BUFFER_LOAD_DWORDX2_BOTHEN>;
-defm : MUBUF_Load_Dword <v4i32, BUFFER_LOAD_DWORDX4_OFFSET, BUFFER_LOAD_DWORDX4_OFFEN,
-                         BUFFER_LOAD_DWORDX4_IDXEN, BUFFER_LOAD_DWORDX4_BOTHEN>;
-
-class MUBUFScratchStorePat <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
-  (st vt:$value, (MUBUFScratch v4i32:$srsrc, i32:$vaddr, i32:$soffset,
-                               u16imm:$offset)),
-  (Instr $value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
->;
-
-def : MUBUFScratchStorePat <BUFFER_STORE_BYTE_OFFEN, i32, truncstorei8_private>;
-def : MUBUFScratchStorePat <BUFFER_STORE_SHORT_OFFEN, i32, truncstorei16_private>;
-def : MUBUFScratchStorePat <BUFFER_STORE_DWORD_OFFEN, i32, store_private>;
-def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX2_OFFEN, v2i32, store_private>;
-def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX4_OFFEN, v4i32, store_private>;
-
-/*
-class MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
-  (st vt:$value, (MUBUFScratch v4i32:$srsrc, i64:$vaddr, u16imm:$offset)),
-  (Instr $value, $srsrc, $vaddr, $offset)
->;
-
-let Predicates = [isSICI] in {
-def : MUBUFStore_Pattern <BUFFER_STORE_BYTE_ADDR64, i32, truncstorei8_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_SHORT_ADDR64, i32, truncstorei16_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORD_ADDR64, i32, store_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2_ADDR64, v2i32, store_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4_ADDR64, v4i32, store_private>;
-} // End Predicates = [isSICI]
-
-*/
-
-//===----------------------------------------------------------------------===//
-// MTBUF Patterns
-//===----------------------------------------------------------------------===//
-
-// TBUFFER_STORE_FORMAT_*, addr64=0
-class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat<
-  (SItbuffer_store v4i32:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
-                   i32:$soffset, imm:$inst_offset, imm:$dfmt,
-                   imm:$nfmt, imm:$offen, imm:$idxen,
-                   imm:$glc, imm:$slc, imm:$tfe),
-  (opcode
-    $vdata, (as_i16imm $inst_offset), (as_i1imm $offen), (as_i1imm $idxen),
-    (as_i1imm $glc), 0, (as_i8imm $dfmt), (as_i8imm $nfmt), $vaddr, $rsrc,
-    (as_i1imm $slc), (as_i1imm $tfe), $soffset)
->;
-
-def : MTBUF_StoreResource <i32, 1, TBUFFER_STORE_FORMAT_X>;
-def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>;
-def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>;
-def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
-
-let SubtargetPredicate = isCI in {
-
-defm V_QSAD_PK_U16_U8 : VOP3Inst <vop3<0x173>, "v_qsad_pk_u16_u8",
-  VOP_I32_I32_I32
->;
-defm V_MQSAD_U16_U8 : VOP3Inst <vop3<0x172>, "v_mqsad_u16_u8",
-  VOP_I32_I32_I32
->;
-defm V_MQSAD_U32_U8 : VOP3Inst <vop3<0x175>, "v_mqsad_u32_u8",
-  VOP_I32_I32_I32
->;
-
-let isCommutable = 1 in {
-defm V_MAD_U64_U32 : VOP3Inst <vop3<0x176>, "v_mad_u64_u32",
-  VOP_I64_I32_I32_I64
->;
-
-// XXX - Does this set VCC?
-defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "v_mad_i64_i32",
-  VOP_I64_I32_I32_I64
->;
-} // End isCommutable = 1
-
-// Remaining instructions:
-// FLAT_*
-// S_CBRANCH_CDBGUSER
-// S_CBRANCH_CDBGSYS
-// S_CBRANCH_CDBGSYS_OR_USER
-// S_CBRANCH_CDBGSYS_AND_USER
-// S_DCACHE_INV_VOL
-// DS_NOP
-// DS_GWS_SEMA_RELEASE_ALL
-// DS_WRAP_RTN_B32
-// DS_CNDXCHG32_RTN_B64
-// DS_WRITE_B96
-// DS_WRITE_B128
-// DS_CONDXCHG32_RTN_B128
-// DS_READ_B96
-// DS_READ_B128
-// BUFFER_LOAD_DWORDX3
-// BUFFER_STORE_DWORDX3
-
-} // End isCI
-
-/********** ====================== **********/
-/**********   Indirect addressing  **********/
-/********** ====================== **********/
-
-multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, SI_INDIRECT_DST IndDst> {
-
-  // 1. Extract with offset
-  def : Pat<
-    (eltvt (vector_extract vt:$vec, (add i32:$idx, imm:$off))),
-    (SI_INDIRECT_SRC $vec, $idx, imm:$off)
-  >;
-
-  // 2. Extract without offset
-  def : Pat<
-    (eltvt (vector_extract vt:$vec, i32:$idx)),
-    (SI_INDIRECT_SRC $vec, $idx, 0)
-  >;
-
-  // 3. Insert with offset
-  def : Pat<
-    (vector_insert vt:$vec, eltvt:$val, (add i32:$idx, imm:$off)),
-    (IndDst $vec, $idx, imm:$off, $val)
-  >;
-
-  // 4. Insert without offset
-  def : Pat<
-    (vector_insert vt:$vec, eltvt:$val, i32:$idx),
-    (IndDst $vec, $idx, 0, $val)
-  >;
-}
-
-defm : SI_INDIRECT_Pattern <v2f32, f32, SI_INDIRECT_DST_V2>;
-defm : SI_INDIRECT_Pattern <v4f32, f32, SI_INDIRECT_DST_V4>;
-defm : SI_INDIRECT_Pattern <v8f32, f32, SI_INDIRECT_DST_V8>;
-defm : SI_INDIRECT_Pattern <v16f32, f32, SI_INDIRECT_DST_V16>;
-
-defm : SI_INDIRECT_Pattern <v2i32, i32, SI_INDIRECT_DST_V2>;
-defm : SI_INDIRECT_Pattern <v4i32, i32, SI_INDIRECT_DST_V4>;
-defm : SI_INDIRECT_Pattern <v8i32, i32, SI_INDIRECT_DST_V8>;
-defm : SI_INDIRECT_Pattern <v16i32, i32, SI_INDIRECT_DST_V16>;
-
-//===----------------------------------------------------------------------===//
-// Conversion Patterns
-//===----------------------------------------------------------------------===//
-
-def : Pat<(i32 (sext_inreg i32:$src, i1)),
-  (S_BFE_I32 i32:$src, 65536)>; // 0 | 1 << 16
-
-// Handle sext_inreg in i64
-def : Pat <
-  (i64 (sext_inreg i64:$src, i1)),
-  (S_BFE_I64 i64:$src, 0x10000) // 0 | 1 << 16
->;
-
-def : Pat <
-  (i64 (sext_inreg i64:$src, i8)),
-  (S_BFE_I64 i64:$src, 0x80000) // 0 | 8 << 16
->;
-
-def : Pat <
-  (i64 (sext_inreg i64:$src, i16)),
-  (S_BFE_I64 i64:$src, 0x100000) // 0 | 16 << 16
->;
-
-def : Pat <
-  (i64 (sext_inreg i64:$src, i32)),
-  (S_BFE_I64 i64:$src, 0x200000) // 0 | 32 << 16
->;
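
A hypothetical helper (not from the patch) that makes the immediate packing in
the patterns above explicit -- offset in the low bits, field width at bit 16,
as the "0 | N << 16" comments say:

    #include <cstdint>
    #include <cassert>

    static uint32_t sbfeImm(uint32_t offset, uint32_t width) {
      return offset | (width << 16);
    }

    int main() {
      assert(sbfeImm(0, 1)  == 0x10000);  // sext_inreg ..., i1
      assert(sbfeImm(0, 8)  == 0x80000);  // sext_inreg ..., i8
      assert(sbfeImm(0, 16) == 0x100000); // sext_inreg ..., i16
      assert(sbfeImm(0, 32) == 0x200000); // sext_inreg ..., i32
    }
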
-
-class ZExt_i64_i32_Pat <SDNode ext> : Pat <
-  (i64 (ext i32:$src)),
-  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 0), sub1)
->;
-
-class ZExt_i64_i1_Pat <SDNode ext> : Pat <
-  (i64 (ext i1:$src)),
-    (REG_SEQUENCE VReg_64,
-      (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
-      (S_MOV_B32 0), sub1)
->;
-
-
-def : ZExt_i64_i32_Pat<zext>;
-def : ZExt_i64_i32_Pat<anyext>;
-def : ZExt_i64_i1_Pat<zext>;
-def : ZExt_i64_i1_Pat<anyext>;
-
-def : Pat <
-  (i64 (sext i32:$src)),
-    (REG_SEQUENCE SReg_64, $src, sub0,
-    (S_ASHR_I32 $src, 31), sub1)
->;
-
-def : Pat <
-  (i64 (sext i1:$src)),
-  (REG_SEQUENCE VReg_64,
-    (V_CNDMASK_B32_e64 0, -1, $src), sub0,
-    (V_CNDMASK_B32_e64 0, -1, $src), sub1)
->;
-
-// If we need to perform a logical operation on i1 values, we need to
-// use vector comparisons since there is only one SCC register. Vector
-// comparisons still write to a pair of SGPRs, so treat these as
-// 64-bit comparisons. When legalizing SGPR copies, instructions
-// resulting in copies from SCC into these instructions will be
-// moved to the VALU.
-def : Pat <
-  (i1 (and i1:$src0, i1:$src1)),
-  (S_AND_B64 $src0, $src1)
->;
-
-def : Pat <
-  (i1 (or i1:$src0, i1:$src1)),
-  (S_OR_B64 $src0, $src1)
->;
-
-def : Pat <
-  (i1 (xor i1:$src0, i1:$src1)),
-  (S_XOR_B64 $src0, $src1)
->;
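
The sketch below (an assumption-flagged model, not LLVM code) shows the idea:
each i1 is really one bit per lane of a 64-wide wavefront, so the whole mask
is combined with a single 64-bit scalar op:

    #include <cstdint>
    #include <cassert>

    int main() {
      uint64_t a = 0x00000000ffffffffull; // "i1 true" in lanes 0-31
      uint64_t b = 0x0000ffff0000ffffull;
      assert((a & b) == 0x000000000000ffffull); // S_AND_B64
      assert((a | b) == 0x0000ffffffffffffull); // S_OR_B64
      assert((a ^ b) == 0x0000ffffffff0000ull); // S_XOR_B64
    }
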
-
-def : Pat <
-  (f32 (sint_to_fp i1:$src)),
-  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_NEG_ONE, $src)
->;
-
-def : Pat <
-  (f32 (uint_to_fp i1:$src)),
-  (V_CNDMASK_B32_e64 (i32 0), CONST.FP32_ONE, $src)
->;
-
-def : Pat <
-  (f64 (sint_to_fp i1:$src)),
-  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
->;
-
-def : Pat <
-  (f64 (uint_to_fp i1:$src)),
-  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
->;
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Patterns
-//===----------------------------------------------------------------------===//
-
-def : Pat <
-  (i32 (trunc i64:$a)),
-  (EXTRACT_SUBREG $a, sub0)
->;
-
-def : Pat <
-  (i1 (trunc i32:$a)),
-  (V_CMP_EQ_I32_e64 (V_AND_B32_e64 (i32 1), $a), 1)
->;
-
-def : Pat <
-  (i1 (trunc i64:$a)),
-  (V_CMP_EQ_I32_e64 (V_AND_B32_e64 (i32 1),
-                    (EXTRACT_SUBREG $a, sub0)), 1)
->;
-
-def : Pat <
-  (i32 (bswap i32:$a)),
-  (V_BFI_B32 (S_MOV_B32 0x00ff00ff),
-             (V_ALIGNBIT_B32 $a, $a, 24),
-             (V_ALIGNBIT_B32 $a, $a, 8))
->;
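
A scalar model of the bswap pattern (hedged: it assumes V_ALIGNBIT_B32 $a, $a, n
is a rotate-right by n and V_BFI_B32 m, x, y computes (x & m) | (y & ~m)):

    #include <cstdint>
    #include <cassert>

    static uint32_t rotr(uint32_t v, unsigned n) { return (v >> n) | (v << (32 - n)); }
    static uint32_t bfi(uint32_t m, uint32_t x, uint32_t y) { return (x & m) | (y & ~m); }

    int main() {
      uint32_t a = 0x11223344;
      // The rotates place each byte where 0x00ff00ff can select the right half.
      assert(bfi(0x00ff00ff, rotr(a, 24), rotr(a, 8)) == 0x44332211);
    }
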
-
-def : Pat <
-  (f32 (select i1:$src2, f32:$src1, f32:$src0)),
-  (V_CNDMASK_B32_e64 $src0, $src1, $src2)
->;
-
-multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
-  def : Pat <
-    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
-    (BFM $a, $b)
-  >;
-
-  def : Pat <
-    (vt (add (vt (shl 1, vt:$a)), -1)),
-    (BFM $a, (MOV 0))
-  >;
-}
-
-defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
-// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;
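
What those two DAG patterns match, restated as plain C++ (a sketch; widths of
32 or more are not handled, mirroring the i32-only defm above):

    #include <cstdint>
    #include <cassert>

    // S_BFM_B32 a, b per the patterns: a field of 'a' ones shifted left by 'b'.
    static uint32_t bfm(uint32_t a, uint32_t b) { return ((1u << a) - 1u) << b; }

    int main() {
      assert(bfm(4, 0) == 0x0000000f); // second pattern: (shl 1, a) - 1
      assert(bfm(8, 8) == 0x0000ff00); // first pattern: the mask shifted by b
    }
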
-
-def : BFEPattern <V_BFE_U32, S_MOV_B32>;
-
-//===----------------------------------------------------------------------===//
-// Fract Patterns
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isSI] in {
-
-// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
-// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
-// way to implement it is using V_FRACT_F64.
-// The workaround for the V_FRACT bug is:
-//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
-
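
In scalar form, the workaround reads roughly as follows (a sketch only;
std::nextafter(1.0, 0.0) is the 0x3fefffffffffffff constant, the largest
double below 1.0):

    #include <cmath>
    #include <cstdio>

    static double fractWorkaround(double x) {
      double f = x - std::floor(x); // stand-in for V_FRACT_F64
      // The min() clamp keeps the result below 1.0 even when rounding in
      // x - floor(x) would otherwise yield exactly 1.0 for tiny negative x.
      return std::isnan(x) ? x : std::fmin(f, std::nextafter(1.0, 0.0));
    }

    int main() { std::printf("%.17g\n", fractWorkaround(-1e-20)); }
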
-// Convert (x + (-floor(x))) to fract(x)
-def : Pat <
-  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
-             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
-  (V_CNDMASK_B64_PSEUDO
-      $x,
-      (V_MIN_F64
-          SRCMODS.NONE,
-          (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
-          SRCMODS.NONE,
-          (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
-          DSTCLAMP.NONE, DSTOMOD.NONE),
-      (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, 3/*NaN*/))
->;
-
-// Convert floor(x) to (x - fract(x))
-def : Pat <
-  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
-  (V_ADD_F64
-      $mods,
-      $x,
-      SRCMODS.NEG,
-      (V_CNDMASK_B64_PSEUDO
-         $x,
-         (V_MIN_F64
-             SRCMODS.NONE,
-             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
-             SRCMODS.NONE,
-             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
-             DSTCLAMP.NONE, DSTOMOD.NONE),
-         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, 3/*NaN*/)),
-      DSTCLAMP.NONE, DSTOMOD.NONE)
->;
-
-} // End Predicates = [isSI]
-
-let Predicates = [isCI] in {
-
-// Convert (x - floor(x)) to fract(x)
-def : Pat <
-  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
-             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
-  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
->;
-
-// Convert (x + (-floor(x))) to fract(x)
-def : Pat <
-  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
-             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
-  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
->;
-
-} // End Predicates = [isCI]
-
-//============================================================================//
-// Miscellaneous Optimization Patterns
-//============================================================================//
-
-def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;
-
-//============================================================================//
-// Assembler aliases
-//============================================================================//
-
-def : MnemonicAlias<"v_add_u32", "v_add_i32">;
-def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
-def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;
-
-} // End isGCN predicate

Removed: llvm/trunk/lib/Target/R600/SIIntrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIIntrinsics.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIIntrinsics.td (original)
+++ llvm/trunk/lib/Target/R600/SIIntrinsics.td (removed)
@@ -1,199 +0,0 @@
-//===-- SIIntrinsics.td - SI Intrinsic defs ----------------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// SI Intrinsic Definitions
-//
-//===----------------------------------------------------------------------===//
-
-
-let TargetPrefix = "SI", isTarget = 1 in {
-
-  def int_SI_tid : Intrinsic <[llvm_i32_ty], [], [IntrNoMem]>;
-  def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>;
-  def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_anyint_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ;
-
-  // Fully-flexible TBUFFER_STORE_FORMAT_* except for the ADDR64 bit, which is not exposed
-  def int_SI_tbuffer_store : Intrinsic <
-    [],
-    [llvm_anyint_ty, // rsrc(SGPR)
-     llvm_anyint_ty, // vdata(VGPR), overloaded for types i32, v2i32, v4i32
-     llvm_i32_ty,    // num_channels(imm), selects opcode suffix: 1=X, 2=XY, 3=XYZ, 4=XYZW
-     llvm_i32_ty,    // vaddr(VGPR)
-     llvm_i32_ty,    // soffset(SGPR)
-     llvm_i32_ty,    // inst_offset(imm)
-     llvm_i32_ty,    // dfmt(imm)
-     llvm_i32_ty,    // nfmt(imm)
-     llvm_i32_ty,    // offen(imm)
-     llvm_i32_ty,    // idxen(imm)
-     llvm_i32_ty,    // glc(imm)
-     llvm_i32_ty,    // slc(imm)
-     llvm_i32_ty],   // tfe(imm)
-    []>;
-
-  // Fully-flexible BUFFER_LOAD_DWORD_* except for the ADDR64 bit, which is not exposed
-  def int_SI_buffer_load_dword : Intrinsic <
-    [llvm_anyint_ty], // vdata(VGPR), overloaded for types i32, v2i32, v4i32
-    [llvm_anyint_ty,  // rsrc(SGPR)
-     llvm_anyint_ty,  // vaddr(VGPR)
-     llvm_i32_ty,     // soffset(SGPR)
-     llvm_i32_ty,     // inst_offset(imm)
-     llvm_i32_ty,     // offen(imm)
-     llvm_i32_ty,     // idxen(imm)
-     llvm_i32_ty,     // glc(imm)
-     llvm_i32_ty,     // slc(imm)
-     llvm_i32_ty],    // tfe(imm)
-    [IntrReadArgMem]>;
-
-  def int_SI_sendmsg : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  // Fully-flexible SAMPLE instruction.
-  class SampleRaw : Intrinsic <
-    [llvm_v4f32_ty],    // vdata(VGPR)
-    [llvm_anyint_ty,    // vaddr(VGPR)
-     llvm_v8i32_ty,     // rsrc(SGPR)
-     llvm_v4i32_ty,     // sampler(SGPR)
-     llvm_i32_ty,       // dmask(imm)
-     llvm_i32_ty,       // unorm(imm)
-     llvm_i32_ty,       // r128(imm)
-     llvm_i32_ty,       // da(imm)
-     llvm_i32_ty,       // glc(imm)
-     llvm_i32_ty,       // slc(imm)
-     llvm_i32_ty,       // tfe(imm)
-     llvm_i32_ty],      // lwe(imm)
-    [IntrNoMem]>;
-
-  // Image instruction without a sampler.
-  class Image : Intrinsic <
-    [llvm_v4f32_ty],    // vdata(VGPR)
-    [llvm_anyint_ty,    // vaddr(VGPR)
-     llvm_v8i32_ty,     // rsrc(SGPR)
-     llvm_i32_ty,       // dmask(imm)
-     llvm_i32_ty,       // unorm(imm)
-     llvm_i32_ty,       // r128(imm)
-     llvm_i32_ty,       // da(imm)
-     llvm_i32_ty,       // glc(imm)
-     llvm_i32_ty,       // slc(imm)
-     llvm_i32_ty,       // tfe(imm)
-     llvm_i32_ty],      // lwe(imm)
-    [IntrNoMem]>;
-
-  // Basic sample
-  def int_SI_image_sample : SampleRaw;
-  def int_SI_image_sample_cl : SampleRaw;
-  def int_SI_image_sample_d : SampleRaw;
-  def int_SI_image_sample_d_cl : SampleRaw;
-  def int_SI_image_sample_l : SampleRaw;
-  def int_SI_image_sample_b : SampleRaw;
-  def int_SI_image_sample_b_cl : SampleRaw;
-  def int_SI_image_sample_lz : SampleRaw;
-  def int_SI_image_sample_cd : SampleRaw;
-  def int_SI_image_sample_cd_cl : SampleRaw;
-
-  // Sample with comparison
-  def int_SI_image_sample_c : SampleRaw;
-  def int_SI_image_sample_c_cl : SampleRaw;
-  def int_SI_image_sample_c_d : SampleRaw;
-  def int_SI_image_sample_c_d_cl : SampleRaw;
-  def int_SI_image_sample_c_l : SampleRaw;
-  def int_SI_image_sample_c_b : SampleRaw;
-  def int_SI_image_sample_c_b_cl : SampleRaw;
-  def int_SI_image_sample_c_lz : SampleRaw;
-  def int_SI_image_sample_c_cd : SampleRaw;
-  def int_SI_image_sample_c_cd_cl : SampleRaw;
-
-  // Sample with offsets
-  def int_SI_image_sample_o : SampleRaw;
-  def int_SI_image_sample_cl_o : SampleRaw;
-  def int_SI_image_sample_d_o : SampleRaw;
-  def int_SI_image_sample_d_cl_o : SampleRaw;
-  def int_SI_image_sample_l_o : SampleRaw;
-  def int_SI_image_sample_b_o : SampleRaw;
-  def int_SI_image_sample_b_cl_o : SampleRaw;
-  def int_SI_image_sample_lz_o : SampleRaw;
-  def int_SI_image_sample_cd_o : SampleRaw;
-  def int_SI_image_sample_cd_cl_o : SampleRaw;
-
-  // Sample with comparison and offsets
-  def int_SI_image_sample_c_o : SampleRaw;
-  def int_SI_image_sample_c_cl_o : SampleRaw;
-  def int_SI_image_sample_c_d_o : SampleRaw;
-  def int_SI_image_sample_c_d_cl_o : SampleRaw;
-  def int_SI_image_sample_c_l_o : SampleRaw;
-  def int_SI_image_sample_c_b_o : SampleRaw;
-  def int_SI_image_sample_c_b_cl_o : SampleRaw;
-  def int_SI_image_sample_c_lz_o : SampleRaw;
-  def int_SI_image_sample_c_cd_o : SampleRaw;
-  def int_SI_image_sample_c_cd_cl_o : SampleRaw;
-
-  // Basic gather4
-  def int_SI_gather4 : SampleRaw;
-  def int_SI_gather4_cl : SampleRaw;
-  def int_SI_gather4_l : SampleRaw;
-  def int_SI_gather4_b : SampleRaw;
-  def int_SI_gather4_b_cl : SampleRaw;
-  def int_SI_gather4_lz : SampleRaw;
-
-  // Gather4 with comparison
-  def int_SI_gather4_c : SampleRaw;
-  def int_SI_gather4_c_cl : SampleRaw;
-  def int_SI_gather4_c_l : SampleRaw;
-  def int_SI_gather4_c_b : SampleRaw;
-  def int_SI_gather4_c_b_cl : SampleRaw;
-  def int_SI_gather4_c_lz : SampleRaw;
-
-  // Gather4 with offsets
-  def int_SI_gather4_o : SampleRaw;
-  def int_SI_gather4_cl_o : SampleRaw;
-  def int_SI_gather4_l_o : SampleRaw;
-  def int_SI_gather4_b_o : SampleRaw;
-  def int_SI_gather4_b_cl_o : SampleRaw;
-  def int_SI_gather4_lz_o : SampleRaw;
-
-  // Gather4 with comparison and offsets
-  def int_SI_gather4_c_o : SampleRaw;
-  def int_SI_gather4_c_cl_o : SampleRaw;
-  def int_SI_gather4_c_l_o : SampleRaw;
-  def int_SI_gather4_c_b_o : SampleRaw;
-  def int_SI_gather4_c_b_cl_o : SampleRaw;
-  def int_SI_gather4_c_lz_o : SampleRaw;
-
-  def int_SI_getlod : SampleRaw;
-
-  // Image intrinsics.
-  def int_SI_image_load : Image;
-  def int_SI_image_load_mip : Image;
-  def int_SI_getresinfo : Image;
-
-  // Deprecated image and sample intrinsics.
-  class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  def int_SI_sample : Sample;
-  def int_SI_sampleb : Sample;
-  def int_SI_sampled : Sample;
-  def int_SI_samplel : Sample;
-  def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_SI_resinfo : Intrinsic <[llvm_v4i32_ty], [llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
-
-  /* Interpolation Intrinsics */
-
-  def int_SI_fs_constant : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_SI_fs_interp : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_v2i32_ty], [IntrNoMem]>;
-
-  /* Control flow Intrinsics */
-
-  def int_SI_if : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_empty_ty], []>;
-  def int_SI_else : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_empty_ty], []>;
-  def int_SI_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
-  def int_SI_if_break : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_i64_ty], []>;
-  def int_SI_else_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], []>;
-  def int_SI_loop : Intrinsic<[], [llvm_i64_ty, llvm_empty_ty], []>;
-  def int_SI_end_cf : Intrinsic<[], [llvm_i64_ty], []>;
-}

Removed: llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp (removed)
@@ -1,421 +0,0 @@
-//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass tries to fuse DS instructions with nearby immediate offsets.
-// This will fuse operations such as
-//  ds_read_b32 v0, v2 offset:16
-//  ds_read_b32 v1, v2 offset:32
-// ==>
-//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
-//
-//
-// Future improvements:
-//
-// - This currently relies on the scheduler to place loads and stores next to
-//   each other, and then only merges adjacent pairs of instructions. It would
-//   be good to be more flexible with interleaved instructions, and possibly run
-//   before scheduling. It currently misses stores of constants because loading
-//   the constant into the data register is placed between the stores, although
-//   this is arguably a scheduling problem.
-
-// - Live interval recomputation seems inefficient. This currently only matches
-//   one pair, recomputes live intervals, and moves on to the next pair. It
-//   would be better to compute a list of all merges that need to occur.
-
-// - With a list of instructions to process, we could also merge more. If a
-//   cluster of loads have offsets that are too large to fit in the 8-bit
-//   offset fields, but are close enough to each other, we can add to the base
-//   pointer and use the new reduced offsets.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "si-load-store-opt"
-
-namespace {
-
-class SILoadStoreOptimizer : public MachineFunctionPass {
-private:
-  const SIInstrInfo *TII;
-  const SIRegisterInfo *TRI;
-  MachineRegisterInfo *MRI;
-  LiveIntervals *LIS;
-
-
-  static bool offsetsCanBeCombined(unsigned Offset0,
-                                   unsigned Offset1,
-                                   unsigned EltSize);
-
-  MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
-                                                 unsigned EltSize);
-
-  void updateRegDefsUses(unsigned SrcReg,
-                         unsigned DstReg,
-                         unsigned SubIdx);
-
-  MachineBasicBlock::iterator mergeRead2Pair(
-    MachineBasicBlock::iterator I,
-    MachineBasicBlock::iterator Paired,
-    unsigned EltSize);
-
-  MachineBasicBlock::iterator mergeWrite2Pair(
-    MachineBasicBlock::iterator I,
-    MachineBasicBlock::iterator Paired,
-    unsigned EltSize);
-
-public:
-  static char ID;
-
-  SILoadStoreOptimizer()
-      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
-        LIS(nullptr) {}
-
-  SILoadStoreOptimizer(const TargetMachine &TM_) : MachineFunctionPass(ID) {
-    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool optimizeBlock(MachineBasicBlock &MBB);
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Load / Store Optimizer";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addPreserved<SlotIndexes>();
-    AU.addPreserved<LiveIntervals>();
-    AU.addPreserved<LiveVariables>();
-    AU.addRequired<LiveIntervals>();
-
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
-                      "SI Load / Store Optimizer", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(LiveVariables)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
-                    "SI Load / Store Optimizer", false, false)
-
-char SILoadStoreOptimizer::ID = 0;
-
-char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
-
-FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
-  return new SILoadStoreOptimizer(TM);
-}
-
-bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
-                                                unsigned Offset1,
-                                                unsigned Size) {
-  // XXX - Would the same offset be OK? Is there any reason this would happen or
-  // be useful?
-  if (Offset0 == Offset1)
-    return false;
-
-  // This won't be valid if the offset isn't aligned.
-  if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
-    return false;
-
-  unsigned EltOffset0 = Offset0 / Size;
-  unsigned EltOffset1 = Offset1 / Size;
-
-  // Check if the new offsets fit in the reduced 8-bit range.
-  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
-    return true;
-
-  // If the offset in elements doesn't fit in 8-bits, we might be able to use
-  // the stride 64 versions.
-  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64) != 0)
-    return false;
-
-  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
-}
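
A standalone restatement of offsetsCanBeCombined() (same logic, written out so
the two encodings are visible; isUInt<8> is just "< 256"):

    #include <cassert>

    static bool combinable(unsigned o0, unsigned o1, unsigned size) {
      if (o0 == o1 || o0 % size != 0 || o1 % size != 0)
        return false;
      unsigned e0 = o0 / size, e1 = o1 / size;
      if (e0 < 256 && e1 < 256)
        return true;                             // plain read2/write2
      if (e0 % 64 != 0 || e1 % 64 != 0)
        return false;
      return e0 / 64 < 256 && e1 / 64 < 256;     // read2st64/write2st64
    }

    int main() {
      assert(combinable(16, 32, 4));  // the ds_read2_b32 case: offsets 4 and 8
      assert(!combinable(16, 18, 4)); // 18 is not 4-byte aligned
      assert(combinable(0, 4096, 4)); // element offsets 0/1024 -> st64 form 0/16
    }
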
-
-MachineBasicBlock::iterator
-SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
-                                         unsigned EltSize) {
-  MachineBasicBlock::iterator E = I->getParent()->end();
-  MachineBasicBlock::iterator MBBI = I;
-  ++MBBI;
-
-  if (MBBI->getOpcode() != I->getOpcode())
-    return E;
-
-  // Don't merge volatiles.
-  if (MBBI->hasOrderedMemoryRef())
-    return E;
-
-  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
-  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
-  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);
-
-  // Check same base pointer. Be careful of subregisters, which can occur with
-  // vectors of pointers.
-  if (AddrReg0.getReg() == AddrReg1.getReg() &&
-      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
-    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
-                                               AMDGPU::OpName::offset);
-    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
-    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
-
-    // Check both offsets fit in the reduced range.
-    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
-      return MBBI;
-  }
-
-  return E;
-}
-
-void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
-                                             unsigned DstReg,
-                                             unsigned SubIdx) {
-  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg),
-         E = MRI->reg_end(); I != E; ) {
-    MachineOperand &O = *I;
-    ++I;
-    O.substVirtReg(DstReg, SubIdx, *TRI);
-  }
-}
-
-MachineBasicBlock::iterator  SILoadStoreOptimizer::mergeRead2Pair(
-  MachineBasicBlock::iterator I,
-  MachineBasicBlock::iterator Paired,
-  unsigned EltSize) {
-  MachineBasicBlock *MBB = I->getParent();
-
-  // Be careful, since the addresses could be subregisters themselves in weird
-  // cases, like vectors of pointers.
-  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
-
-  unsigned DestReg0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst)->getReg();
-  unsigned DestReg1
-    = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst)->getReg();
-
-  unsigned Offset0
-          = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
-  unsigned Offset1
-    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
-
-  unsigned NewOffset0 = Offset0 / EltSize;
-  unsigned NewOffset1 = Offset1 / EltSize;
-  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
-
-  // Prefer the st64 form if we can use it, even if we can fit the offset in the
-  // non-st64 version. I'm not sure if there's any real reason to do this.
-  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
-  if (UseST64) {
-    NewOffset0 /= 64;
-    NewOffset1 /= 64;
-    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
-  }
-
-  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
-         (NewOffset0 != NewOffset1) &&
-         "Computed offset doesn't fit");
-
-  const MCInstrDesc &Read2Desc = TII->get(Opc);
-
-  const TargetRegisterClass *SuperRC
-    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
-  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
-
-  DebugLoc DL = I->getDebugLoc();
-  MachineInstrBuilder Read2
-    = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
-    .addOperand(*AddrReg) // addr
-    .addImm(NewOffset0) // offset0
-    .addImm(NewOffset1) // offset1
-    .addImm(0) // gds
-    .addMemOperand(*I->memoperands_begin())
-    .addMemOperand(*Paired->memoperands_begin());
-
-  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
-  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
-  updateRegDefsUses(DestReg0, DestReg, SubRegIdx0);
-  updateRegDefsUses(DestReg1, DestReg, SubRegIdx1);
-
-  LIS->RemoveMachineInstrFromMaps(I);
-  // Replacing Paired in the maps with Read2 allows us to avoid updating the
-  // live range for the m0 register.
-  LIS->ReplaceMachineInstrInMaps(Paired, Read2);
-  I->eraseFromParent();
-  Paired->eraseFromParent();
-
-  LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg());
-  LIS->shrinkToUses(&AddrRegLI);
-
-  LIS->getInterval(DestReg); // Create new LI
-
-  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
-  return Read2.getInstr();
-}
-
-MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
-  MachineBasicBlock::iterator I,
-  MachineBasicBlock::iterator Paired,
-  unsigned EltSize) {
-  MachineBasicBlock *MBB = I->getParent();
-
-  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
-  // sure we preserve the subregister index and any register flags set on them.
-  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
-  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
-  const MachineOperand *Data1
-    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);
-
-
-  unsigned Offset0
-    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
-  unsigned Offset1
-    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
-
-  unsigned NewOffset0 = Offset0 / EltSize;
-  unsigned NewOffset1 = Offset1 / EltSize;
-  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
-
-  // Prefer the st64 form if we can use it, even if we can fit the offset in the
-  // non-st64 version. I'm not sure if there's any real reason to do this.
-  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
-  if (UseST64) {
-    NewOffset0 /= 64;
-    NewOffset1 /= 64;
-    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
-  }
-
-  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
-         (NewOffset0 != NewOffset1) &&
-         "Computed offset doesn't fit");
-
-  const MCInstrDesc &Write2Desc = TII->get(Opc);
-  DebugLoc DL = I->getDebugLoc();
-
-  // repairIntervalsInRange() doesn't handle physical registers, so we have
-  // to update the M0 range manually.
-  SlotIndex PairedIndex = LIS->getInstructionIndex(Paired);
-  LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
-  LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
-  bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
-
-  MachineInstrBuilder Write2
-    = BuildMI(*MBB, I, DL, Write2Desc)
-    .addOperand(*Addr) // addr
-    .addOperand(*Data0) // data0
-    .addOperand(*Data1) // data1
-    .addImm(NewOffset0) // offset0
-    .addImm(NewOffset1) // offset1
-    .addImm(0) // gds
-    .addMemOperand(*I->memoperands_begin())
-    .addMemOperand(*Paired->memoperands_begin());
-
-  // XXX - How do we express subregisters here?
-  unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };
-
-  LIS->RemoveMachineInstrFromMaps(I);
-  LIS->RemoveMachineInstrFromMaps(Paired);
-  I->eraseFromParent();
-  Paired->eraseFromParent();
-
-  // This doesn't handle physical registers like M0
-  LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);
-
-  if (UpdateM0Range) {
-    SlotIndex Write2Index = LIS->getInstructionIndex(Write2);
-    M0Segment->end = Write2Index.getRegSlot();
-  }
-
-  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
-  return Write2.getInstr();
-}
-
-// Scan through looking for adjacent LDS operations with constant offsets from
-// the same base register. We rely on the scheduler to do the hard work of
-// clustering nearby loads, and assume these are all adjacent.
-bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
-  bool Modified = false;
-
-  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
-    MachineInstr &MI = *I;
-
-    // Don't combine if volatile.
-    if (MI.hasOrderedMemoryRef()) {
-      ++I;
-      continue;
-    }
-
-    unsigned Opc = MI.getOpcode();
-    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
-      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
-      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
-      if (Match != E) {
-        Modified = true;
-        I = mergeRead2Pair(I, Match, Size);
-      } else {
-        ++I;
-      }
-
-      continue;
-    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
-      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
-      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
-      if (Match != E) {
-        Modified = true;
-        I = mergeWrite2Pair(I, Match, Size);
-      } else {
-        ++I;
-      }
-
-      continue;
-    }
-
-    ++I;
-  }
-
-  return Modified;
-}
-
-bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
-  const TargetSubtargetInfo &STM = MF.getSubtarget();
-  TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
-  TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
-  MRI = &MF.getRegInfo();
-
-  LIS = &getAnalysis<LiveIntervals>();
-
-  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
-
-  assert(!MRI->isSSA());
-
-  bool Modified = false;
-
-  for (MachineBasicBlock &MBB : MF)
-    Modified |= optimizeBlock(MBB);
-
-  return Modified;
-}
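
The offset handling in mergeWrite2Pair above reduces to a few lines of integer
math. The following is a minimal standalone sketch (plain C++, all names local
to this example, not part of the backend) of how a pair of byte offsets becomes
the 8-bit offset0/offset1 fields, preferring the st64 form when both are
64-element aligned; the real pass asserts instead of reporting failure:

  #include <cassert>
  #include <cstdint>

  // Convert two byte offsets into write2-style offset fields for EltSize-byte
  // elements. Returns false if they cannot be encoded.
  bool encodeWrite2Offsets(uint32_t Byte0, uint32_t Byte1, uint32_t EltSize,
                           uint8_t &Off0, uint8_t &Off1, bool &St64) {
    uint32_t O0 = Byte0 / EltSize;
    uint32_t O1 = Byte1 / EltSize;
    St64 = (O0 % 64 == 0) && (O1 % 64 == 0); // prefer the st64 form
    if (St64) {
      O0 /= 64;
      O1 /= 64;
    }
    if (O0 > 0xff || O1 > 0xff || O0 == O1)  // 8-bit fields, must differ
      return false;
    Off0 = static_cast<uint8_t>(O0);
    Off1 = static_cast<uint8_t>(O1);
    return true;
  }

  int main() {
    uint8_t A, B;
    bool St64;
    // 4-byte elements at byte offsets 0 and 256 are both 64-element aligned,
    // so the st64 form encodes them as 0 and 1.
    assert(encodeWrite2Offsets(0, 256, 4, A, B, St64) && St64 && A == 0 && B == 1);
    return 0;
  }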

Removed: llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp (original)
+++ llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp (removed)
@@ -1,605 +0,0 @@
-//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief This pass lowers the pseudo control flow instructions to real
-/// machine instructions.
-///
-/// All control flow is handled using predicated instructions and
-/// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
-/// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
-/// by writing to the 64-bit EXEC register (each bit corresponds to a
-/// single vector ALU).  Typically, for predicates, a vector ALU will write
-/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one bit for
-/// each Vector ALU) and then the Scalar ALU will AND the VCC register with
-/// EXEC to update the predicates.
-///
-/// For example:
-/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
-/// %SGPR0 = SI_IF %VCC
-///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
-/// %SGPR0 = SI_ELSE %SGPR0
-///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
-/// SI_END_CF %SGPR0
-///
-/// becomes:
-///
-/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
-/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
-/// S_CBRANCH_EXECZ label0            // This instruction is an optional
-///                                   // optimization which allows us to
-///                                   // branch if all the bits of
-///                                   // EXEC are zero.
-/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
-///
-/// label0:
-/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC   // Restore the exec mask for the Then block
-/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Clear live bits from saved exec mask
-/// S_CBRANCH_EXECZ label1             // Use our branch optimization
-///                                    // instruction again.
-/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
-/// label1:
-/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Constants.h"
-
-using namespace llvm;
-
-namespace {
-
-class SILowerControlFlowPass : public MachineFunctionPass {
-
-private:
-  static const unsigned SkipThreshold = 12;
-
-  static char ID;
-  const SIRegisterInfo *TRI;
-  const SIInstrInfo *TII;
-
-  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);
-
-  void Skip(MachineInstr &From, MachineOperand &To);
-  void SkipIfDead(MachineInstr &MI);
-
-  void If(MachineInstr &MI);
-  void Else(MachineInstr &MI);
-  void Break(MachineInstr &MI);
-  void IfBreak(MachineInstr &MI);
-  void ElseBreak(MachineInstr &MI);
-  void Loop(MachineInstr &MI);
-  void EndCf(MachineInstr &MI);
-
-  void Kill(MachineInstr &MI);
-  void Branch(MachineInstr &MI);
-
-  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
-  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
-  void IndirectSrc(MachineInstr &MI);
-  void IndirectDst(MachineInstr &MI);
-
-public:
-  SILowerControlFlowPass(TargetMachine &tm) :
-    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Lower control flow instructions";
-  }
-
-};
-
-} // End anonymous namespace
-
-char SILowerControlFlowPass::ID = 0;
-
-FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
-  return new SILowerControlFlowPass(tm);
-}
-
-bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
-                                        MachineBasicBlock *To) {
-
-  unsigned NumInstr = 0;
-
-  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
-       MBB = *MBB->succ_begin()) {
-
-    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
-         NumInstr < SkipThreshold && I != E; ++I) {
-
-      if (I->isBundle() || !I->isBundled())
-        if (++NumInstr >= SkipThreshold)
-          return true;
-    }
-  }
-
-  return false;
-}
-
-void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
-
-  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
-    return;
-
-  DebugLoc DL = From.getDebugLoc();
-  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
-          .addOperand(To)
-          .addReg(AMDGPU::EXEC);
-}
-
-void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
-
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
-      ShaderType::PIXEL ||
-      !shouldSkip(&MBB, &MBB.getParent()->back()))
-    return;
-
-  MachineBasicBlock::iterator Insert = &MI;
-  ++Insert;
-
-  // If the exec mask is non-zero, skip the next two instructions
-  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
-          .addImm(3)
-          .addReg(AMDGPU::EXEC);
-
-  // Exec mask is zero: Export to NULL target...
-  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
-          .addImm(0)
-          .addImm(0x09) // V_008DFC_SQ_EXP_NULL
-          .addImm(0)
-          .addImm(1)
-          .addImm(1)
-          .addReg(AMDGPU::VGPR0)
-          .addReg(AMDGPU::VGPR0)
-          .addReg(AMDGPU::VGPR0)
-          .addReg(AMDGPU::VGPR0);
-
-  // ... and terminate wavefront
-  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
-}
-
-void SILowerControlFlowPass::If(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  unsigned Reg = MI.getOperand(0).getReg();
-  unsigned Vcc = MI.getOperand(1).getReg();
-
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
-          .addReg(Vcc);
-
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
-          .addReg(AMDGPU::EXEC)
-          .addReg(Reg);
-
-  Skip(MI, MI.getOperand(2));
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::Else(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Src = MI.getOperand(1).getReg();
-
-  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
-          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
-          .addReg(Src); // Saved EXEC
-
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
-          .addReg(AMDGPU::EXEC)
-          .addReg(Dst);
-
-  Skip(MI, MI.getOperand(2));
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::Break(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Src = MI.getOperand(1).getReg();
- 
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
-          .addReg(AMDGPU::EXEC)
-          .addReg(Src);
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Vcc = MI.getOperand(1).getReg();
-  unsigned Src = MI.getOperand(2).getReg();
- 
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
-          .addReg(Vcc)
-          .addReg(Src);
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Saved = MI.getOperand(1).getReg();
-  unsigned Src = MI.getOperand(2).getReg();
- 
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
-          .addReg(Saved)
-          .addReg(Src);
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::Loop(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  unsigned Src = MI.getOperand(0).getReg();
-
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
-          .addReg(AMDGPU::EXEC)
-          .addReg(Src);
-
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
-          .addOperand(MI.getOperand(1))
-          .addReg(AMDGPU::EXEC);
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  unsigned Reg = MI.getOperand(0).getReg();
-
-  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
-          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
-          .addReg(AMDGPU::EXEC)
-          .addReg(Reg);
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::Branch(MachineInstr &MI) {
-  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
-    MI.eraseFromParent();
-
-  // If these aren't equal, this is probably an infinite loop.
-}
-
-void SILowerControlFlowPass::Kill(MachineInstr &MI) {
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  const MachineOperand &Op = MI.getOperand(0);
-
-#ifndef NDEBUG
-  const SIMachineFunctionInfo *MFI
-    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
-  // Kill is only allowed in pixel / geometry shaders.
-  assert(MFI->getShaderType() == ShaderType::PIXEL ||
-         MFI->getShaderType() == ShaderType::GEOMETRY);
-#endif
-
-  // Clear this thread from the exec mask if the operand is negative
-  if ((Op.isImm())) {
-    // Constant operand: Set exec mask to 0 or do nothing
-    if (Op.getImm() & 0x80000000) {
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
-              .addImm(0);
-    }
-  } else {
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
-           .addImm(0)
-           .addOperand(Op);
-  }
-
-  MI.eraseFromParent();
-}
-
-void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
-
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-  MachineBasicBlock::iterator I = MI;
-
-  unsigned Save = MI.getOperand(1).getReg();
-  unsigned Idx = MI.getOperand(3).getReg();
-
-  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
-    if (Offset) {
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
-              .addReg(Idx)
-              .addImm(Offset);
-    } else {
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-              .addReg(Idx);
-    }
-    MBB.insert(I, MovRel);
-  } else {
-
-    assert(AMDGPU::SReg_64RegClass.contains(Save));
-    assert(AMDGPU::VGPR_32RegClass.contains(Idx));
-
-    // Save the EXEC mask
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
-            .addReg(AMDGPU::EXEC);
-
-    // Read the next variant into VCC (lower 32 bits) <- also loop target
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
-            AMDGPU::VCC_LO)
-            .addReg(Idx);
-
-    // Move index from VCC into M0
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-            .addReg(AMDGPU::VCC_LO);
-
-    // Compare the just-read M0 value to all possible Idx values
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
-            .addReg(AMDGPU::M0)
-            .addReg(Idx);
-
-    // Update EXEC, save the original EXEC value to VCC
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
-            .addReg(AMDGPU::VCC);
-
-    if (Offset) {
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
-              .addReg(AMDGPU::M0)
-              .addImm(Offset);
-    }
-    // Do the actual move
-    MBB.insert(I, MovRel);
-
-    // Update EXEC, switch all done bits to 0 and all todo bits to 1
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
-            .addReg(AMDGPU::EXEC)
-            .addReg(AMDGPU::VCC);
-
-    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
-            .addImm(-7)
-            .addReg(AMDGPU::EXEC);
-
-    // Restore EXEC
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
-            .addReg(Save);
-
-  }
-  MI.eraseFromParent();
-}
-
-/// \param @VecReg The register which holds element zero of the vector
-///                 being addressed into.
-/// \param[out] @Reg The base register to use in the indirect addressing instruction.
-/// \param[in,out] @Offset As an input, this is the constant offset part of the
-///                        indirect index, e.g. v0 = v[VecReg + Offset].
-///                        As an output, this is a constant value that needs
-///                        to be added to the value stored in M0.
-void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
-                                                         unsigned &Reg,
-                                                         int &Offset) {
-  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
-  if (!SubReg)
-    SubReg = VecReg;
-
-  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
-  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;
-
-  if (RegIdx < 0) {
-    Offset = RegIdx;
-    RegIdx = 0;
-  } else {
-    Offset = 0;
-  }
-
-  Reg = RC->getRegister(RegIdx);
-}
-
-void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
-
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Vec = MI.getOperand(2).getReg();
-  int Off = MI.getOperand(4).getImm();
-  unsigned Reg;
-
-  computeIndirectRegAndOffset(Vec, Reg, Off);
-
-  MachineInstr *MovRel =
-    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
-            .addReg(Reg)
-            .addReg(AMDGPU::M0, RegState::Implicit)
-            .addReg(Vec, RegState::Implicit);
-
-  LoadM0(MI, MovRel, Off);
-}
-
-void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
-
-  MachineBasicBlock &MBB = *MI.getParent();
-  DebugLoc DL = MI.getDebugLoc();
-
-  unsigned Dst = MI.getOperand(0).getReg();
-  int Off = MI.getOperand(4).getImm();
-  unsigned Val = MI.getOperand(5).getReg();
-  unsigned Reg;
-
-  computeIndirectRegAndOffset(Dst, Reg, Off);
-
-  MachineInstr *MovRel = 
-    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
-            .addReg(Reg, RegState::Define)
-            .addReg(Val)
-            .addReg(AMDGPU::M0, RegState::Implicit)
-            .addReg(Dst, RegState::Implicit);
-
-  LoadM0(MI, MovRel, Off);
-}
-
-bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
-  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  TRI =
-      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-
-  bool HaveKill = false;
-  bool NeedWQM = false;
-  bool NeedFlat = false;
-  unsigned Depth = 0;
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-       BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    MachineBasicBlock::iterator I, Next;
-    for (I = MBB.begin(); I != MBB.end(); I = Next) {
-      Next = std::next(I);
-
-      MachineInstr &MI = *I;
-      if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
-        NeedWQM = true;
-
-      // Flat uses m0 in case it needs to access LDS.
-      if (TII->isFLAT(MI.getOpcode()))
-        NeedFlat = true;
-
-      switch (MI.getOpcode()) {
-        default: break;
-        case AMDGPU::SI_IF:
-          ++Depth;
-          If(MI);
-          break;
-
-        case AMDGPU::SI_ELSE:
-          Else(MI);
-          break;
-
-        case AMDGPU::SI_BREAK:
-          Break(MI);
-          break;
-
-        case AMDGPU::SI_IF_BREAK:
-          IfBreak(MI);
-          break;
-
-        case AMDGPU::SI_ELSE_BREAK:
-          ElseBreak(MI);
-          break;
-
-        case AMDGPU::SI_LOOP:
-          ++Depth;
-          Loop(MI);
-          break;
-
-        case AMDGPU::SI_END_CF:
-          if (--Depth == 0 && HaveKill) {
-            SkipIfDead(MI);
-            HaveKill = false;
-          }
-          EndCf(MI);
-          break;
-
-        case AMDGPU::SI_KILL:
-          if (Depth == 0)
-            SkipIfDead(MI);
-          else
-            HaveKill = true;
-          Kill(MI);
-          break;
-
-        case AMDGPU::S_BRANCH:
-          Branch(MI);
-          break;
-
-        case AMDGPU::SI_INDIRECT_SRC:
-          IndirectSrc(MI);
-          break;
-
-        case AMDGPU::SI_INDIRECT_DST_V1:
-        case AMDGPU::SI_INDIRECT_DST_V2:
-        case AMDGPU::SI_INDIRECT_DST_V4:
-        case AMDGPU::SI_INDIRECT_DST_V8:
-        case AMDGPU::SI_INDIRECT_DST_V16:
-          IndirectDst(MI);
-          break;
-      }
-    }
-  }
-
-  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
-    MachineBasicBlock &MBB = MF.front();
-    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
-            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
-  }
-
-  // FIXME: This seems inappropriate to do here.
-  if (NeedFlat && MFI->IsKernel) {
-    // Insert the prologue initializing the SGPRs pointing to the scratch space
-    // for flat accesses.
-    const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
-
-    // TODO: What to use with function calls?
-
-    // FIXME: This is reporting stack size that is used in a scratch buffer
-    // rather than registers as well.
-    uint64_t StackSizeBytes = FrameInfo->getStackSize();
-
-    int IndirectBegin
-      = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
-    // Convert register index to 256-byte unit.
-    uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);
-
-    assert(StackSizeBytes < 0xffff && StackOffset < 0xffff &&
-           "Stack limits should fit in 16 bits");
-
-    // Initialize the flat scratch register pair.
-    // TODO: Can we use one s_mov_b64 here?
-
-    // Offset is in units of 256-bytes.
-    MachineBasicBlock &MBB = MF.front();
-    DebugLoc NoDL;
-    MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
-    const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);
-
-    assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));
-
-    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
-      .addImm(StackOffset);
-
-    // Documentation says size is "per-thread scratch size in bytes"
-    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
-      .addImm(StackSizeBytes);
-  }
-
-  return true;
-}
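
The exec-mask scheme described in the header comment of this file is easy to
model as plain bit arithmetic. Below is a small host-side sketch (ordinary C++,
not LLVM or GPU code; the VCC value is made up for illustration) of how EXEC
evolves through SI_IF / SI_ELSE / SI_END_CF:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t Full = ~0ull;
    uint64_t EXEC = Full;                  // all 64 lanes enabled
    uint64_t VCC  = 0x00000000FFFFFFFFull; // lanes where the compare was true

    // SI_IF -> S_AND_SAVEEXEC_B64 + S_XOR_B64
    uint64_t SGPR0 = EXEC;                 // save the incoming mask
    EXEC &= VCC;                           // enable only the 'then' lanes
    SGPR0 ^= EXEC;                         // SGPR0 now holds the 'else' lanes
    // ... the 'then' block runs with EXEC == then lanes ...

    // SI_ELSE -> S_OR_SAVEEXEC_B64 + S_XOR_B64
    uint64_t ThenMask = EXEC;              // save the 'then' lanes
    EXEC |= SGPR0;                         // OR in the pending 'else' lanes
    EXEC ^= ThenMask;                      // enable exactly the 'else' lanes
    // ... the 'else' block runs with EXEC == else lanes ...

    // SI_END_CF -> S_OR_B64
    EXEC |= ThenMask;                      // re-enable the 'then' lanes
    assert(EXEC == Full);
    return 0;
  }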

Removed: llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp (original)
+++ llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp (removed)
@@ -1,151 +0,0 @@
-//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// i1 values are usually inserted by the CFG Structurize pass and they are
-/// unique in that they can be copied from VALU to SALU registers.
-/// This is not possible for any other value type.  Since there are no
-/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move the i1.
-///
-//===----------------------------------------------------------------------===//
-//
-
-#define DEBUG_TYPE "si-i1-copies"
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Function.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-namespace {
-
-class SILowerI1Copies : public MachineFunctionPass {
-public:
-  static char ID;
-
-public:
-  SILowerI1Copies() : MachineFunctionPass(ID) {
-    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Lower i1 Copies";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addRequired<MachineDominatorTree>();
-    AU.setPreservesCFG();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE,
-                      "SI Lower i1 Copies", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE,
-                    "SI Lower i1 Copies", false, false)
-
-char SILowerI1Copies::ID = 0;
-
-char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;
-
-FunctionPass *llvm::createSILowerI1CopiesPass() {
-  return new SILowerI1Copies();
-}
-
-bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  std::vector<unsigned> I1Defs;
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-                                                  BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    MachineBasicBlock::iterator I, Next;
-    for (I = MBB.begin(); I != MBB.end(); I = Next) {
-      Next = std::next(I);
-      MachineInstr &MI = *I;
-
-      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) {
-        unsigned Reg = MI.getOperand(0).getReg();
-        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
-        if (RC == &AMDGPU::VReg_1RegClass)
-          MRI.setRegClass(Reg, &AMDGPU::SReg_64RegClass);
-        continue;
-      }
-
-      if (MI.getOpcode() != AMDGPU::COPY)
-        continue;
-
-      const MachineOperand &Dst = MI.getOperand(0);
-      const MachineOperand &Src = MI.getOperand(1);
-
-      if (!TargetRegisterInfo::isVirtualRegister(Src.getReg()) ||
-          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
-        continue;
-
-      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg());
-      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg());
-
-      if (DstRC == &AMDGPU::VReg_1RegClass &&
-          TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
-        I1Defs.push_back(Dst.getReg());
-        DebugLoc DL = MI.getDebugLoc();
-
-        MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg());
-        if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) {
-          if (DefInst->getOperand(1).isImm()) {
-            I1Defs.push_back(Dst.getReg());
-
-            int64_t Val = DefInst->getOperand(1).getImm();
-            assert(Val == 0 || Val == -1);
-
-            BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
-              .addOperand(Dst)
-              .addImm(Val);
-            MI.eraseFromParent();
-            continue;
-          }
-        }
-
-        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
-          .addOperand(Dst)
-          .addImm(0)
-          .addImm(-1)
-          .addOperand(Src);
-        MI.eraseFromParent();
-      } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
-                 SrcRC == &AMDGPU::VReg_1RegClass) {
-        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
-          .addOperand(Dst)
-          .addOperand(Src)
-          .addImm(0);
-        MI.eraseFromParent();
-      }
-    }
-  }
-
-  for (unsigned Reg : I1Defs)
-    MRI.setRegClass(Reg, &AMDGPU::VGPR_32RegClass);
-
-  return false;
-}
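
The two copy directions this pass handles are also easy to see as bit math:
the SALU form of an i1 is one bit of a 64-bit mask, while the VALU form is a
per-lane 32-bit 0/-1 value. A purely illustrative host-side sketch of the
round trip (C++, one loop iteration standing in for each lane):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t SMask = 0x5555555555555555ull; // SALU i1: one bit per lane

    // SALU -> VALU: V_CNDMASK_B32 0, -1, mask selects -1 where the bit is set.
    int32_t V[64];
    for (int Lane = 0; Lane < 64; ++Lane)
      V[Lane] = ((SMask >> Lane) & 1) ? -1 : 0;

    // VALU -> SALU: V_CMP_NE against 0 rebuilds the mask from the lane values.
    uint64_t Back = 0;
    for (int Lane = 0; Lane < 64; ++Lane)
      if (V[Lane] != 0)
        Back |= 1ull << Lane;

    assert(Back == SMask);
    return 0;
  }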

Removed: llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp (removed)
@@ -1,77 +0,0 @@
-//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info -------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-
-#include "SIMachineFunctionInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-
-#define MAX_LANES 64
-
-using namespace llvm;
-
-
-// Pin the vtable to this file.
-void SIMachineFunctionInfo::anchor() {}
-
-SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
-  : AMDGPUMachineFunction(MF),
-    TIDReg(AMDGPU::NoRegister),
-    HasSpilledVGPRs(false),
-    PSInputAddr(0),
-    NumUserSGPRs(0),
-    LDSWaveSpillSize(0) { }
-
-SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
-                                                       MachineFunction *MF,
-                                                       unsigned FrameIndex,
-                                                       unsigned SubIdx) {
-  const MachineFrameInfo *FrameInfo = MF->getFrameInfo();
-  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
-      MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
-  MachineRegisterInfo &MRI = MF->getRegInfo();
-  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
-  Offset += SubIdx * 4;
-
-  unsigned LaneVGPRIdx = Offset / (64 * 4);
-  unsigned Lane = (Offset / 4) % 64;
-
-  struct SpilledReg Spill;
-
-  if (!LaneVGPRs.count(LaneVGPRIdx)) {
-    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);
-    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;
-    MRI.setPhysRegUsed(LaneVGPR);
-
-    // Add this register as live-in to all blocks to avoid machine verifier
-    // complaining about use of an undefined physical register.
-    for (MachineFunction::iterator BI = MF->begin(), BE = MF->end();
-         BI != BE; ++BI) {
-      BI->addLiveIn(LaneVGPR);
-    }
-  }
-
-  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
-  Spill.Lane = Lane;
-  return Spill;
-}
-
-unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
-                                              const MachineFunction &MF) const {
-  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-  // FIXME: We should get this information from kernel attributes if it
-  // is available.
-  return getShaderType() == ShaderType::COMPUTE ? 256 : ST.getWavefrontSize();
-}
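
The index arithmetic in getSpilledReg packs 64 four-byte spill slots into each
lane VGPR: the byte offset selects which VGPR and which of its 64 lanes. A
worked sketch of that mapping (plain C++; the helper and struct are local to
this example):

  #include <cassert>

  struct LanePos {
    unsigned VGPRIdx; // which lane VGPR holds this slot
    unsigned Lane;    // which of its 64 lanes
  };

  // Mirrors: LaneVGPRIdx = Offset / (64 * 4); Lane = (Offset / 4) % 64;
  LanePos spillSlotForOffset(unsigned OffsetBytes) {
    return {OffsetBytes / (64 * 4), (OffsetBytes / 4) % 64};
  }

  int main() {
    assert(spillSlotForOffset(0).VGPRIdx == 0 && spillSlotForOffset(0).Lane == 0);
    // Slot 63 (byte offset 252) is the last lane of the first VGPR...
    assert(spillSlotForOffset(252).Lane == 63);
    // ...and slot 64 (byte offset 256) rolls over to lane 0 of the next VGPR.
    assert(spillSlotForOffset(256).VGPRIdx == 1 && spillSlotForOffset(256).Lane == 0);
    return 0;
  }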

Removed: llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h (original)
+++ llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h (removed)
@@ -1,66 +0,0 @@
-//===- SIMachineFunctionInfo.h - SIMachineFunctionInfo interface -*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_LIB_TARGET_R600_SIMACHINEFUNCTIONINFO_H
-#define LLVM_LIB_TARGET_R600_SIMACHINEFUNCTIONINFO_H
-
-#include "AMDGPUMachineFunction.h"
-#include "SIRegisterInfo.h"
-#include <map>
-
-namespace llvm {
-
-class MachineRegisterInfo;
-
-/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
-/// tells the hardware which interpolation parameters to load.
-class SIMachineFunctionInfo : public AMDGPUMachineFunction {
-  void anchor() override;
-
-  unsigned TIDReg;
-  bool HasSpilledVGPRs;
-
-public:
-
-  struct SpilledReg {
-    unsigned VGPR;
-    int Lane;
-    SpilledReg(unsigned R, int L) : VGPR (R), Lane (L) { }
-    SpilledReg() : VGPR(0), Lane(-1) { }
-    bool hasLane() { return Lane != -1;}
-  };
-
-  // SIMachineFunctionInfo definition
-
-  SIMachineFunctionInfo(const MachineFunction &MF);
-  SpilledReg getSpilledReg(MachineFunction *MF, unsigned FrameIndex,
-                           unsigned SubIdx);
-  unsigned PSInputAddr;
-  unsigned NumUserSGPRs;
-  std::map<unsigned, unsigned> LaneVGPRs;
-  unsigned LDSWaveSpillSize;
-  unsigned ScratchOffsetReg;
-  bool hasCalculatedTID() const { return TIDReg != AMDGPU::NoRegister; }
-  unsigned getTIDReg() const { return TIDReg; }
-  void setTIDReg(unsigned Reg) { TIDReg = Reg; }
-  bool hasSpilledVGPRs() const { return HasSpilledVGPRs; }
-  void setHasSpilledVGPRs(bool Spill = true) { HasSpilledVGPRs = Spill; }
-
-  unsigned getMaximumWorkGroupSize(const MachineFunction &MF) const;
-};
-
-} // End namespace llvm
-
-
-#endif

Removed: llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp (removed)
@@ -1,194 +0,0 @@
-//===-- SIPrepareScratchRegs.cpp - Use predicates for control flow --------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// This pass loads the scratch pointer and scratch offset into a register or a
-/// frame index which can be used anywhere in the program.  These values will
-/// be used for spilling VGPRs.
-///
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIDefines.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-
-using namespace llvm;
-
-namespace {
-
-class SIPrepareScratchRegs : public MachineFunctionPass {
-
-private:
-  static char ID;
-
-public:
-  SIPrepareScratchRegs() : MachineFunctionPass(ID) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI prepare scratch registers";
-  }
-
-};
-
-} // End anonymous namespace
-
-char SIPrepareScratchRegs::ID = 0;
-
-FunctionPass *llvm::createSIPrepareScratchRegs() {
-  return new SIPrepareScratchRegs();
-}
-
-bool SIPrepareScratchRegs::runOnMachineFunction(MachineFunction &MF) {
-  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  MachineFrameInfo *FrameInfo = MF.getFrameInfo();
-  MachineBasicBlock *Entry = MF.begin();
-  MachineBasicBlock::iterator I = Entry->begin();
-  DebugLoc DL = I->getDebugLoc();
-
-  // FIXME: If we don't have enough VGPRs for SGPR spilling we will need to
-  // run this pass.
-  if (!MFI->hasSpilledVGPRs())
-    return false;
-
-  unsigned ScratchPtrPreloadReg =
-      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
-  unsigned ScratchOffsetPreloadReg =
-      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
-
-  if (!Entry->isLiveIn(ScratchPtrPreloadReg))
-    Entry->addLiveIn(ScratchPtrPreloadReg);
-
-  if (!Entry->isLiveIn(ScratchOffsetPreloadReg))
-    Entry->addLiveIn(ScratchOffsetPreloadReg);
-
-  // Load the scratch offset.
-  unsigned ScratchOffsetReg =
-      TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_32RegClass);
-  int ScratchOffsetFI = -1;
-
-  if (ScratchOffsetReg != AMDGPU::NoRegister) {
-    // Found an SGPR to use
-    MRI.setPhysRegUsed(ScratchOffsetReg);
-    BuildMI(*Entry, I, DL, TII->get(AMDGPU::S_MOV_B32), ScratchOffsetReg)
-            .addReg(ScratchOffsetPreloadReg);
-  } else {
-    // No SGPR is available, we must spill.
-    ScratchOffsetFI = FrameInfo->CreateSpillStackObject(4, 4);
-    BuildMI(*Entry, I, DL, TII->get(AMDGPU::SI_SPILL_S32_SAVE))
-            .addReg(ScratchOffsetPreloadReg)
-            .addFrameIndex(ScratchOffsetFI)
-            .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
-            .addReg(AMDGPU::SGPR0, RegState::Undef);
-  }
-
-
-  // Now that we have the scratch pointer and offset values, we need to
-  // add them to all the SI_SPILL_V* instructions.
-
-  RegScavenger RS;
-  unsigned ScratchRsrcFI = FrameInfo->CreateSpillStackObject(16, 4);
-  RS.addScavengingFrameIndex(ScratchRsrcFI);
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-       BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    // Add the scratch offset reg as a live-in so that the register scavenger
-    // doesn't re-use it.
-    if (!MBB.isLiveIn(ScratchOffsetReg) &&
-        ScratchOffsetReg != AMDGPU::NoRegister)
-      MBB.addLiveIn(ScratchOffsetReg);
-    RS.enterBasicBlock(&MBB);
-
-    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
-         I != E; ++I) {
-      MachineInstr &MI = *I;
-      RS.forward(I);
-      DebugLoc DL = MI.getDebugLoc();
-      if (!TII->isVGPRSpill(MI.getOpcode()))
-        continue;
-
-      // Scratch resource
-      unsigned ScratchRsrcReg =
-          RS.scavengeRegister(&AMDGPU::SReg_128RegClass, 0);
-
-      uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
-                      0xffffffff; // Size
-
-      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
-      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
-      unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
-      unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
-
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc0)
-              .addExternalSymbol("SCRATCH_RSRC_DWORD0")
-              .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
-
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc1)
-              .addExternalSymbol("SCRATCH_RSRC_DWORD1")
-              .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
-
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc2)
-              .addImm(Rsrc & 0xffffffff)
-              .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
-
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), Rsrc3)
-              .addImm(Rsrc >> 32)
-              .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
-
-      // Scratch Offset
-      if (ScratchOffsetReg == AMDGPU::NoRegister) {
-        ScratchOffsetReg = RS.scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
-        BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_SPILL_S32_RESTORE),
-                ScratchOffsetReg)
-                .addFrameIndex(ScratchOffsetFI)
-                .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
-                .addReg(AMDGPU::SGPR0, RegState::Undef);
-      } else if (!MBB.isLiveIn(ScratchOffsetReg)) {
-        MBB.addLiveIn(ScratchOffsetReg);
-      }
-
-      if (ScratchRsrcReg == AMDGPU::NoRegister ||
-          ScratchOffsetReg == AMDGPU::NoRegister) {
-        LLVMContext &Ctx = MF.getFunction()->getContext();
-        Ctx.emitError("ran out of SGPRs for spilling VGPRs");
-        ScratchRsrcReg = AMDGPU::SGPR0;
-        ScratchOffsetReg = AMDGPU::SGPR0;
-      }
-      MI.getOperand(2).setReg(ScratchRsrcReg);
-      MI.getOperand(2).setIsKill(true);
-      MI.getOperand(2).setIsUndef(false);
-      MI.getOperand(3).setReg(ScratchOffsetReg);
-      MI.getOperand(3).setIsUndef(false);
-      MI.getOperand(3).setIsKill(false);
-      MI.addOperand(MachineOperand::CreateReg(Rsrc0, false, true, true));
-      MI.addOperand(MachineOperand::CreateReg(Rsrc1, false, true, true));
-      MI.addOperand(MachineOperand::CreateReg(Rsrc2, false, true, true));
-      MI.addOperand(MachineOperand::CreateReg(Rsrc3, false, true, true));
-    }
-  }
-  return true;
-}
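
The scratch resource descriptor built above is completed by one 64-bit
constant emitted as two 32-bit moves (the base address dwords come from
relocated symbols instead). A sketch of the split (C++; the RSRC_* bit values
here are stand-ins for illustration, not the backend's real constants):

  #include <cassert>
  #include <cstdint>

  int main() {
    // Stand-in values; the real RSRC_DATA_FORMAT / RSRC_TID_ENABLE constants
    // live in the AMDGPU backend and occupy the high 32 bits.
    const uint64_t RsrcDataFormat = 0xf00000000000ull;
    const uint64_t RsrcTidEnable  = 1ull << 55;
    uint64_t Rsrc = RsrcDataFormat | RsrcTidEnable | 0xffffffffull; // size

    uint32_t Rsrc2 = static_cast<uint32_t>(Rsrc & 0xffffffff); // low dword
    uint32_t Rsrc3 = static_cast<uint32_t>(Rsrc >> 32);        // high dword

    assert(Rsrc2 == 0xffffffffu); // the size field fills the low dword
    assert(((static_cast<uint64_t>(Rsrc3) << 32) | Rsrc2) == Rsrc);
    return 0;
  }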

Removed: llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp (removed)
@@ -1,543 +0,0 @@
-//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief SI implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "SIRegisterInfo.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-
-using namespace llvm;
-
-SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}
-
-BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
-  BitVector Reserved(getNumRegs());
-  Reserved.set(AMDGPU::EXEC);
-
-  // EXEC_LO and EXEC_HI could be allocated and used as regular register,
-  // but this seems likely to result in bugs, so I'm marking them as reserved.
-  Reserved.set(AMDGPU::EXEC_LO);
-  Reserved.set(AMDGPU::EXEC_HI);
-
-  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
-  Reserved.set(AMDGPU::FLAT_SCR);
-  Reserved.set(AMDGPU::FLAT_SCR_LO);
-  Reserved.set(AMDGPU::FLAT_SCR_HI);
-
-  // Reserve some VGPRs to use as temp registers in case we have to spill VGPRs
-  Reserved.set(AMDGPU::VGPR255);
-  Reserved.set(AMDGPU::VGPR254);
-
-  // Tonga and Iceland can only allocate a fixed number of SGPRs due
-  // to a hw bug.
-  if (MF.getSubtarget<AMDGPUSubtarget>().hasSGPRInitBug()) {
-    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
-    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
-    // Assume XNACK_MASK is unused.
-    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;
-
-    for (unsigned i = Limit; i < NumSGPRs; ++i) {
-      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
-      MCRegAliasIterator R = MCRegAliasIterator(Reg, this, true);
-
-      for (; R.isValid(); ++R)
-        Reserved.set(*R);
-    }
-  }
-
-  return Reserved;
-}
-
-unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
-                                                unsigned Idx) const {
-
-  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
-  // FIXME: We should adjust the max number of waves based on LDS size.
-  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
-                                          STI.getMaxWavesPerCU());
-  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());
-
-  for (regclass_iterator I = regclass_begin(), E = regclass_end();
-       I != E; ++I) {
-
-    unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
-    unsigned Limit;
-
-    if (isSGPRClass(*I)) {
-      Limit = SGPRLimit / NumSubRegs;
-    } else {
-      Limit = VGPRLimit / NumSubRegs;
-    }
-
-    const int *Sets = getRegClassPressureSets(*I);
-    assert(Sets);
-    for (unsigned i = 0; Sets[i] != -1; ++i) {
-      if (Sets[i] == (int)Idx)
-        return Limit;
-    }
-  }
-  return 256;
-}
-
-bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
-  return Fn.getFrameInfo()->hasStackObjects();
-}
-
-static unsigned getNumSubRegsForSpillOp(unsigned Op) {
-
-  switch (Op) {
-  case AMDGPU::SI_SPILL_S512_SAVE:
-  case AMDGPU::SI_SPILL_S512_RESTORE:
-  case AMDGPU::SI_SPILL_V512_SAVE:
-  case AMDGPU::SI_SPILL_V512_RESTORE:
-    return 16;
-  case AMDGPU::SI_SPILL_S256_SAVE:
-  case AMDGPU::SI_SPILL_S256_RESTORE:
-  case AMDGPU::SI_SPILL_V256_SAVE:
-  case AMDGPU::SI_SPILL_V256_RESTORE:
-    return 8;
-  case AMDGPU::SI_SPILL_S128_SAVE:
-  case AMDGPU::SI_SPILL_S128_RESTORE:
-  case AMDGPU::SI_SPILL_V128_SAVE:
-  case AMDGPU::SI_SPILL_V128_RESTORE:
-    return 4;
-  case AMDGPU::SI_SPILL_V96_SAVE:
-  case AMDGPU::SI_SPILL_V96_RESTORE:
-    return 3;
-  case AMDGPU::SI_SPILL_S64_SAVE:
-  case AMDGPU::SI_SPILL_S64_RESTORE:
-  case AMDGPU::SI_SPILL_V64_SAVE:
-  case AMDGPU::SI_SPILL_V64_RESTORE:
-    return 2;
-  case AMDGPU::SI_SPILL_S32_SAVE:
-  case AMDGPU::SI_SPILL_S32_RESTORE:
-  case AMDGPU::SI_SPILL_V32_SAVE:
-  case AMDGPU::SI_SPILL_V32_RESTORE:
-    return 1;
-  default: llvm_unreachable("Invalid spill opcode");
-  }
-}
-
-void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
-                                           unsigned LoadStoreOp,
-                                           unsigned Value,
-                                           unsigned ScratchRsrcReg,
-                                           unsigned ScratchOffset,
-                                           int64_t Offset,
-                                           RegScavenger *RS) const {
-
-  MachineBasicBlock *MBB = MI->getParent();
-  const MachineFunction *MF = MI->getParent()->getParent();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
-  LLVMContext &Ctx = MF->getFunction()->getContext();
-  DebugLoc DL = MI->getDebugLoc();
-  bool IsLoad = TII->get(LoadStoreOp).mayLoad();
-
-  bool RanOutOfSGPRs = false;
-  unsigned SOffset = ScratchOffset;
-
-  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
-  unsigned Size = NumSubRegs * 4;
-
-  if (!isUInt<12>(Offset + Size)) {
-    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
-    if (SOffset == AMDGPU::NoRegister) {
-      RanOutOfSGPRs = true;
-      SOffset = AMDGPU::SGPR0;
-    }
-    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
-            .addReg(ScratchOffset)
-            .addImm(Offset);
-    Offset = 0;
-  }
-
-  if (RanOutOfSGPRs)
-    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");
-
-  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
-    unsigned SubReg = NumSubRegs > 1 ?
-        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
-        Value;
-    bool IsKill = (i == e - 1);
-
-    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
-            .addReg(SubReg, getDefRegState(IsLoad))
-            .addReg(ScratchRsrcReg, getKillRegState(IsKill))
-            .addReg(SOffset)
-            .addImm(Offset)
-            .addImm(0) // glc
-            .addImm(0) // slc
-            .addImm(0) // tfe
-            .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
-  }
-}
-
-void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
-                                        int SPAdj, unsigned FIOperandNum,
-                                        RegScavenger *RS) const {
-  MachineFunction *MF = MI->getParent()->getParent();
-  MachineBasicBlock *MBB = MI->getParent();
-  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
-  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
-  DebugLoc DL = MI->getDebugLoc();
-
-  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
-  int Index = MI->getOperand(FIOperandNum).getIndex();
-
-  switch (MI->getOpcode()) {
-    // SGPR register spill
-    case AMDGPU::SI_SPILL_S512_SAVE:
-    case AMDGPU::SI_SPILL_S256_SAVE:
-    case AMDGPU::SI_SPILL_S128_SAVE:
-    case AMDGPU::SI_SPILL_S64_SAVE:
-    case AMDGPU::SI_SPILL_S32_SAVE: {
-      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
-
-      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
-        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
-                                           &AMDGPU::SGPR_32RegClass, i);
-        struct SIMachineFunctionInfo::SpilledReg Spill =
-            MFI->getSpilledReg(MF, Index, i);
-
-        if (Spill.VGPR == AMDGPU::NoRegister) {
-           LLVMContext &Ctx = MF->getFunction()->getContext();
-           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
-        }
-
-        BuildMI(*MBB, MI, DL,
-                TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
-                Spill.VGPR)
-                .addReg(SubReg)
-                .addImm(Spill.Lane);
-
-      }
-      MI->eraseFromParent();
-      break;
-    }
-
-    // SGPR register restore
-    case AMDGPU::SI_SPILL_S512_RESTORE:
-    case AMDGPU::SI_SPILL_S256_RESTORE:
-    case AMDGPU::SI_SPILL_S128_RESTORE:
-    case AMDGPU::SI_SPILL_S64_RESTORE:
-    case AMDGPU::SI_SPILL_S32_RESTORE: {
-      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
-
-      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
-        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
-                                           &AMDGPU::SGPR_32RegClass, i);
-        struct SIMachineFunctionInfo::SpilledReg Spill =
-            MFI->getSpilledReg(MF, Index, i);
-
-        if (Spill.VGPR == AMDGPU::NoRegister) {
-           LLVMContext &Ctx = MF->getFunction()->getContext();
-           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
-        }
-
-        BuildMI(*MBB, MI, DL,
-                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
-                SubReg)
-                .addReg(Spill.VGPR)
-                .addImm(Spill.Lane)
-                .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
-      }
-
-      // TODO: only do this when it is needed
-      switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
-      case AMDGPUSubtarget::SOUTHERN_ISLANDS:
-        // "VALU writes SGPR" -> "SMRD reads that SGPR" needs "S_NOP 3" on SI
-        TII->insertNOPs(MI, 3);
-        break;
-      case AMDGPUSubtarget::SEA_ISLANDS:
-        break;
-      default: // VOLCANIC_ISLANDS and later
-        // "VALU writes SGPR -> VMEM reads that SGPR" needs "S_NOP 4" on VI
-        // and later. This also applies to VALUs which write VCC, but we're
-        // unlikely to see VMEM use VCC.
-        TII->insertNOPs(MI, 4);
-      }
-
-      MI->eraseFromParent();
-      break;
-    }
-
-    // VGPR register spill
-    case AMDGPU::SI_SPILL_V512_SAVE:
-    case AMDGPU::SI_SPILL_V256_SAVE:
-    case AMDGPU::SI_SPILL_V128_SAVE:
-    case AMDGPU::SI_SPILL_V96_SAVE:
-    case AMDGPU::SI_SPILL_V64_SAVE:
-    case AMDGPU::SI_SPILL_V32_SAVE:
-      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
-            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
-            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
-            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
-             FrameInfo->getObjectOffset(Index), RS);
-      MI->eraseFromParent();
-      break;
-    case AMDGPU::SI_SPILL_V32_RESTORE:
-    case AMDGPU::SI_SPILL_V64_RESTORE:
-    case AMDGPU::SI_SPILL_V96_RESTORE:
-    case AMDGPU::SI_SPILL_V128_RESTORE:
-    case AMDGPU::SI_SPILL_V256_RESTORE:
-    case AMDGPU::SI_SPILL_V512_RESTORE: {
-      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
-            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
-            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
-            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
-            FrameInfo->getObjectOffset(Index), RS);
-      MI->eraseFromParent();
-      break;
-    }
-
-    default: {
-      int64_t Offset = FrameInfo->getObjectOffset(Index);
-      FIOp.ChangeToImmediate(Offset);
-      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
-        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
-        BuildMI(*MBB, MI, MI->getDebugLoc(),
-                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
-                .addImm(Offset);
-        FIOp.ChangeToRegister(TmpReg, false, false, true);
-      }
-    }
-  }
-}
-
-const TargetRegisterClass *SIRegisterInfo::getCFGStructurizerRegClass(
-                                                                  MVT VT) const {
-  switch(VT.SimpleTy) {
-    default:
-    case MVT::i32: return &AMDGPU::VGPR_32RegClass;
-  }
-}
-
-unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
-  return getEncodingValue(Reg) & 0xff;
-}
-
-const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
-  assert(!TargetRegisterInfo::isVirtualRegister(Reg));
-
-  static const TargetRegisterClass *BaseClasses[] = {
-    &AMDGPU::VGPR_32RegClass,
-    &AMDGPU::SReg_32RegClass,
-    &AMDGPU::VReg_64RegClass,
-    &AMDGPU::SReg_64RegClass,
-    &AMDGPU::VReg_96RegClass,
-    &AMDGPU::VReg_128RegClass,
-    &AMDGPU::SReg_128RegClass,
-    &AMDGPU::VReg_256RegClass,
-    &AMDGPU::SReg_256RegClass,
-    &AMDGPU::VReg_512RegClass
-  };
-
-  for (const TargetRegisterClass *BaseClass : BaseClasses) {
-    if (BaseClass->contains(Reg)) {
-      return BaseClass;
-    }
-  }
-  return nullptr;
-}
-
-bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
-  return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
-         getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
-         getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
-         getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
-         getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
-         getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
-}
-
-const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
-                                         const TargetRegisterClass *SRC) const {
-  if (hasVGPRs(SRC)) {
-    return SRC;
-  } else if (SRC == &AMDGPU::SCCRegRegClass) {
-    return &AMDGPU::VCCRegRegClass;
-  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
-    return &AMDGPU::VGPR_32RegClass;
-  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
-    return &AMDGPU::VReg_64RegClass;
-  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
-    return &AMDGPU::VReg_128RegClass;
-  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
-    return &AMDGPU::VReg_256RegClass;
-  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
-    return &AMDGPU::VReg_512RegClass;
-  }
-  return nullptr;
-}
-
-const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
-                         const TargetRegisterClass *RC, unsigned SubIdx) const {
-  if (SubIdx == AMDGPU::NoSubRegister)
-    return RC;
-
-  // If this register has a sub-register, we can safely assume it is a 32-bit
-  // register, because all of SI's sub-registers are 32-bit.
-  if (isSGPRClass(RC)) {
-    return &AMDGPU::SGPR_32RegClass;
-  } else {
-    return &AMDGPU::VGPR_32RegClass;
-  }
-}
-
-unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
-                                          const TargetRegisterClass *SubRC,
-                                          unsigned Channel) const {
-
-  switch (Reg) {
-    case AMDGPU::VCC:
-      switch(Channel) {
-        case 0: return AMDGPU::VCC_LO;
-        case 1: return AMDGPU::VCC_HI;
-        default: llvm_unreachable("Invalid SubIdx for VCC");
-      }
-
-  case AMDGPU::FLAT_SCR:
-    switch (Channel) {
-    case 0:
-      return AMDGPU::FLAT_SCR_LO;
-    case 1:
-      return AMDGPU::FLAT_SCR_HI;
-    default:
-      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
-    }
-    break;
-
-  case AMDGPU::EXEC:
-    switch (Channel) {
-    case 0:
-      return AMDGPU::EXEC_LO;
-    case 1:
-      return AMDGPU::EXEC_HI;
-    default:
-      llvm_unreachable("Invalid SubIdx for EXEC");
-    }
-    break;
-  }
-
-  const TargetRegisterClass *RC = getPhysRegClass(Reg);
-  // 32-bit registers don't have sub-registers, so we can just return the
-  // Reg.  We need to have this check here, because the calculation below
-  // using getHWRegIndex() will fail with special 32-bit registers like
-  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
-  if (RC->getSize() == 4) {
-    assert(Channel == 0);
-    return Reg;
-  }
-
-  unsigned Index = getHWRegIndex(Reg);
-  return SubRC->getRegister(Index + Channel);
-}
-
-bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
-  return OpType == AMDGPU::OPERAND_REG_IMM32;
-}
-
-bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
-  if (opCanUseLiteralConstant(OpType))
-    return true;
-
-  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
-}
-
-unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
-                                           enum PreloadedValue Value) const {
-
-  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  switch (Value) {
-  case SIRegisterInfo::TGID_X:
-    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
-  case SIRegisterInfo::TGID_Y:
-    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
-  case SIRegisterInfo::TGID_Z:
-    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
-  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
-    if (MFI->getShaderType() != ShaderType::COMPUTE)
-      return MFI->ScratchOffsetReg;
-    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
-  case SIRegisterInfo::SCRATCH_PTR:
-    return AMDGPU::SGPR2_SGPR3;
-  case SIRegisterInfo::INPUT_PTR:
-    return AMDGPU::SGPR0_SGPR1;
-  case SIRegisterInfo::TIDIG_X:
-    return AMDGPU::VGPR0;
-  case SIRegisterInfo::TIDIG_Y:
-    return AMDGPU::VGPR1;
-  case SIRegisterInfo::TIDIG_Z:
-    return AMDGPU::VGPR2;
-  }
-  llvm_unreachable("unexpected preloaded value type");
-}
-
-/// \brief Returns a register that is not used at any point in the function.
-///        If all registers are used, then this function will return
-///        AMDGPU::NoRegister.
-unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
-                                            const TargetRegisterClass *RC) const {
-
-  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
-       I != E; ++I) {
-    if (!MRI.isPhysRegUsed(*I))
-      return *I;
-  }
-  return AMDGPU::NoRegister;
-}
-
-unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
-  switch (WaveCount) {
-    case 10: return 24;
-    case 9:  return 28;
-    case 8:  return 32;
-    case 7:  return 36;
-    case 6:  return 40;
-    case 5:  return 48;
-    case 4:  return 64;
-    case 3:  return 84;
-    case 2:  return 128;
-    default: return 256;
-  }
-}
-
-unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
-                                            unsigned WaveCount) const {
-  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-    switch (WaveCount) {
-      case 10: return 80;
-      case 9:  return 80;
-      case 8:  return 96;
-      default: return 102;
-    }
-  } else {
-    switch (WaveCount) {
-      case 10: return 48;
-      case 9:  return 56;
-      case 8:  return 64;
-      case 7:  return 72;
-      case 6:  return 80;
-      case 5:  return 96;
-      default: return 103;
-    }
-  }
-}
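
To make the two occupancy tables above concrete: given a kernel's register
usage, a scheduler can walk them from ten waves downward and take the first
budget that fits.  A minimal sketch using only the two functions defined
above (the helper name maxWavesFor is ours, not part of the tree):

    #include "SIRegisterInfo.h"

    // Pick the highest wave count whose VGPR and SGPR budgets still cover
    // the kernel's actual usage, per the tables above.
    static unsigned maxWavesFor(const SIRegisterInfo &TRI,
                                AMDGPUSubtarget::Generation Gen,
                                unsigned NumVGPRs, unsigned NumSGPRs) {
      for (unsigned Waves = 10; Waves > 1; --Waves)
        if (NumVGPRs <= TRI.getNumVGPRsAllowed(Waves) &&
            NumSGPRs <= TRI.getNumSGPRsAllowed(Gen, Waves))
          return Waves;
      return 1;
    }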

Removed: llvm/trunk/lib/Target/R600/SIRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIRegisterInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/R600/SIRegisterInfo.h (removed)
@@ -1,131 +0,0 @@
-//===-- SIRegisterInfo.h - SI Register Info Interface ----------*- C++ -*--===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface definition for SIRegisterInfo
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_LIB_TARGET_R600_SIREGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_SIREGISTERINFO_H
-
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/Support/Debug.h"
-
-namespace llvm {
-
-struct SIRegisterInfo : public AMDGPURegisterInfo {
-
-  SIRegisterInfo();
-
-  BitVector getReservedRegs(const MachineFunction &MF) const override;
-
-  unsigned getRegPressureSetLimit(const MachineFunction &MF,
-                                  unsigned Idx) const override;
-
-  bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
-
-  void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
-                           unsigned FIOperandNum,
-                           RegScavenger *RS) const override;
-
-  /// \brief get the register class of the specified type to use in the
-  /// CFGStructurizer
-  const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const override;
-
-  unsigned getHWRegIndex(unsigned Reg) const override;
-
-  /// \brief Return the 'base' register class for this register.
-  /// e.g. SGPR0 => SReg_32, VGPR0 => VGPR_32, SGPR0_SGPR1 => SReg_64, etc.
-  const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;
-
-  /// \returns true if this class contains only SGPR registers
-  bool isSGPRClass(const TargetRegisterClass *RC) const {
-    if (!RC)
-      return false;
-
-    return !hasVGPRs(RC);
-  }
-
-  /// \returns true if this class ID contains only SGPR registers
-  bool isSGPRClassID(unsigned RCID) const {
-    if (static_cast<int>(RCID) == -1)
-      return false;
-
-    return isSGPRClass(getRegClass(RCID));
-  }
-
-  /// \returns true if this class contains VGPR registers.
-  bool hasVGPRs(const TargetRegisterClass *RC) const;
-
-  /// \returns A VGPR reg class with the same width as \p SRC
-  const TargetRegisterClass *getEquivalentVGPRClass(
-                                          const TargetRegisterClass *SRC) const;
-
-  /// \returns The register class that is used for a sub-register of \p RC for
-  /// the given \p SubIdx.  If \p SubIdx equals NoSubRegister, \p RC will
-  /// be returned.
-  const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
-                                            unsigned SubIdx) const;
-
-  /// \p Channel This is the register channel (e.g. a value from 0-15), not the
-  ///            SubReg index.
-  /// \returns The sub-register of Reg that is in Channel.
-  unsigned getPhysRegSubReg(unsigned Reg, const TargetRegisterClass *SubRC,
-                            unsigned Channel) const;
-
-  /// \returns True if operands defined with this operand type can accept
-  /// a literal constant (i.e. any 32-bit immediate).
-  bool opCanUseLiteralConstant(unsigned OpType) const;
-
-  /// \returns True if operands defined with this operand type can accept
-  /// an inline constant, i.e. an integer value in the range [-16, 64] or
-  /// one of -4.0f, -2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 4.0f.
-  bool opCanUseInlineConstant(unsigned OpType) const;
-
-  enum PreloadedValue {
-    TGID_X,
-    TGID_Y,
-    TGID_Z,
-    SCRATCH_WAVE_OFFSET,
-    SCRATCH_PTR,
-    INPUT_PTR,
-    TIDIG_X,
-    TIDIG_Y,
-    TIDIG_Z
-  };
-
-  /// \brief Returns the physical register that \p Value is stored in.
-  unsigned getPreloadedValue(const MachineFunction &MF,
-                             enum PreloadedValue Value) const;
-
-  /// \brief Returns the maximum number of VGPRs that can be used by
-  ///        \p WaveCount concurrent waves.
-  unsigned getNumVGPRsAllowed(unsigned WaveCount) const;
-
-  /// \brief Returns the maximum number of SGPRs that can be used by
-  ///        \p WaveCount concurrent waves.
-  unsigned getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
-                              unsigned WaveCount) const;
-
-  unsigned findUnusedRegister(const MachineRegisterInfo &MRI,
-                              const TargetRegisterClass *RC) const;
-
-private:
-  void buildScratchLoadStore(MachineBasicBlock::iterator MI,
-                             unsigned LoadStoreOp, unsigned Value,
-                             unsigned ScratchRsrcReg, unsigned ScratchOffset,
-                             int64_t Offset, RegScavenger *RS) const;
-};
-
-} // End namespace llvm
-
-#endif
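
As a quick reference for the operand-type queries declared above, the
inline-constant set the doc comment describes can be restated as two
standalone predicates.  This sketch mirrors the comment only, not the
in-tree check (which lives in SIInstrInfo and works on bit patterns):

    // Integers an SI inline constant can encode, per the comment above.
    static bool isInlineInt(int64_t Imm) {
      return Imm >= -16 && Imm <= 64;
    }

    // The handful of directly encodable floats.
    static bool isInlineFP(float F) {
      return F == 0.0f || F == 0.5f || F == -0.5f || F == 1.0f ||
             F == -1.0f || F == 2.0f || F == -2.0f || F == 4.0f ||
             F == -4.0f;
    }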

Removed: llvm/trunk/lib/Target/R600/SIRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIRegisterInfo.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIRegisterInfo.td (original)
+++ llvm/trunk/lib/Target/R600/SIRegisterInfo.td (removed)
@@ -1,284 +0,0 @@
-//===-- SIRegisterInfo.td - SI Register defs ---------------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//  Declarations that describe the SI registers
-//===----------------------------------------------------------------------===//
-
-class SIReg <string n, bits<16> encoding = 0> : Register<n> {
-  let Namespace = "AMDGPU";
-  let HWEncoding = encoding;
-}
-
-// Special Registers
-def VCC_LO : SIReg<"vcc_lo", 106>;
-def VCC_HI : SIReg<"vcc_hi", 107>;
-
-// VCC for 64-bit instructions
-def VCC : RegisterWithSubRegs<"vcc", [VCC_LO, VCC_HI]> {
-  let Namespace = "AMDGPU";
-  let SubRegIndices = [sub0, sub1];
-  let HWEncoding = 106;
-}
-
-def EXEC_LO : SIReg<"exec_lo", 126>;
-def EXEC_HI : SIReg<"exec_hi", 127>;
-
-def EXEC : RegisterWithSubRegs<"EXEC", [EXEC_LO, EXEC_HI]> {
-  let Namespace = "AMDGPU";
-  let SubRegIndices = [sub0, sub1];
-  let HWEncoding = 126;
-}
-
-def SCC : SIReg<"scc", 253>;
-def M0 : SIReg <"m0", 124>;
-
-def FLAT_SCR_LO : SIReg<"flat_scr_lo", 104>; // Offset in units of 256-bytes.
-def FLAT_SCR_HI : SIReg<"flat_scr_hi", 105>; // Size is the per-thread scratch size, in bytes.
-
-// Pair to indicate location of scratch space for flat accesses.
-def FLAT_SCR : RegisterWithSubRegs <"flat_scr", [FLAT_SCR_LO, FLAT_SCR_HI]> {
-  let Namespace = "AMDGPU";
-  let SubRegIndices = [sub0, sub1];
-  let HWEncoding = 104;
-}
-
-// SGPR registers
-foreach Index = 0-101 in {
-  def SGPR#Index : SIReg <"SGPR"#Index, Index>;
-}
-
-// VGPR registers
-foreach Index = 0-255 in {
-  def VGPR#Index : SIReg <"VGPR"#Index, Index> {
-    let HWEncoding{8} = 1;
-  }
-}
-
-//===----------------------------------------------------------------------===//
-//  Groupings using register classes and tuples
-//===----------------------------------------------------------------------===//
-
-// SGPR 32-bit registers
-def SGPR_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
-                            (add (sequence "SGPR%u", 0, 101))>;
-
-// SGPR 64-bit registers
-def SGPR_64Regs : RegisterTuples<[sub0, sub1],
-                             [(add (decimate (trunc SGPR_32, 101), 2)),
-                              (add (decimate (shl SGPR_32, 1), 2))]>;
-
-// SGPR 128-bit registers
-def SGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3],
-                              [(add (decimate (trunc SGPR_32, 99), 4)),
-                               (add (decimate (shl SGPR_32, 1), 4)),
-                               (add (decimate (shl SGPR_32, 2), 4)),
-                               (add (decimate (shl SGPR_32, 3), 4))]>;
-
-// SGPR 256-bit registers
-def SGPR_256 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7],
-                              [(add (decimate (trunc SGPR_32, 95), 4)),
-                               (add (decimate (shl SGPR_32, 1), 4)),
-                               (add (decimate (shl SGPR_32, 2), 4)),
-                               (add (decimate (shl SGPR_32, 3), 4)),
-                               (add (decimate (shl SGPR_32, 4), 4)),
-                               (add (decimate (shl SGPR_32, 5), 4)),
-                               (add (decimate (shl SGPR_32, 6), 4)),
-                               (add (decimate (shl SGPR_32, 7), 4))]>;
-
-// SGPR 512-bit registers
-def SGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
-                               sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15],
-                              [(add (decimate (trunc SGPR_32, 87), 4)),
-                               (add (decimate (shl SGPR_32, 1), 4)),
-                               (add (decimate (shl SGPR_32, 2), 4)),
-                               (add (decimate (shl SGPR_32, 3), 4)),
-                               (add (decimate (shl SGPR_32, 4), 4)),
-                               (add (decimate (shl SGPR_32, 5), 4)),
-                               (add (decimate (shl SGPR_32, 6), 4)),
-                               (add (decimate (shl SGPR_32, 7), 4)),
-                               (add (decimate (shl SGPR_32, 8), 4)),
-                               (add (decimate (shl SGPR_32, 9), 4)),
-                               (add (decimate (shl SGPR_32, 10), 4)),
-                               (add (decimate (shl SGPR_32, 11), 4)),
-                               (add (decimate (shl SGPR_32, 12), 4)),
-                               (add (decimate (shl SGPR_32, 13), 4)),
-                               (add (decimate (shl SGPR_32, 14), 4)),
-                               (add (decimate (shl SGPR_32, 15), 4))]>;
-
-// VGPR 32-bit registers
-def VGPR_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
-                            (add (sequence "VGPR%u", 0, 255))>;
-
-// VGPR 64-bit registers
-def VGPR_64 : RegisterTuples<[sub0, sub1],
-                             [(add (trunc VGPR_32, 255)),
-                              (add (shl VGPR_32, 1))]>;
-
-// VGPR 96-bit registers
-def VGPR_96 : RegisterTuples<[sub0, sub1, sub2],
-                             [(add (trunc VGPR_32, 254)),
-                              (add (shl VGPR_32, 1)),
-                              (add (shl VGPR_32, 2))]>;
-
-// VGPR 128-bit registers
-def VGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3],
-                              [(add (trunc VGPR_32, 253)),
-                               (add (shl VGPR_32, 1)),
-                               (add (shl VGPR_32, 2)),
-                               (add (shl VGPR_32, 3))]>;
-
-// VGPR 256-bit registers
-def VGPR_256 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7],
-                              [(add (trunc VGPR_32, 249)),
-                               (add (shl VGPR_32, 1)),
-                               (add (shl VGPR_32, 2)),
-                               (add (shl VGPR_32, 3)),
-                               (add (shl VGPR_32, 4)),
-                               (add (shl VGPR_32, 5)),
-                               (add (shl VGPR_32, 6)),
-                               (add (shl VGPR_32, 7))]>;
-
-// VGPR 512-bit registers
-def VGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
-                               sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15],
-                              [(add (trunc VGPR_32, 241)),
-                               (add (shl VGPR_32, 1)),
-                               (add (shl VGPR_32, 2)),
-                               (add (shl VGPR_32, 3)),
-                               (add (shl VGPR_32, 4)),
-                               (add (shl VGPR_32, 5)),
-                               (add (shl VGPR_32, 6)),
-                               (add (shl VGPR_32, 7)),
-                               (add (shl VGPR_32, 8)),
-                               (add (shl VGPR_32, 9)),
-                               (add (shl VGPR_32, 10)),
-                               (add (shl VGPR_32, 11)),
-                               (add (shl VGPR_32, 12)),
-                               (add (shl VGPR_32, 13)),
-                               (add (shl VGPR_32, 14)),
-                               (add (shl VGPR_32, 15))]>;
-
-//===----------------------------------------------------------------------===//
-//  Register classes used as source and destination
-//===----------------------------------------------------------------------===//
-
-class RegImmMatcher<string name> : AsmOperandClass {
-  let Name = name;
-  let RenderMethod = "addRegOrImmOperands";
-}
-
-// Special register classes for predicates and the M0 register
-def SCCReg : RegisterClass<"AMDGPU", [i32, i1], 32, (add SCC)> {
-  let CopyCost = -1; // Theoretically it is possible to read from SCC,
-                     // but it should never be necessary.
-}
-
-def VCCReg : RegisterClass<"AMDGPU", [i64, i1], 64, (add VCC)>;
-def EXECReg : RegisterClass<"AMDGPU", [i64, i1], 64, (add EXEC)>;
-
-// Register class for all scalar registers (SGPRs + Special Registers)
-def SReg_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
-  (add SGPR_32, M0, VCC_LO, VCC_HI, EXEC_LO, EXEC_HI, FLAT_SCR_LO, FLAT_SCR_HI)
->;
-
-def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64], 64, (add SGPR_64Regs)>;
-
-def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64, i1], 64,
-  (add SGPR_64, VCCReg, EXECReg, FLAT_SCR)
->;
-
-def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v16i8], 128, (add SGPR_128)>;
-
-def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256)>;
-
-def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 512, (add SGPR_512)>;
-
-// Register class for all vector registers (VGPRs + Interpolation Registers)
-def VReg_64 : RegisterClass<"AMDGPU", [i64, f64, v2i32, v2f32], 64, (add VGPR_64)>;
-
-def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
-  let Size = 96;
-}
-
-def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>;
-
-def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256)>;
-
-def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 512, (add VGPR_512)>;
-
-def VReg_1 : RegisterClass<"AMDGPU", [i1], 32, (add VGPR_32)> {
-  let Size = 32;
-}
-
-class RegImmOperand <RegisterClass rc> : RegisterOperand<rc> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_IMM32";
-}
-
-class RegInlineOperand <RegisterClass rc> : RegisterOperand<rc> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_INLINE_C";
-}
-
-//===----------------------------------------------------------------------===//
-//  SSrc_* Operands with an SGPR or a 32-bit immediate
-//===----------------------------------------------------------------------===//
-
-def SSrc_32 : RegImmOperand<SReg_32> {
-  let ParserMatchClass = RegImmMatcher<"SSrc32">;
-}
-
-def SSrc_64 : RegImmOperand<SReg_64> {
-  let ParserMatchClass = RegImmMatcher<"SSrc64">;
-}
-
-//===----------------------------------------------------------------------===//
-//  SCSrc_* Operands with an SGPR or an inline constant
-//===----------------------------------------------------------------------===//
-
-def SCSrc_32 : RegInlineOperand<SReg_32> {
-  let ParserMatchClass = RegImmMatcher<"SCSrc32">;
-}
-
-//===----------------------------------------------------------------------===//
-//  VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate
-//===----------------------------------------------------------------------===//
-
-def VS_32 : RegisterClass<"AMDGPU", [i32, f32], 32, (add VGPR_32, SReg_32)>;
-
-def VS_64 : RegisterClass<"AMDGPU", [i64, f64], 64, (add VReg_64, SReg_64)>;
-
-def VSrc_32 : RegisterOperand<VS_32> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_IMM32";
-  let ParserMatchClass = RegImmMatcher<"VSrc32">;
-}
-
-def VSrc_64 : RegisterOperand<VS_64> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_IMM32";
-  let ParserMatchClass = RegImmMatcher<"VSrc64">;
-}
-
-//===----------------------------------------------------------------------===//
-//  VCSrc_* Operands with an SGPR, VGPR or an inline constant
-//===----------------------------------------------------------------------===//
-
-def VCSrc_32 : RegisterOperand<VS_32> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_INLINE_C";
-  let ParserMatchClass = RegImmMatcher<"VCSrc32">;
-}
-
-def VCSrc_64 : RegisterOperand<VS_64> {
-  let OperandNamespace = "AMDGPU";
-  let OperandType = "OPERAND_REG_INLINE_C";
-  let ParserMatchClass = RegImmMatcher<"VCSrc64">;
-}
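
A note on the RegisterTuples idiom used throughout this file: 'shl' offsets
the register sequence and 'decimate' keeps every N-th entry, so for
SGPR_64Regs tuple i is the aligned pair (SGPR2i, SGPR2i+1).  A throwaway
program to spell out the expansion (our sketch, not generated output):

    #include <cstdio>

    // Prints the 51 aligned pairs SGPR0_SGPR1 .. SGPR100_SGPR101 that the
    // SGPR_64Regs definition expands to.
    int main() {
      for (int I = 0; I <= 100; I += 2)
        std::printf("SGPR%d_SGPR%d\n", I, I + 1);
      return 0;
    }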

Removed: llvm/trunk/lib/Target/R600/SISchedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SISchedule.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SISchedule.td (original)
+++ llvm/trunk/lib/Target/R600/SISchedule.td (removed)
@@ -1,91 +0,0 @@
-//===-- SISchedule.td - SI Scheduling definitions ------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// MachineModel definitions for Southern Islands (SI)
-//
-//===----------------------------------------------------------------------===//
-
-def WriteBranch : SchedWrite;
-def WriteExport : SchedWrite;
-def WriteLDS    : SchedWrite;
-def WriteSALU   : SchedWrite;
-def WriteSMEM   : SchedWrite;
-def WriteVMEM   : SchedWrite;
-
-// Vector ALU instructions
-def Write32Bit         : SchedWrite;
-def WriteQuarterRate32 : SchedWrite;
-
-def WriteFloatFMA   : SchedWrite;
-
-def WriteDouble     : SchedWrite;
-def WriteDoubleAdd  : SchedWrite;
-
-def SIFullSpeedModel : SchedMachineModel;
-def SIQuarterSpeedModel : SchedMachineModel;
-
-// BufferSize = 0 means the processors are in-order.
-let BufferSize = 0 in {
-
-// XXX: Are the resource counts correct?
-def HWBranch : ProcResource<1>;
-def HWExport : ProcResource<7>;   // Taken from S_WAITCNT
-def HWLGKM   : ProcResource<31>;  // Taken from S_WAITCNT
-def HWSALU   : ProcResource<1>;
-def HWVMEM   : ProcResource<15>;  // Taken from S_WAITCNT
-def HWVALU   : ProcResource<1>;
-
-}
-
-class HWWriteRes<SchedWrite write, list<ProcResourceKind> resources,
-                 int latency> : WriteRes<write, resources> {
-  let Latency = latency;
-}
-
-class HWVALUWriteRes<SchedWrite write, int latency> :
-  HWWriteRes<write, [HWVALU], latency>;
-
-
-// The latency numbers are taken from AMD Accelerated Parallel Processing
-// guide.  They may not be accurate.
-
-// The latency values are 1 / (operations / cycle) / 4.
-multiclass SICommonWriteRes {
-
-  def : HWWriteRes<WriteBranch,  [HWBranch], 100>; // XXX: Guessed ???
-  def : HWWriteRes<WriteExport,  [HWExport], 100>; // XXX: Guessed ???
-  def : HWWriteRes<WriteLDS,     [HWLGKM],    32>; // 2 - 64
-  def : HWWriteRes<WriteSALU,    [HWSALU],     1>;
-  def : HWWriteRes<WriteSMEM,    [HWLGKM],    10>; // XXX: Guessed ???
-  def : HWWriteRes<WriteVMEM,    [HWVMEM],   450>; // 300 - 600
-
-  def : HWVALUWriteRes<Write32Bit,         1>;
-  def : HWVALUWriteRes<WriteQuarterRate32, 4>;
-}
-
-
-let SchedModel = SIFullSpeedModel in {
-
-defm : SICommonWriteRes;
-
-def : HWVALUWriteRes<WriteFloatFMA,   1>;
-def : HWVALUWriteRes<WriteDouble,     4>;
-def : HWVALUWriteRes<WriteDoubleAdd,  2>;
-
-} // End SchedModel = SIFullSpeedModel
-
-let SchedModel = SIQuarterSpeedModel in {
-
-defm : SICommonWriteRes;
-
-def : HWVALUWriteRes<WriteFloatFMA, 16>;
-def : HWVALUWriteRes<WriteDouble,   16>;
-def : HWVALUWriteRes<WriteDoubleAdd, 8>;
-
-}  // End SchedModel = SIQuarterSpeedModel
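
Working the "1 / (operations / cycle) / 4" comment backwards against the
tables above: WriteDoubleAdd's latencies of 2 (full speed) and 8 (quarter
speed) correspond to per-thread rates of 1/8 and 1/32 operations per cycle.
A compile-time check of that arithmetic (the rates are back-derived from the
formula, not quoted from the APP guide):

    // latency = 1 / (ops per cycle) / 4, per the SISchedule.td comment.
    constexpr int latency(double OpsPerCycle) {
      return static_cast<int>(1.0 / OpsPerCycle / 4.0);
    }
    static_assert(latency(1.0 / 4.0) == 1, "Write32Bit");
    static_assert(latency(1.0 / 16.0) == 4, "WriteQuarterRate32");
    static_assert(latency(1.0 / 8.0) == 2, "WriteDoubleAdd, full speed");
    static_assert(latency(1.0 / 32.0) == 8, "WriteDoubleAdd, quarter speed");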

Removed: llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp (original)
+++ llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp (removed)
@@ -1,272 +0,0 @@
-//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// The pass tries to use the 32-bit encoding for instructions when possible.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUMCInstLower.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-#define DEBUG_TYPE "si-shrink-instructions"
-
-STATISTIC(NumInstructionsShrunk,
-          "Number of 64-bit instruction reduced to 32-bit.");
-STATISTIC(NumLiteralConstantsFolded,
-          "Number of literal constants folded into 32-bit instructions.");
-
-namespace llvm {
-  void initializeSIShrinkInstructionsPass(PassRegistry&);
-}
-
-using namespace llvm;
-
-namespace {
-
-class SIShrinkInstructions : public MachineFunctionPass {
-public:
-  static char ID;
-
-public:
-  SIShrinkInstructions() : MachineFunctionPass(ID) {
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "SI Shrink Instructions";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
-                      "SI Shrink Instructions", false, false)
-INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
-                    "SI Shrink Instructions", false, false)
-
-char SIShrinkInstructions::ID = 0;
-
-FunctionPass *llvm::createSIShrinkInstructionsPass() {
-  return new SIShrinkInstructions();
-}
-
-static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
-                   const MachineRegisterInfo &MRI) {
-  if (!MO->isReg())
-    return false;
-
-  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
-    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));
-
-  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
-}
-
-static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
-                      const SIRegisterInfo &TRI,
-                      const MachineRegisterInfo &MRI) {
-
-  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
-  // Can't shrink instructions with three operands.
-  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
-  // a special case for it.  It can only be shrunk if the third operand
-  // is vcc.  We should handle this the same way we handle vopc, by adding
-  // a register allocation hint pre-regalloc and then doing the shrinking
-  // post-regalloc.
-  if (Src2)
-    return false;
-
-  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
-  const MachineOperand *Src1Mod =
-      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
-
-  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
-    return false;
-
-  // We don't need to check src0, all input types are legal, so just make sure
-  // src0 isn't using any modifiers.
-  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
-    return false;
-
-  // Check output modifiers
-  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
-    return false;
-
-  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
-    return false;
-
-  return true;
-}
-
-/// \brief This function checks \p MI for operands defined by a move immediate
-/// instruction and then folds the literal constant into the instruction if it
-/// can.  This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
-/// and will only fold literal constants if we are still in SSA.
-static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
-                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
-
-  if (!MRI.isSSA())
-    return;
-
-  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
-         TII->isVOPC(MI.getOpcode()));
-
-  const SIRegisterInfo &TRI = TII->getRegisterInfo();
-  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
-  MachineOperand &Src0 = MI.getOperand(Src0Idx);
-
-  // Only one literal constant is allowed per instruction, so if src0 is a
-  // literal constant then we can't do any folding.
-  if (Src0.isImm() &&
-      TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
-    return;
-
-  // Literal constants and SGPRs can only be used in Src0, so if Src0 is an
-  // SGPR, we cannot commute the instruction, so we can't fold any literal
-  // constants.
-  if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
-    return;
-
-  // Try to fold Src0
-  if (Src0.isReg()) {
-    unsigned Reg = Src0.getReg();
-    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
-    if (Def && Def->isMoveImmediate()) {
-      MachineOperand &MovSrc = Def->getOperand(1);
-      bool ConstantFolded = false;
-
-      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
-        Src0.ChangeToImmediate(MovSrc.getImm());
-        ConstantFolded = true;
-      }
-      if (ConstantFolded) {
-        if (MRI.use_empty(Reg))
-          Def->eraseFromParent();
-        ++NumLiteralConstantsFolded;
-        return;
-      }
-    }
-  }
-
-  // We have failed to fold src0, so commute the instruction and try again.
-  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
-    foldImmediates(MI, TII, MRI, false);
-
-}
-
-bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const SIInstrInfo *TII =
-      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
-  const SIRegisterInfo &TRI = TII->getRegisterInfo();
-  std::vector<unsigned> I1Defs;
-
-  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
-                                                  BI != BE; ++BI) {
-
-    MachineBasicBlock &MBB = *BI;
-    MachineBasicBlock::iterator I, Next;
-    for (I = MBB.begin(); I != MBB.end(); I = Next) {
-      Next = std::next(I);
-      MachineInstr &MI = *I;
-
-      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
-      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
-        const MachineOperand &Src = MI.getOperand(1);
-
-        if (Src.isImm()) {
-          if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
-            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
-        }
-
-        continue;
-      }
-
-      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
-        continue;
-
-      if (!canShrink(MI, TII, TRI, MRI)) {
-        // Try commuting the instruction and see if that enables us to shrink
-        // it.
-        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
-            !canShrink(MI, TII, TRI, MRI))
-          continue;
-      }
-
-      // getVOPe32 could be -1 here if we started with an instruction that had
-      // a 32-bit encoding and then commuted it to an instruction that did not.
-      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
-        continue;
-
-      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
-
-      if (TII->isVOPC(Op32)) {
-        unsigned DstReg = MI.getOperand(0).getReg();
-        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
-          // VOPC instructions can only write to the VCC register.  We can't
-          // force them to use VCC here, because the register allocator has
-          // trouble with sequences like this, which cause the allocator to run
-          // out of registers if vreg0 and vreg1 belong to the VCCReg register
-          // class:
-          // vreg0 = VOPC;
-          // vreg1 = VOPC;
-          // S_AND_B64 vreg0, vreg1
-          //
-          // So, instead of forcing the instruction to write to VCC, we provide
-          // a hint to the register allocator to use VCC and then we will run
-          // this pass again after RA and shrink it if it outputs to VCC.
-          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
-          continue;
-        }
-        if (DstReg != AMDGPU::VCC)
-          continue;
-      }
-
-      // We can shrink this instruction
-      DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << '\n';);
-
-      MachineInstrBuilder Inst32 =
-          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
-
-      // dst
-      Inst32.addOperand(MI.getOperand(0));
-
-      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
-
-      const MachineOperand *Src1 =
-          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
-      if (Src1)
-        Inst32.addOperand(*Src1);
-
-      ++NumInstructionsShrunk;
-      MI.eraseFromParent();
-
-      foldImmediates(*Inst32, TII, MRI);
-      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
-
-    }
-  }
-  return false;
-}
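
One detail worth spelling out from the pass above: the "save 4 bytes" claim
for the S_MOV_B32 -> S_MOVK_I32 rewrite.  An s_mov_b32 whose immediate is
not an inline constant needs a trailing 32-bit literal dword on top of the
32-bit instruction word, while s_movk_i32 packs a signed 16-bit immediate
into the instruction word itself.  The accounting, as a sketch (the helper
names are ours):

    // s_mov_b32 with a non-inline literal: 4-byte opcode + 4-byte literal.
    constexpr unsigned sMovB32Bytes(bool InlineImm) {
      return InlineImm ? 4 : 8;
    }
    // s_movk_i32: the simm16 lives in the instruction word.
    constexpr unsigned sMovkI32Bytes() { return 4; }
    static_assert(sMovB32Bytes(false) - sMovkI32Bytes() == 4,
                  "the 4 bytes the pass's comment refers to");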

Removed: llvm/trunk/lib/Target/R600/SITypeRewriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/SITypeRewriter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/SITypeRewriter.cpp (original)
+++ llvm/trunk/lib/Target/R600/SITypeRewriter.cpp (removed)
@@ -1,161 +0,0 @@
-//===-- SITypeRewriter.cpp - Remove unwanted types ------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass performs the following type substitutions on all
-/// non-compute shaders:
-///
-/// v16i8 => i128
-///   - v16i8 is used for constant memory resource descriptors.  This type is
-///     legal for some compute APIs, and we don't want to declare it as legal
-///     in the backend, because we want the legalizer to expand all v16i8
-///     operations.
-/// v1* => *
-///   - Having v1* types complicates the legalizer, and we can easily replace
-///     them with the element type.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstVisitor.h"
-
-using namespace llvm;
-
-namespace {
-
-class SITypeRewriter : public FunctionPass,
-                       public InstVisitor<SITypeRewriter> {
-
-  static char ID;
-  Module *Mod;
-  Type *v16i8;
-  Type *v4i32;
-
-public:
-  SITypeRewriter() : FunctionPass(ID) { }
-  bool doInitialization(Module &M) override;
-  bool runOnFunction(Function &F) override;
-  const char *getPassName() const override {
-    return "SI Type Rewriter";
-  }
-  void visitLoadInst(LoadInst &I);
-  void visitCallInst(CallInst &I);
-  void visitBitCast(BitCastInst &I);
-};
-
-} // End anonymous namespace
-
-char SITypeRewriter::ID = 0;
-
-bool SITypeRewriter::doInitialization(Module &M) {
-  Mod = &M;
-  v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
-  v4i32 = VectorType::get(Type::getInt32Ty(M.getContext()), 4);
-  return false;
-}
-
-bool SITypeRewriter::runOnFunction(Function &F) {
-  Attribute A = F.getFnAttribute("ShaderType");
-
-  unsigned ShaderType = ShaderType::COMPUTE;
-  if (A.isStringAttribute()) {
-    StringRef Str = A.getValueAsString();
-    Str.getAsInteger(0, ShaderType);
-  }
-  if (ShaderType == ShaderType::COMPUTE)
-    return false;
-
-  visit(F);
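-  // A second visit gives visitBitCast a chance to fold the bitcast chains
-  // introduced by the first pass.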
-  visit(F);
-
-  return false;
-}
-
-void SITypeRewriter::visitLoadInst(LoadInst &I) {
-  Value *Ptr = I.getPointerOperand();
-  Type *PtrTy = Ptr->getType();
-  Type *ElemTy = PtrTy->getPointerElementType();
-  IRBuilder<> Builder(&I);
-  if (ElemTy == v16i8)  {
-    Value *BitCast = Builder.CreateBitCast(Ptr,
-        PointerType::get(v4i32,PtrTy->getPointerAddressSpace()));
-    LoadInst *Load = Builder.CreateLoad(BitCast);
-    SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
-    I.getAllMetadataOtherThanDebugLoc(MD);
-    for (unsigned i = 0, e = MD.size(); i != e; ++i) {
-      Load->setMetadata(MD[i].first, MD[i].second);
-    }
-    Value *BitCastLoad = Builder.CreateBitCast(Load, I.getType());
-    I.replaceAllUsesWith(BitCastLoad);
-    I.eraseFromParent();
-  }
-}
-
-void SITypeRewriter::visitCallInst(CallInst &I) {
-  IRBuilder<> Builder(&I);
-
-  SmallVector <Value*, 8> Args;
-  SmallVector <Type*, 8> Types;
-  bool NeedToReplace = false;
-  Function *F = I.getCalledFunction();
-  std::string Name = F->getName();
-  for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
-    Value *Arg = I.getArgOperand(i);
-    if (Arg->getType() == v16i8) {
-      Args.push_back(Builder.CreateBitCast(Arg, v4i32));
-      Types.push_back(v4i32);
-      NeedToReplace = true;
-      Name = Name + ".v4i32";
-    } else if (Arg->getType()->isVectorTy() &&
-               Arg->getType()->getVectorNumElements() == 1 &&
-               Arg->getType()->getVectorElementType() ==
-                                              Type::getInt32Ty(I.getContext())){
-      Type *ElementTy = Arg->getType()->getVectorElementType();
-      std::string TypeName = "i32";
-      InsertElementInst *Def = cast<InsertElementInst>(Arg);
-      Args.push_back(Def->getOperand(1));
-      Types.push_back(ElementTy);
-      std::string VecTypeName = "v1" + TypeName;
-      Name = Name.replace(Name.find(VecTypeName), VecTypeName.length(), TypeName);
-      NeedToReplace = true;
-    } else {
-      Args.push_back(Arg);
-      Types.push_back(Arg->getType());
-    }
-  }
-
-  if (!NeedToReplace) {
-    return;
-  }
-  Function *NewF = Mod->getFunction(Name);
-  if (!NewF) {
-    NewF = Function::Create(FunctionType::get(F->getReturnType(), Types, false),
-                            GlobalValue::ExternalLinkage, Name, Mod);
-    NewF->setAttributes(F->getAttributes());
-  }
-  I.replaceAllUsesWith(Builder.CreateCall(NewF, Args));
-  I.eraseFromParent();
-}
-
-void SITypeRewriter::visitBitCast(BitCastInst &I) {
-  IRBuilder<> Builder(&I);
-  if (I.getDestTy() != v4i32) {
-    return;
-  }
-
-  if (BitCastInst *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
-    if (Op->getSrcTy() == v4i32) {
-      I.replaceAllUsesWith(Op->getOperand(0));
-      I.eraseFromParent();
-    }
-  }
-}
-
-FunctionPass *llvm::createSITypeRewriter() {
-  return new SITypeRewriter();
-}
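
To illustrate the v1* rule from the header: a call such as foo(<1 x i32> %v),
where %v was built by an insertelement, becomes a call to an ".i32"-suffixed
clone that takes the scalar directly.  The extraction step in isolation
(hypothetical helper; the pass does this inline via cast<>):

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Unwrap a <1 x Ty> argument built by an insertelement to its scalar.
    static Value *unwrapV1Arg(Value *Arg) {
      if (auto *IE = dyn_cast<InsertElementInst>(Arg))
        return IE->getOperand(1); // operand 1 is the inserted scalar
      return Arg;
    }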

Removed: llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp (removed)
@@ -1,30 +0,0 @@
-//===-- TargetInfo/AMDGPUTargetInfo.cpp - TargetInfo for AMDGPU -----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUTargetMachine.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-/// \brief The target which supports all AMD GPUs.  This will eventually
-///        be deprecated and there will be an R600 target and a GCN target.
-Target llvm::TheAMDGPUTarget;
-/// \brief The target for GCN GPUs
-Target llvm::TheGCNTarget;
-
-/// \brief Extern function to initialize the targets for the AMDGPU backend
-extern "C" void LLVMInitializeR600TargetInfo() {
-  RegisterTarget<Triple::r600, false>
-    R600(TheAMDGPUTarget, "r600", "AMD GPUs HD2XXX-HD6XXX");
-  RegisterTarget<Triple::amdgcn, false> GCN(TheGCNTarget, "amdgcn", "AMD GCN GPUs");
-}

Removed: llvm/trunk/lib/Target/R600/TargetInfo/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/TargetInfo/CMakeLists.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/R600/TargetInfo/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600Info
-  AMDGPUTargetInfo.cpp
-  )

Removed: llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/R600/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = R600Info
-parent = R600
-required_libraries = Support
-add_to_library_groups = R600

Removed: llvm/trunk/lib/Target/R600/TargetInfo/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/TargetInfo/Makefile?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/Makefile (original)
+++ llvm/trunk/lib/Target/R600/TargetInfo/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/AMDGPU/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMR600Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/R600/VIInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/VIInstrFormats.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/VIInstrFormats.td (original)
+++ llvm/trunk/lib/Target/R600/VIInstrFormats.td (removed)
@@ -1,166 +0,0 @@
-//===-- VIInstrFormats.td - VI Instruction Encodings ----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// VI Instruction format definitions.
-//
-//===----------------------------------------------------------------------===//
-
-class DSe_vi <bits<8> op> : Enc64 {
-  bits<8> vdst;
-  bits<1> gds;
-  bits<8> addr;
-  bits<8> data0;
-  bits<8> data1;
-  bits<8> offset0;
-  bits<8> offset1;
-
-  let Inst{7-0} = offset0;
-  let Inst{15-8} = offset1;
-  let Inst{16} = gds;
-  let Inst{24-17} = op;
-  let Inst{31-26} = 0x36; // encoding
-  let Inst{39-32} = addr;
-  let Inst{47-40} = data0;
-  let Inst{55-48} = data1;
-  let Inst{63-56} = vdst;
-}
-
-class MUBUFe_vi <bits<7> op> : Enc64 {
-  bits<12> offset;
-  bits<1> offen;
-  bits<1> idxen;
-  bits<1> glc;
-  bits<1> lds;
-  bits<8> vaddr;
-  bits<8> vdata;
-  bits<7> srsrc;
-  bits<1> slc;
-  bits<1> tfe;
-  bits<8> soffset;
-
-  let Inst{11-0} = offset;
-  let Inst{12} = offen;
-  let Inst{13} = idxen;
-  let Inst{14} = glc;
-  let Inst{16} = lds;
-  let Inst{17} = slc;
-  let Inst{24-18} = op;
-  let Inst{31-26} = 0x38; // encoding
-  let Inst{39-32} = vaddr;
-  let Inst{47-40} = vdata;
-  let Inst{52-48} = srsrc{6-2};
-  let Inst{55} = tfe;
-  let Inst{63-56} = soffset;
-}
-
-class MTBUFe_vi <bits<4> op> : Enc64 {
-  bits<12> offset;
-  bits<1>  offen;
-  bits<1>  idxen;
-  bits<1>  glc;
-  bits<4>  dfmt;
-  bits<3>  nfmt;
-  bits<8>  vaddr;
-  bits<8>  vdata;
-  bits<7>  srsrc;
-  bits<1>  slc;
-  bits<1>  tfe;
-  bits<8>  soffset;
-
-  let Inst{11-0}  = offset;
-  let Inst{12}    = offen;
-  let Inst{13}    = idxen;
-  let Inst{14}    = glc;
-  let Inst{18-15} = op;
-  let Inst{22-19} = dfmt;
-  let Inst{25-23} = nfmt;
-  let Inst{31-26} = 0x3a; // encoding
-  let Inst{39-32} = vaddr;
-  let Inst{47-40} = vdata;
-  let Inst{52-48} = srsrc{6-2};
-  let Inst{54}    = slc;
-  let Inst{55}    = tfe;
-  let Inst{63-56} = soffset;
-}
-
-class SMEMe_vi <bits<8> op, bit imm> : Enc64 {
-  bits<7>  sbase;
-  bits<7>  sdata;
-  bits<1>  glc;
-  bits<20> offset;
-
-  let Inst{5-0}   = sbase{6-1};
-  let Inst{12-6}  = sdata;
-  let Inst{16}    = glc;
-  let Inst{17}    = imm;
-  let Inst{25-18} = op;
-  let Inst{31-26} = 0x30; // encoding
-  let Inst{51-32} = offset;
-}
-
-class VOP3e_vi <bits<10> op> : Enc64 {
-  bits<8> vdst;
-  bits<2> src0_modifiers;
-  bits<9> src0;
-  bits<2> src1_modifiers;
-  bits<9> src1;
-  bits<2> src2_modifiers;
-  bits<9> src2;
-  bits<1> clamp;
-  bits<2> omod;
-
-  let Inst{7-0}   = vdst;
-  let Inst{8}     = src0_modifiers{1};
-  let Inst{9}     = src1_modifiers{1};
-  let Inst{10}    = src2_modifiers{1};
-  let Inst{15}    = clamp;
-  let Inst{25-16} = op;
-  let Inst{31-26} = 0x34; // encoding
-  let Inst{40-32} = src0;
-  let Inst{49-41} = src1;
-  let Inst{58-50} = src2;
-  let Inst{60-59} = omod;
-  let Inst{61} = src0_modifiers{0};
-  let Inst{62} = src1_modifiers{0};
-  let Inst{63} = src2_modifiers{0};
-}
-
-class VOP3be_vi <bits<10> op> : Enc64 {
-  bits<8> vdst;
-  bits<2> src0_modifiers;
-  bits<9> src0;
-  bits<2> src1_modifiers;
-  bits<9> src1;
-  bits<2> src2_modifiers;
-  bits<9> src2;
-  bits<7> sdst;
-  bits<2> omod;
-  bits<1> clamp;
-
-  let Inst{7-0} = vdst;
-  let Inst{14-8} = sdst;
-  let Inst{15} = clamp;
-  let Inst{25-16} = op;
-  let Inst{31-26} = 0x34; // encoding
-  let Inst{40-32} = src0;
-  let Inst{49-41} = src1;
-  let Inst{58-50} = src2;
-  let Inst{60-59} = omod;
-  let Inst{61} = src0_modifiers{0};
-  let Inst{62} = src1_modifiers{0};
-  let Inst{63} = src2_modifiers{0};
-}
-
-class EXPe_vi : EXPe {
-  let Inst{31-26} = 0x31; // encoding
-}
-
-class VINTRPe_vi <bits<2> op> : VINTRPe <op> {
-  let Inst{31-26} = 0x35; // encoding
-}
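
A note on the srsrc fields above: MUBUFe_vi and MTBUFe_vi encode only bits
{6-2} of the 7-bit resource register number.  The 128-bit descriptor sits in
a 4-SGPR-aligned quad, so the low two bits of its base register are always
zero and need not be stored.  Quick arithmetic check (example index 8, i.e.
a descriptor based at SGPR8):

    static_assert(((8u >> 2) << 2) == 8u, "4-aligned srsrc round-trips");
    static_assert(((9u >> 2) << 2) != 9u, "an unaligned index would not");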

Removed: llvm/trunk/lib/Target/R600/VIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/VIInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/VIInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/VIInstructions.td (removed)
@@ -1,106 +0,0 @@
-//===-- VIInstructions.td - VI Instruction Definitions --------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Instruction definitions for VI and newer.
-//===----------------------------------------------------------------------===//
-
-let SIAssemblerPredicate = DisableInst, SubtargetPredicate = isVI in {
-
-//===----------------------------------------------------------------------===//
-// VOP1 Instructions
-//===----------------------------------------------------------------------===//
-
-defm V_CVT_F16_U16 : VOP1Inst <vop1<0, 0x39>, "v_cvt_f16_u16", VOP_F16_I16>;
-defm V_CVT_F16_I16 : VOP1Inst <vop1<0, 0x3a>, "v_cvt_f16_i16", VOP_F16_I16>;
-defm V_CVT_U16_F16 : VOP1Inst <vop1<0, 0x3b>, "v_cvt_u16_f16", VOP_I16_F16>;
-defm V_CVT_I16_F16 : VOP1Inst <vop1<0, 0x3c>, "v_cvt_i16_f16", VOP_I16_F16>;
-defm V_RCP_F16 : VOP1Inst <vop1<0, 0x3d>, "v_rcp_f16", VOP_F16_F16>;
-defm V_SQRT_F16 : VOP1Inst <vop1<0, 0x3e>, "v_sqrt_f16", VOP_F16_F16>;
-defm V_RSQ_F16 : VOP1Inst <vop1<0, 0x3f>, "v_rsq_f16", VOP_F16_F16>;
-defm V_LOG_F16 : VOP1Inst <vop1<0, 0x40>, "v_log_f16", VOP_F16_F16>;
-defm V_EXP_F16 : VOP1Inst <vop1<0, 0x41>, "v_exp_f16", VOP_F16_F16>;
-defm V_FREXP_MANT_F16 : VOP1Inst <vop1<0, 0x42>, "v_frexp_mant_f16",
-  VOP_F16_F16
->;
-defm V_FREXP_EXP_I16_F16 : VOP1Inst <vop1<0, 0x43>, "v_frexp_exp_i16_f16",
-  VOP_I16_F16
->;
-defm V_FLOOR_F16 : VOP1Inst <vop1<0, 0x44>, "v_floor_f16", VOP_F16_F16>;
-defm V_CEIL_F16 : VOP1Inst <vop1<0, 0x45>, "v_ceil_f16", VOP_F16_F16>;
-defm V_TRUNC_F16 : VOP1Inst <vop1<0, 0x46>, "v_trunc_f16", VOP_F16_F16>;
-defm V_RNDNE_F16 : VOP1Inst <vop1<0, 0x47>, "v_rndne_f16", VOP_F16_F16>;
-defm V_FRACT_F16 : VOP1Inst <vop1<0, 0x48>, "v_fract_f16", VOP_F16_F16>;
-defm V_SIN_F16 : VOP1Inst <vop1<0, 0x49>, "v_sin_f16", VOP_F16_F16>;
-defm V_COS_F16 : VOP1Inst <vop1<0, 0x4a>, "v_cos_f16", VOP_F16_F16>;
-
-//===----------------------------------------------------------------------===//
-// VOP2 Instructions
-//===----------------------------------------------------------------------===//
-
-let isCommutable = 1 in {
-
-defm V_ADD_F16 : VOP2Inst <vop2<0, 0x1f>, "v_add_f16", VOP_F16_F16_F16>;
-defm V_SUB_F16 : VOP2Inst <vop2<0, 0x20>, "v_sub_f16", VOP_F16_F16_F16>;
-defm V_SUBREV_F16 : VOP2Inst <vop2<0, 0x21>, "v_subrev_f16", VOP_F16_F16_F16,
-  null_frag, "v_sub_f16"
->;
-defm V_MUL_F16 : VOP2Inst <vop2<0, 0x22>, "v_mul_f16", VOP_F16_F16_F16>;
-defm V_MAC_F16 : VOP2Inst <vop2<0, 0x23>, "v_mac_f16", VOP_F16_F16_F16>;
-} // End isCommutable = 1
-defm V_MADMK_F16 : VOP2MADK <vop2<0,0x24>, "v_madmk_f16">;
-let isCommutable = 1 in {
-defm V_MADAK_F16 : VOP2MADK <vop2<0,0x25>, "v_madak_f16">;
-defm V_ADD_U16 : VOP2Inst <vop2<0,0x26>, "v_add_u16", VOP_I16_I16_I16>;
-defm V_SUB_U16 : VOP2Inst <vop2<0,0x27>, "v_sub_u16" , VOP_I16_I16_I16>;
-defm V_SUBREV_U16 : VOP2Inst <vop2<0,0x28>, "v_subrev_u16", VOP_I16_I16_I16>;
-defm V_MUL_LO_U16 : VOP2Inst <vop2<0,0x29>, "v_mul_lo_u16", VOP_I16_I16_I16>;
-} // End isCommutable = 1
-defm V_LSHLREV_B16 : VOP2Inst <vop2<0,0x2a>, "v_lshlrev_b16", VOP_I16_I16_I16>;
-defm V_LSHRREV_B16 : VOP2Inst <vop2<0,0x2b>, "v_lshrrev_b16", VOP_I16_I16_I16>;
-defm V_ASHRREV_B16 : VOP2Inst <vop2<0,0x2c>, "v_ashrrev_b16", VOP_I16_I16_I16>;
-let isCommutable = 1 in {
-defm V_MAX_F16 : VOP2Inst <vop2<0,0x2d>, "v_max_f16", VOP_F16_F16_F16>;
-defm V_MIN_F16 : VOP2Inst <vop2<0,0x2e>, "v_min_f16", VOP_F16_F16_F16>;
-defm V_MAX_U16 : VOP2Inst <vop2<0,0x2f>, "v_max_u16", VOP_I16_I16_I16>;
-defm V_MAX_I16 : VOP2Inst <vop2<0,0x30>, "v_max_i16", VOP_I16_I16_I16>;
-defm V_MIN_U16 : VOP2Inst <vop2<0,0x31>, "v_min_u16", VOP_I16_I16_I16>;
-defm V_MIN_I16 : VOP2Inst <vop2<0,0x32>, "v_min_i16", VOP_I16_I16_I16>;
-} // End isCommutable = 1
-defm V_LDEXP_F16 : VOP2Inst <vop2<0,0x33>, "v_ldexp_f16", VOP_F16_F16_I16>;
-
-// Aliases to simplify matching of floating-point instructions that are VOP2 on
-// SI and VOP3 on VI.
-
-class SI2_VI3Alias <string name, Instruction inst> : InstAlias <
-  name#" $dst, $src0, $src1",
-  (inst VGPR_32:$dst, 0, VCSrc_32:$src0, 0, VCSrc_32:$src1, 0, 0)
->, PredicateControl {
-  let UseInstAsmMatchConverter = 0;
-}
-
-def : SI2_VI3Alias <"v_ldexp_f32", V_LDEXP_F32_e64_vi>;
-def : SI2_VI3Alias <"v_cvt_pkaccum_u8_f32", V_CVT_PKACCUM_U8_F32_e64_vi>;
-def : SI2_VI3Alias <"v_cvt_pknorm_i16_f32", V_CVT_PKNORM_I16_F32_e64_vi>;
-def : SI2_VI3Alias <"v_cvt_pknorm_u16_f32", V_CVT_PKNORM_U16_F32_e64_vi>;
-def : SI2_VI3Alias <"v_cvt_pkrtz_f16_f32", V_CVT_PKRTZ_F16_F32_e64_vi>;
-
-} // End SIAssemblerPredicate = DisableInst, SubtargetPredicate = isVI
-
-//===----------------------------------------------------------------------===//
-// SMEM Patterns
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isVI] in {
-
-// 1. Offset as a 20-bit DWORD immediate
-def : Pat <
-  (SIload_constant v4i32:$sbase, IMM20bit:$offset),
-  (S_BUFFER_LOAD_DWORD_IMM $sbase, (as_i32imm $offset))
->;
-
-} // End Predicates = [isVI]
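
The SMEM pattern above is guarded by IMM20bit: on VI the S_BUFFER_LOAD_*_IMM
offset field is 20 bits wide.  An equivalent standalone predicate (the
function name is ours), using LLVM's existing helper:

    #include "llvm/Support/MathExtras.h"

    // Matches what IMM20bit accepts: an unsigned value fitting in 20 bits.
    static bool isLegalVISMRDOffset(uint64_t Imm) {
      return llvm::isUInt<20>(Imm);
    }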

Copied: llvm/trunk/test/CodeGen/AMDGPU/32-bit-local-address-space.ll (from r239647, llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/32-bit-local-address-space.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/32-bit-local-address-space.ll&p1=llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/README (from r239647, llvm/trunk/test/CodeGen/R600/README)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/README?p2=llvm/trunk/test/CodeGen/AMDGPU/README&p1=llvm/trunk/test/CodeGen/R600/README&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/add-debug.ll (from r239647, llvm/trunk/test/CodeGen/R600/add-debug.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add-debug.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/add-debug.ll&p1=llvm/trunk/test/CodeGen/R600/add-debug.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/add.ll (from r239647, llvm/trunk/test/CodeGen/R600/add.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/add.ll&p1=llvm/trunk/test/CodeGen/R600/add.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/add_i64.ll (from r239647, llvm/trunk/test/CodeGen/R600/add_i64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/add_i64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/add_i64.ll&p1=llvm/trunk/test/CodeGen/R600/add_i64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/address-space.ll (from r239647, llvm/trunk/test/CodeGen/R600/address-space.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/address-space.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/address-space.ll&p1=llvm/trunk/test/CodeGen/R600/address-space.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/and.ll (from r239647, llvm/trunk/test/CodeGen/R600/and.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/and.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/and.ll&p1=llvm/trunk/test/CodeGen/R600/and.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/anyext.ll (from r239647, llvm/trunk/test/CodeGen/R600/anyext.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/anyext.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/anyext.ll&p1=llvm/trunk/test/CodeGen/R600/anyext.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll (from r239647, llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll&p1=llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll (from r239647, llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll&p1=llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll (from r239647, llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll&p1=llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/atomic_load_add.ll (from r239647, llvm/trunk/test/CodeGen/R600/atomic_load_add.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/atomic_load_add.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/atomic_load_add.ll&p1=llvm/trunk/test/CodeGen/R600/atomic_load_add.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/atomic_load_sub.ll (from r239647, llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/atomic_load_sub.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/atomic_load_sub.ll&p1=llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/basic-branch.ll (from r239647, llvm/trunk/test/CodeGen/R600/basic-branch.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/basic-branch.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/basic-branch.ll&p1=llvm/trunk/test/CodeGen/R600/basic-branch.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/basic-loop.ll (from r239647, llvm/trunk/test/CodeGen/R600/basic-loop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/basic-loop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/basic-loop.ll&p1=llvm/trunk/test/CodeGen/R600/basic-loop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/bfe_uint.ll (from r239647, llvm/trunk/test/CodeGen/R600/bfe_uint.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/bfe_uint.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/bfe_uint.ll&p1=llvm/trunk/test/CodeGen/R600/bfe_uint.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/bfi_int.ll (from r239647, llvm/trunk/test/CodeGen/R600/bfi_int.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/bfi_int.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/bfi_int.ll&p1=llvm/trunk/test/CodeGen/R600/bfi_int.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/big_alu.ll (from r239647, llvm/trunk/test/CodeGen/R600/big_alu.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/big_alu.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/big_alu.ll&p1=llvm/trunk/test/CodeGen/R600/big_alu.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll (from r239647, llvm/trunk/test/CodeGen/R600/bitcast.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll&p1=llvm/trunk/test/CodeGen/R600/bitcast.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/bswap.ll (from r239647, llvm/trunk/test/CodeGen/R600/bswap.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/bswap.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/bswap.ll&p1=llvm/trunk/test/CodeGen/R600/bswap.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/build_vector.ll (from r239647, llvm/trunk/test/CodeGen/R600/build_vector.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/build_vector.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/build_vector.ll&p1=llvm/trunk/test/CodeGen/R600/build_vector.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/call.ll (from r239647, llvm/trunk/test/CodeGen/R600/call.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/call.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/call.ll&p1=llvm/trunk/test/CodeGen/R600/call.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/call_fs.ll (from r239647, llvm/trunk/test/CodeGen/R600/call_fs.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/call_fs.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/call_fs.ll&p1=llvm/trunk/test/CodeGen/R600/call_fs.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cayman-loop-bug.ll (from r239647, llvm/trunk/test/CodeGen/R600/cayman-loop-bug.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cayman-loop-bug.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cayman-loop-bug.ll&p1=llvm/trunk/test/CodeGen/R600/cayman-loop-bug.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cf-stack-bug.ll (from r239647, llvm/trunk/test/CodeGen/R600/cf-stack-bug.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cf-stack-bug.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cf-stack-bug.ll&p1=llvm/trunk/test/CodeGen/R600/cf-stack-bug.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cf_end.ll (from r239647, llvm/trunk/test/CodeGen/R600/cf_end.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cf_end.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cf_end.ll&p1=llvm/trunk/test/CodeGen/R600/cf_end.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cgp-addressing-modes.ll (from r239647, llvm/trunk/test/CodeGen/R600/cgp-addressing-modes.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cgp-addressing-modes.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cgp-addressing-modes.ll&p1=llvm/trunk/test/CodeGen/R600/cgp-addressing-modes.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/coalescer_remat.ll (from r239647, llvm/trunk/test/CodeGen/R600/coalescer_remat.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/coalescer_remat.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/coalescer_remat.ll&p1=llvm/trunk/test/CodeGen/R600/coalescer_remat.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll (from r239647, llvm/trunk/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll&p1=llvm/trunk/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/combine_vloads.ll (from r239647, llvm/trunk/test/CodeGen/R600/combine_vloads.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/combine_vloads.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/combine_vloads.ll&p1=llvm/trunk/test/CodeGen/R600/combine_vloads.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/commute-compares.ll (from r239647, llvm/trunk/test/CodeGen/R600/commute-compares.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/commute-compares.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/commute-compares.ll&p1=llvm/trunk/test/CodeGen/R600/commute-compares.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/commute_modifiers.ll (from r239647, llvm/trunk/test/CodeGen/R600/commute_modifiers.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/commute_modifiers.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/commute_modifiers.ll&p1=llvm/trunk/test/CodeGen/R600/commute_modifiers.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/complex-folding.ll (from r239647, llvm/trunk/test/CodeGen/R600/complex-folding.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/complex-folding.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/complex-folding.ll&p1=llvm/trunk/test/CodeGen/R600/complex-folding.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/concat_vectors.ll (from r239647, llvm/trunk/test/CodeGen/R600/concat_vectors.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/concat_vectors.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/concat_vectors.ll&p1=llvm/trunk/test/CodeGen/R600/concat_vectors.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/copy-illegal-type.ll (from r239647, llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/copy-illegal-type.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/copy-illegal-type.ll&p1=llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/copy-to-reg.ll (from r239647, llvm/trunk/test/CodeGen/R600/copy-to-reg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/copy-to-reg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/copy-to-reg.ll&p1=llvm/trunk/test/CodeGen/R600/copy-to-reg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll (from r239647, llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll&p1=llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll (from r239647, llvm/trunk/test/CodeGen/R600/ctpop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll&p1=llvm/trunk/test/CodeGen/R600/ctpop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ctpop64.ll (from r239647, llvm/trunk/test/CodeGen/R600/ctpop64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctpop64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ctpop64.ll&p1=llvm/trunk/test/CodeGen/R600/ctpop64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cttz_zero_undef.ll (from r239647, llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cttz_zero_undef.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cttz_zero_undef.ll&p1=llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll (from r239647, llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll&p1=llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll (from r239647, llvm/trunk/test/CodeGen/R600/cvt_flr_i32_f32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll&p1=llvm/trunk/test/CodeGen/R600/cvt_flr_i32_f32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll (from r239647, llvm/trunk/test/CodeGen/R600/cvt_rpi_i32_f32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll&p1=llvm/trunk/test/CodeGen/R600/cvt_rpi_i32_f32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll (from r239647, llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll&p1=llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/debug.ll (from r239647, llvm/trunk/test/CodeGen/R600/debug.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/debug.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/debug.ll&p1=llvm/trunk/test/CodeGen/R600/debug.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/default-fp-mode.ll (from r239647, llvm/trunk/test/CodeGen/R600/default-fp-mode.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/default-fp-mode.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/default-fp-mode.ll&p1=llvm/trunk/test/CodeGen/R600/default-fp-mode.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll (from r239647, llvm/trunk/test/CodeGen/R600/disconnected-predset-break-bug.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll&p1=llvm/trunk/test/CodeGen/R600/disconnected-predset-break-bug.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/dot4-folding.ll (from r239647, llvm/trunk/test/CodeGen/R600/dot4-folding.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/dot4-folding.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/dot4-folding.ll&p1=llvm/trunk/test/CodeGen/R600/dot4-folding.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll&p1=llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds_read2.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds_read2.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds_read2.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds_read2.ll&p1=llvm/trunk/test/CodeGen/R600/ds_read2.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds_read2_offset_order.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds_read2_offset_order.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds_read2_offset_order.ll&p1=llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds_read2st64.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds_read2st64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds_read2st64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds_read2st64.ll&p1=llvm/trunk/test/CodeGen/R600/ds_read2st64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds_write2.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds_write2.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds_write2.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds_write2.ll&p1=llvm/trunk/test/CodeGen/R600/ds_write2.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ds_write2st64.ll (from r239647, llvm/trunk/test/CodeGen/R600/ds_write2st64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ds_write2st64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ds_write2st64.ll&p1=llvm/trunk/test/CodeGen/R600/ds_write2st64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/elf.ll (from r239647, llvm/trunk/test/CodeGen/R600/elf.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/elf.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/elf.ll&p1=llvm/trunk/test/CodeGen/R600/elf.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/elf.r600.ll (from r239647, llvm/trunk/test/CodeGen/R600/elf.r600.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/elf.r600.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/elf.r600.ll&p1=llvm/trunk/test/CodeGen/R600/elf.r600.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/empty-function.ll (from r239647, llvm/trunk/test/CodeGen/R600/empty-function.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/empty-function.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/empty-function.ll&p1=llvm/trunk/test/CodeGen/R600/empty-function.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/endcf-loop-header.ll (from r239647, llvm/trunk/test/CodeGen/R600/endcf-loop-header.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/endcf-loop-header.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/endcf-loop-header.ll&p1=llvm/trunk/test/CodeGen/R600/endcf-loop-header.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/extload-private.ll (from r239647, llvm/trunk/test/CodeGen/R600/extload-private.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extload-private.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/extload-private.ll&p1=llvm/trunk/test/CodeGen/R600/extload-private.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/extload.ll (from r239647, llvm/trunk/test/CodeGen/R600/extload.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extload.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/extload.ll&p1=llvm/trunk/test/CodeGen/R600/extload.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll (from r239647, llvm/trunk/test/CodeGen/R600/extract_vector_elt_i16.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll&p1=llvm/trunk/test/CodeGen/R600/extract_vector_elt_i16.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fabs.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fabs.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fabs.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fabs.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fabs.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fabs.ll (from r239647, llvm/trunk/test/CodeGen/R600/fabs.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fabs.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fabs.ll&p1=llvm/trunk/test/CodeGen/R600/fabs.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fadd.ll (from r239647, llvm/trunk/test/CodeGen/R600/fadd.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fadd.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fadd.ll&p1=llvm/trunk/test/CodeGen/R600/fadd.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fadd64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fadd64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fadd64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fadd64.ll&p1=llvm/trunk/test/CodeGen/R600/fadd64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fceil.ll (from r239647, llvm/trunk/test/CodeGen/R600/fceil.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fceil.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fceil.ll&p1=llvm/trunk/test/CodeGen/R600/fceil.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fceil64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fceil64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fceil64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fceil64.ll&p1=llvm/trunk/test/CodeGen/R600/fceil64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnd.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnd.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnd.ll&p1=llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll&p1=llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcmp.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcmp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcmp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcmp.ll&p1=llvm/trunk/test/CodeGen/R600/fcmp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcmp64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcmp64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcmp64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcmp64.ll&p1=llvm/trunk/test/CodeGen/R600/fcmp64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fconst64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fconst64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fconst64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fconst64.ll&p1=llvm/trunk/test/CodeGen/R600/fconst64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f32.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcopysign.f32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f32.ll&p1=llvm/trunk/test/CodeGen/R600/fcopysign.f32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fcopysign.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fcopysign.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fdiv.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fdiv.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fdiv.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fdiv.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fdiv.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fdiv.ll (from r239647, llvm/trunk/test/CodeGen/R600/fdiv.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fdiv.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fdiv.ll&p1=llvm/trunk/test/CodeGen/R600/fdiv.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r600.ll (from r239647, llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r600.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r600.ll&p1=llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r700+.ll (from r239647, llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r700%2B.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r700%2B.ll&p1=llvm/trunk/test/CodeGen/R600/fetch-limits.r700%2B.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ffloor.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/ffloor.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ffloor.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ffloor.f64.ll&p1=llvm/trunk/test/CodeGen/R600/ffloor.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ffloor.ll (from r239647, llvm/trunk/test/CodeGen/R600/ffloor.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ffloor.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ffloor.ll&p1=llvm/trunk/test/CodeGen/R600/ffloor.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/flat-address-space.ll (from r239647, llvm/trunk/test/CodeGen/R600/flat-address-space.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/flat-address-space.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/flat-address-space.ll&p1=llvm/trunk/test/CodeGen/R600/flat-address-space.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/floor.ll (from r239647, llvm/trunk/test/CodeGen/R600/floor.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/floor.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/floor.ll&p1=llvm/trunk/test/CodeGen/R600/floor.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fma-combine.ll (from r239647, llvm/trunk/test/CodeGen/R600/fma-combine.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fma-combine.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fma-combine.ll&p1=llvm/trunk/test/CodeGen/R600/fma-combine.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fma.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fma.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fma.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fma.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fma.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fma.ll (from r239647, llvm/trunk/test/CodeGen/R600/fma.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fma.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fma.ll&p1=llvm/trunk/test/CodeGen/R600/fma.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmad.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmad.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmad.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmad.ll&p1=llvm/trunk/test/CodeGen/R600/fmad.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmax.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmax.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmax.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmax.ll&p1=llvm/trunk/test/CodeGen/R600/fmax.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmax3.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmax3.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmax3.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmax3.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fmax3.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmax3.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmax3.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmax3.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmax3.ll&p1=llvm/trunk/test/CodeGen/R600/fmax3.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmax_legacy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.ll&p1=llvm/trunk/test/CodeGen/R600/fmax_legacy.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmaxnum.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fmaxnum.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmaxnum.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.ll&p1=llvm/trunk/test/CodeGen/R600/fmaxnum.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmin.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmin.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmin.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmin.ll&p1=llvm/trunk/test/CodeGen/R600/fmin.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmin3.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmin3.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmin3.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmin3.ll&p1=llvm/trunk/test/CodeGen/R600/fmin3.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmin_legacy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.ll&p1=llvm/trunk/test/CodeGen/R600/fmin_legacy.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fminnum.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fminnum.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fminnum.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fminnum.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fminnum.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fminnum.ll (from r239647, llvm/trunk/test/CodeGen/R600/fminnum.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fminnum.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fminnum.ll&p1=llvm/trunk/test/CodeGen/R600/fminnum.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmul.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmul.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmul.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmul.ll&p1=llvm/trunk/test/CodeGen/R600/fmul.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmul64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmul64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmul64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmul64.ll&p1=llvm/trunk/test/CodeGen/R600/fmul64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fmuladd.ll (from r239647, llvm/trunk/test/CodeGen/R600/fmuladd.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fmuladd.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fmuladd.ll&p1=llvm/trunk/test/CodeGen/R600/fmuladd.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fnearbyint.ll (from r239647, llvm/trunk/test/CodeGen/R600/fnearbyint.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fnearbyint.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fnearbyint.ll&p1=llvm/trunk/test/CodeGen/R600/fnearbyint.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.ll (from r239647, llvm/trunk/test/CodeGen/R600/fneg-fabs.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.ll&p1=llvm/trunk/test/CodeGen/R600/fneg-fabs.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fneg.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fneg.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fneg.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fneg.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fneg.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fneg.ll (from r239647, llvm/trunk/test/CodeGen/R600/fneg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fneg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fneg.ll&p1=llvm/trunk/test/CodeGen/R600/fneg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp-classify.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp-classify.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp-classify.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp-classify.ll&p1=llvm/trunk/test/CodeGen/R600/fp-classify.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp16_to_fp.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp16_to_fp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp16_to_fp.ll&p1=llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp32_to_fp16.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp32_to_fp16.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp32_to_fp16.ll&p1=llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp_to_sint.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.ll&p1=llvm/trunk/test/CodeGen/R600/fp_to_sint.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.f64.ll&p1=llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.ll (from r239647, llvm/trunk/test/CodeGen/R600/fp_to_uint.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.ll&p1=llvm/trunk/test/CodeGen/R600/fp_to_uint.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fpext.ll (from r239647, llvm/trunk/test/CodeGen/R600/fpext.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fpext.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fpext.ll&p1=llvm/trunk/test/CodeGen/R600/fpext.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fptrunc.ll (from r239647, llvm/trunk/test/CodeGen/R600/fptrunc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fptrunc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fptrunc.ll&p1=llvm/trunk/test/CodeGen/R600/fptrunc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/frem.ll (from r239647, llvm/trunk/test/CodeGen/R600/frem.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/frem.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/frem.ll&p1=llvm/trunk/test/CodeGen/R600/frem.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fsqrt.ll (from r239647, llvm/trunk/test/CodeGen/R600/fsqrt.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fsqrt.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fsqrt.ll&p1=llvm/trunk/test/CodeGen/R600/fsqrt.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fsub.ll (from r239647, llvm/trunk/test/CodeGen/R600/fsub.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fsub.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fsub.ll&p1=llvm/trunk/test/CodeGen/R600/fsub.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/fsub64.ll (from r239647, llvm/trunk/test/CodeGen/R600/fsub64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fsub64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/fsub64.ll&p1=llvm/trunk/test/CodeGen/R600/fsub64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ftrunc.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ftrunc.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ftrunc.f64.ll&p1=llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/ftrunc.ll (from r239647, llvm/trunk/test/CodeGen/R600/ftrunc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ftrunc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/ftrunc.ll&p1=llvm/trunk/test/CodeGen/R600/ftrunc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/gep-address-space.ll (from r239647, llvm/trunk/test/CodeGen/R600/gep-address-space.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/gep-address-space.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/gep-address-space.ll&p1=llvm/trunk/test/CodeGen/R600/gep-address-space.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-directive.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-directive.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-directive.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-directive.ll&p1=llvm/trunk/test/CodeGen/R600/global-directive.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-extload-i1.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i1.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-extload-i1.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-extload-i1.ll&p1=llvm/trunk/test/CodeGen/R600/global-extload-i1.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-extload-i16.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i16.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-extload-i16.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-extload-i16.ll&p1=llvm/trunk/test/CodeGen/R600/global-extload-i16.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-extload-i32.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-extload-i32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-extload-i32.ll&p1=llvm/trunk/test/CodeGen/R600/global-extload-i32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-extload-i8.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i8.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-extload-i8.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-extload-i8.ll&p1=llvm/trunk/test/CodeGen/R600/global-extload-i8.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global-zero-initializer.ll (from r239647, llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global-zero-initializer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global-zero-initializer.ll&p1=llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/global_atomics.ll (from r239647, llvm/trunk/test/CodeGen/R600/global_atomics.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global_atomics.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/global_atomics.ll&p1=llvm/trunk/test/CodeGen/R600/global_atomics.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace-fail.ll (from r239647, llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace-fail.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace-fail.ll&p1=llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace.ll (from r239647, llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace.ll&p1=llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/half.ll (from r239647, llvm/trunk/test/CodeGen/R600/half.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/half.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/half.ll&p1=llvm/trunk/test/CodeGen/R600/half.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/hsa.ll (from r239647, llvm/trunk/test/CodeGen/R600/hsa.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/hsa.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/hsa.ll&p1=llvm/trunk/test/CodeGen/R600/hsa.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll (from r239647, llvm/trunk/test/CodeGen/R600/i1-copy-implicit-def.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll&p1=llvm/trunk/test/CodeGen/R600/i1-copy-implicit-def.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/i1-copy-phi.ll (from r239647, llvm/trunk/test/CodeGen/R600/i1-copy-phi.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/i1-copy-phi.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/i1-copy-phi.ll&p1=llvm/trunk/test/CodeGen/R600/i1-copy-phi.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/i8-to-double-to-float.ll (from r239647, llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/i8-to-double-to-float.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/i8-to-double-to-float.ll&p1=llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll (from r239647, llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll&p1=llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/icmp64.ll (from r239647, llvm/trunk/test/CodeGen/R600/icmp64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/icmp64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/icmp64.ll&p1=llvm/trunk/test/CodeGen/R600/icmp64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/imm.ll (from r239647, llvm/trunk/test/CodeGen/R600/imm.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/imm.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/imm.ll&p1=llvm/trunk/test/CodeGen/R600/imm.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/indirect-addressing-si.ll (from r239647, llvm/trunk/test/CodeGen/R600/indirect-addressing-si.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/indirect-addressing-si.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/indirect-addressing-si.ll&p1=llvm/trunk/test/CodeGen/R600/indirect-addressing-si.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/indirect-private-64.ll (from r239647, llvm/trunk/test/CodeGen/R600/indirect-private-64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/indirect-private-64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/indirect-private-64.ll&p1=llvm/trunk/test/CodeGen/R600/indirect-private-64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll (from r239647, llvm/trunk/test/CodeGen/R600/infinite-loop-evergreen.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll&p1=llvm/trunk/test/CodeGen/R600/infinite-loop-evergreen.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/infinite-loop.ll (from r239647, llvm/trunk/test/CodeGen/R600/infinite-loop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/infinite-loop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/infinite-loop.ll&p1=llvm/trunk/test/CodeGen/R600/infinite-loop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/inline-asm.ll (from r239647, llvm/trunk/test/CodeGen/R600/inline-asm.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/inline-asm.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/inline-asm.ll&p1=llvm/trunk/test/CodeGen/R600/inline-asm.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/inline-calls.ll (from r239647, llvm/trunk/test/CodeGen/R600/inline-calls.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/inline-calls.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/inline-calls.ll&p1=llvm/trunk/test/CodeGen/R600/inline-calls.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/input-mods.ll (from r239647, llvm/trunk/test/CodeGen/R600/input-mods.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/input-mods.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/input-mods.ll&p1=llvm/trunk/test/CodeGen/R600/input-mods.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/insert_subreg.ll (from r239647, llvm/trunk/test/CodeGen/R600/insert_subreg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert_subreg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/insert_subreg.ll&p1=llvm/trunk/test/CodeGen/R600/insert_subreg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll (from r239647, llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll&p1=llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/jump-address.ll (from r239647, llvm/trunk/test/CodeGen/R600/jump-address.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/jump-address.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/jump-address.ll&p1=llvm/trunk/test/CodeGen/R600/jump-address.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/kcache-fold.ll (from r239647, llvm/trunk/test/CodeGen/R600/kcache-fold.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/kcache-fold.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/kcache-fold.ll&p1=llvm/trunk/test/CodeGen/R600/kcache-fold.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll (from r239647, llvm/trunk/test/CodeGen/R600/kernel-args.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll&p1=llvm/trunk/test/CodeGen/R600/kernel-args.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/large-alloca.ll (from r239647, llvm/trunk/test/CodeGen/R600/large-alloca.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/large-alloca.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/large-alloca.ll&p1=llvm/trunk/test/CodeGen/R600/large-alloca.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/large-constant-initializer.ll (from r239647, llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/large-constant-initializer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/large-constant-initializer.ll&p1=llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lds-initializer.ll (from r239647, llvm/trunk/test/CodeGen/R600/lds-initializer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-initializer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lds-initializer.ll&p1=llvm/trunk/test/CodeGen/R600/lds-initializer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lds-oqap-crash.ll (from r239647, llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-oqap-crash.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lds-oqap-crash.ll&p1=llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll (from r239647, llvm/trunk/test/CodeGen/R600/lds-output-queue.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll&p1=llvm/trunk/test/CodeGen/R600/lds-output-queue.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lds-size.ll (from r239647, llvm/trunk/test/CodeGen/R600/lds-size.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-size.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lds-size.ll&p1=llvm/trunk/test/CodeGen/R600/lds-size.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lds-zero-initializer.ll (from r239647, llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-zero-initializer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lds-zero-initializer.ll&p1=llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll (from r239647, llvm/trunk/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll&p1=llvm/trunk/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Added: llvm/trunk/test/CodeGen/AMDGPU/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lit.local.cfg?rev=239657&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/lit.local.cfg (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/lit.local.cfg Fri Jun 12 22:28:10 2015
@@ -0,0 +1,2 @@
+if not 'AMDGPU' in config.root.targets:
+    config.unsupported = True
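
(Note on the lit.local.cfg added above: lit executes each lit.local.cfg
with the test suite's `config` object in scope, so setting
config.unsupported = True makes every test under test/CodeGen/AMDGPU
report UNSUPPORTED instead of running when the AMDGPU backend is not
among the enabled targets. A minimal sketch of the same gating idiom --
`config.root.targets` is populated by the top-level lit configuration
from LLVM_TARGETS_TO_BUILD, and the `not in` spelling below is just the
more idiomatic equivalent of the committed `if not ... in` form:

    # lit.local.cfg sketch: skip this whole directory unless the
    # AMDGPU backend was built into this copy of LLVM.
    # `config.root.targets` holds the targets enabled at configure time.
    if 'AMDGPU' not in config.root.targets:
        config.unsupported = True

With this in place, `llvm-lit test/CodeGen/AMDGPU` on a build without the
AMDGPU target simply reports the tests as UNSUPPORTED rather than failing.)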

Copied: llvm/trunk/test/CodeGen/AMDGPU/literals.ll (from r239647, llvm/trunk/test/CodeGen/R600/literals.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/literals.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/literals.ll&p1=llvm/trunk/test/CodeGen/R600/literals.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.global.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.global.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.global.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.local.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.local.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.local.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfi.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfi.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfi.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfi.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfi.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfm.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfm.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfm.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfm.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfm.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.brev.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.brev.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.brev.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.clamp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.clamp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cvt_f32_ubyte.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cvt_f32_ubyte.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cvt_f32_ubyte.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fixup.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fixup.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fixup.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fmas.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fmas.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fmas.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.flbit.i32.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.flbit.i32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.flbit.i32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.flbit.i32.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.flbit.i32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imad24.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imad24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imad24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imad24.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imad24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imax.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imax.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imax.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imin.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imin.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imin.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imul24.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imul24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imul24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imul24.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imul24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.kill.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.kill.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.ldexp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.ldexp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.ldexp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.legacy.rsq.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.legacy.rsq.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.legacy.rsq.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.mul.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.mul.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.mul.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.mul.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.mul.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.f64.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.tex.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.tex.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.tex.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trig_preop.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trig_preop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trig_preop.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trunc.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trunc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trunc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trunc.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trunc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umad24.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umad24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umad24.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umax.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umax.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umax.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umin.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umin.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umin.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umul24.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umul24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umul24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umul24.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umul24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.fs.interp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.fs.interp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.gather4.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.gather4.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.gather4.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.gather4.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.gather4.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.getlod.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.getlod.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.getlod.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.getlod.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.getlod.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.image.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.o.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.o.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.imageload.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.imageload.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.imageload.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.resinfo.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.resinfo.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.resinfo.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.resinfo.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.resinfo.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample-masked.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sample-masked.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample-masked.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample-masked.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.sample-masked.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sample.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.sample.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sampled.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sampled.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sampled.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sampled.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.sampled.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg-m0.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg-m0.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg-m0.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.tbuffer.store.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.tbuffer.store.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tid.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.tid.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tid.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tid.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.SI.tid.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.dp4.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.dp4.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.dp4.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.kilp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.kilp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.kilp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.kilp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.amdgpu.kilp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.lrp.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.lrp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.lrp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.lrp.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.amdgpu.lrp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.cos.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.cos.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.cos.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.cos.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.cos.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.exp2.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.exp2.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.exp2.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.exp2.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.exp2.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.log2.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.log2.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.log2.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.log2.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.log2.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.memcpy.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.memcpy.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.memcpy.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.pow.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.pow.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.pow.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.pow.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.pow.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.rint.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.f64.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.rint.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.rint.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.rint.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.round.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.round.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.round.f64.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.round.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.round.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.round.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.round.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.round.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.sin.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.sin.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.sin.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.sin.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.sin.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/llvm.sqrt.ll (from r239647, llvm/trunk/test/CodeGen/R600/llvm.sqrt.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.sqrt.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/llvm.sqrt.ll&p1=llvm/trunk/test/CodeGen/R600/llvm.sqrt.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/load-i1.ll (from r239647, llvm/trunk/test/CodeGen/R600/load-i1.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-i1.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/load-i1.ll&p1=llvm/trunk/test/CodeGen/R600/load-i1.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/load-input-fold.ll (from r239647, llvm/trunk/test/CodeGen/R600/load-input-fold.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-input-fold.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/load-input-fold.ll&p1=llvm/trunk/test/CodeGen/R600/load-input-fold.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/load.ll (from r239647, llvm/trunk/test/CodeGen/R600/load.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/load.ll&p1=llvm/trunk/test/CodeGen/R600/load.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/load.vec.ll (from r239647, llvm/trunk/test/CodeGen/R600/load.vec.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load.vec.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/load.vec.ll&p1=llvm/trunk/test/CodeGen/R600/load.vec.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/load64.ll (from r239647, llvm/trunk/test/CodeGen/R600/load64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/load64.ll&p1=llvm/trunk/test/CodeGen/R600/load64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/local-64.ll (from r239647, llvm/trunk/test/CodeGen/R600/local-64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/local-64.ll&p1=llvm/trunk/test/CodeGen/R600/local-64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/local-atomics.ll (from r239647, llvm/trunk/test/CodeGen/R600/local-atomics.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-atomics.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/local-atomics.ll&p1=llvm/trunk/test/CodeGen/R600/local-atomics.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/local-atomics64.ll (from r239647, llvm/trunk/test/CodeGen/R600/local-atomics64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-atomics64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/local-atomics64.ll&p1=llvm/trunk/test/CodeGen/R600/local-atomics64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll (from r239647, llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll&p1=llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/local-memory.ll (from r239647, llvm/trunk/test/CodeGen/R600/local-memory.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/local-memory.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/local-memory.ll&p1=llvm/trunk/test/CodeGen/R600/local-memory.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/loop-address.ll (from r239647, llvm/trunk/test/CodeGen/R600/loop-address.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/loop-address.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/loop-address.ll&p1=llvm/trunk/test/CodeGen/R600/loop-address.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/loop-idiom.ll (from r239647, llvm/trunk/test/CodeGen/R600/loop-idiom.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/loop-idiom.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/loop-idiom.ll&p1=llvm/trunk/test/CodeGen/R600/loop-idiom.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lshl.ll (from r239647, llvm/trunk/test/CodeGen/R600/lshl.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lshl.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lshl.ll&p1=llvm/trunk/test/CodeGen/R600/lshl.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/lshr.ll (from r239647, llvm/trunk/test/CodeGen/R600/lshr.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lshr.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/lshr.ll&p1=llvm/trunk/test/CodeGen/R600/lshr.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/m0-spill.ll (from r239647, llvm/trunk/test/CodeGen/R600/m0-spill.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/m0-spill.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/m0-spill.ll&p1=llvm/trunk/test/CodeGen/R600/m0-spill.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mad-combine.ll (from r239647, llvm/trunk/test/CodeGen/R600/mad-combine.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mad-combine.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mad-combine.ll&p1=llvm/trunk/test/CodeGen/R600/mad-combine.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mad-sub.ll (from r239647, llvm/trunk/test/CodeGen/R600/mad-sub.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mad-sub.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mad-sub.ll&p1=llvm/trunk/test/CodeGen/R600/mad-sub.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mad_int24.ll (from r239647, llvm/trunk/test/CodeGen/R600/mad_int24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mad_int24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mad_int24.ll&p1=llvm/trunk/test/CodeGen/R600/mad_int24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mad_uint24.ll (from r239647, llvm/trunk/test/CodeGen/R600/mad_uint24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mad_uint24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mad_uint24.ll&p1=llvm/trunk/test/CodeGen/R600/mad_uint24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/madak.ll (from r239647, llvm/trunk/test/CodeGen/R600/madak.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/madak.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/madak.ll&p1=llvm/trunk/test/CodeGen/R600/madak.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/madmk.ll (from r239647, llvm/trunk/test/CodeGen/R600/madmk.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/madmk.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/madmk.ll&p1=llvm/trunk/test/CodeGen/R600/madmk.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/max-literals.ll (from r239647, llvm/trunk/test/CodeGen/R600/max-literals.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/max-literals.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/max-literals.ll&p1=llvm/trunk/test/CodeGen/R600/max-literals.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/max.ll (from r239647, llvm/trunk/test/CodeGen/R600/max.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/max.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/max.ll&p1=llvm/trunk/test/CodeGen/R600/max.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/max3.ll (from r239647, llvm/trunk/test/CodeGen/R600/max3.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/max3.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/max3.ll&p1=llvm/trunk/test/CodeGen/R600/max3.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/merge-stores.ll (from r239647, llvm/trunk/test/CodeGen/R600/merge-stores.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/merge-stores.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/merge-stores.ll&p1=llvm/trunk/test/CodeGen/R600/merge-stores.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/min.ll (from r239647, llvm/trunk/test/CodeGen/R600/min.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/min.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/min.ll&p1=llvm/trunk/test/CodeGen/R600/min.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/min3.ll (from r239647, llvm/trunk/test/CodeGen/R600/min3.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/min3.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/min3.ll&p1=llvm/trunk/test/CodeGen/R600/min3.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/missing-store.ll (from r239647, llvm/trunk/test/CodeGen/R600/missing-store.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/missing-store.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/missing-store.ll&p1=llvm/trunk/test/CodeGen/R600/missing-store.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mubuf.ll (from r239647, llvm/trunk/test/CodeGen/R600/mubuf.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mubuf.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mubuf.ll&p1=llvm/trunk/test/CodeGen/R600/mubuf.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mul.ll (from r239647, llvm/trunk/test/CodeGen/R600/mul.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mul.ll&p1=llvm/trunk/test/CodeGen/R600/mul.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll (from r239647, llvm/trunk/test/CodeGen/R600/mul_int24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll&p1=llvm/trunk/test/CodeGen/R600/mul_int24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll (from r239647, llvm/trunk/test/CodeGen/R600/mul_uint24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll&p1=llvm/trunk/test/CodeGen/R600/mul_uint24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/mulhu.ll (from r239647, llvm/trunk/test/CodeGen/R600/mulhu.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mulhu.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/mulhu.ll&p1=llvm/trunk/test/CodeGen/R600/mulhu.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll (from r239647, llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll&p1=llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/no-shrink-extloads.ll (from r239647, llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/no-shrink-extloads.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/no-shrink-extloads.ll&p1=llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/operand-folding.ll (from r239647, llvm/trunk/test/CodeGen/R600/operand-folding.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/operand-folding.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/operand-folding.ll&p1=llvm/trunk/test/CodeGen/R600/operand-folding.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/operand-spacing.ll (from r239647, llvm/trunk/test/CodeGen/R600/operand-spacing.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/operand-spacing.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/operand-spacing.ll&p1=llvm/trunk/test/CodeGen/R600/operand-spacing.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/or.ll (from r239647, llvm/trunk/test/CodeGen/R600/or.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/or.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/or.ll&p1=llvm/trunk/test/CodeGen/R600/or.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/packetizer.ll (from r239647, llvm/trunk/test/CodeGen/R600/packetizer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/packetizer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/packetizer.ll&p1=llvm/trunk/test/CodeGen/R600/packetizer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/parallelandifcollapse.ll (from r239647, llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/parallelandifcollapse.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/parallelandifcollapse.ll&p1=llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/parallelorifcollapse.ll (from r239647, llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/parallelorifcollapse.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/parallelorifcollapse.ll&p1=llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/predicate-dp4.ll (from r239647, llvm/trunk/test/CodeGen/R600/predicate-dp4.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/predicate-dp4.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/predicate-dp4.ll&p1=llvm/trunk/test/CodeGen/R600/predicate-dp4.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/predicates.ll (from r239647, llvm/trunk/test/CodeGen/R600/predicates.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/predicates.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/predicates.ll&p1=llvm/trunk/test/CodeGen/R600/predicates.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/private-memory-atomics.ll (from r239647, llvm/trunk/test/CodeGen/R600/private-memory-atomics.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/private-memory-atomics.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/private-memory-atomics.ll&p1=llvm/trunk/test/CodeGen/R600/private-memory-atomics.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/private-memory-broken.ll (from r239647, llvm/trunk/test/CodeGen/R600/private-memory-broken.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/private-memory-broken.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/private-memory-broken.ll&p1=llvm/trunk/test/CodeGen/R600/private-memory-broken.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/private-memory.ll (from r239647, llvm/trunk/test/CodeGen/R600/private-memory.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/private-memory.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/private-memory.ll&p1=llvm/trunk/test/CodeGen/R600/private-memory.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/pv-packing.ll (from r239647, llvm/trunk/test/CodeGen/R600/pv-packing.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/pv-packing.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/pv-packing.ll&p1=llvm/trunk/test/CodeGen/R600/pv-packing.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/pv.ll (from r239647, llvm/trunk/test/CodeGen/R600/pv.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/pv.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/pv.ll&p1=llvm/trunk/test/CodeGen/R600/pv.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/r600-encoding.ll (from r239647, llvm/trunk/test/CodeGen/R600/r600-encoding.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/r600-encoding.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/r600-encoding.ll&p1=llvm/trunk/test/CodeGen/R600/r600-encoding.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/r600-export-fix.ll (from r239647, llvm/trunk/test/CodeGen/R600/r600-export-fix.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/r600-export-fix.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/r600-export-fix.ll&p1=llvm/trunk/test/CodeGen/R600/r600-export-fix.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll (from r239647, llvm/trunk/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll&p1=llvm/trunk/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/r600cfg.ll (from r239647, llvm/trunk/test/CodeGen/R600/r600cfg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/r600cfg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/r600cfg.ll&p1=llvm/trunk/test/CodeGen/R600/r600cfg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/reciprocal.ll (from r239647, llvm/trunk/test/CodeGen/R600/reciprocal.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/reciprocal.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/reciprocal.ll&p1=llvm/trunk/test/CodeGen/R600/reciprocal.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/register-count-comments.ll (from r239647, llvm/trunk/test/CodeGen/R600/register-count-comments.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/register-count-comments.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/register-count-comments.ll&p1=llvm/trunk/test/CodeGen/R600/register-count-comments.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/reorder-stores.ll (from r239647, llvm/trunk/test/CodeGen/R600/reorder-stores.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/reorder-stores.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/reorder-stores.ll&p1=llvm/trunk/test/CodeGen/R600/reorder-stores.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rotl.i64.ll (from r239647, llvm/trunk/test/CodeGen/R600/rotl.i64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rotl.i64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rotl.i64.ll&p1=llvm/trunk/test/CodeGen/R600/rotl.i64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rotl.ll (from r239647, llvm/trunk/test/CodeGen/R600/rotl.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rotl.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rotl.ll&p1=llvm/trunk/test/CodeGen/R600/rotl.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rotr.i64.ll (from r239647, llvm/trunk/test/CodeGen/R600/rotr.i64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rotr.i64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rotr.i64.ll&p1=llvm/trunk/test/CodeGen/R600/rotr.i64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rotr.ll (from r239647, llvm/trunk/test/CodeGen/R600/rotr.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rotr.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rotr.ll&p1=llvm/trunk/test/CodeGen/R600/rotr.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rsq.ll (from r239647, llvm/trunk/test/CodeGen/R600/rsq.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rsq.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rsq.ll&p1=llvm/trunk/test/CodeGen/R600/rsq.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/rv7x0_count3.ll (from r239647, llvm/trunk/test/CodeGen/R600/rv7x0_count3.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rv7x0_count3.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/rv7x0_count3.ll&p1=llvm/trunk/test/CodeGen/R600/rv7x0_count3.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/s_movk_i32.ll (from r239647, llvm/trunk/test/CodeGen/R600/s_movk_i32.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/s_movk_i32.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/s_movk_i32.ll&p1=llvm/trunk/test/CodeGen/R600/s_movk_i32.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/saddo.ll (from r239647, llvm/trunk/test/CodeGen/R600/saddo.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/saddo.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/saddo.ll&p1=llvm/trunk/test/CodeGen/R600/saddo.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/salu-to-valu.ll (from r239647, llvm/trunk/test/CodeGen/R600/salu-to-valu.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/salu-to-valu.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/salu-to-valu.ll&p1=llvm/trunk/test/CodeGen/R600/salu-to-valu.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll (from r239647, llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll&p1=llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested-if.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested-if.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-global-loads.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-global-loads.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-global-loads.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-if-2.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-if-2.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-if-2.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-if-2.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-if-2.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-if.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-if.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-if.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-if.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-if.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-kernel-arg-loads.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-kernel-arg-loads.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop-failure.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop-failure.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop-failure.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop.ll (from r239647, llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop.ll&p1=llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/scratch-buffer.ll (from r239647, llvm/trunk/test/CodeGen/R600/scratch-buffer.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/scratch-buffer.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/scratch-buffer.ll&p1=llvm/trunk/test/CodeGen/R600/scratch-buffer.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sdiv.ll (from r239647, llvm/trunk/test/CodeGen/R600/sdiv.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdiv.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sdiv.ll&p1=llvm/trunk/test/CodeGen/R600/sdiv.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sdivrem24.ll (from r239647, llvm/trunk/test/CodeGen/R600/sdivrem24.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdivrem24.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sdivrem24.ll&p1=llvm/trunk/test/CodeGen/R600/sdivrem24.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sdivrem64.ll (from r239647, llvm/trunk/test/CodeGen/R600/sdivrem64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdivrem64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sdivrem64.ll&p1=llvm/trunk/test/CodeGen/R600/sdivrem64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/select-i1.ll (from r239647, llvm/trunk/test/CodeGen/R600/select-i1.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/select-i1.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/select-i1.ll&p1=llvm/trunk/test/CodeGen/R600/select-i1.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/select-vectors.ll (from r239647, llvm/trunk/test/CodeGen/R600/select-vectors.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/select-vectors.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/select-vectors.ll&p1=llvm/trunk/test/CodeGen/R600/select-vectors.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/select.ll (from r239647, llvm/trunk/test/CodeGen/R600/select.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/select.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/select.ll&p1=llvm/trunk/test/CodeGen/R600/select.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/select64.ll (from r239647, llvm/trunk/test/CodeGen/R600/select64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/select64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/select64.ll&p1=llvm/trunk/test/CodeGen/R600/select64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnd.ll (from r239647, llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnd.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnd.ll&p1=llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnde-int.ll (from r239647, llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnde-int.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnde-int.ll&p1=llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll (from r239647, llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll&p1=llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/selectcc-opt.ll (from r239647, llvm/trunk/test/CodeGen/R600/selectcc-opt.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/selectcc-opt.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/selectcc-opt.ll&p1=llvm/trunk/test/CodeGen/R600/selectcc-opt.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/selectcc.ll (from r239647, llvm/trunk/test/CodeGen/R600/selectcc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/selectcc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/selectcc.ll&p1=llvm/trunk/test/CodeGen/R600/selectcc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/set-dx10.ll (from r239647, llvm/trunk/test/CodeGen/R600/set-dx10.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/set-dx10.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/set-dx10.ll&p1=llvm/trunk/test/CodeGen/R600/set-dx10.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/setcc-equivalent.ll (from r239647, llvm/trunk/test/CodeGen/R600/setcc-equivalent.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc-equivalent.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/setcc-equivalent.ll&p1=llvm/trunk/test/CodeGen/R600/setcc-equivalent.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll (from r239647, llvm/trunk/test/CodeGen/R600/setcc-opt.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll&p1=llvm/trunk/test/CodeGen/R600/setcc-opt.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/setcc.ll (from r239647, llvm/trunk/test/CodeGen/R600/setcc.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/setcc.ll&p1=llvm/trunk/test/CodeGen/R600/setcc.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll (from r239647, llvm/trunk/test/CodeGen/R600/setcc64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll&p1=llvm/trunk/test/CodeGen/R600/setcc64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/seto.ll (from r239647, llvm/trunk/test/CodeGen/R600/seto.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/seto.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/seto.ll&p1=llvm/trunk/test/CodeGen/R600/seto.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/setuo.ll (from r239647, llvm/trunk/test/CodeGen/R600/setuo.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setuo.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/setuo.ll&p1=llvm/trunk/test/CodeGen/R600/setuo.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sext-eliminate.ll (from r239647, llvm/trunk/test/CodeGen/R600/sext-eliminate.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sext-eliminate.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sext-eliminate.ll&p1=llvm/trunk/test/CodeGen/R600/sext-eliminate.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sext-in-reg.ll (from r239647, llvm/trunk/test/CodeGen/R600/sext-in-reg.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sext-in-reg.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sext-in-reg.ll&p1=llvm/trunk/test/CodeGen/R600/sext-in-reg.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll (from r239647, llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll&p1=llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll (from r239647, llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll&p1=llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy.ll (from r239647, llvm/trunk/test/CodeGen/R600/sgpr-copy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy.ll&p1=llvm/trunk/test/CodeGen/R600/sgpr-copy.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/shared-op-cycle.ll (from r239647, llvm/trunk/test/CodeGen/R600/shared-op-cycle.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shared-op-cycle.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/shared-op-cycle.ll&p1=llvm/trunk/test/CodeGen/R600/shared-op-cycle.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/shl.ll (from r239647, llvm/trunk/test/CodeGen/R600/shl.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/shl.ll&p1=llvm/trunk/test/CodeGen/R600/shl.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/shl_add_constant.ll (from r239647, llvm/trunk/test/CodeGen/R600/shl_add_constant.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl_add_constant.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/shl_add_constant.ll&p1=llvm/trunk/test/CodeGen/R600/shl_add_constant.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/shl_add_ptr.ll (from r239647, llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shl_add_ptr.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/shl_add_ptr.ll&p1=llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf-assertion.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-annotate-cf-assertion.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf-assertion.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf-assertion.ll&p1=llvm/trunk/test/CodeGen/R600/si-annotate-cf-assertion.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-annotate-cf.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf.ll&p1=llvm/trunk/test/CodeGen/R600/si-annotate-cf.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-lod-bias.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-lod-bias.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-lod-bias.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-lod-bias.ll&p1=llvm/trunk/test/CodeGen/R600/si-lod-bias.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-sgpr-spill.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-sgpr-spill.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-sgpr-spill.ll&p1=llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-spill-cf.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-spill-cf.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-spill-cf.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-spill-cf.ll&p1=llvm/trunk/test/CodeGen/R600/si-spill-cf.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll&p1=llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/si-vector-hang.ll (from r239647, llvm/trunk/test/CodeGen/R600/si-vector-hang.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/si-vector-hang.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/si-vector-hang.ll&p1=llvm/trunk/test/CodeGen/R600/si-vector-hang.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sign_extend.ll (from r239647, llvm/trunk/test/CodeGen/R600/sign_extend.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sign_extend.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sign_extend.ll&p1=llvm/trunk/test/CodeGen/R600/sign_extend.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/simplify-demanded-bits-build-pair.ll (from r239647, llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/simplify-demanded-bits-build-pair.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/simplify-demanded-bits-build-pair.ll&p1=llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.f64.ll (from r239647, llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.f64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.f64.ll&p1=llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.ll (from r239647, llvm/trunk/test/CodeGen/R600/sint_to_fp.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.ll&p1=llvm/trunk/test/CodeGen/R600/sint_to_fp.ll&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

More information about the llvm-commits mailing list