[llvm-commits] [PATCH 1/4] AMDGPU: Add core backend files for R600/SI codegen v4

Tom Stellard tstellar at gmail.com
Tue Jun 19 13:18:50 PDT 2012


---
 lib/Target/AMDGPU/AMDGPU.h                         |   35 +
 lib/Target/AMDGPU/AMDGPU.td                        |   21 +
 lib/Target/AMDGPU/AMDGPUConvertToISA.cpp           |   67 +
 lib/Target/AMDGPU/AMDGPUISelLowering.cpp           |  395 ++
 lib/Target/AMDGPU/AMDGPUISelLowering.h             |   76 +
 lib/Target/AMDGPU/AMDGPUInstrInfo.cpp              |   54 +
 lib/Target/AMDGPU/AMDGPUInstrInfo.h                |   48 +
 lib/Target/AMDGPU/AMDGPUInstrInfo.td               |   55 +
 lib/Target/AMDGPU/AMDGPUInstructions.td            |  122 +
 lib/Target/AMDGPU/AMDGPUIntrinsics.td              |   64 +
 lib/Target/AMDGPU/AMDGPURegisterInfo.cpp           |   24 +
 lib/Target/AMDGPU/AMDGPURegisterInfo.h             |   42 +
 lib/Target/AMDGPU/AMDGPURegisterInfo.td            |   22 +
 lib/Target/AMDGPU/AMDGPUTargetMachine.cpp          |  159 +
 lib/Target/AMDGPU/AMDGPUTargetMachine.h            |   70 +
 lib/Target/AMDGPU/AMDGPUUtil.cpp                   |  139 +
 lib/Target/AMDGPU/AMDGPUUtil.h                     |   46 +
 lib/Target/AMDGPU/AMDIL.h                          |  251 +
 lib/Target/AMDGPU/AMDIL7XXDevice.cpp               |  128 +
 lib/Target/AMDGPU/AMDIL7XXDevice.h                 |   71 +
 lib/Target/AMDGPU/AMDILAlgorithms.tpp              |   93 +
 lib/Target/AMDGPU/AMDILBase.td                     |  109 +
 lib/Target/AMDGPU/AMDILCFGStructurizer.cpp         | 3236 ++++++++++++
 lib/Target/AMDGPU/AMDILCallingConv.td              |   42 +
 lib/Target/AMDGPU/AMDILCodeEmitter.h               |   48 +
 lib/Target/AMDGPU/AMDILDevice.cpp                  |  137 +
 lib/Target/AMDGPU/AMDILDevice.h                    |  116 +
 lib/Target/AMDGPU/AMDILDeviceInfo.cpp              |   93 +
 lib/Target/AMDGPU/AMDILDeviceInfo.h                |   89 +
 lib/Target/AMDGPU/AMDILDevices.h                   |   19 +
 lib/Target/AMDGPU/AMDILEnumeratedTypes.td          |  522 ++
 lib/Target/AMDGPU/AMDILEvergreenDevice.cpp         |  183 +
 lib/Target/AMDGPU/AMDILEvergreenDevice.h           |   87 +
 lib/Target/AMDGPU/AMDILFormats.td                  |  174 +
 lib/Target/AMDGPU/AMDILFrameLowering.cpp           |   53 +
 lib/Target/AMDGPU/AMDILFrameLowering.h             |   46 +
 lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp            |  393 ++
 lib/Target/AMDGPU/AMDILISelLowering.cpp            | 1851 +++++++
 lib/Target/AMDGPU/AMDILISelLowering.h              |  203 +
 lib/Target/AMDGPU/AMDILInstrInfo.cpp               |  509 ++
 lib/Target/AMDGPU/AMDILInstrInfo.h                 |  161 +
 lib/Target/AMDGPU/AMDILInstrInfo.td                |  108 +
 lib/Target/AMDGPU/AMDILInstructions.td             |  143 +
 lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp           |  171 +
 lib/Target/AMDGPU/AMDILIntrinsicInfo.h             |   49 +
 lib/Target/AMDGPU/AMDILIntrinsics.td               |  705 +++
 lib/Target/AMDGPU/AMDILMultiClass.td               |   95 +
 lib/Target/AMDGPU/AMDILNIDevice.cpp                |   71 +
 lib/Target/AMDGPU/AMDILNIDevice.h                  |   59 +
 lib/Target/AMDGPU/AMDILNodes.td                    |   47 +
 lib/Target/AMDGPU/AMDILOperands.td                 |   32 +
 lib/Target/AMDGPU/AMDILPatterns.td                 |  504 ++
 lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp       | 1258 +++++
 lib/Target/AMDGPU/AMDILProfiles.td                 |  174 +
 lib/Target/AMDGPU/AMDILRegisterInfo.cpp            |  162 +
 lib/Target/AMDGPU/AMDILRegisterInfo.h              |   95 +
 lib/Target/AMDGPU/AMDILRegisterInfo.td             |  110 +
 lib/Target/AMDGPU/AMDILSIDevice.cpp                |   49 +
 lib/Target/AMDGPU/AMDILSIDevice.h                  |   45 +
 lib/Target/AMDGPU/AMDILSubtarget.cpp               |  178 +
 lib/Target/AMDGPU/AMDILSubtarget.h                 |   77 +
 lib/Target/AMDGPU/AMDILTokenDesc.td                |  120 +
 lib/Target/AMDGPU/AMDILUtilityFunctions.h          |   75 +
 lib/Target/AMDGPU/AMDILVersion.td                  |   58 +
 lib/Target/AMDGPU/CMakeLists.txt                   |   51 +
 lib/Target/AMDGPU/LLVMBuild.txt                    |   32 +
 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp |  105 +
 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h   |   30 +
 .../AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp     |   61 +
 .../AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h       |   35 +
 lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt      |    7 +
 lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt       |   23 +
 lib/Target/AMDGPU/MCTargetDesc/Makefile            |   16 +
 lib/Target/AMDGPU/Makefile                         |   23 +
 lib/Target/AMDGPU/Processors.td                    |   28 +
 lib/Target/AMDGPU/R600CodeEmitter.cpp              |  660 +++
 lib/Target/AMDGPU/R600GenRegisterInfo.pl           |  189 +
 lib/Target/AMDGPU/R600HwRegInfo.include            | 1848 +++++++
 lib/Target/AMDGPU/R600ISelLowering.cpp             |  258 +
 lib/Target/AMDGPU/R600ISelLowering.h               |   44 +
 lib/Target/AMDGPU/R600InstrInfo.cpp                |   94 +
 lib/Target/AMDGPU/R600InstrInfo.h                  |   71 +
 lib/Target/AMDGPU/R600Instructions.td              | 1159 +++++
 lib/Target/AMDGPU/R600Intrinsics.td                |   16 +
 lib/Target/AMDGPU/R600KernelParameters.cpp         |  546 ++
 lib/Target/AMDGPU/R600MachineFunctionInfo.cpp      |   16 +
 lib/Target/AMDGPU/R600MachineFunctionInfo.h        |   33 +
 lib/Target/AMDGPU/R600RegisterInfo.cpp             |   88 +
 lib/Target/AMDGPU/R600RegisterInfo.h               |   54 +
 lib/Target/AMDGPU/R600RegisterInfo.td              | 5271 ++++++++++++++++++++
 lib/Target/AMDGPU/R600Schedule.td                  |   36 +
 lib/Target/AMDGPU/SIAssignInterpRegs.cpp           |  117 +
 lib/Target/AMDGPU/SICodeEmitter.cpp                |  321 ++
 lib/Target/AMDGPU/SIGenRegisterInfo.pl             |  270 +
 lib/Target/AMDGPU/SIISelLowering.cpp               |  195 +
 lib/Target/AMDGPU/SIISelLowering.h                 |   48 +
 lib/Target/AMDGPU/SIInstrFormats.td                |  128 +
 lib/Target/AMDGPU/SIInstrInfo.cpp                  |  104 +
 lib/Target/AMDGPU/SIInstrInfo.h                    |   90 +
 lib/Target/AMDGPU/SIInstrInfo.td                   |  477 ++
 lib/Target/AMDGPU/SIInstructions.td                |  964 ++++
 lib/Target/AMDGPU/SIIntrinsics.td                  |   35 +
 lib/Target/AMDGPU/SIMachineFunctionInfo.cpp        |   18 +
 lib/Target/AMDGPU/SIMachineFunctionInfo.h          |   37 +
 lib/Target/AMDGPU/SIRegisterInfo.cpp               |   51 +
 lib/Target/AMDGPU/SIRegisterInfo.h                 |   47 +
 lib/Target/AMDGPU/SIRegisterInfo.td                |  886 ++++
 lib/Target/AMDGPU/SISchedule.td                    |   15 +
 lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp  |   26 +
 lib/Target/AMDGPU/TargetInfo/CMakeLists.txt        |    7 +
 lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt         |   23 +
 lib/Target/AMDGPU/TargetInfo/Makefile              |   15 +
 lib/Target/AMDIL/CMakeLists.txt                    |   61 +
 lib/Target/AMDIL/LLVMBuild.txt                     |   32 +
 114 files changed, 29199 insertions(+), 0 deletions(-)
 create mode 100644 lib/Target/AMDGPU/AMDGPU.h
 create mode 100644 lib/Target/AMDGPU/AMDGPU.td
 create mode 100644 lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPUISelLowering.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPUISelLowering.h
 create mode 100644 lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPUInstrInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDGPUInstrInfo.td
 create mode 100644 lib/Target/AMDGPU/AMDGPUInstructions.td
 create mode 100644 lib/Target/AMDGPU/AMDGPUIntrinsics.td
 create mode 100644 lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPURegisterInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDGPURegisterInfo.td
 create mode 100644 lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPUTargetMachine.h
 create mode 100644 lib/Target/AMDGPU/AMDGPUUtil.cpp
 create mode 100644 lib/Target/AMDGPU/AMDGPUUtil.h
 create mode 100644 lib/Target/AMDGPU/AMDIL.h
 create mode 100644 lib/Target/AMDGPU/AMDIL7XXDevice.cpp
 create mode 100644 lib/Target/AMDGPU/AMDIL7XXDevice.h
 create mode 100644 lib/Target/AMDGPU/AMDILAlgorithms.tpp
 create mode 100644 lib/Target/AMDGPU/AMDILBase.td
 create mode 100644 lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILCallingConv.td
 create mode 100644 lib/Target/AMDGPU/AMDILCodeEmitter.h
 create mode 100644 lib/Target/AMDGPU/AMDILDevice.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILDevice.h
 create mode 100644 lib/Target/AMDGPU/AMDILDeviceInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILDeviceInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDILDevices.h
 create mode 100644 lib/Target/AMDGPU/AMDILEnumeratedTypes.td
 create mode 100644 lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILEvergreenDevice.h
 create mode 100644 lib/Target/AMDGPU/AMDILFormats.td
 create mode 100644 lib/Target/AMDGPU/AMDILFrameLowering.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILFrameLowering.h
 create mode 100644 lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILISelLowering.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILISelLowering.h
 create mode 100644 lib/Target/AMDGPU/AMDILInstrInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILInstrInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDILInstrInfo.td
 create mode 100644 lib/Target/AMDGPU/AMDILInstructions.td
 create mode 100644 lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILIntrinsicInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDILIntrinsics.td
 create mode 100644 lib/Target/AMDGPU/AMDILMultiClass.td
 create mode 100644 lib/Target/AMDGPU/AMDILNIDevice.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILNIDevice.h
 create mode 100644 lib/Target/AMDGPU/AMDILNodes.td
 create mode 100644 lib/Target/AMDGPU/AMDILOperands.td
 create mode 100644 lib/Target/AMDGPU/AMDILPatterns.td
 create mode 100644 lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILProfiles.td
 create mode 100644 lib/Target/AMDGPU/AMDILRegisterInfo.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILRegisterInfo.h
 create mode 100644 lib/Target/AMDGPU/AMDILRegisterInfo.td
 create mode 100644 lib/Target/AMDGPU/AMDILSIDevice.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILSIDevice.h
 create mode 100644 lib/Target/AMDGPU/AMDILSubtarget.cpp
 create mode 100644 lib/Target/AMDGPU/AMDILSubtarget.h
 create mode 100644 lib/Target/AMDGPU/AMDILTokenDesc.td
 create mode 100644 lib/Target/AMDGPU/AMDILUtilityFunctions.h
 create mode 100644 lib/Target/AMDGPU/AMDILVersion.td
 create mode 100644 lib/Target/AMDGPU/CMakeLists.txt
 create mode 100644 lib/Target/AMDGPU/LLVMBuild.txt
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
 create mode 100644 lib/Target/AMDGPU/MCTargetDesc/Makefile
 create mode 100644 lib/Target/AMDGPU/Makefile
 create mode 100644 lib/Target/AMDGPU/Processors.td
 create mode 100644 lib/Target/AMDGPU/R600CodeEmitter.cpp
 create mode 100644 lib/Target/AMDGPU/R600GenRegisterInfo.pl
 create mode 100644 lib/Target/AMDGPU/R600HwRegInfo.include
 create mode 100644 lib/Target/AMDGPU/R600ISelLowering.cpp
 create mode 100644 lib/Target/AMDGPU/R600ISelLowering.h
 create mode 100644 lib/Target/AMDGPU/R600InstrInfo.cpp
 create mode 100644 lib/Target/AMDGPU/R600InstrInfo.h
 create mode 100644 lib/Target/AMDGPU/R600Instructions.td
 create mode 100644 lib/Target/AMDGPU/R600Intrinsics.td
 create mode 100644 lib/Target/AMDGPU/R600KernelParameters.cpp
 create mode 100644 lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
 create mode 100644 lib/Target/AMDGPU/R600MachineFunctionInfo.h
 create mode 100644 lib/Target/AMDGPU/R600RegisterInfo.cpp
 create mode 100644 lib/Target/AMDGPU/R600RegisterInfo.h
 create mode 100644 lib/Target/AMDGPU/R600RegisterInfo.td
 create mode 100644 lib/Target/AMDGPU/R600Schedule.td
 create mode 100644 lib/Target/AMDGPU/SIAssignInterpRegs.cpp
 create mode 100644 lib/Target/AMDGPU/SICodeEmitter.cpp
 create mode 100644 lib/Target/AMDGPU/SIGenRegisterInfo.pl
 create mode 100644 lib/Target/AMDGPU/SIISelLowering.cpp
 create mode 100644 lib/Target/AMDGPU/SIISelLowering.h
 create mode 100644 lib/Target/AMDGPU/SIInstrFormats.td
 create mode 100644 lib/Target/AMDGPU/SIInstrInfo.cpp
 create mode 100644 lib/Target/AMDGPU/SIInstrInfo.h
 create mode 100644 lib/Target/AMDGPU/SIInstrInfo.td
 create mode 100644 lib/Target/AMDGPU/SIInstructions.td
 create mode 100644 lib/Target/AMDGPU/SIIntrinsics.td
 create mode 100644 lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
 create mode 100644 lib/Target/AMDGPU/SIMachineFunctionInfo.h
 create mode 100644 lib/Target/AMDGPU/SIRegisterInfo.cpp
 create mode 100644 lib/Target/AMDGPU/SIRegisterInfo.h
 create mode 100644 lib/Target/AMDGPU/SIRegisterInfo.td
 create mode 100644 lib/Target/AMDGPU/SISchedule.td
 create mode 100644 lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
 create mode 100644 lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
 create mode 100644 lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
 create mode 100644 lib/Target/AMDGPU/TargetInfo/Makefile
 create mode 100644 lib/Target/AMDIL/CMakeLists.txt
 create mode 100644 lib/Target/AMDIL/LLVMBuild.txt

diff --git a/lib/Target/AMDGPU/AMDGPU.h b/lib/Target/AMDGPU/AMDGPU.h
new file mode 100644
index 0000000..191f495
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPU.h
@@ -0,0 +1,35 @@
+//===-- AMDGPU.h - MachineFunction passes hw codegen --------------*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPU_H
+#define AMDGPU_H
+
+#include "AMDGPUTargetMachine.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class FunctionPass;
+class AMDGPUTargetMachine;
+
+// R600 Passes
+FunctionPass* createR600KernelParametersPass(const TargetData* TD);
+FunctionPass *createR600CodeEmitterPass(formatted_raw_ostream &OS);
+
+// SI Passes
+FunctionPass *createSIAssignInterpRegsPass(TargetMachine &tm);
+FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
+
+// Passes common to R600 and SI
+FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm);
+
+} // End namespace llvm
+
+#endif // AMDGPU_H
diff --git a/lib/Target/AMDGPU/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
new file mode 100644
index 0000000..1bb5fb9
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPU.td
@@ -0,0 +1,21 @@
+//===-- AMDGPU.td - AMDGPU Tablegen files --*- tablegen -*-----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+// Include AMDIL TD files
+include "AMDILBase.td"
+include "AMDILVersion.td"
+
+// Include AMDGPU TD files
+include "R600Schedule.td"
+include "SISchedule.td"
+include "Processors.td"
+include "AMDGPUInstrInfo.td"
+include "AMDGPUIntrinsics.td"
+include "AMDGPURegisterInfo.td"
+include "AMDGPUInstructions.td"
diff --git a/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp b/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
new file mode 100644
index 0000000..b876a66
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUConvertToISA.cpp
@@ -0,0 +1,67 @@
+//===-- AMDGPUConvertToISA.cpp - Lower AMDIL to HW ISA --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers AMDIL machine instructions to the appropriate hardware
+// instructions. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+using namespace llvm;
+
+namespace {
+
+class AMDGPUConvertToISAPass : public MachineFunctionPass {
+
+private:
+  static char ID;
+  TargetMachine &TM;
+
+public:
+  AMDGPUConvertToISAPass(TargetMachine &tm) :
+    MachineFunctionPass(ID), TM(tm) { }
+
+  virtual bool runOnMachineFunction(MachineFunction &MF);
+
+  virtual const char *getPassName() const {return "AMDGPU Convert to ISA";}
+
+};
+
+} // End anonymous namespace
+
+char AMDGPUConvertToISAPass::ID = 0;
+
+FunctionPass *llvm::createAMDGPUConvertToISAPass(TargetMachine &tm) {
+  return new AMDGPUConvertToISAPass(tm);
+}
+
+bool AMDGPUConvertToISAPass::runOnMachineFunction(MachineFunction &MF)
+{
+  const AMDGPUInstrInfo * TII =
+                      static_cast<const AMDGPUInstrInfo*>(TM.getInstrInfo());
+
+  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+                                                  BB != BB_E; ++BB) {
+    MachineBasicBlock &MBB = *BB;
+    for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
+         I != MBB.end(); I = Next, Next = llvm::next(I) ) {
+      MachineInstr &MI = *I;
+      MachineInstr * newInstr = TII->convertToISA(MI, MF, MBB.findDebugLoc(I));
+      if (!newInstr) {
+        continue;
+      }
+      MBB.insert(I, newInstr);
+      MI.eraseFromParent();
+    }
+  }
+  return false;
+}
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
new file mode 100644
index 0000000..9d076bd
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -0,0 +1,395 @@
+//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the parent TargetLowering class for hardware code gen targets.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUISelLowering.h"
+#include "AMDILIntrinsicInfo.h"
+#include "AMDGPUUtil.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
+  AMDILTargetLowering(TM)
+{
+  // We need to custom lower some of the intrinsics
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+
+  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
+  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+
+  // Library functions.  These default to Expand, but we have instructions
+  // for them.
+  setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
+  setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
+  setOperationAction(ISD::FRINT,  MVT::f32, Legal);
+
+  setOperationAction(ISD::UDIV, MVT::i32, Custom);
+  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+}
+
+SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
+    const
+{
+  switch (Op.getOpcode()) {
+  default: return AMDILTargetLowering::LowerOperation(Op, DAG);
+  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+  case ISD::UDIV:
+    return DAG.getNode(ISD::UDIVREM, Op.getDebugLoc(), Op.getValueType(),
+                       Op.getOperand(0), Op.getOperand(1)).getValue(0);
+  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
+  }
+}
+
+SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
+    SelectionDAG &DAG) const
+{
+  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  DebugLoc DL = Op.getDebugLoc();
+  EVT VT = Op.getValueType();
+
+  switch (IntrinsicID) {
+    default: return Op;
+    case AMDGPUIntrinsic::AMDIL_abs:
+      return LowerIntrinsicIABS(Op, DAG);
+    case AMDGPUIntrinsic::AMDIL_exp:
+      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
+    case AMDGPUIntrinsic::AMDIL_fabs:
+      return DAG.getNode(ISD::FABS, DL, VT, Op.getOperand(1));
+    case AMDGPUIntrinsic::AMDGPU_lrp:
+      return LowerIntrinsicLRP(Op, DAG);
+    case AMDGPUIntrinsic::AMDIL_fraction:
+      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
+    case AMDGPUIntrinsic::AMDIL_mad:
+      return DAG.getNode(AMDILISD::MAD, DL, VT, Op.getOperand(1),
+                              Op.getOperand(2), Op.getOperand(3));
+    case AMDGPUIntrinsic::AMDIL_max:
+      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDGPU_imax:
+      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDGPU_umax:
+      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDIL_min:
+      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDGPU_imin:
+      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDGPU_umin:
+      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
+                                                  Op.getOperand(2));
+    case AMDGPUIntrinsic::AMDIL_round_nearest:
+      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
+    case AMDGPUIntrinsic::AMDIL_round_posinf:
+      return DAG.getNode(ISD::FCEIL, DL, VT, Op.getOperand(1));
+  }
+}
+
+/// IABS(a) = SMAX(sub(0, a), a)
+SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
+    SelectionDAG &DAG) const
+{
+
+  DebugLoc DL = Op.getDebugLoc();
+  EVT VT = Op.getValueType();
+  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
+                                              Op.getOperand(1));
+
+  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
+}
+
+/// Linear Interpolation
+/// LRP(a, b, c) = muladd(a,  b, (1 - a) * c)
+SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
+    SelectionDAG &DAG) const
+{
+  DebugLoc DL = Op.getDebugLoc();
+  EVT VT = Op.getValueType();
+  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
+                                DAG.getConstantFP(1.0f, MVT::f32),
+                                Op.getOperand(1));
+  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
+                                                    Op.getOperand(3));
+  return DAG.getNode(AMDILISD::MAD, DL, VT, Op.getOperand(1),
+                                               Op.getOperand(2),
+                                               OneSubAC);
+}
+
+SDValue AMDGPUTargetLowering::LowerSELECT_CC(SDValue Op,
+    SelectionDAG &DAG) const
+{
+  DebugLoc DL = Op.getDebugLoc();
+  EVT VT = Op.getValueType();
+
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  SDValue True = Op.getOperand(2);
+  SDValue False = Op.getOperand(3);
+  SDValue CC = Op.getOperand(4);
+  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
+  SDValue Temp;
+
+  // LHS and RHS are guaranteed to be the same value type
+  EVT CompareVT = LHS.getValueType();
+
+  // We need all the operands of SELECT_CC to have the same value type, so if
+  // necessary we need to convert LHS and RHS to be the same type as True and
+  // False.  True and False are guaranteed to have the same type as this
+  // SELECT_CC node.
+
+  if (CompareVT !=  VT) {
+    ISD::NodeType ConversionOp = ISD::DELETED_NODE;
+    if (VT == MVT::f32 && CompareVT == MVT::i32) {
+      if (isUnsignedIntSetCC(CCOpcode)) {
+        ConversionOp = ISD::UINT_TO_FP;
+      } else {
+        ConversionOp = ISD::SINT_TO_FP;
+      }
+    } else if (VT == MVT::i32 && CompareVT == MVT::f32) {
+      ConversionOp = ISD::FP_TO_SINT;
+    } else {
+      // I don't think there will be any other type pairings.
+      assert(!"Unhandled operand type parings in SELECT_CC");
+    }
+    // XXX Check the value of LHS and RHS and avoid creating sequences like
+    // (FTOI (ITOF))
+    LHS = DAG.getNode(ConversionOp, DL, VT, LHS);
+    RHS = DAG.getNode(ConversionOp, DL, VT, RHS);
+  }
+
+  // If True is a hardware TRUE value and False is a hardware FALSE value or
+  // vice-versa we can handle this with a native instruction (SET* instructions).
+  if ((isHWTrueValue(True) && isHWFalseValue(False))) {
+    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
+  }
+
+  // XXX If True is a hardware FALSE value and False is a hardware TRUE value,
+  // we can handle this with a native instruction, but we need to swap true
+  // and false and change the conditional.
+  if (isHWTrueValue(False) && isHWFalseValue(True)) {
+  }
+
+  // XXX Check if we can lower this to a SELECT or if it is supported by a native
+  // operation. (The code below does this but we don't have the Instruction
+  // selection patterns to do this yet.)
+#if 0
+  if (isZero(LHS) || isZero(RHS)) {
+    SDValue Cond = (isZero(LHS) ? RHS : LHS);
+    bool SwapTF = false;
+    switch (CCOpcode) {
+    case ISD::SETOEQ:
+    case ISD::SETUEQ:
+    case ISD::SETEQ:
+      SwapTF = true;
+      // Fall through
+    case ISD::SETONE:
+    case ISD::SETUNE:
+    case ISD::SETNE:
+      // We can lower to select
+      if (SwapTF) {
+        Temp = True;
+        True = False;
+        False = Temp;
+      }
+      // CNDE
+      return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
+    default:
+      // Supported by a native operation (CNDGE, CNDGT)
+      return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
+    }
+  }
+#endif
+
+  // If we make it this far it means we have no native instructions to handle
+  // this SELECT_CC, so we must lower it.
+  SDValue HWTrue, HWFalse;
+
+  if (VT == MVT::f32) {
+    HWTrue = DAG.getConstantFP(1.0f, VT);
+    HWFalse = DAG.getConstantFP(0.0f, VT);
+  } else if (VT == MVT::i32) {
+    HWTrue = DAG.getConstant(-1, VT);
+    HWFalse = DAG.getConstant(0, VT);
+  }
+  else {
+    assert(!"Unhandled value type in LowerSELECT_CC");
+  }
+
+  // Lower this unsupported SELECT_CC into a combination of two supported
+  // SELECT_CC operations.
+  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, HWTrue, HWFalse, CC);
+
+  return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
+}
+
+
+SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
+    SelectionDAG &DAG) const
+{
+  DebugLoc DL = Op.getDebugLoc();
+  EVT VT = Op.getValueType();
+
+  SDValue Num = Op.getOperand(0);
+  SDValue Den = Op.getOperand(1);
+
+  SmallVector<SDValue, 8> Results;
+
+  // RCP =  URECIP(Den) = 2^32 / Den + e
+  // e is rounding error.
+  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
+
+  // RCP_LO = umulo(RCP, Den)
+  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);
+
+  // RCP_HI = mulhu(RCP, Den)
+  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
+
+  // NEG_RCP_LO = -RCP_LO
+  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
+                                                     RCP_LO);
+
+  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
+  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
+                                           NEG_RCP_LO, RCP_LO,
+                                           ISD::SETEQ);
+  // Calculate the rounding error from the URECIP instruction
+  // E = mulhu(ABS_RCP_LO, RCP)
+  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
+
+  // RCP_A_E = RCP + E
+  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
+
+  // RCP_S_E = RCP - E
+  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
+
+  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
+  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
+                                     RCP_A_E, RCP_S_E,
+                                     ISD::SETEQ);
+  // Quotient = mulhu(Tmp0, Num)
+  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
+
+  // Num_S_Remainder = Quotient * Den
+  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);
+
+  // Remainder = Num - Num_S_Remainder
+  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
+
+  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
+  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
+                                                 DAG.getConstant(-1, VT),
+                                                 DAG.getConstant(0, VT),
+                                                 ISD::SETGE);
+  // Remainder_GE_Zero = (Remainder >= 0 ? -1 : 0)
+  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Remainder,
+                                                  DAG.getConstant(0, VT),
+                                                  DAG.getConstant(-1, VT),
+                                                  DAG.getConstant(0, VT),
+                                                  ISD::SETGE);
+  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
+  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
+                                               Remainder_GE_Zero);
+
+  // Calculate Division result:
+
+  // Quotient_A_One = Quotient + 1
+  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
+                                                         DAG.getConstant(1, VT));
+
+  // Quotient_S_One = Quotient - 1
+  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
+                                                         DAG.getConstant(1, VT));
+
+  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
+  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
+                                     Quotient, Quotient_A_One, ISD::SETEQ);
+
+  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
+  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
+                            Quotient_S_One, Div, ISD::SETEQ);
+
+  // Calculate Rem result:
+
+  // Remainder_S_Den = Remainder - Den
+  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
+
+  // Remainder_A_Den = Remainder + Den
+  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
+
+  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
+  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
+                                    Remainder, Remainder_S_Den, ISD::SETEQ);
+
+  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
+  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
+                            Remainder_A_Den, Rem, ISD::SETEQ);
+
+  DAG.ReplaceAllUsesWith(Op.getValue(0).getNode(), &Div);
+  DAG.ReplaceAllUsesWith(Op.getValue(1).getNode(), &Rem);
+
+  return Op;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+//===----------------------------------------------------------------------===//
+
+bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const
+{
+  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+    return CFP->isExactlyValue(1.0);
+  }
+  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+    return C->isAllOnesValue();
+  }
+  return false;
+}
+
+bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const
+{
+  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+    return CFP->getValueAPF().isZero();
+  }
+  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+    return C->isNullValue();
+  }
+  return false;
+}
+
+void AMDGPUTargetLowering::addLiveIn(MachineInstr * MI,
+    MachineFunction * MF, MachineRegisterInfo & MRI,
+    const TargetInstrInfo * TII, unsigned reg) const
+{
+  AMDGPU::utilAddLiveIn(MF, MRI, TII, reg, MI->getOperand(0).getReg());
+}
+
+#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
+
+const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const
+{
+  switch (Opcode) {
+  default: return AMDILTargetLowering::getTargetNodeName(Opcode);
+
+  NODE_NAME_CASE(FRACT)
+  NODE_NAME_CASE(FMAX)
+  NODE_NAME_CASE(SMAX)
+  NODE_NAME_CASE(UMAX)
+  NODE_NAME_CASE(FMIN)
+  NODE_NAME_CASE(SMIN)
+  NODE_NAME_CASE(UMIN)
+  NODE_NAME_CASE(URECIP)
+  }
+}
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.h b/lib/Target/AMDGPU/AMDGPUISelLowering.h
new file mode 100644
index 0000000..9aa602b
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -0,0 +1,76 @@
+//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the interface definition of the TargetLowering class
+// that is common to all AMD GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPUISELLOWERING_H
+#define AMDGPUISELLOWERING_H
+
+#include "AMDILISelLowering.h"
+
+namespace llvm {
+
+class AMDGPUTargetLowering : public AMDILTargetLowering
+{
+private:
+  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+
+protected:
+
+  /// addLiveIn - This function adds reg to the live in list of the entry block
+  /// and emits a copy from reg to MI.getOperand(0).
+  ///
+  /// Some registers are loaded with values before the program
+  /// begins to execute.  The loading of these values is modeled with pseudo
+  /// instructions which are lowered using this function.
+  void addLiveIn(MachineInstr * MI, MachineFunction * MF,
+                 MachineRegisterInfo & MRI, const TargetInstrInfo * TII,
+                 unsigned reg) const;
+
+  /// isHWTrueValue - Op is the constant the hardware treats as true
+  /// (1.0f or an all-ones integer).
+  bool isHWTrueValue(SDValue Op) const;
+  /// isHWFalseValue - Op is the constant the hardware treats as false
+  /// (a floating-point zero or integer 0).
+  bool isHWFalseValue(SDValue Op) const;
+
+public:
+  AMDGPUTargetLowering(TargetMachine &TM);
+
+  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
+  virtual const char* getTargetNodeName(unsigned Opcode) const;
+
+};
+
+namespace AMDGPUISD
+{
+
+// AMDGPU-specific DAG node opcodes, numbered after the last AMDIL opcode so
+// the two ranges never collide.
+enum
+{
+  AMDGPU_FIRST = AMDILISD::LAST_ISD_NUMBER,
+  FRACT,   // out = a - floor(a)
+  FMAX,    // Floating-point max
+  SMAX,    // Signed integer max
+  UMAX,    // Unsigned integer max
+  FMIN,    // Floating-point min
+  SMIN,    // Signed integer min
+  UMIN,    // Unsigned integer min
+  URECIP,  // Unsigned reciprocal helper: (2^32 / a) + rounding error
+  LAST_AMDGPU_ISD_NUMBER
+};
+
+
+} // End namespace AMDGPUISD
+
+} // End namespace llvm
+
+#endif // AMDGPUISELLOWERING_H
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
new file mode 100644
index 0000000..d2bb4e1
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -0,0 +1,54 @@
+//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the TargetInstrInfo class that is
+// common to all AMD GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUInstrInfo.h"
+#include "AMDGPURegisterInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "AMDIL.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+// Construct the common GPU instruction info; forwards to the AMDIL base class
+// and keeps a reference to the target machine for use in convertToISA.
+AMDGPUInstrInfo::AMDGPUInstrInfo(AMDGPUTargetMachine &tm)
+  : AMDILInstrInfo(tm), TM(tm) { }
+
+/// convertToISA - Clone the AMDIL instruction MI into a new (unattached)
+/// MachineInstr with the same opcode and operands, re-classing every virtual
+/// register it defines to a register class the target ISA supports.
+MachineInstr * AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
+    DebugLoc DL) const
+{
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const AMDGPURegisterInfo &RI = getRegisterInfo();
+
+  // Build a fresh instruction carrying the same opcode.
+  MachineInstrBuilder NewInstr =
+      BuildMI(MF, DL, TM.getInstrInfo()->get(MI.getOpcode()));
+
+  for (unsigned OpIdx = 0, NumOps = MI.getNumOperands(); OpIdx != NumOps;
+       ++OpIdx) {
+    MachineOperand &MO = MI.getOperand(OpIdx);
+    // Any virtual register defined by this instruction moves to the
+    // equivalent ISA register class.
+    if (MO.isReg() && MO.isDef() &&
+        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+      const TargetRegisterClass *ISAClass =
+          RI.getISARegClass(MRI.getRegClass(MO.getReg()));
+      assert(ISAClass);
+      MRI.setRegClass(MO.getReg(), ISAClass);
+    }
+    // Copy the operand onto the new instruction unchanged.
+    NewInstr.addOperand(MO);
+  }
+
+  return NewInstr;
+}
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
new file mode 100644
index 0000000..e6b79c8
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -0,0 +1,48 @@
+//===-- AMDGPUInstrInfo.h - AMDGPU Instruction Information ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the definition of a TargetInstrInfo class that is common
+// to all AMD GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPUINSTRUCTIONINFO_H_
+#define AMDGPUINSTRUCTIONINFO_H_
+
+#include "AMDGPURegisterInfo.h"
+#include "AMDILInstrInfo.h"
+
+#include <map>
+
+namespace llvm {
+
+class AMDGPUTargetMachine;
+class MachineFunction;
+class MachineInstr;
+class MachineInstrBuilder;
+
+// AMDGPUInstrInfo - Instruction information shared by the R600 and SI
+// backends; concrete subclasses supply the target register info.
+class AMDGPUInstrInfo : public AMDILInstrInfo {
+private:
+  // Back-reference to the owning target machine (used by convertToISA).
+  AMDGPUTargetMachine & TM;
+
+public:
+  explicit AMDGPUInstrInfo(AMDGPUTargetMachine &tm);
+
+  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
+
+  /// convertToISA - Convert the AMDIL MachineInstr to a supported ISA
+  /// MachineInstr
+  virtual MachineInstr * convertToISA(MachineInstr & MI, MachineFunction &MF,
+    DebugLoc DL) const;
+
+};
+
+} // End llvm namespace
+
+#endif // AMDGPUINSTRINFO_H_
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
new file mode 100644
index 0000000..5e44ef9
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -0,0 +1,55 @@
+//===-- AMDGPUInstrInfo.td - AMDGPU DAG nodes --------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains DAG node definitions for the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// AMDGPU DAG Nodes
+//
+
+// out = a - floor(a)
+def AMDGPUfract : SDNode<"AMDGPUISD::FRACT", SDTFPUnaryOp>;
+
+// out = max(a, b) a and b are floats
+def AMDGPUfmax : SDNode<"AMDGPUISD::FMAX", SDTFPBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// out = max(a, b) a and b are signed ints
+def AMDGPUsmax : SDNode<"AMDGPUISD::SMAX", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// out = max(a, b) a and b are unsigned ints
+def AMDGPUumax : SDNode<"AMDGPUISD::UMAX", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// out = min(a, b) a and b are floats
+def AMDGPUfmin : SDNode<"AMDGPUISD::FMIN", SDTFPBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// out = min(a, b) a and b are signed ints
+def AMDGPUsmin : SDNode<"AMDGPUISD::SMIN", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// out = min(a, b) a and b are unsigned ints
+def AMDGPUumin : SDNode<"AMDGPUISD::UMIN", SDTIntBinOp,
+  [SDNPCommutative, SDNPAssociative]
+>;
+
+// urecip - This operation is a helper for integer division, it returns the
+// result of 1 / a as a fractional unsigned integer.
+// out = (2^32 / a) + e
+// e is rounding error
+def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>;
diff --git a/lib/Target/AMDGPU/AMDGPUInstructions.td b/lib/Target/AMDGPU/AMDGPUInstructions.td
new file mode 100644
index 0000000..9ec9c4d
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -0,0 +1,122 @@
+//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction defs that are common to all hw codegen
+// targets.
+//
+//===----------------------------------------------------------------------===//
+
+// Base class for all AMDGPU instructions.  The original AMDIL opcode and the
+// hardware generation are packed into the target-specific TSFlags bits.
+class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
+  field bits<16> AMDILOp = 0;
+  field bits<3> Gen = 0;
+
+  let Namespace = "AMDIL";
+  let OutOperandList = outs;
+  let InOperandList = ins;
+  let AsmString = asm;
+  let Pattern = pattern;
+  let TSFlags{42-40} = Gen;      // Hardware generation selector
+  let TSFlags{63-48} = AMDILOp;  // Original AMDIL opcode
+}
+
+// Shader instruction base class; Inst defaults to an all-ones (invalid)
+// encoding which subclasses override with a real one.
+class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
+    : AMDGPUInst<outs, ins, asm, pattern> {
+
+  field bits<32> Inst = 0xffffffff;
+
+}
+
+// Handy floating-point constants, stored as IEEE-754 single-precision bit
+// patterns.
+class Constants {
+  int TWO_PI = 0x40c90fdb;      // 2 * pi
+  int PI = 0x40490fdb;          // pi
+  int TWO_PI_INV = 0x3e22f983;  // 1 / (2 * pi)
+}
+def CONST : Constants;
+
+// Matches a floating-point immediate equal to zero.
+def FP_ZERO : PatLeaf <
+  (fpimm),
+  [{return N->getValueAPF().isZero();}]
+>;
+
+// Matches the floating-point immediate 1.0.
+def FP_ONE : PatLeaf <
+  (fpimm),
+  [{return N->isExactlyValue(1.0);}]
+>;
+
+let isCodeGenOnly = 1, isPseudo = 1, usesCustomInserter = 1  in {
+
+// Clamp the source operand to the range [0.0, 1.0].
+class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
+  (outs rc:$dst),
+  (ins rc:$src0),
+  "CLAMP $dst, $src0",
+  [(set rc:$dst, (int_AMDIL_clamp rc:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
+>;
+
+// Floating-point absolute value.
+class FABS <RegisterClass rc> : AMDGPUShaderInst <
+  (outs rc:$dst),
+  (ins rc:$src0),
+  "FABS $dst, $src0",
+  [(set rc:$dst, (fabs rc:$src0))]
+>;
+
+// Floating-point negation.
+class FNEG <RegisterClass rc> : AMDGPUShaderInst <
+  (outs rc:$dst),
+  (ins rc:$src0),
+  "FNEG $dst, $src0",
+  [(set rc:$dst, (fneg rc:$src0))]
+>;
+
+} // End isCodeGenOnly = 1, isPseudo = 1, usesCustomInserter = 1
+
+/* Generic helper patterns for intrinsics */
+/* -------------------------------------- */
+
+// Lowers the pow intrinsic as pow(src0, src1) = exp(src1 * log(src0)),
+// using the target's IEEE log/exp and multiply instructions.
+class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul,
+                  RegisterClass rc> : Pat <
+  (int_AMDGPU_pow rc:$src0, rc:$src1),
+  (exp_ieee (mul rc:$src1, (log_ieee rc:$src0)))
+>;
+
+/* Other helper patterns */
+/* --------------------- */
+
+/* Extract element pattern: a vector_extract at a known constant index
+ * becomes a sub-register read. */
+class Extract_Element <ValueType sub_type, ValueType vec_type,
+                     RegisterClass vec_class, int sub_idx, 
+                     SubRegIndex sub_reg>: Pat<
+  (sub_type (vector_extract (vec_type vec_class:$src), sub_idx)),
+  (EXTRACT_SUBREG vec_class:$src, sub_reg)
+>;
+
+/* Insert element pattern: a vector_insert at a known constant index
+ * becomes a sub-register write. */
+class Insert_Element <ValueType elem_type, ValueType vec_type,
+                      RegisterClass elem_class, RegisterClass vec_class,
+                      int sub_idx, SubRegIndex sub_reg> : Pat <
+
+  (vec_type (vector_insert (vec_type vec_class:$vec),
+                           (elem_type elem_class:$elem), sub_idx)),
+  (INSERT_SUBREG vec_class:$vec, elem_class:$elem, sub_reg)
+>;
+
+// Vector Build pattern: build a vector from a scalar by inserting it into the
+// first (sel_x) lane of an undefined vector.
+class Vector_Build <ValueType vecType, RegisterClass elemClass> : Pat <
+  (IL_vbuild elemClass:$src),
+  (INSERT_SUBREG (vecType (IMPLICIT_DEF)), elemClass:$src, sel_x)
+>;
+
+// bitconvert pattern: bit-casts between same-sized types in the same register
+// class are free, so just reinterpret the register.
+class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
+  (dt (bitconvert (st rc:$src0))),
+  (dt rc:$src0)
+>;
+
+include "R600Instructions.td"
+
+include "SIInstrInfo.td"
+
diff --git a/lib/Target/AMDGPU/AMDGPUIntrinsics.td b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
new file mode 100644
index 0000000..78f072c
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
@@ -0,0 +1,64 @@
+//===-- AMDGPUIntrinsics.td - Common intrinsics  -*- tablegen -*-----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines intrinsics that are used by all hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "AMDGPU", isTarget = 1 in {
+
+  // Input/output and constant access.
+  def int_AMDGPU_load_const : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_load_imm : Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_reserve_reg : Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_store_output : Intrinsic<[], [llvm_float_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_swizzle : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  // Arithmetic, comparison, and transcendental operations.
+  def int_AMDGPU_arl : Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_cndlt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_cos : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_div : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_dp4 : Intrinsic<[llvm_float_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+  def int_AMDGPU_floor : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_kill : Intrinsic<[], [llvm_float_ty], []>;
+  def int_AMDGPU_kilp : Intrinsic<[], [], []>;
+  def int_AMDGPU_lrp : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_mul : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_pow : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_rcp : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_rsq : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_seq : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_sgt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_sge : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_sin : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_sle : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_sne : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_ssg : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_mullit : Intrinsic<[llvm_v4f32_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+  // Texture sampling, queries, and gradients.
+  def int_AMDGPU_tex : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_txb : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_txf : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_txq : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_txd : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_txl : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_trunc : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+  def int_AMDGPU_ddx : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_ddy : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+  // Integer min/max and cube-map helper.
+  def int_AMDGPU_imax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_imin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+  def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+}
+
+let TargetPrefix = "TGSI", isTarget = 1 in {
+
+  // NOTE(review): presumably the z component of the TGSI LIT computation —
+  // confirm against the Mesa/TGSI consumer.
+  def int_TGSI_lit_z : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],[]>;
+}
+
+include "SIIntrinsics.td"
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
new file mode 100644
index 0000000..ad48335
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
@@ -0,0 +1,24 @@
+//===-- AMDGPURegisterInfo.cpp - AMDGPU Register Information -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Parent TargetRegisterInfo class common to all hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPURegisterInfo.h"
+#include "AMDGPUTargetMachine.h"
+
+using namespace llvm;
+
+// Construct the common GPU register info; forwards to the AMDIL base class
+// and stores references to the target machine and instruction info.
+AMDGPURegisterInfo::AMDGPURegisterInfo(AMDGPUTargetMachine &tm,
+    const TargetInstrInfo &tii)
+: AMDILRegisterInfo(tm, tii),
+  TM(tm),
+  TII(tii)
+  { }
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.h b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
new file mode 100644
index 0000000..5863807
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
@@ -0,0 +1,42 @@
+//===-- AMDGPURegisterInfo.h - AMDGPURegisterInfo Interface -*- C++ -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the TargetRegisterInfo interface that is implemented
+// by all hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPUREGISTERINFO_H_
+#define AMDGPUREGISTERINFO_H_
+
+#include "AMDILRegisterInfo.h"
+
+namespace llvm {
+
+class AMDGPUTargetMachine;
+class TargetInstrInfo;
+
+// AMDGPURegisterInfo - Register information shared by the R600 and SI
+// backends; concrete subclasses supply reserved registers and the AMDIL ->
+// ISA register class mapping.
+struct AMDGPURegisterInfo : public AMDILRegisterInfo
+{
+  AMDGPUTargetMachine &TM;   // Owning target machine
+  const TargetInstrInfo &TII; // Instruction info for this target
+
+  AMDGPURegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+
+  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+
+  /// getISARegClass - rc is an AMDIL reg class.  This function returns the
+  /// ISA reg class that is equivalent to the given AMDIL reg class.
+  virtual const TargetRegisterClass *
+    getISARegClass(const TargetRegisterClass * rc) const = 0;
+};
+
+} // End namespace llvm
+
+#endif // AMDGPUREGISTERINFO_H_
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.td b/lib/Target/AMDGPU/AMDGPURegisterInfo.td
new file mode 100644
index 0000000..1707903
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.td
@@ -0,0 +1,22 @@
+//===-- AMDGPURegisterInfo.td - AMDGPU register info -------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tablegen register definitions common to all hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+let Namespace = "AMDIL" in {
+  // Sub-register indices for the four components of a vector register.
+  def sel_x : SubRegIndex;
+  def sel_y : SubRegIndex;
+  def sel_z : SubRegIndex;
+  def sel_w : SubRegIndex;
+}
+
+include "R600RegisterInfo.td"
+include "SIRegisterInfo.td"
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
new file mode 100644
index 0000000..c6a2412
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -0,0 +1,159 @@
+//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The AMDGPU target machine contains all of the hardware specific information
+// needed to emit code for R600 and SI GPUs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUTargetMachine.h"
+#include "AMDGPU.h"
+#include "R600ISelLowering.h"
+#include "R600InstrInfo.h"
+#include "SIISelLowering.h"
+#include "SIInstrInfo.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/PassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+// Entry point invoked by LLVM's target registry machinery to register the
+// AMDGPU target machine implementation.
+extern "C" void LLVMInitializeAMDGPUTarget() {
+  // Register the target
+  RegisterTargetMachine<AMDGPUTargetMachine> X(TheAMDGPUTarget);
+}
+
+/// Construct the AMDGPU target machine and pick the R600 or SI instruction
+/// info and lowering implementations based on the device generation.
+AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
+                                         StringRef CPU, StringRef FS,
+                                         TargetOptions Options,
+                                         Reloc::Model RM, CodeModel::Model CM,
+                                         CodeGenOpt::Level OptLevel)
+  : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
+    Subtarget(TT, CPU, FS),
+    DataLayout(Subtarget.getDataLayout()),
+    FrameLowering(TargetFrameLowering::StackGrowsUp,
+                  Subtarget.device()->getStackAlignment(), 0),
+    IntrinsicInfo(this),
+    mDump(false)
+{
+  // TLInfo uses InstrInfo so it must be initialized after.
+  if (Subtarget.device()->getGeneration() <= AMDILDeviceInfo::HD6XXX) {
+    InstrInfo = new R600InstrInfo(*this);
+    TLInfo = new R600TargetLowering(*this);
+  } else {
+    InstrInfo = new SIInstrInfo(*this);
+    TLInfo = new SITargetLowering(*this);
+  }
+}
+
+AMDGPUTargetMachine::~AMDGPUTargetMachine()
+{
+  // InstrInfo and TLInfo are allocated with plain new in the constructor and
+  // were never freed; release them here to avoid leaking them.
+  delete TLInfo;
+  delete InstrInfo;
+}
+
+/// addPassesToEmitFile - Set up the codegen pipeline and append the
+/// device-specific code emitter (SI or R600) that writes to Out.
+bool AMDGPUTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
+                                              formatted_raw_ostream &Out,
+                                              CodeGenFileType FileType,
+                                              bool DisableVerify) {
+  // XXX: Hack here addPassesToEmitFile will fail, but this is Ok since we are
+  // only using it to access addPassesToGenerateCode()
+  bool fail = LLVMTargetMachine::addPassesToEmitFile(PM, Out, FileType,
+                                                     DisableVerify);
+  (void)fail; // Silence the unused-variable warning in NDEBUG builds.
+  assert(fail);
+
+  const AMDILSubtarget &STM = getSubtarget<AMDILSubtarget>();
+  std::string gpu = STM.getDeviceName();
+  if (gpu == "SI") {
+    PM.add(createSICodeEmitterPass(Out));
+  } else if (Subtarget.device()->getGeneration() <= AMDILDeviceInfo::HD6XXX) {
+    PM.add(createR600CodeEmitterPass(Out));
+  } else {
+    // No emitter exists for this device generation; this is a tool bug.
+    abort();
+    return true;
+  }
+  PM.add(createGCInfoDeleter());
+
+  return false;
+}
+
+namespace {
+// AMDGPUPassConfig - Declares which codegen passes run for the AMDGPU target
+// and at which points in the pipeline.
+class AMDGPUPassConfig : public TargetPassConfig {
+public:
+  AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
+    : TargetPassConfig(TM, PM) {}
+
+  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
+    return getTM<AMDGPUTargetMachine>();
+  }
+
+  virtual bool addPreISel();
+  virtual bool addInstSelector();
+  virtual bool addPreRegAlloc();
+  virtual bool addPostRegAlloc();
+  virtual bool addPreSched2();
+  virtual bool addPreEmitPass();
+};
+} // End of anonymous namespace
+
+// Create the pass configuration object describing this target's pipeline.
+TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
+  return new AMDGPUPassConfig(this, PM);
+}
+
+// Before instruction selection: on R600-class devices (HD6XXX and older),
+// lower kernel parameter accesses.
+bool AMDGPUPassConfig::addPreISel() {
+  const AMDILSubtarget &ST = TM->getSubtarget<AMDILSubtarget>();
+  if (ST.device()->getGeneration() <= AMDILDeviceInfo::HD6XXX) {
+    PM->add(createR600KernelParametersPass(
+                     getAMDGPUTargetMachine().getTargetData()));
+  }
+  return false;
+}
+
+// Run the AMDIL peephole optimizer, then select instructions via the AMDIL
+// instruction selector.
+bool AMDGPUPassConfig::addInstSelector() {
+  PM->add(createAMDILPeepholeOpt(*TM));
+  PM->add(createAMDILISelDag(getAMDGPUTargetMachine()));
+  return false;
+}
+
+// Before register allocation: assign interpolation registers on SI-class
+// devices, then convert AMDIL instructions to the target ISA.
+bool AMDGPUPassConfig::addPreRegAlloc() {
+  const AMDILSubtarget &ST = TM->getSubtarget<AMDILSubtarget>();
+
+  if (ST.device()->getGeneration() > AMDILDeviceInfo::HD6XXX) {
+    PM->add(createSIAssignInterpRegsPass(*TM));
+  }
+  PM->add(createAMDGPUConvertToISAPass(*TM));
+  return false;
+}
+
+// No target-specific passes run after register allocation.
+bool AMDGPUPassConfig::addPostRegAlloc() {
+  return false;
+}
+
+// No target-specific passes run before the second scheduling pass.
+bool AMDGPUPassConfig::addPreSched2() {
+  return false;
+}
+
+// Before emission: prepare and structurize the control-flow graph so the
+// emitters see structured control flow.
+bool AMDGPUPassConfig::addPreEmitPass() {
+  PM->add(createAMDILCFGPreparationPass(*TM));
+  PM->add(createAMDILCFGStructurizerPass(*TM));
+
+  return false;
+}
+
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
new file mode 100644
index 0000000..84a1ea3
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -0,0 +1,70 @@
+//===-- AMDGPUTargetMachine.h - AMDGPU TargetMachine Interface --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  The AMDGPU TargetMachine interface definition for hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPU_TARGET_MACHINE_H
+#define AMDGPU_TARGET_MACHINE_H
+
+#include "AMDGPUInstrInfo.h"
+#include "AMDILFrameLowering.h"
+#include "AMDILIntrinsicInfo.h"
+#include "AMDILSubtarget.h"
+#include "R600ISelLowering.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Target/TargetData.h"
+
+namespace llvm {
+
+MCAsmInfo* createMCAsmInfo(const Target &T, StringRef TT);
+
+class AMDGPUTargetMachine : public LLVMTargetMachine {
+
+  AMDILSubtarget Subtarget;
+  const TargetData DataLayout;
+  AMDILFrameLowering FrameLowering;
+  AMDILIntrinsicInfo IntrinsicInfo;
+  const AMDGPUInstrInfo * InstrInfo;  // Owned; allocated in the constructor
+  AMDGPUTargetLowering * TLInfo;      // Owned; allocated in the constructor
+  bool mDump;
+
+public:
+   // Parameter order is CPU before FS, matching the definition in
+   // AMDGPUTargetMachine.cpp (the previous declaration listed FS before CPU;
+   // both are StringRef so it compiled, but it documented the arguments
+   // backwards).
+   AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+                       StringRef FS,
+                       TargetOptions Options,
+                       Reloc::Model RM, CodeModel::Model CM,
+                       CodeGenOpt::Level OL);
+   ~AMDGPUTargetMachine();
+   virtual const AMDILFrameLowering* getFrameLowering() const {
+     return &FrameLowering;
+   }
+   virtual const AMDILIntrinsicInfo* getIntrinsicInfo() const {
+     return &IntrinsicInfo;
+   }
+   virtual const AMDGPUInstrInfo *getInstrInfo() const {return InstrInfo;}
+   virtual const AMDILSubtarget *getSubtargetImpl() const {return &Subtarget; }
+   virtual const AMDGPURegisterInfo *getRegisterInfo() const {
+      return &InstrInfo->getRegisterInfo();
+   }
+   virtual AMDGPUTargetLowering * getTargetLowering() const {
+      return TLInfo;
+   }
+   virtual const TargetData* getTargetData() const { return &DataLayout; }
+   virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+   virtual bool addPassesToEmitFile(PassManagerBase &PM,
+                                              formatted_raw_ostream &Out,
+                                              CodeGenFileType FileType,
+                                              bool DisableVerify);
+};
+
+} // End namespace llvm
+
+#endif // AMDGPU_TARGET_MACHINE_H
diff --git a/lib/Target/AMDGPU/AMDGPUUtil.cpp b/lib/Target/AMDGPU/AMDGPUUtil.cpp
new file mode 100644
index 0000000..0d30e00
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUUtil.cpp
@@ -0,0 +1,139 @@
+//===-- AMDGPUUtil.cpp - AMDGPU Utility functions -------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common utility functions used by hw codegen targets
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUUtil.h"
+#include "AMDGPURegisterInfo.h"
+#include "AMDIL.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+// Some instructions act as place holders to emulate operations that the GPU
+// hardware does automatically. This function can be used to check if
+// an opcode falls into this category.  Returns true for opcodes that emit no
+// real machine code.
+bool AMDGPU::isPlaceHolderOpcode(unsigned opcode)
+{
+  switch (opcode) {
+  default: return false;
+  case AMDIL::RETURN:
+  case AMDIL::LOAD_INPUT:
+  case AMDIL::LAST:
+  case AMDIL::MASK_WRITE:
+  case AMDIL::RESERVE_REG:
+    return true;
+  }
+}
+
+// isTransOp - Returns true for the listed COS/MUL_LIT/EXP/LOG opcodes.
+// NOTE(review): "trans" presumably refers to the R600 transcendental ALU
+// slot these opcodes execute in -- confirm against the hardware docs.
+bool AMDGPU::isTransOp(unsigned opcode)
+{
+  switch(opcode) {
+    default: return false;
+
+    case AMDIL::COS_r600:
+    case AMDIL::COS_eg:
+    case AMDIL::MULLIT:
+    case AMDIL::MUL_LIT_r600:
+    case AMDIL::MUL_LIT_eg:
+    case AMDIL::EXP_IEEE_r600:
+    case AMDIL::EXP_IEEE_eg:
+    case AMDIL::LOG_CLAMPED_r600:
+    case AMDIL::LOG_IEEE_r600:
+    case AMDIL::LOG_CLAMPED_eg:
+    case AMDIL::LOG_IEEE_eg:
+      return true;
+  }
+}
+
+// isTexOp - Returns true for texture load/sample/gradient/resinfo opcodes.
+bool AMDGPU::isTexOp(unsigned opcode)
+{
+  switch(opcode) {
+  default: return false;
+  case AMDIL::TEX_LD:
+  case AMDIL::TEX_GET_TEXTURE_RESINFO:
+  case AMDIL::TEX_SAMPLE:
+  case AMDIL::TEX_SAMPLE_C:
+  case AMDIL::TEX_SAMPLE_L:
+  case AMDIL::TEX_SAMPLE_C_L:
+  case AMDIL::TEX_SAMPLE_LB:
+  case AMDIL::TEX_SAMPLE_C_LB:
+  case AMDIL::TEX_SAMPLE_G:
+  case AMDIL::TEX_SAMPLE_C_G:
+  case AMDIL::TEX_GET_GRADIENTS_H:
+  case AMDIL::TEX_GET_GRADIENTS_V:
+  case AMDIL::TEX_SET_GRADIENTS_H:
+  case AMDIL::TEX_SET_GRADIENTS_V:
+    return true;
+  }
+}
+
+// isReductionOp - Returns true for the DOT4 opcodes, which reduce a vector
+// input to a scalar result.
+bool AMDGPU::isReductionOp(unsigned opcode)
+{
+  switch(opcode) {
+    default: return false;
+    case AMDIL::DOT4_r600:
+    case AMDIL::DOT4_eg:
+      return true;
+  }
+}
+
+// isCubeOp - Returns true for the CUBE opcodes (cube-map coordinate helper).
+bool AMDGPU::isCubeOp(unsigned opcode)
+{
+  switch(opcode) {
+    default: return false;
+    case AMDIL::CUBE_r600:
+    case AMDIL::CUBE_eg:
+      return true;
+  }
+}
+
+
+// isFCOp - Returns true for flow-control opcodes (BREAK/CONTINUE/IF/ELSE/
+// ENDIF/ENDLOOP/WHILELOOP variants).
+bool AMDGPU::isFCOp(unsigned opcode)
+{
+  switch(opcode) {
+  default: return false;
+  case AMDIL::BREAK_LOGICALZ_f32:
+  case AMDIL::BREAK_LOGICALNZ_i32:
+  case AMDIL::BREAK_LOGICALZ_i32:
+  case AMDIL::BREAK_LOGICALNZ_f32:
+  case AMDIL::CONTINUE_LOGICALNZ_f32:
+  case AMDIL::IF_LOGICALNZ_i32:
+  case AMDIL::IF_LOGICALZ_f32:
+  case AMDIL::ELSE:
+  case AMDIL::ENDIF:
+  case AMDIL::ENDLOOP:
+  case AMDIL::IF_LOGICALNZ_f32:
+  case AMDIL::WHILELOOP:
+    return true;
+  }
+}
+
+/// utilAddLiveIn - Make physReg available in virtReg at function entry.
+/// On the first request the physical register is recorded as a live-in of
+/// the entry block and copied into virtReg; later requests reuse the
+/// existing live-in virtual register by rewriting virtReg to it.
+void AMDGPU::utilAddLiveIn(llvm::MachineFunction * MF,
+                           llvm::MachineRegisterInfo & MRI,
+                           const llvm::TargetInstrInfo * TII,
+                           unsigned physReg, unsigned virtReg)
+{
+  if (MRI.isLiveIn(physReg)) {
+    // Already registered: alias virtReg to the existing live-in vreg.
+    MRI.replaceRegWith(virtReg, MRI.getLiveInVirtReg(physReg));
+  } else {
+    MRI.addLiveIn(physReg, virtReg);
+    MF->front().addLiveIn(physReg);
+    BuildMI(MF->front(), MF->front().begin(), DebugLoc(),
+            TII->get(TargetOpcode::COPY), virtReg).addReg(physReg);
+  }
+}
diff --git a/lib/Target/AMDGPU/AMDGPUUtil.h b/lib/Target/AMDGPU/AMDGPUUtil.h
new file mode 100644
index 0000000..633ea3b
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUUtil.h
@@ -0,0 +1,46 @@
+//===-- AMDGPUUtil.h - AMDGPU Utility function declarations -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declarations for utility functions common to all hw codegen targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPU_UTIL_H
+#define AMDGPU_UTIL_H
+
+namespace llvm {
+
+class MachineFunction;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+
+}
+
+namespace AMDGPU {
+
+// Opcode classification helpers; definitions live in AMDGPUUtil.cpp.
+bool isPlaceHolderOpcode(unsigned opcode);
+
+bool isTransOp(unsigned opcode);
+bool isTexOp(unsigned opcode);
+bool isReductionOp(unsigned opcode);
+bool isCubeOp(unsigned opcode);
+bool isFCOp(unsigned opcode);
+
+// XXX: Move these to AMDGPUInstrInfo.h
+// Operand modifier flag bits (exact carrier of these flags is defined by
+// the R600/SI code emitters -- see their uses before changing values).
+#define MO_FLAG_CLAMP (1 << 0)
+#define MO_FLAG_NEG   (1 << 1)
+#define MO_FLAG_ABS   (1 << 2)
+#define MO_FLAG_MASK  (1 << 3)
+
+// Register physReg as a function live-in bound to virtReg; see the
+// definition in AMDGPUUtil.cpp for the exact semantics.
+void utilAddLiveIn(llvm::MachineFunction * MF, llvm::MachineRegisterInfo & MRI,
+    const llvm::TargetInstrInfo * TII, unsigned physReg, unsigned virtReg);
+
+} // End namespace AMDGPU
+
+#endif // AMDGPU_UTIL_H
diff --git a/lib/Target/AMDGPU/AMDIL.h b/lib/Target/AMDGPU/AMDIL.h
new file mode 100644
index 0000000..4029f27
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDIL.h
@@ -0,0 +1,251 @@
+//===-- AMDIL.h - Top-level interface for AMDIL representation --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// AMDIL back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDIL_H_
+#define AMDIL_H_
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define AMDIL_MAJOR_VERSION 2
+#define AMDIL_MINOR_VERSION 0
+#define AMDIL_REVISION_NUMBER 74
+#define ARENA_SEGMENT_RESERVED_UAVS 12
+#define DEFAULT_ARENA_UAV_ID 8
+#define DEFAULT_RAW_UAV_ID 7
+#define GLOBAL_RETURN_RAW_UAV_ID 11
+#define HW_MAX_NUM_CB 8
+#define MAX_NUM_UNIQUE_UAVS 8
+#define OPENCL_MAX_NUM_ATOMIC_COUNTERS 8
+#define OPENCL_MAX_READ_IMAGES 128
+#define OPENCL_MAX_WRITE_IMAGES 8
+#define OPENCL_MAX_SAMPLERS 16
+
+// The next two values can never be zero, as zero is the ID that is
+// used to assert against.
+#define DEFAULT_LDS_ID     1
+#define DEFAULT_GDS_ID     1
+#define DEFAULT_SCRATCH_ID 1
+#define DEFAULT_VEC_SLOTS  8
+
+// SC->CAL version matchings.
+#define CAL_VERSION_SC_150               1700
+#define CAL_VERSION_SC_149               1700
+#define CAL_VERSION_SC_148               1525
+#define CAL_VERSION_SC_147               1525
+#define CAL_VERSION_SC_146               1525
+#define CAL_VERSION_SC_145               1451
+#define CAL_VERSION_SC_144               1451
+#define CAL_VERSION_SC_143               1441
+#define CAL_VERSION_SC_142               1441
+#define CAL_VERSION_SC_141               1420
+#define CAL_VERSION_SC_140               1400
+#define CAL_VERSION_SC_139               1387
+#define CAL_VERSION_SC_138               1387
+#define CAL_APPEND_BUFFER_SUPPORT        1340
+#define CAL_VERSION_SC_137               1331
+#define CAL_VERSION_SC_136                982
+#define CAL_VERSION_SC_135                950
+#define CAL_VERSION_GLOBAL_RETURN_BUFFER  990
+
+#define OCL_DEVICE_RV710        0x0001
+#define OCL_DEVICE_RV730        0x0002
+#define OCL_DEVICE_RV770        0x0004
+#define OCL_DEVICE_CEDAR        0x0008
+#define OCL_DEVICE_REDWOOD      0x0010
+#define OCL_DEVICE_JUNIPER      0x0020
+#define OCL_DEVICE_CYPRESS      0x0040
+#define OCL_DEVICE_CAICOS       0x0080
+#define OCL_DEVICE_TURKS        0x0100
+#define OCL_DEVICE_BARTS        0x0200
+#define OCL_DEVICE_CAYMAN       0x0400
+#define OCL_DEVICE_ALL          0x3FFF
+
+/// The number of function ID's that are reserved for 
+/// internal compiler usage.
+const unsigned int RESERVED_FUNCS = 1024;
+
+#define AMDIL_OPT_LEVEL_DECL
+#define  AMDIL_OPT_LEVEL_VAR
+#define AMDIL_OPT_LEVEL_VAR_NO_COMMA
+
+namespace llvm {
+class AMDILInstrPrinter;
+class FunctionPass;
+class MCAsmInfo;
+class raw_ostream;
+class Target;
+class TargetMachine;
+
+/// Instruction selection passes.
+FunctionPass*
+  createAMDILISelDag(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
+FunctionPass*
+  createAMDILPeepholeOpt(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
+
+/// Pre emit passes.
+FunctionPass*
+  createAMDILCFGPreparationPass(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
+FunctionPass*
+  createAMDILCFGStructurizerPass(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
+
+extern Target TheAMDILTarget;
+extern Target TheAMDGPUTarget;
+} // end namespace llvm;
+
+#define GET_REGINFO_ENUM
+#include "AMDGPUGenRegisterInfo.inc"
+#define GET_INSTRINFO_ENUM
+#include "AMDGPUGenInstrInfo.inc"
+
+/// Include device information enumerations
+#include "AMDILDeviceInfo.h"
+
+namespace llvm {
+/// OpenCL uses address spaces to differentiate between
+/// various memory regions on the hardware. On the CPU
+/// all of the address spaces point to the same memory,
+/// however on the GPU, each address space points to
+/// a separate piece of memory that is unique from other
+/// memory locations.
+namespace AMDILAS {
+enum AddressSpaces {
+  PRIVATE_ADDRESS  = 0, // Address space for private memory.
+  GLOBAL_ADDRESS   = 1, // Address space for global memory (RAT0, VTX0).
+  CONSTANT_ADDRESS = 2, // Address space for constant memory.
+  LOCAL_ADDRESS    = 3, // Address space for local memory.
+  REGION_ADDRESS   = 4, // Address space for region memory.
+  ADDRESS_NONE     = 5, // Address space for unknown memory.
+  PARAM_D_ADDRESS  = 6, // Address space for directly addressable parameter
+                        // memory (CONST0).
+  PARAM_I_ADDRESS  = 7, // Address space for indirectly addressable parameter
+                        // memory (VTX1).
+  USER_SGPR_ADDRESS = 8, // Address space for USER_SGPRS on SI.
+  LAST_ADDRESS     = 9  // Total number of address spaces; keep last.
+};
+
+// This union/struct combination is an easy way to read out the
+// exact bits that are needed.
+typedef union ResourceRec {
+  struct {
+#ifdef __BIG_ENDIAN__
+    unsigned short isImage       : 1;  // Reserved for future use/llvm.
+    unsigned short ResourceID    : 10; // Flag to specify the resource ID for
+                                       // the op.
+    unsigned short HardwareInst  : 1;  // Flag to specify that this instruction
+                                       // is a hardware instruction.
+    unsigned short ConflictPtr   : 1;  // Flag to specify that the pointer has a
+                                       // conflict.
+    unsigned short ByteStore     : 1;  // Flag to specify if the op is a byte
+                                       // store op.
+    unsigned short PointerPath   : 1;  // Flag to specify if the op is on the
+                                       // pointer path.
+    unsigned short CacheableRead : 1;  // Flag to specify if the read is
+                                       // cacheable.
+#else
+    unsigned short CacheableRead : 1;  // Flag to specify if the read is
+                                       // cacheable.
+    unsigned short PointerPath   : 1;  // Flag to specify if the op is on the
+                                       // pointer path.
+    unsigned short ByteStore     : 1;  // Flag to specify if the op is byte
+                                       // store op.
+    unsigned short ConflictPtr   : 1;  // Flag to specify that the pointer has
+                                       // a conflict.
+    unsigned short HardwareInst  : 1;  // Flag to specify that this instruction
+                                       // is a hardware instruction.
+    unsigned short ResourceID    : 10; // Flag to specify the resource ID for
+                                       // the op.
+    unsigned short isImage       : 1;  // Reserved for future use.
+#endif
+  } bits;
+  unsigned short u16all;
+} InstrResEnc;
+
+} // namespace AMDILAS
+
+// Enums corresponding to AMDIL condition codes for IL.  These
+// values must be kept in sync with the ones in the .td file.
+namespace AMDILCC {
+enum CondCodes {
+  // AMDIL specific condition codes. These correspond to the IL_CC_*
+  // in AMDILInstrInfo.td and must be kept in the same order.
+  IL_CC_D_EQ  =  0,   // DEQ instruction.
+  IL_CC_D_GE  =  1,   // DGE instruction.
+  IL_CC_D_LT  =  2,   // DLT instruction.
+  IL_CC_D_NE  =  3,   // DNE instruction.
+  IL_CC_F_EQ  =  4,   //  EQ instruction.
+  IL_CC_F_GE  =  5,   //  GE instruction.
+  IL_CC_F_LT  =  6,   //  LT instruction.
+  IL_CC_F_NE  =  7,   //  NE instruction.
+  IL_CC_I_EQ  =  8,   // IEQ instruction.
+  IL_CC_I_GE  =  9,   // IGE instruction.
+  IL_CC_I_LT  = 10,   // ILT instruction.
+  IL_CC_I_NE  = 11,   // INE instruction.
+  IL_CC_U_GE  = 12,   // UGE instruction.
+  IL_CC_U_LT  = 13,   // ULT instruction.
+  // Pseudo IL Comparison instructions here.
+  IL_CC_F_GT  = 14,   //  GT instruction.
+  IL_CC_U_GT  = 15,
+  IL_CC_I_GT  = 16,
+  IL_CC_D_GT  = 17,
+  IL_CC_F_LE  = 18,   //  LE instruction
+  IL_CC_U_LE  = 19,
+  IL_CC_I_LE  = 20,
+  IL_CC_D_LE  = 21,
+  IL_CC_F_UNE = 22,
+  IL_CC_F_UEQ = 23,
+  IL_CC_F_ULT = 24,
+  IL_CC_F_UGT = 25,
+  IL_CC_F_ULE = 26,
+  IL_CC_F_UGE = 27,
+  IL_CC_F_ONE = 28,
+  IL_CC_F_OEQ = 29,
+  IL_CC_F_OLT = 30,
+  IL_CC_F_OGT = 31,
+  IL_CC_F_OLE = 32,
+  IL_CC_F_OGE = 33,
+  IL_CC_D_UNE = 34,
+  IL_CC_D_UEQ = 35,
+  IL_CC_D_ULT = 36,
+  IL_CC_D_UGT = 37,
+  IL_CC_D_ULE = 38,
+  IL_CC_D_UGE = 39,
+  IL_CC_D_ONE = 40,
+  IL_CC_D_OEQ = 41,
+  IL_CC_D_OLT = 42,
+  IL_CC_D_OGT = 43,
+  IL_CC_D_OLE = 44,
+  IL_CC_D_OGE = 45,
+  IL_CC_U_EQ  = 46,
+  IL_CC_U_NE  = 47,
+  IL_CC_F_O   = 48,
+  IL_CC_D_O   = 49,
+  IL_CC_F_UO  = 50,
+  IL_CC_D_UO  = 51,
+  IL_CC_L_LE  = 52,
+  IL_CC_L_GE  = 53,
+  IL_CC_L_EQ  = 54,
+  IL_CC_L_NE  = 55,
+  IL_CC_L_LT  = 56,
+  IL_CC_L_GT  = 57,
+  IL_CC_UL_LE = 58,
+  IL_CC_UL_GE = 59,
+  IL_CC_UL_EQ = 60,
+  IL_CC_UL_NE = 61,
+  IL_CC_UL_LT = 62,
+  IL_CC_UL_GT = 63,
+  COND_ERROR  = 64
+};
+
+} // end namespace AMDILCC
+} // end namespace llvm
+#endif // AMDIL_H_
diff --git a/lib/Target/AMDGPU/AMDIL7XXDevice.cpp b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
new file mode 100644
index 0000000..3f2f821
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDIL7XXDevice.cpp
@@ -0,0 +1,128 @@
+//===-- AMDIL7XXDevice.cpp - Device Info for 7XX GPUs ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDIL7XXDevice.h"
+#include "AMDILDevice.h"
+
+using namespace llvm;
+
+// Construct the generic 7XX device: set the software capability bits and
+// derive the OpenCL device flag from the subtarget's device name.
+AMDIL7XXDevice::AMDIL7XXDevice(AMDILSubtarget *ST) : AMDILDevice(ST)
+{
+  setCaps();
+  std::string name = mSTM->getDeviceName();
+  if (name == "rv710") {
+    mDeviceFlag = OCL_DEVICE_RV710;
+  } else if (name == "rv730") {
+    mDeviceFlag = OCL_DEVICE_RV730;
+  } else {
+    // Every other 7XX variant is treated as RV770-class.
+    mDeviceFlag = OCL_DEVICE_RV770;
+  }
+}
+
+AMDIL7XXDevice::~AMDIL7XXDevice()
+{
+}
+
+// Base 7XX capabilities: local memory is emulated in software.
+void AMDIL7XXDevice::setCaps()
+{
+  mSWBits.set(AMDILDeviceInfo::LocalMem);
+}
+
+// Maximum local-data-share size in bytes; returns 0 when local memory is
+// not hardware-backed (the base 7XX setCaps puts LocalMem in software).
+size_t AMDIL7XXDevice::getMaxLDSSize() const
+{
+  if (usesHardware(AMDILDeviceInfo::LocalMem)) {
+    return MAX_LDS_SIZE_700;
+  }
+  return 0;
+}
+
+// Generic 7XX parts execute half wavefronts.
+size_t AMDIL7XXDevice::getWavefrontSize() const
+{
+  return AMDILDevice::HalfWavefrontSize;
+}
+
+// All 7XX devices report the HD4XXX hardware generation.
+uint32_t AMDIL7XXDevice::getGeneration() const
+{
+  return AMDILDeviceInfo::HD4XXX;
+}
+
+// Map an abstract resource class (DeviceID) to the concrete resource ID
+// used on 7XX hardware.  Classes that are unsupported, unknown, or not
+// hardware-backed resolve to 0.
+uint32_t AMDIL7XXDevice::getResourceID(uint32_t DeviceID) const
+{
+  switch (DeviceID) {
+  default:
+    assert(0 && "ID type passed in is unknown!");
+    break;
+  case GLOBAL_ID:
+  case CONSTANT_ID:
+  case RAW_UAV_ID:
+  case ARENA_UAV_ID:
+    break;
+  case LDS_ID:
+    if (usesHardware(AMDILDeviceInfo::LocalMem)) {
+      return DEFAULT_LDS_ID;
+    }
+    break;
+  case SCRATCH_ID:
+    if (usesHardware(AMDILDeviceInfo::PrivateMem)) {
+      return DEFAULT_SCRATCH_ID;
+    }
+    break;
+  case GDS_ID:
+    // GDS is not available on this chip; assert so misuse is caught in
+    // debug builds, but keep the hardware check for release symmetry.
+    assert(0 && "GDS UAV ID is not supported on this chip");
+    if (usesHardware(AMDILDeviceInfo::RegionMem)) {
+      return DEFAULT_GDS_ID;
+    }
+    break;
+  }
+
+  return 0;
+}
+
+// 7XX hardware exposes a single UAV.
+uint32_t AMDIL7XXDevice::getMaxNumUAVs() const
+{
+  return 1;
+}
+
+AMDIL770Device::AMDIL770Device(AMDILSubtarget *ST): AMDIL7XXDevice(ST)
+{
+  setCaps();
+}
+
+AMDIL770Device::~AMDIL770Device()
+{
+}
+
+// RV770 capabilities: optionally enable hardware double precision (with
+// FMA handled via the software bit), and route barrier detection, 64-bit
+// integer ops and local memory through the software paths.
+void AMDIL770Device::setCaps()
+{
+  if (mSTM->isOverride(AMDILDeviceInfo::DoubleOps)) {
+    mSWBits.set(AMDILDeviceInfo::FMA);
+    mHWBits.set(AMDILDeviceInfo::DoubleOps);
+  }
+  mSWBits.set(AMDILDeviceInfo::BarrierDetect);
+  mHWBits.reset(AMDILDeviceInfo::LongOps);
+  mSWBits.set(AMDILDeviceInfo::LongOps);
+  mSWBits.set(AMDILDeviceInfo::LocalMem);
+}
+
+// RV770 executes full wavefronts.
+size_t AMDIL770Device::getWavefrontSize() const
+{
+  return AMDILDevice::WavefrontSize;
+}
+
+AMDIL710Device::AMDIL710Device(AMDILSubtarget *ST) : AMDIL7XXDevice(ST)
+{
+}
+
+AMDIL710Device::~AMDIL710Device()
+{
+}
+
+// RV710 executes quarter wavefronts.
+size_t AMDIL710Device::getWavefrontSize() const
+{
+  return AMDILDevice::QuarterWavefrontSize;
+}
diff --git a/lib/Target/AMDGPU/AMDIL7XXDevice.h b/lib/Target/AMDGPU/AMDIL7XXDevice.h
new file mode 100644
index 0000000..4d8d47a
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDIL7XXDevice.h
@@ -0,0 +1,71 @@
+//==-- AMDIL7XXDevice.h - Define 7XX Device Device for AMDIL ---*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface for the subtarget data classes.
+//
+//===----------------------------------------------------------------------===//
+// This file will define the interface that each generation needs to
+// implement in order to correctly answer queries on the capabilities of the
+// specific hardware.
+//===----------------------------------------------------------------------===//
+#ifndef _AMDIL7XXDEVICEIMPL_H_
+#define _AMDIL7XXDEVICEIMPL_H_
+#include "AMDILDevice.h"
+#include "AMDILSubtarget.h"
+
+namespace llvm {
+class AMDILSubtarget;
+
+//===----------------------------------------------------------------------===//
+// 7XX generation of devices and their respective sub classes
+//===----------------------------------------------------------------------===//
+
+// The AMDIL7XXDevice class represents the generic 7XX device. All 7XX
+// devices are derived from this class. The AMDIL7XX device will only
+// support the minimal features that are required to be considered OpenCL 1.0
+// compliant and nothing more.
+class AMDIL7XXDevice : public AMDILDevice {
+public:
+  AMDIL7XXDevice(AMDILSubtarget *ST);
+  virtual ~AMDIL7XXDevice();
+  // Max local-data-share size in bytes (0 when LDS is not in hardware).
+  virtual size_t getMaxLDSSize() const;
+  // Threads per wavefront for this part.
+  virtual size_t getWavefrontSize() const;
+  // Hardware generation (HD4XXX for all 7XX parts).
+  virtual uint32_t getGeneration() const;
+  // Concrete resource ID for an abstract resource class.
+  virtual uint32_t getResourceID(uint32_t DeviceID) const;
+  // Number of usable UAVs (1 on 7XX).
+  virtual uint32_t getMaxNumUAVs() const;
+
+protected:
+  // Initialize the hardware/software capability bit-vectors.
+  virtual void setCaps();
+}; // AMDIL7XXDevice
+
+// The AMDIL770Device class represents the RV770 chip and its
+// derivative cards. The difference between this device and the base
+// class is that this device adds support for double precision
+// and has a larger wavefront size.
+class AMDIL770Device : public AMDIL7XXDevice {
+public:
+  AMDIL770Device(AMDILSubtarget *ST);
+  virtual ~AMDIL770Device();
+  virtual size_t getWavefrontSize() const;
+private:
+  virtual void setCaps();
+}; // AMDIL770Device
+
+// The AMDIL710Device class derives from the 7XX base class, but this
+// class is a smaller derivative (quarter wavefront size), so the
+// wavefront query is overloaded to report that correctly.
+class AMDIL710Device : public AMDIL7XXDevice {
+public:
+  AMDIL710Device(AMDILSubtarget *ST);
+  virtual ~AMDIL710Device();
+  virtual size_t getWavefrontSize() const;
+}; // AMDIL710Device
+
+} // namespace llvm
+#endif // _AMDIL7XXDEVICEIMPL_H_
diff --git a/lib/Target/AMDGPU/AMDILAlgorithms.tpp b/lib/Target/AMDGPU/AMDILAlgorithms.tpp
new file mode 100644
index 0000000..058475f
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILAlgorithms.tpp
@@ -0,0 +1,93 @@
+//===------ AMDILAlgorithms.tpp - AMDIL Template Algorithms Header --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides templates algorithms that extend the STL algorithms, but
+// are useful for the AMDIL backend
+//
+//===----------------------------------------------------------------------===//
+
+// Apply F(*I, Second) to every element in [First, Last).  This mirrors
+// std::for_each but additionally forwards a reference to a second
+// argument on each call; the (possibly stateful) functor is returned.
+template<class InputIterator, class Function, typename Arg>
+Function binaryForEach(InputIterator First, InputIterator Last, Function F,
+                       Arg &Second)
+{
+  while (First != Last) {
+    F(*First, Second);
+    ++First;
+  }
+  return F;
+}
+
+// Like binaryForEach, except that when F returns true the current
+// iterator is assumed invalidated (e.g. the element was erased), so the
+// loop steps back before the ++First re-advances.
+// NOTE(review): if F returns true on the very first element, --First
+// steps before begin(); presumably callers guarantee this cannot
+// happen -- confirm before reusing this helper elsewhere.
+template<class InputIterator, class Function, typename Arg>
+Function safeBinaryForEach(InputIterator First, InputIterator Last, Function F,
+                           Arg &Second)
+{
+  for ( ; First!=Last; ++First ) {
+    if (F(*First, Second)) {
+      --First;
+    }
+  }
+  return F;
+}
+
+// Two-level variants: walk a range of containers and apply
+// binaryForEach / safeBinaryForEach to each container's elements,
+// threading the same second argument through every call.
+template<class InputIterator, class Function, typename Arg>
+Function binaryNestedForEach(InputIterator First, InputIterator Last,
+                             Function F, Arg &Second)
+{
+  while (First != Last) {
+    binaryForEach(First->begin(), First->end(), F, Second);
+    ++First;
+  }
+  return F;
+}
+
+template<class InputIterator, class Function, typename Arg>
+Function safeBinaryNestedForEach(InputIterator First, InputIterator Last,
+                                 Function F, Arg &Second)
+{
+  while (First != Last) {
+    safeBinaryForEach(First->begin(), First->end(), F, Second);
+    ++First;
+  }
+  return F;
+}
+
+// Apply F to a pointer to each iterator in [First, Last).  Unlike the
+// STL for_each, the iterator itself is passed (by address) so F can
+// inspect the current position.  Note that this loop always advances,
+// so F must not invalidate the iterator -- use safeNestedForEach below
+// when F may erase elements.
+template<class InputIterator, class Function>
+Function safeForEach(InputIterator First, InputIterator Last, Function F)
+{
+  for ( ; First != Last; ++First ) {
+    F(&First);
+  }
+  return F;
+}
+
+// A template function that has two levels of looping before calling the
+// function with a pointer to the current iterator. See binaryForEach for
+// further explanation.  The inner iterator only advances when F(&sf)
+// returns false; when F returns true it is expected to have
+// repositioned/invalidated sf itself (e.g. after erasing the element).
+// NOTE(review): parameter S is unused -- confirm whether it can be
+// dropped from the signature.
+template<class InputIterator, class SecondIterator, class Function>
+Function safeNestedForEach(InputIterator First, InputIterator Last,
+                              SecondIterator S, Function F)
+{
+  for ( ; First != Last; ++First) {
+    SecondIterator sf, sl;
+    for (sf = First->begin(), sl = First->end();
+         sf != sl; )  {
+      if (!F(&sf)) {
+        ++sf;
+      } 
+    }
+  }
+  return F;
+}
diff --git a/lib/Target/AMDGPU/AMDILBase.td b/lib/Target/AMDGPU/AMDILBase.td
new file mode 100644
index 0000000..31ebed3
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILBase.td
@@ -0,0 +1,109 @@
+//===- AMDIL.td - AMDIL Target Machine -------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// AMDIL Subtarget features.
+//===----------------------------------------------------------------------===//
+def FeatureFP64     : SubtargetFeature<"fp64",
+        "CapsOverride[AMDILDeviceInfo::DoubleOps]",
+        "true",
+        "Enable 64bit double precision operations">;
+def FeatureByteAddress    : SubtargetFeature<"byte_addressable_store",
+        "CapsOverride[AMDILDeviceInfo::ByteStores]",
+        "true",
+        "Enable byte addressable stores">;
+def FeatureBarrierDetect : SubtargetFeature<"barrier_detect",
+        "CapsOverride[AMDILDeviceInfo::BarrierDetect]",
+        "true",
+        "Enable duplicate barrier detection(HD5XXX or later).">;
+def FeatureImages : SubtargetFeature<"images",
+        "CapsOverride[AMDILDeviceInfo::Images]",
+        "true",
+        "Enable image functions">;
+def FeatureMultiUAV : SubtargetFeature<"multi_uav",
+        "CapsOverride[AMDILDeviceInfo::MultiUAV]",
+        "true",
+        "Generate multiple UAV code(HD5XXX family or later)">;
+def FeatureMacroDB : SubtargetFeature<"macrodb",
+        "CapsOverride[AMDILDeviceInfo::MacroDB]",
+        "true",
+        "Use internal macrodb, instead of macrodb in driver">;
+def FeatureNoAlias : SubtargetFeature<"noalias",
+        "CapsOverride[AMDILDeviceInfo::NoAlias]",
+        "true",
+        "assert that all kernel argument pointers are not aliased">;
+def FeatureNoInline : SubtargetFeature<"no-inline",
+        "CapsOverride[AMDILDeviceInfo::NoInline]",
+        "true",
+        "specify whether to not inline functions">;
+
+def Feature64BitPtr : SubtargetFeature<"64BitPtr",
+        "mIs64bit",
+        "false",
+        "Specify if 64bit addressing should be used.">;
+
+def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
+        "mIs32on64bit",
+        "false",
+        "Specify if 64bit sized pointers with 32bit addressing should be used.">;
+def FeatureDebug : SubtargetFeature<"debug",
+        "CapsOverride[AMDILDeviceInfo::Debug]",
+        "true",
+        "Debug mode is enabled, so disable hardware accelerated address spaces.">;
+def FeatureDumpCode : SubtargetFeature <"DumpCode",
+        "mDumpCode",
+        "true",
+        "Dump MachineInstrs in the CodeEmitter">;
+
+
+//===----------------------------------------------------------------------===//
+// Register File, Calling Conv, Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+
+include "AMDILRegisterInfo.td"
+include "AMDILCallingConv.td"
+include "AMDILInstrInfo.td"
+
+// Instruction set record; the instruction definitions come from the
+// AMDILInstrInfo.td include above.
+def AMDILInstrInfo : InstrInfo {}
+
+//===----------------------------------------------------------------------===//
+// AMDIL processors supported.
+//===----------------------------------------------------------------------===//
+//include "Processors.td"
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+// Assembly writer configuration: generated printer class is "AsmPrinter".
+def AMDILAsmWriter : AsmWriter {
+    string AsmWriterClassName = "AsmPrinter";
+    int Variant = 0;
+}
+
+// Assembly parser configuration: ';' comments, 'r'-prefixed registers.
+def AMDILAsmParser : AsmParser {
+    string AsmParserClassName = "AsmParser";
+    int Variant = 0;
+
+    string CommentDelimiter = ";";
+
+    string RegisterPrefix = "r";
+
+}
+
+
+// Top-level target record tying instruction set, writer and parser together.
+def AMDIL : Target {
+  // Pull in Instruction Info:
+  let InstructionSet = AMDILInstrInfo;
+  let AssemblyWriters = [AMDILAsmWriter];
+  let AssemblyParsers = [AMDILAsmParser];
+}
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
new file mode 100644
index 0000000..ba7d246
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -0,0 +1,3236 @@
+//===-- AMDILCFGStructurizer.cpp - CFG Structurizer -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+#define DEBUGME 0
+#define DEBUG_TYPE "structcfg"
+
+#include "AMDIL.h"
+#include "AMDILInstrInfo.h"
+#include "AMDILRegisterInfo.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/DominatorInternals.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define FirstNonDebugInstr(A) A->begin()
+using namespace llvm;
+
+// TODO: move-begin.
+
+//===----------------------------------------------------------------------===//
+//
+// Statistics for CFGStructurizer.
+//
+//===----------------------------------------------------------------------===//
+
+STATISTIC(numSerialPatternMatch,    "CFGStructurizer number of serial pattern "
+    "matched");
+STATISTIC(numIfPatternMatch,        "CFGStructurizer number of if pattern "
+    "matched");
+STATISTIC(numLoopbreakPatternMatch, "CFGStructurizer number of loop-break "
+    "pattern matched");
+STATISTIC(numLoopcontPatternMatch,  "CFGStructurizer number of loop-continue "
+    "pattern matched");
+STATISTIC(numLoopPatternMatch,      "CFGStructurizer number of loop pattern "
+    "matched");
+STATISTIC(numClonedBlock,           "CFGStructurizer cloned blocks");
+STATISTIC(numClonedInstr,           "CFGStructurizer cloned instructions");
+
+//===----------------------------------------------------------------------===//
+//
+// Miscellaneous utility for CFGStructurizer.
+//
+//===----------------------------------------------------------------------===//
+namespace llvmCFGStruct
+{
+#define SHOWNEWINSTR(i) \
+  if (DEBUGME) errs() << "New instr: " << *i << "\n"
+
+#define SHOWNEWBLK(b, msg) \
+if (DEBUGME) { \
+  errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+  errs() << "\n"; \
+}
+
+#define SHOWBLK_DETAIL(b, msg) \
+if (DEBUGME) { \
+  if (b) { \
+  errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+  b->print(errs()); \
+  errs() << "\n"; \
+  } \
+}
+
+#define INVALIDSCCNUM -1
+#define INVALIDREGNUM 0
+
+// Print every loop in LoopInfo to OS (with zero indentation depth).
+template<class LoopinfoT>
+void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) {
+  typename LoopinfoT::iterator It = LoopInfo.begin();
+  typename LoopinfoT::iterator End = LoopInfo.end();
+  while (It != End) {
+    (*It)->print(OS, 0);
+    ++It;
+  }
+}
+
+// Reverse the order of the pointers in Src, in place.
+template<class NodeT>
+void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
+  size_t Lo = 0;
+  size_t Hi = Src.size();
+  while (Lo + 1 < Hi) {
+    NodeT *Tmp = Src[Lo];
+    Src[Lo] = Src[Hi - 1];
+    Src[Hi - 1] = Tmp;
+    ++Lo;
+    --Hi;
+  }
+}
+
+} //end namespace llvmCFGStruct
+
+
+//===----------------------------------------------------------------------===//
+//
+// MachinePostDominatorTree
+//
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+
+/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used
+/// to compute a post-dominator tree over MachineBasicBlocks.  It owns a
+/// DominatorTreeBase built in post-dominance mode and forwards the common
+/// queries to it.
+///
+struct MachinePostDominatorTree : public MachineFunctionPass {
+  static char ID; // Pass identification, replacement for typeid
+  DominatorTreeBase<MachineBasicBlock> *DT; // Owned; deleted in destructor.
+  MachinePostDominatorTree() : MachineFunctionPass(ID)
+  {
+    DT = new DominatorTreeBase<MachineBasicBlock>(true); // 'true' selects
+    // post-dominance.
+  }
+
+  ~MachinePostDominatorTree();
+
+  virtual bool runOnMachineFunction(MachineFunction &MF);
+
+  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    AU.setPreservesAll();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  // The members below simply forward to the underlying tree.
+  inline const std::vector<MachineBasicBlock *> &getRoots() const {
+    return DT->getRoots();
+  }
+
+  inline MachineDomTreeNode *getRootNode() const {
+    return DT->getRootNode();
+  }
+
+  inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+    return DT->getNode(BB);
+  }
+
+  inline bool dominates(MachineDomTreeNode *A, MachineDomTreeNode *B) const {
+    return DT->dominates(A, B);
+  }
+
+  inline bool dominates(MachineBasicBlock *A, MachineBasicBlock *B) const {
+    return DT->dominates(A, B);
+  }
+
+  inline bool
+  properlyDominates(const MachineDomTreeNode *A, MachineDomTreeNode *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  inline bool
+  properlyDominates(MachineBasicBlock *A, MachineBasicBlock *B) const {
+    return DT->properlyDominates(A, B);
+  }
+
+  inline MachineBasicBlock *
+  findNearestCommonDominator(MachineBasicBlock *A, MachineBasicBlock *B) {
+    return DT->findNearestCommonDominator(A, B);
+  }
+
+  virtual void print(llvm::raw_ostream &OS, const Module *M = 0) const {
+    DT->print(OS);
+  }
+};
+} //end of namespace llvm
+
+char MachinePostDominatorTree::ID = 0;
+static RegisterPass<MachinePostDominatorTree>
+machinePostDominatorTreePass("machinepostdomtree",
+                             "MachinePostDominator Tree Construction",
+                             true, true);
+
+//const PassInfo *const llvm::MachinePostDominatorsID
+//= &machinePostDominatorTreePass;
+
+// Recompute the post-dominator tree for F.  Returns false: the function
+// itself is never modified by this analysis pass.
+bool MachinePostDominatorTree::runOnMachineFunction(MachineFunction &F) {
+  DT->recalculate(F);
+  //DEBUG(DT->dump());
+  return false;
+}
+
+// Release the tree allocated in the constructor.
+MachinePostDominatorTree::~MachinePostDominatorTree() {
+  delete DT;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// supporting data structure for CFGStructurizer
+//
+//===----------------------------------------------------------------------===//
+
+namespace llvmCFGStruct
+{
+template<class PassT>
+struct CFGStructTraits {
+};
+
+// Per-basic-block bookkeeping for the structurizer: whether the block has
+// been retired by the transformation, and which SCC it belongs to
+// (INVALIDSCCNUM until assigned).
+template <class InstrT>
+class BlockInformation {
+public:
+  bool isRetired;
+  int  sccNum;
+  //SmallVector<InstrT*, DEFAULT_VEC_SLOTS> succInstr;
+  //Instructions defining the corresponding successor.
+  BlockInformation() : isRetired(false), sccNum(INVALIDSCCNUM) {}
+};
+
+// Per-loop bookkeeping for the loop's "land" block (presumably the block
+// control flow lands on after leaving the loop -- confirm against the
+// transform code) and the registers that must be initialized or tested
+// to emulate break/continue with structured control flow.  landBlk stays
+// NULL until a land block has been chosen.
+template <class BlockT, class InstrT, class RegiT>
+class LandInformation {
+public:
+  BlockT *landBlk;
+  std::set<RegiT> breakInitRegs;  //Registers that need to "reg = 0", before
+                                  //WHILELOOP(thisloop) init before entering
+                                  //thisloop.
+  std::set<RegiT> contInitRegs;   //Registers that need to "reg = 0", after
+                                  //WHILELOOP(thisloop) init after entering
+                                  //thisloop.
+  std::set<RegiT> endbranchInitRegs; //Init before entering this loop, at loop
+                                     //land block, branch cond on this reg.
+  std::set<RegiT> breakOnRegs;       //registers that need to "if (reg) break
+                                     //endif" after ENDLOOP(thisloop) break
+                                     //outerLoopOf(thisLoop).
+  std::set<RegiT> contOnRegs;       //registers that need to "if (reg) continue
+                                    //endif" after ENDLOOP(thisloop) continue on
+                                    //outerLoopOf(thisLoop).
+  LandInformation() : landBlk(NULL) {}
+};
+
+} //end of namespace llvmCFGStruct
+
+//===----------------------------------------------------------------------===//
+//
+// CFGStructurizer
+//
+//===----------------------------------------------------------------------===//
+
+namespace llvmCFGStruct
+{
+// bixia TODO: port it to BasicBlock, not just MachineBasicBlock.
+// bixia TODO: port it to BasicBlock, not just MachineBasicBlock.
+//
+// Generic CFG structurizer: repeatedly pattern-matches serial, if, and loop
+// shapes in the CFG and collapses them until (ideally) one block remains.
+// All concrete types are obtained from PassT via typedefs and the
+// CFGStructTraits<PassT> traits class.
+template<class PassT>
+class  CFGStructurizer
+{
+public:
+  // Classification of the path from one block to another, computed by
+  // singlePathTo().
+  typedef enum {
+    Not_SinglePath = 0,
+    SinglePath_InPath = 1,
+    SinglePath_NotInPath = 2
+  } PathToKind;
+
+public:
+  typedef typename PassT::InstructionType         InstrT;
+  typedef typename PassT::FunctionType            FuncT;
+  typedef typename PassT::DominatortreeType       DomTreeT;
+  typedef typename PassT::PostDominatortreeType   PostDomTreeT;
+  typedef typename PassT::DomTreeNodeType         DomTreeNodeT;
+  typedef typename PassT::LoopinfoType            LoopInfoT;
+
+  typedef GraphTraits<FuncT *>                    FuncGTraits;
+  //typedef FuncGTraits::nodes_iterator BlockIterator;
+  typedef typename FuncT::iterator                BlockIterator;
+
+  typedef typename FuncGTraits::NodeType          BlockT;
+  typedef GraphTraits<BlockT *>                   BlockGTraits;
+  typedef GraphTraits<Inverse<BlockT *> >         InvBlockGTraits;
+  //typedef BlockGTraits::succ_iterator InstructionIterator;
+  typedef typename BlockT::iterator               InstrIterator;
+
+  typedef CFGStructTraits<PassT>                  CFGTraits;
+  typedef BlockInformation<InstrT>                BlockInfo;
+  typedef std::map<BlockT *, BlockInfo *>         BlockInfoMap;
+
+  typedef int                                     RegiT;
+  typedef typename PassT::LoopType                LoopT;
+  typedef LandInformation<BlockT, InstrT, RegiT>  LoopLandInfo;
+        typedef std::map<LoopT *, LoopLandInfo *> LoopLandInfoMap;
+        //landing info for loop break
+  typedef SmallVector<BlockT *, 32>               BlockTSmallerVector;
+
+public:
+  CFGStructurizer();
+  ~CFGStructurizer();
+
+  /// Perform the CFG structurization
+  bool run(FuncT &Func, PassT &Pass, const AMDILRegisterInfo *tri);
+
+  /// Perform the CFG preparation
+  bool prepare(FuncT &Func, PassT &Pass, const AMDILRegisterInfo *tri);
+
+private:
+  // --- pattern-matching drivers; each returns the number of matches made ---
+  void   orderBlocks();
+  void   printOrderedBlocks(llvm::raw_ostream &OS);
+  int patternMatch(BlockT *CurBlock);
+  int patternMatchGroup(BlockT *CurBlock);
+
+  int serialPatternMatch(BlockT *CurBlock);
+  int ifPatternMatch(BlockT *CurBlock);
+  int switchPatternMatch(BlockT *CurBlock);
+  int loopendPatternMatch(BlockT *CurBlock);
+  int loopPatternMatch(BlockT *CurBlock);
+
+  int loopbreakPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
+  int loopcontPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
+  //int loopWithoutBreak(BlockT *);
+
+  // --- CFG rewriting helpers used by the matchers ---
+  void handleLoopbreak (BlockT *ExitingBlock, LoopT *ExitingLoop,
+                        BlockT *ExitBlock, LoopT *exitLoop, BlockT *landBlock);
+  void handleLoopcontBlock(BlockT *ContingBlock, LoopT *contingLoop,
+                           BlockT *ContBlock, LoopT *contLoop);
+  bool isSameloopDetachedContbreak(BlockT *Src1Block, BlockT *Src2Block);
+  int handleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
+                       BlockT *FalseBlock);
+  int handleJumpintoIfImp(BlockT *HeadBlock, BlockT *TrueBlock,
+                          BlockT *FalseBlock);
+  int improveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
+                              BlockT *FalseBlock, BlockT **LandBlockPtr);
+  void showImproveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
+                                   BlockT *FalseBlock, BlockT *LandBlock,
+                                   bool Detail = false);
+  PathToKind singlePathTo(BlockT *SrcBlock, BlockT *DstBlock,
+                          bool AllowSideEntry = true);
+  BlockT *singlePathEnd(BlockT *srcBlock, BlockT *DstBlock,
+                        bool AllowSideEntry = true);
+  int cloneOnSideEntryTo(BlockT *PreBlock, BlockT *SrcBlock, BlockT *DstBlock);
+  void mergeSerialBlock(BlockT *DstBlock, BlockT *srcBlock);
+
+  void mergeIfthenelseBlock(InstrT *BranchInstr, BlockT *CurBlock,
+                            BlockT *TrueBlock, BlockT *FalseBlock,
+                            BlockT *LandBlock);
+  void mergeLooplandBlock(BlockT *DstBlock, LoopLandInfo *LoopLand);
+  void mergeLoopbreakBlock(BlockT *ExitingBlock, BlockT *ExitBlock,
+                           BlockT *ExitLandBlock, RegiT SetReg);
+  void settleLoopcontBlock(BlockT *ContingBlock, BlockT *ContBlock,
+                           RegiT SetReg);
+  BlockT *relocateLoopcontBlock(LoopT *ParentLoopRep, LoopT *LoopRep,
+                                std::set<BlockT*> &ExitBlockSet,
+                                BlockT *ExitLandBlk);
+  BlockT *addLoopEndbranchBlock(LoopT *LoopRep,
+                                BlockTSmallerVector &ExitingBlocks,
+                                BlockTSmallerVector &ExitBlocks);
+  BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep);
+  void removeUnconditionalBranch(BlockT *SrcBlock);
+  void removeRedundantConditionalBranch(BlockT *SrcBlock);
+  void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks);
+
+  void removeSuccessor(BlockT *SrcBlock);
+  BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock);
+  BlockT *exitingBlock2ExitBlock (LoopT *LoopRep, BlockT *exitingBlock);
+
+  void migrateInstruction(BlockT *SrcBlock, BlockT *DstBlock,
+                          InstrIterator InsertPos);
+
+  // --- bookkeeping accessors over blockInfoMap / loopLandInfoMap ---
+  void recordSccnum(BlockT *SrcBlock, int SCCNum);
+  int getSCCNum(BlockT *srcBlk);
+
+  void retireBlock(BlockT *DstBlock, BlockT *SrcBlock);
+  bool isRetiredBlock(BlockT *SrcBlock);
+  bool isActiveLoophead(BlockT *CurBlock);
+  bool needMigrateBlock(BlockT *Block);
+
+  BlockT *recordLoopLandBlock(LoopT *LoopRep, BlockT *LandBlock,
+                              BlockTSmallerVector &exitBlocks,
+                              std::set<BlockT*> &ExitBlockSet);
+  void setLoopLandBlock(LoopT *LoopRep, BlockT *Block = NULL);
+  BlockT *getLoopLandBlock(LoopT *LoopRep);
+  LoopLandInfo *getLoopLandInfo(LoopT *LoopRep);
+
+  void addLoopBreakOnReg(LoopT *LoopRep, RegiT RegNum);
+  void addLoopContOnReg(LoopT *LoopRep, RegiT RegNum);
+  void addLoopBreakInitReg(LoopT *LoopRep, RegiT RegNum);
+  void addLoopContInitReg(LoopT *LoopRep, RegiT RegNum);
+  void addLoopEndbranchInitReg(LoopT *LoopRep, RegiT RegNum);
+
+  bool hasBackEdge(BlockT *curBlock);
+  unsigned getLoopDepth  (LoopT *LoopRep);
+  int countActiveBlock(
+    typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterStart,
+    typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterEnd);
+    BlockT *findNearestCommonPostDom(std::set<BlockT *>&);
+  BlockT *findNearestCommonPostDom(BlockT *Block1, BlockT *Block2);
+
+private:
+  // Analyses and context set up by run()/prepare().
+  DomTreeT *domTree;
+  PostDomTreeT *postDomTree;
+  LoopInfoT *loopInfo;
+  PassT *passRep;
+  FuncT *funcRep;
+
+  BlockInfoMap blockInfoMap;        // owns BlockInfo values (deleted in dtor/run)
+  LoopLandInfoMap loopLandInfoMap;  // owns LoopLandInfo values (cleared in run)
+  SmallVector<BlockT *, DEFAULT_VEC_SLOTS> orderedBlks;
+  const AMDILRegisterInfo *TRI;
+
+};  //template class CFGStructurizer
+
+// Default-construct with all analysis/context pointers cleared.  The original
+// initialized only the analysis pointers and left passRep, funcRep and TRI
+// indeterminate until run()/prepare(); NULL-initialize them as well so any
+// accidental use before run()/prepare() fails predictably instead of reading
+// garbage.
+template<class PassT> CFGStructurizer<PassT>::CFGStructurizer()
+  : domTree(NULL), postDomTree(NULL), loopInfo(NULL), passRep(NULL),
+    funcRep(NULL), TRI(NULL) {
+}
+
+// Free owned bookkeeping records.  The original released only blockInfoMap;
+// loopLandInfoMap entries were freed solely inside run(), so they leaked if
+// run() was never executed (e.g. only prepare() ran, or run() aborted early).
+// Deleting here is safe in all cases: after a normal run() the map is empty,
+// and delete on a NULL entry is a no-op.
+template<class PassT> CFGStructurizer<PassT>::~CFGStructurizer() {
+  for (typename BlockInfoMap::iterator I = blockInfoMap.begin(),
+       E = blockInfoMap.end(); I != E; ++I) {
+    delete I->second;
+  }
+  for (typename LoopLandInfoMap::iterator I = loopLandInfoMap.begin(),
+       E = loopLandInfoMap.end(); I != E; ++I) {
+    delete I->second;
+  }
+}
+
+// Preparation phase run before structurization proper: normalizes infinite
+// loops to have an exit, strips unconditional/redundant branches, and merges
+// multiple returns into a single dummy exit block.  Returns true iff the CFG
+// was changed.  Note: only LoopInfo is actually queried here; the
+// dominator/post-dominator retrieval is commented out (see FIXMEs below).
+template<class PassT>
+bool CFGStructurizer<PassT>::prepare(FuncT &func, PassT &pass,
+                                     const AMDILRegisterInfo * tri) {
+  passRep = &pass;
+  funcRep = &func;
+  TRI = tri;
+
+  bool changed = false;
+  //func.RenumberBlocks();
+
+  //to do, if not reducible flow graph, make it so ???
+
+  if (DEBUGME) {
+        errs() << "AMDILCFGStructurizer::prepare\n";
+    //func.viewCFG();
+    //func.viewCFGOnly();
+    //func.dump();
+  }
+
+  //FIXME: gcc complains on this.
+  //domTree = &pass.getAnalysis<DomTreeT>();
+      //domTree = CFGTraits::getDominatorTree(pass);
+      //if (DEBUGME) {
+      //    domTree->print(errs());
+    //}
+
+  //FIXME: gcc complains on this.
+  //domTree = &pass.getAnalysis<DomTreeT>();
+      //postDomTree = CFGTraits::getPostDominatorTree(pass);
+      //if (DEBUGME) {
+      //   postDomTree->print(errs());
+    //}
+
+  //FIXME: gcc complains on this.
+  //loopInfo = &pass.getAnalysis<LoopInfoT>();
+  loopInfo = CFGTraits::getLoopInfo(pass);
+  if (DEBUGME) {
+    errs() << "LoopInfo:\n";
+    PrintLoopinfo(*loopInfo, errs());
+  }
+
+  // Fill orderedBlks with the reverse-DFS post order (per SCC).
+  orderBlocks();
+  if (DEBUGME) {
+    errs() << "Ordered blocks:\n";
+    printOrderedBlocks(errs());
+  }
+
+  SmallVector<BlockT *, DEFAULT_VEC_SLOTS> retBlks;
+
+  // Give every infinite loop (no exiting blocks) a synthetic exit so the
+  // structurizer always has a landing block to work with.
+  for (typename LoopInfoT::iterator iter = loopInfo->begin(),
+       iterEnd = loopInfo->end();
+       iter != iterEnd; ++iter) {
+    LoopT* loopRep = (*iter);
+    BlockTSmallerVector exitingBlks;
+    loopRep->getExitingBlocks(exitingBlks);
+    
+    if (exitingBlks.size() == 0) {
+      BlockT* dummyExitBlk = normalizeInfiniteLoopExit(loopRep);
+      if (dummyExitBlk != NULL)
+        retBlks.push_back(dummyExitBlk);
+    }
+  }
+
+  // Remove unconditional branch instr.
+  // Add dummy exit block iff there are multiple returns.
+
+  for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
+       iterBlk = orderedBlks.begin(), iterEndBlk = orderedBlks.end();
+       iterBlk != iterEndBlk;
+       ++iterBlk) {
+    BlockT *curBlk = *iterBlk;
+    removeUnconditionalBranch(curBlk);
+    removeRedundantConditionalBranch(curBlk);
+    if (CFGTraits::isReturnBlock(curBlk)) {
+      retBlks.push_back(curBlk);
+    }
+    assert(curBlk->succ_size() <= 2);
+    //assert(curBlk->size() > 0);
+    //removeEmptyBlock(curBlk) ??
+  } //for
+
+  // With more than one return block, funnel them into one dummy exit so the
+  // reduction can converge to a single block.
+  if (retBlks.size() >= 2) {
+    addDummyExitBlock(retBlks);
+    changed = true;
+  }
+
+  return changed;
+} //CFGStructurizer::prepare
+
+// Main structurization driver.  Iterates pattern matching over the ordered
+// blocks, SCC by SCC, until the function collapses to a single block or no
+// further progress is made.  Asserts on irreducible control flow.  Always
+// returns true (the function is considered modified).
+template<class PassT>
+bool CFGStructurizer<PassT>::run(FuncT &func, PassT &pass,
+    const AMDILRegisterInfo * tri) {
+  passRep = &pass;
+  funcRep = &func;
+  TRI = tri;
+
+  //func.RenumberBlocks();
+
+  //Assume reducible CFG...
+  if (DEBUGME) {
+    errs() << "AMDILCFGStructurizer::run\n";
+    //errs() << func.getFunction()->getNameStr() << "\n";
+    func.viewCFG();
+    //func.viewCFGOnly();
+    //func.dump();
+  }
+
+#if 1
+  //FIXME: gcc complains on this.
+  //domTree = &pass.getAnalysis<DomTreeT>();
+  domTree = CFGTraits::getDominatorTree(pass);
+  if (DEBUGME) {
+    domTree->print(errs(), (const llvm::Module*)0);
+  }
+#endif
+
+  //FIXME: gcc complains on this.
+  //domTree = &pass.getAnalysis<DomTreeT>();
+  postDomTree = CFGTraits::getPostDominatorTree(pass);
+  if (DEBUGME) {
+    postDomTree->print(errs());
+  }
+
+  //FIXME: gcc complains on this.
+  //loopInfo = &pass.getAnalysis<LoopInfoT>();
+  loopInfo = CFGTraits::getLoopInfo(pass);
+  if (DEBUGME) {
+    errs() << "LoopInfo:\n";
+    PrintLoopinfo(*loopInfo, errs());
+  }
+
+  orderBlocks();
+//#define STRESSTEST
+#ifdef STRESSTEST
+  //Use the worse block ordering to test the algorithm.
+  ReverseVector(orderedBlks);
+#endif
+
+  if (DEBUGME) {
+    errs() << "Ordered blocks:\n";
+    printOrderedBlocks(errs());
+  }
+  int numIter = 0;
+  bool finish = false;
+  BlockT *curBlk;
+  bool makeProgress = false;
+  int numRemainedBlk = countActiveBlock(orderedBlks.begin(),
+                                        orderedBlks.end());
+
+  // Outer fixed-point loop: one full sweep over the ordered blocks per
+  // iteration; repeat while the active-block count keeps shrinking.
+  do {
+    ++numIter;
+    if (DEBUGME) {
+      errs() << "numIter = " << numIter
+             << ", numRemaintedBlk = " << numRemainedBlk << "\n";
+    }
+
+    typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
+      iterBlk = orderedBlks.begin();
+    typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
+      iterBlkEnd = orderedBlks.end();
+
+    typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
+      sccBeginIter = iterBlk;
+    BlockT *sccBeginBlk = NULL;
+    int sccNumBlk = 0;  // The number of active blocks, init to a
+                        // maximum possible number.
+    int sccNumIter;     // Number of iteration in this SCC.
+
+    while (iterBlk != iterBlkEnd) {
+      curBlk = *iterBlk;
+
+      // Entering a new SCC: remember where it starts so we can re-run it.
+      if (sccBeginBlk == NULL) {
+        sccBeginIter = iterBlk;
+        sccBeginBlk = curBlk;
+        sccNumIter = 0;
+        sccNumBlk = numRemainedBlk; // Init to maximum possible number.
+        if (DEBUGME) {
+              errs() << "start processing SCC" << getSCCNum(sccBeginBlk);
+              errs() << "\n";
+        }
+      }
+
+      if (!isRetiredBlock(curBlk)) {
+        patternMatch(curBlk);
+      }
+
+      ++iterBlk;
+
+      // Decide whether to repeat the current SCC (it shrank) or move on.
+      bool contNextScc = true;
+      if (iterBlk == iterBlkEnd
+          || getSCCNum(sccBeginBlk) != getSCCNum(*iterBlk)) {
+        // Just finish one scc.
+        ++sccNumIter;
+        int sccRemainedNumBlk = countActiveBlock(sccBeginIter, iterBlk);
+        if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= sccNumBlk) {
+          if (DEBUGME) {
+            errs() << "Can't reduce SCC " << getSCCNum(curBlk)
+                   << ", sccNumIter = " << sccNumIter;
+            errs() << "doesn't make any progress\n";
+          }
+          contNextScc = true;
+        } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < sccNumBlk) {
+          sccNumBlk = sccRemainedNumBlk;
+          iterBlk = sccBeginIter;
+          contNextScc = false;
+          if (DEBUGME) {
+            errs() << "repeat processing SCC" << getSCCNum(curBlk)
+                   << "sccNumIter = " << sccNumIter << "\n";
+            func.viewCFG();
+            //func.viewCFGOnly();
+          }
+        } else {
+          // Finish the current scc.
+          contNextScc = true;
+        }
+      } else {
+        // Continue on next component in the current scc.
+        contNextScc = false;
+      }
+
+      if (contNextScc) {
+        sccBeginBlk = NULL;
+      }
+    } //while, "one iteration" over the function.
+
+    // Fully reduced when the entry block has no successors left.
+    BlockT *entryBlk = FuncGTraits::nodes_begin(&func);
+    if (entryBlk->succ_size() == 0) {
+      finish = true;
+      if (DEBUGME) {
+        errs() << "Reduce to one block\n";
+      }
+    } else {
+      int newnumRemainedBlk
+        = countActiveBlock(orderedBlks.begin(), orderedBlks.end());
+      // consider cloned blocks ??
+      if (newnumRemainedBlk == 1 || newnumRemainedBlk < numRemainedBlk) {
+        makeProgress = true;
+        numRemainedBlk = newnumRemainedBlk;
+      } else {
+        makeProgress = false;
+        if (DEBUGME) {
+          errs() << "No progress\n";
+        }
+      }
+    }
+  } while (!finish && makeProgress);
+
+  // Misc wrap up to maintain the consistency of the Function representation.
+  CFGTraits::wrapup(FuncGTraits::nodes_begin(&func));
+
+  // Detach retired Block, release memory.
+  for (typename BlockInfoMap::iterator iterMap = blockInfoMap.begin(),
+       iterEndMap = blockInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
+    if ((*iterMap).second && (*iterMap).second->isRetired) {
+      assert(((*iterMap).first)->getNumber() != -1);
+      if (DEBUGME) {
+        errs() << "Erase BB" << ((*iterMap).first)->getNumber() << "\n";
+      }
+      (*iterMap).first->eraseFromParent();  //Remove from the parent Function.
+    }
+    delete (*iterMap).second;
+  }
+  blockInfoMap.clear();
+
+  // clear loopLandInfoMap
+  for (typename LoopLandInfoMap::iterator iterMap = loopLandInfoMap.begin(),
+       iterEndMap = loopLandInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
+    delete (*iterMap).second;
+  }
+  loopLandInfoMap.clear();
+
+  if (DEBUGME) {
+    func.viewCFG();
+    //func.dump();
+  }
+
+  if (!finish) {
+    assert(!"IRREDUCIBL_CF");
+  }
+
+  return true;
+} //CFGStructurizer::run
+
+/// Print the ordered Blocks.
+///
+/// Dump the ordered block list to `os`: "BB<num>(<sccNum>,<size>)" per
+/// block, ten entries per output row.
+template<class PassT>
+void CFGStructurizer<PassT>::printOrderedBlocks(llvm::raw_ostream &os) {
+  typedef typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
+    OrderedIter;
+  size_t printed = 0;
+  OrderedIter it = orderedBlks.begin();
+  OrderedIter end = orderedBlks.end();
+  for (; it != end; ++it, ++printed) {
+    os << "BB" << (*it)->getNumber();
+    os << "(" << getSCCNum(*it) << "," << (*it)->size() << ")";
+    // Break the listing into rows of ten.
+    if (printed != 0 && printed % 10 == 0) {
+      os << "\n";
+    } else {
+      os << " ";
+    }
+  }
+} //printOrderedBlocks
+
+/// Compute the reversed DFS post order of Blocks
+///
+/// Compute the reversed DFS post order of Blocks
+///
+// Fills orderedBlks by walking the function's SCCs with scc_iterator and
+// tags every block with its SCC number via recordSccnum().  Afterwards any
+// block still carrying INVALIDSCCNUM was not reached and is reported as
+// unreachable.
+template<class PassT> void CFGStructurizer<PassT>::orderBlocks() {
+  int sccNum = 0;
+  BlockT *bb;
+  for (scc_iterator<FuncT *> sccIter = scc_begin(funcRep),
+       sccEnd = scc_end(funcRep); sccIter != sccEnd; ++sccIter, ++sccNum) {
+    std::vector<BlockT *> &sccNext = *sccIter;
+    for (typename std::vector<BlockT *>::const_iterator
+         blockIter = sccNext.begin(), blockEnd = sccNext.end();
+         blockIter != blockEnd; ++blockIter) {
+      bb = *blockIter;
+      orderedBlks.push_back(bb);
+      recordSccnum(bb, sccNum);
+    }
+  }
+
+  //walk through all the block in func to check for unreachable
+  for (BlockIterator blockIter1 = FuncGTraits::nodes_begin(funcRep),
+       blockEnd1 = FuncGTraits::nodes_end(funcRep);
+       blockIter1 != blockEnd1; ++blockIter1) {
+    BlockT *bb = &(*blockIter1);
+    sccNum = getSCCNum(bb);
+    if (sccNum == INVALIDSCCNUM) {
+      errs() << "unreachable block BB" << bb->getNumber() << "\n";
+    }
+  } //end of for
+} //orderBlocks
+
+/// Repeatedly apply the pattern group to curBlk until a whole pass matches
+/// nothing; returns the total number of matches performed.
+template<class PassT> int CFGStructurizer<PassT>::patternMatch(BlockT *curBlk) {
+  if (DEBUGME) {
+    errs() << "Begin patternMatch BB" << curBlk->getNumber() << "\n";
+  }
+
+  int totalMatch = 0;
+  for (;;) {
+    int matched = patternMatchGroup(curBlk);
+    if (matched <= 0)
+      break;
+    totalMatch += matched;
+  }
+
+  if (DEBUGME) {
+    errs() << "End patternMatch BB" << curBlk->getNumber()
+           << ", numMatch = " << totalMatch << "\n";
+  }
+
+  return totalMatch;
+} //patternMatch
+
+/// Run one round of every structural pattern against curBlk.  The order is
+/// significant: each matcher may rewrite the CFG around curBlk before the
+/// next one runs.  Returns the number of matches made this round.
+template<class PassT>
+int CFGStructurizer<PassT>::patternMatchGroup(BlockT *curBlk) {
+  int matched = 0;
+  matched += serialPatternMatch(curBlk);
+  matched += ifPatternMatch(curBlk);
+  //matched += switchPatternMatch(curBlk);
+  matched += loopendPatternMatch(curBlk);
+  matched += loopPatternMatch(curBlk);
+  return matched;
+}//patternMatchGroup
+
+/// Merge curBlk with its unique successor when that successor has curBlk as
+/// its only predecessor and is not an active loop header.  Returns 1 on a
+/// merge, 0 otherwise.
+template<class PassT>
+int CFGStructurizer<PassT>::serialPatternMatch(BlockT *curBlk) {
+  if (curBlk->succ_size() == 1) {
+    BlockT *succBlk = *curBlk->succ_begin();
+    if (succBlk->pred_size() == 1 && !isActiveLoophead(succBlk)) {
+      mergeSerialBlock(curBlk, succBlk);
+      ++numSerialPatternMatch;
+      return 1;
+    }
+  }
+  return 0;
+} //serialPatternMatch
+
+// Match an if/if-else shape rooted at curBlk (a block with exactly two
+// successors ending in a conditional branch).  Classifies the true/false
+// arms against a common landing block, clones arms with side entries as
+// needed, and merges the diamond via mergeIfthenelseBlock.  Returns 0 on no
+// match, otherwise 1 plus the number of blocks cloned; may delegate to
+// handleJumpintoIf for irregular shapes.
+template<class PassT>
+int CFGStructurizer<PassT>::ifPatternMatch(BlockT *curBlk) {
+  //two edges
+  if (curBlk->succ_size() != 2) {
+    return 0;
+  }
+
+  // A back edge means this is a loop header, not an if.
+  if (hasBackEdge(curBlk)) {
+    return 0;
+  }
+
+  InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(curBlk);
+  if (branchInstr == NULL) {
+    return 0;
+  }
+
+  assert(CFGTraits::isCondBranch(branchInstr));
+
+  BlockT *trueBlk = CFGTraits::getTrueBranch(branchInstr);
+  BlockT *falseBlk = CFGTraits::getFalseBranch(curBlk, branchInstr);
+  BlockT *landBlk;
+  int cloned = 0;
+
+  // TODO: Simplify
+  // Classify the diamond: a NULL trueBlk/falseBlk below means that arm is
+  // empty (the branch goes straight to the land block).
+  if (trueBlk->succ_size() == 1 && falseBlk->succ_size() == 1
+    && *trueBlk->succ_begin() == *falseBlk->succ_begin()) {
+    landBlk = *trueBlk->succ_begin();
+  } else if (trueBlk->succ_size() == 0 && falseBlk->succ_size() == 0) {
+    landBlk = NULL;
+  } else if (trueBlk->succ_size() == 1 && *trueBlk->succ_begin() == falseBlk) {
+    landBlk = falseBlk;
+    falseBlk = NULL;
+  } else if (falseBlk->succ_size() == 1
+             && *falseBlk->succ_begin() == trueBlk) {
+    landBlk = trueBlk;
+    trueBlk = NULL;
+  } else if (falseBlk->succ_size() == 1
+             && isSameloopDetachedContbreak(trueBlk, falseBlk)) {
+    landBlk = *falseBlk->succ_begin();
+  } else if (trueBlk->succ_size() == 1
+    && isSameloopDetachedContbreak(falseBlk, trueBlk)) {
+    landBlk = *trueBlk->succ_begin();
+  } else {
+    return handleJumpintoIf(curBlk, trueBlk, falseBlk);
+  }
+
+  // improveSimpleJumpinfoIf can handle the case where landBlk == NULL but the
+  // new BB created for landBlk==NULL may introduce new challenge to the
+  // reduction process.
+  if (landBlk != NULL &&
+      ((trueBlk && trueBlk->pred_size() > 1)
+      || (falseBlk && falseBlk->pred_size() > 1))) {
+     cloned += improveSimpleJumpintoIf(curBlk, trueBlk, falseBlk, &landBlk);
+  }
+
+  // Arms with extra predecessors get a private clone so the merge is legal.
+  if (trueBlk && trueBlk->pred_size() > 1) {
+    trueBlk = cloneBlockForPredecessor(trueBlk, curBlk);
+    ++cloned;
+  }
+
+  if (falseBlk && falseBlk->pred_size() > 1) {
+    falseBlk = cloneBlockForPredecessor(falseBlk, curBlk);
+    ++cloned;
+  }
+
+  mergeIfthenelseBlock(branchInstr, curBlk, trueBlk, falseBlk, landBlk);
+
+  ++numIfPatternMatch;
+
+  numClonedBlock += cloned;
+
+  return 1 + cloned;
+} //ifPatternMatch
+
+// Switch lowering is not implemented; this stub always reports no match.
+template<class PassT>
+int CFGStructurizer<PassT>::switchPatternMatch(BlockT *curBlk) {
+  return 0;
+} //switchPatternMatch
+
+/// Match loop-end (break/continue) patterns for every loop that contains
+/// curBlk.  Loops are processed outermost-first so a "continue" to an outer
+/// loop is not mistaken for a "break" of the current one.  Returns the total
+/// number of break/continue matches, stopping early if a break match fails.
+template<class PassT>
+int CFGStructurizer<PassT>::loopendPatternMatch(BlockT *curBlk) {
+  // Collect the chain of loops enclosing curBlk, innermost first.
+  typename std::vector<LoopT *> loopChain;
+  for (LoopT *lp = loopInfo->getLoopFor(curBlk); lp; lp = lp->getParentLoop())
+    loopChain.push_back(lp);
+
+  if (loopChain.empty())
+    return 0;
+
+  // Process nested loop outside->inside, so "continue" to a outside loop won't
+  // be mistaken as "break" of the current loop.
+  int matched = 0;
+  typename std::vector<LoopT *>::reverse_iterator it = loopChain.rbegin();
+  typename std::vector<LoopT *>::reverse_iterator itEnd = loopChain.rend();
+  for (; it != itEnd; ++it) {
+    LoopT *lp = *it;
+
+    // Skip loops whose landing block is already recorded.
+    if (getLoopLandBlock(lp) != NULL)
+      continue;
+
+    BlockT *header = lp->getHeader();
+    int numBreak = loopbreakPatternMatch(lp, header);
+    if (numBreak == -1)
+      break;
+
+    matched += numBreak + loopcontPatternMatch(lp, header);
+  }
+
+  return matched;
+} //loopendPatternMatch
+
+/// Match a completed loop: curBlk must have no remaining successors and be
+/// the header of one or more loops whose landing info has been recorded.
+/// Each such loop is merged via mergeLooplandBlock.  Returns the number of
+/// loops merged.
+template<class PassT>
+int CFGStructurizer<PassT>::loopPatternMatch(BlockT *curBlk) {
+  if (curBlk->succ_size() != 0) {
+    return 0;
+  }
+
+  int mergedLoops = 0;
+  // Walk outward through every loop that has curBlk as its header.
+  for (LoopT *lp = loopInfo->getLoopFor(curBlk);
+       lp && lp->getHeader() == curBlk; lp = lp->getParentLoop()) {
+    LoopLandInfo *land = getLoopLandInfo(lp);
+    if (land) {
+      BlockT *landBlk = land->landBlk;
+      assert(landBlk);
+      if (!isRetiredBlock(landBlk)) {
+        mergeLooplandBlock(curBlk, land);
+        ++mergedLoops;
+      }
+    }
+  }
+
+  numLoopPatternMatch += mergedLoops;
+
+  return mergedLoops;
+} //loopPatternMatch
+
+// Match and rewrite the break structure of loopRep.  Computes the loop's
+// exiting/exit blocks, finds (or synthesizes) a single exit-landing block,
+// clones away side entries, records the landing block, and folds each break
+// via handleLoopbreak.  Returns the number of matches made (breaks + serial
+// merges + clones), or -1 when no consistent landing block can be found and
+// the caller must give up on this loop for now.
+template<class PassT>
+int CFGStructurizer<PassT>::loopbreakPatternMatch(LoopT *loopRep,
+                                                  BlockT *loopHeader) {
+  BlockTSmallerVector exitingBlks;
+  loopRep->getExitingBlocks(exitingBlks);
+
+  if (DEBUGME) {
+    errs() << "Loop has " << exitingBlks.size() << " exiting blocks\n";
+  }
+
+  // No exits at all: record an empty landing block and finish.
+  if (exitingBlks.size() == 0) {
+    setLoopLandBlock(loopRep);
+    return 0;
+  }
+
+  // Compute the corresponding exitBlks and exit block set.
+  BlockTSmallerVector exitBlks;
+  std::set<BlockT *> exitBlkSet;
+  for (typename BlockTSmallerVector::const_iterator iter = exitingBlks.begin(),
+       iterEnd = exitingBlks.end(); iter != iterEnd; ++iter) {
+    BlockT *exitingBlk = *iter;
+    BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
+    exitBlks.push_back(exitBlk);
+    exitBlkSet.insert(exitBlk);  //non-duplicate insert
+  }
+
+  assert(exitBlkSet.size() > 0);
+  assert(exitBlks.size() == exitingBlks.size());
+
+  if (DEBUGME) {
+    errs() << "Loop has " << exitBlkSet.size() << " exit blocks\n";
+  }
+
+  // Find exitLandBlk.
+  BlockT *exitLandBlk = NULL;
+  int numCloned = 0;
+  int numSerial = 0;
+
+  if (exitBlkSet.size() == 1)
+  {
+    exitLandBlk = *exitBlkSet.begin();
+  } else {
+    // Multiple exits: try the nearest common post-dominator as the landing
+    // block and verify every exit reaches it along a single path.
+    exitLandBlk = findNearestCommonPostDom(exitBlkSet);
+
+    if (exitLandBlk == NULL) {
+      return -1;
+    }
+
+    bool allInPath = true;
+    bool allNotInPath = true;
+    for (typename std::set<BlockT*>::const_iterator
+         iter = exitBlkSet.begin(),
+         iterEnd = exitBlkSet.end();
+         iter != iterEnd; ++iter) {
+      BlockT *exitBlk = *iter;
+
+      PathToKind pathKind = singlePathTo(exitBlk, exitLandBlk, true);
+      if (DEBUGME) {
+        errs() << "BB" << exitBlk->getNumber()
+               << " to BB" << exitLandBlk->getNumber() << " PathToKind="
+               << pathKind << "\n";
+      }
+
+      allInPath = allInPath && (pathKind == SinglePath_InPath);
+      allNotInPath = allNotInPath && (pathKind == SinglePath_NotInPath);
+
+      // Mixed path kinds cannot be reconciled into one landing block.
+      if (!allInPath && !allNotInPath) {
+        if (DEBUGME) {
+              errs() << "singlePath check fail\n";
+        }
+        return -1;
+      }
+    } // check all exit blocks
+
+    if (allNotInPath) {
+#if 1
+
+      // TODO: Simplify, maybe separate function?
+      //funcRep->viewCFG();
+      // When the post-dominator is the parent loop's header, first try to
+      // relocate the continue block; otherwise synthesize an end-branch
+      // block that dispatches to the individual exits.
+      LoopT *parentLoopRep = loopRep->getParentLoop();
+      BlockT *parentLoopHeader = NULL;
+      if (parentLoopRep)
+        parentLoopHeader = parentLoopRep->getHeader();
+
+      if (exitLandBlk == parentLoopHeader &&
+          (exitLandBlk = relocateLoopcontBlock(parentLoopRep,
+                                               loopRep,
+                                               exitBlkSet,
+                                               exitLandBlk)) != NULL) {
+        if (DEBUGME) {
+          errs() << "relocateLoopcontBlock success\n";
+        }
+      } else if ((exitLandBlk = addLoopEndbranchBlock(loopRep,
+                                                      exitingBlks,
+                                                      exitBlks)) != NULL) {
+        if (DEBUGME) {
+          errs() << "insertEndbranchBlock success\n";
+        }
+      } else {
+        if (DEBUGME) {
+          errs() << "loop exit fail\n";
+        }
+        return -1;
+      }
+#else
+      return -1;
+#endif
+    }
+
+    // Handle side entry to exit path.
+    // Rebuild exitBlks/exitBlkSet with per-exiting-block clones where a
+    // shared exit block has extra predecessors.
+    exitBlks.clear();
+    exitBlkSet.clear();
+    for (typename BlockTSmallerVector::iterator iterExiting =
+           exitingBlks.begin(),
+         iterExitingEnd = exitingBlks.end();
+         iterExiting != iterExitingEnd; ++iterExiting) {
+      BlockT *exitingBlk = *iterExiting;
+      BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
+      BlockT *newExitBlk = exitBlk;
+
+      if (exitBlk != exitLandBlk && exitBlk->pred_size() > 1) {
+        newExitBlk = cloneBlockForPredecessor(exitBlk, exitingBlk);
+        ++numCloned;
+      }
+
+      numCloned += cloneOnSideEntryTo(exitingBlk, newExitBlk, exitLandBlk);
+
+      exitBlks.push_back(newExitBlk);
+      exitBlkSet.insert(newExitBlk);
+    }
+
+    for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
+         iterExitEnd = exitBlks.end();
+         iterExit != iterExitEnd; ++iterExit) {
+      BlockT *exitBlk = *iterExit;
+      numSerial += serialPatternMatch(exitBlk);
+    }
+
+    // Final sanity pass: every exit block must now either be the landing
+    // block or feed it directly with a single successor.
+    for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
+         iterExitEnd = exitBlks.end();
+         iterExit != iterExitEnd; ++iterExit) {
+      BlockT *exitBlk = *iterExit;
+      if (exitBlk->pred_size() > 1) {
+        if (exitBlk != exitLandBlk) {
+          return -1;
+        }
+      } else {
+        if (exitBlk != exitLandBlk &&
+            (exitBlk->succ_size() != 1 ||
+            *exitBlk->succ_begin() != exitLandBlk)) {
+          return -1;
+        }
+      }
+    }
+  } // else
+
+  // LoopT *exitLandLoop = loopInfo->getLoopFor(exitLandBlk);
+  exitLandBlk = recordLoopLandBlock(loopRep, exitLandBlk, exitBlks, exitBlkSet);
+
+  // Fold break into the breaking block. Leverage across level breaks.
+  assert(exitingBlks.size() == exitBlks.size());
+  for (typename BlockTSmallerVector::const_iterator iterExit = exitBlks.begin(),
+       iterExiting = exitingBlks.begin(), iterExitEnd = exitBlks.end();
+       iterExit != iterExitEnd; ++iterExit, ++iterExiting) {
+    BlockT *exitBlk = *iterExit;
+    BlockT *exitingBlk = *iterExiting;
+    assert(exitBlk->pred_size() == 1 || exitBlk == exitLandBlk);
+    LoopT *exitingLoop = loopInfo->getLoopFor(exitingBlk);
+    handleLoopbreak(exitingBlk, exitingLoop, exitBlk, loopRep, exitLandBlk);
+  }
+
+  int numBreak = static_cast<int>(exitingBlks.size());
+  numLoopbreakPatternMatch += numBreak;
+  numClonedBlock += numCloned;
+  return numBreak + numSerial + numCloned;
+} //loopbreakPatternMatch
+
+/// Match "continue" edges of the loop: every in-loop predecessor of the
+/// header is a continue site.  Each is rewritten via handleLoopcontBlock and
+/// then detached from the header.  Returns the number of continues handled.
+template<class PassT>
+int CFGStructurizer<PassT>::loopcontPatternMatch(LoopT *loopRep,
+                                                 BlockT *loopHeader) {
+  SmallVector<BlockT *, DEFAULT_VEC_SLOTS> contBlks;
+
+  // Collect the header's predecessors that lie inside the loop (back edges).
+  typename InvBlockGTraits::ChildIteratorType predIter =
+    InvBlockGTraits::child_begin(loopHeader);
+  typename InvBlockGTraits::ChildIteratorType predEnd =
+    InvBlockGTraits::child_end(loopHeader);
+  for (; predIter != predEnd; ++predIter) {
+    BlockT *predBlk = *predIter;
+    if (loopRep->contains(predBlk)) {
+      handleLoopcontBlock(predBlk, loopInfo->getLoopFor(predBlk),
+                          loopHeader, loopRep);
+      contBlks.push_back(predBlk);
+    }
+  }
+
+  // Detach the recorded continue blocks; their back edges are now expressed
+  // as CONTINUE semantics instead of CFG edges.
+  for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator
+       it = contBlks.begin(), itEnd = contBlks.end();
+       it != itEnd; ++it) {
+    (*it)->removeSuccessor(loopHeader);
+  }
+
+  int numCont = static_cast<int>(contBlks.size());
+  numLoopcontPatternMatch += numCont;
+
+  return numCont;
+} //loopcontPatternMatch
+
+
+// Return true iff src1Blk has no successors and src1Blk/src2Blk belong to the
+// same loop, and that loop already has a LoopLandInfo record.  Without
+// explicitly tracking loopContBlks/loopBreakBlks, this identifies a detached
+// continue/break block pair.
+//
+// Fix: the original queried `loopLandInfoMap[loopRep]` by reference, which
+// silently default-inserts a NULL entry into the map for every loop probed —
+// a mutation inside a pure predicate.  Use find() so the query has no side
+// effect (the NULL entries were harmless to delete later, but they bloat the
+// map and make the cleanup loops iterate dead keys).
+template<class PassT>
+bool CFGStructurizer<PassT>::isSameloopDetachedContbreak(BlockT *src1Blk,
+                                                         BlockT *src2Blk) {
+  if (src1Blk->succ_size() != 0) {
+    return false;
+  }
+
+  LoopT *loopRep = loopInfo->getLoopFor(src1Blk);
+  if (loopRep == NULL || loopRep != loopInfo->getLoopFor(src2Blk)) {
+    return false;
+  }
+
+  typename LoopLandInfoMap::const_iterator it = loopLandInfoMap.find(loopRep);
+  if (it == loopLandInfoMap.end() || it->second == NULL) {
+    return false;
+  }
+
+  if (DEBUGME) {
+    errs() << "isLoopContBreakBlock yes src1 = BB"
+           << src1Blk->getNumber()
+           << " src2 = BB" << src2Blk->getNumber() << "\n";
+  }
+  return true;
+}  //isSameloopDetachedContbreak
+
+/// Handle a branch that jumps into the middle of an if region.  Tries the
+/// pattern as given; if nothing matched, retries with the true/false arms
+/// swapped.  Returns the number of matches made.
+template<class PassT>
+int CFGStructurizer<PassT>::handleJumpintoIf(BlockT *headBlk,
+                                             BlockT *trueBlk,
+                                             BlockT *falseBlk) {
+  int matched = handleJumpintoIfImp(headBlk, trueBlk, falseBlk);
+  if (matched != 0)
+    return matched;
+
+  if (DEBUGME) {
+    errs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
+  }
+  return handleJumpintoIfImp(headBlk, falseBlk, trueBlk);
+}
+
+// Implementation of handleJumpintoIf for one arm ordering.  Walks down the
+// single-successor chain from trueBlk looking for a block that falseBlk
+// reaches by a single in-path route; when found, clones the side entries of
+// both arms up to that block and re-runs serial/if matching on headBlk.
+// Returns the number of matches made (0 if no suitable down-block exists).
+template<class PassT>
+int CFGStructurizer<PassT>::handleJumpintoIfImp(BlockT *headBlk,
+                                                BlockT *trueBlk,
+                                                BlockT *falseBlk) {
+  int num = 0;
+  BlockT *downBlk;
+
+  //trueBlk could be the common post dominator
+  downBlk = trueBlk;
+
+  if (DEBUGME) {
+    errs() << "handleJumpintoIfImp head = BB" << headBlk->getNumber()
+           << " true = BB" << trueBlk->getNumber()
+           << ", numSucc=" << trueBlk->succ_size()
+           << " false = BB" << falseBlk->getNumber() << "\n";
+  }
+
+  while (downBlk) {
+    if (DEBUGME) {
+      errs() << "check down = BB" << downBlk->getNumber();
+    }
+
+    if (//postDomTree->dominates(downBlk, falseBlk) &&
+        singlePathTo(falseBlk, downBlk) == SinglePath_InPath) {
+      if (DEBUGME) {
+        errs() << " working\n";
+      }
+
+      // Clone away side entries so downBlk becomes a clean join point.
+      num += cloneOnSideEntryTo(headBlk, trueBlk, downBlk);
+      num += cloneOnSideEntryTo(headBlk, falseBlk, downBlk);
+
+      numClonedBlock += num;
+      num += serialPatternMatch(*headBlk->succ_begin());
+      num += serialPatternMatch(*(++headBlk->succ_begin()));
+      num += ifPatternMatch(headBlk);
+      assert(num > 0); //
+
+      break;
+    }
+    if (DEBUGME) {
+      errs() << " not working\n";
+    }
+    // Follow the unique successor chain; stop at a fork or dead end.
+    downBlk = (downBlk->succ_size() == 1) ? (*downBlk->succ_begin()) : NULL;
+  } // walk down the postDomTree
+
+  return num;
+} //handleJumpintoIf
+
+// showImproveSimpleJumpintoIf - Debug dump of the blocks involved in a
+// jump-into-if pattern (head, true, false, land).  With detail set, full
+// block contents are printed as well.  Any of the non-head blocks may be NULL.
+template<class PassT>
+void CFGStructurizer<PassT>::showImproveSimpleJumpintoIf(BlockT *headBlk,
+                                                         BlockT *trueBlk,
+                                                         BlockT *falseBlk,
+                                                         BlockT *landBlk,
+                                                         bool detail) {
+  errs() << "head = BB" << headBlk->getNumber()
+         << " size = " << headBlk->size();
+  if (detail) {
+    errs() << "\n";
+    headBlk->print(errs());
+    errs() << "\n";
+  }
+
+  if (trueBlk) {
+    errs() << ", true = BB" << trueBlk->getNumber() << " size = "
+           << trueBlk->size() << " numPred = " << trueBlk->pred_size();
+    if (detail) {
+      errs() << "\n";
+      trueBlk->print(errs());
+      errs() << "\n";
+    }
+  }
+  if (falseBlk) {
+    errs() << ", false = BB" << falseBlk->getNumber() << " size = "
+           << falseBlk->size() << " numPred = " << falseBlk->pred_size();
+    if (detail) {
+      errs() << "\n";
+      falseBlk->print(errs());
+      errs() << "\n";
+    }
+  }
+  if (landBlk) {
+    errs() << ", land = BB" << landBlk->getNumber() << " size = "
+           << landBlk->size() << " numPred = " << landBlk->pred_size();
+    if (detail) {
+      errs() << "\n";
+      landBlk->print(errs());
+      errs() << "\n";
+    }
+  }
+
+  errs() << "\n";
+} //showImproveSimpleJumpintoIf
+
+// improveSimpleJumpintoIf - Rewrite a "jump into if" region so that trueBlk /
+// falseBlk reach a common land block.  A fresh initReg virtual register
+// records which side was taken (1 = true, 0 = false, 2 = other predecessor);
+// migrated bodies are re-dispatched inside the land block on initReg.
+// *plandBlk is updated (a land block is created when none exists).  Returns
+// the number of newly created blocks (0 or 1).
+template<class PassT>
+int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk,
+                                                    BlockT *trueBlk,
+                                                    BlockT *falseBlk,
+                                                    BlockT **plandBlk) {
+  bool migrateTrue = false;
+  bool migrateFalse = false;
+
+  BlockT *landBlk = *plandBlk;
+
+  assert((trueBlk == NULL || trueBlk->succ_size() <= 1)
+         && (falseBlk == NULL || falseBlk->succ_size() <= 1));
+
+  if (trueBlk == falseBlk) {
+    return 0;
+  }
+
+#if 0
+  if (DEBUGME) {
+    errs() << "improveSimpleJumpintoIf: ";
+    showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
+  }
+#endif
+
+  // unsigned landPredSize = landBlk ? landBlk->pred_size() : 0;
+  // May consider the # landBlk->pred_size() as it represents the number of
+  // assignment initReg = .. needed to insert.
+  migrateTrue = needMigrateBlock(trueBlk);
+  migrateFalse = needMigrateBlock(falseBlk);
+
+  if (!migrateTrue && !migrateFalse) {
+    return 0;
+  }
+
+  // If we need to migrate either trueBlk and falseBlk, migrate the rest that
+  // have more than one predecessors.  without doing this, its predecessor
+  // rather than headBlk will have undefined value in initReg.
+  if (!migrateTrue && trueBlk && trueBlk->pred_size() > 1) {
+    migrateTrue = true;
+  }
+  if (!migrateFalse && falseBlk && falseBlk->pred_size() > 1) {
+    migrateFalse = true;
+  }
+
+  if (DEBUGME) {
+    errs() << "before improveSimpleJumpintoIf: ";
+    showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
+    //showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 1);
+  }
+
+  // org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
+  //
+  // new: headBlk => if () {initReg = 1; org trueBlk branch} else
+  //      {initReg = 0; org falseBlk branch }
+  //      => landBlk => if (initReg) {org trueBlk} else {org falseBlk}
+  //      => org landBlk
+  //      if landBlk->pred_size() > 2, put the about if-else inside
+  //      if (initReg !=2) {...}
+  //
+  // add initReg = initVal to headBlk
+
+  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
+  unsigned initReg =
+    funcRep->getRegInfo().createVirtualRegister(I32RC);
+  if (!migrateTrue || !migrateFalse) {
+    // Only one side is migrated: seed initReg in headBlk with the value of
+    // the non-migrated side so the dispatch in the land block still works.
+    int initVal = migrateTrue ? 0 : 1;
+    CFGTraits::insertAssignInstrBefore(headBlk, passRep, initReg, initVal);
+  }
+
+  int numNewBlk = 0;
+
+  if (landBlk == NULL) {
+    landBlk = funcRep->CreateMachineBasicBlock();
+    funcRep->push_back(landBlk);  //insert to function
+
+    if (trueBlk) {
+      trueBlk->addSuccessor(landBlk);
+    } else {
+      headBlk->addSuccessor(landBlk);
+    }
+
+    if (falseBlk) {
+      falseBlk->addSuccessor(landBlk);
+    } else {
+      headBlk->addSuccessor(landBlk);
+    }
+
+    numNewBlk ++;
+  }
+
+  bool landBlkHasOtherPred = (landBlk->pred_size() > 2);
+
+  //insert AMDIL::ENDIF to avoid special case "input landBlk == NULL"
+  typename BlockT::iterator insertPos =
+    CFGTraits::getInstrPos
+    (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDIL::ENDIF, passRep));
+
+  if (landBlkHasOtherPred) {
+    // landBlk has predecessors besides trueBlk/falseBlk: guard the dispatch
+    // with "if (initReg != 2)" so those paths (which set initReg = 2 below)
+    // skip it.
+    unsigned immReg =
+      funcRep->getRegInfo().createVirtualRegister(I32RC);
+    CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 2);
+    unsigned cmpResReg =
+      funcRep->getRegInfo().createVirtualRegister(I32RC);
+
+    CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg,
+                                        initReg, immReg);
+    CFGTraits::insertCondBranchBefore(landBlk, insertPos,
+                                      AMDIL::IF_LOGICALZ_i32, passRep,
+                                      cmpResReg, DebugLoc());
+  }
+
+  CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDIL::IF_LOGICALNZ_i32,
+                                    passRep, initReg, DebugLoc());
+
+  if (migrateTrue) {
+    migrateInstruction(trueBlk, landBlk, insertPos);
+    // need to unconditionally insert the assignment to ensure a path from its
+    // predecessor rather than headBlk has valid value in initReg if
+    // (initVal != 1).
+    CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1);
+  }
+  CFGTraits::insertInstrBefore(insertPos, AMDIL::ELSE, passRep);
+
+  if (migrateFalse) {
+    migrateInstruction(falseBlk, landBlk, insertPos);
+    // need to unconditionally insert the assignment to ensure a path from its
+    // predecessor rather than headBlk has valid value in initReg if
+    // (initVal != 0)
+    CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0);
+  }
+  //CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep);
+
+  if (landBlkHasOtherPred) {
+    // add endif
+    CFGTraits::insertInstrBefore(insertPos, AMDIL::ENDIF, passRep);
+
+    // put initReg = 2 to other predecessors of landBlk
+    for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
+         predIterEnd = landBlk->pred_end(); predIter != predIterEnd;
+         ++predIter) {
+      BlockT *curBlk = *predIter;
+      if (curBlk != trueBlk && curBlk != falseBlk) {
+        CFGTraits::insertAssignInstrBefore(curBlk, passRep, initReg, 2);
+      }
+    } //for
+  }
+  if (DEBUGME) {
+    errs() << "result from improveSimpleJumpintoIf: ";
+    showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
+    //showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 1);
+  }
+
+  // update landBlk
+  *plandBlk = landBlk;
+
+  return numNewBlk;
+} //improveSimpleJumpintoIf
+
+// handleLoopbreak - Handle a break from exitingBlk (inside exitingLoop) to
+// exitBlk (outside exitLoop), landing at landBlk.  When the break crosses
+// nested loop levels, a break register is allocated: it is initialized for
+// exitLoop and every inner loop re-breaks on it.  The blocks themselves are
+// rewritten by mergeLoopbreakBlock.
+template<class PassT>
+void CFGStructurizer<PassT>::handleLoopbreak(BlockT *exitingBlk,
+                                              LoopT *exitingLoop,
+                                             BlockT *exitBlk,
+                                              LoopT *exitLoop,
+                                             BlockT *landBlk) {
+  if (DEBUGME) {
+    errs() << "Trying to break loop-depth = " << getLoopDepth(exitLoop)
+           << " from loop-depth = " << getLoopDepth(exitingLoop) << "\n";
+  }
+  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
+
+  RegiT initReg = INVALIDREGNUM;
+  if (exitingLoop != exitLoop) {
+    initReg = static_cast<int>
+      (funcRep->getRegInfo().createVirtualRegister(I32RC));
+    assert(initReg != INVALIDREGNUM);
+    addLoopBreakInitReg(exitLoop, initReg);
+    while (exitingLoop != exitLoop && exitingLoop) {
+      // every loop between the exiting block and the target loop re-breaks
+      // when initReg is set
+      addLoopBreakOnReg(exitingLoop, initReg);
+      exitingLoop = exitingLoop->getParentLoop();
+    }
+    assert(exitingLoop == exitLoop);
+  }
+
+  mergeLoopbreakBlock(exitingBlk, exitBlk, landBlk, initReg);
+
+} //handleLoopbreak
+
+// handleLoopcontBlock - Handle a continue from contingBlk (inside contingLoop)
+// to contBlk, the header of contLoop.  When the continue crosses loop levels,
+// a continue register is allocated: loops strictly inside contLoop break on
+// it, and the loop immediately inside contLoop continues on it.  The block is
+// then rewritten by settleLoopcontBlock.
+template<class PassT>
+void CFGStructurizer<PassT>::handleLoopcontBlock(BlockT *contingBlk,
+                                                  LoopT *contingLoop,
+                                                 BlockT *contBlk,
+                                                  LoopT *contLoop) {
+  if (DEBUGME) {
+    errs() << "loopcontPattern cont = BB" << contingBlk->getNumber()
+           << " header = BB" << contBlk->getNumber() << "\n";
+
+    errs() << "Trying to continue loop-depth = "
+           << getLoopDepth(contLoop)
+           << " from loop-depth = " << getLoopDepth(contingLoop) << "\n";
+  }
+
+  RegiT initReg = INVALIDREGNUM;
+  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
+  if (contingLoop != contLoop) {
+    initReg = static_cast<int>
+      (funcRep->getRegInfo().createVirtualRegister(I32RC));
+    assert(initReg != INVALIDREGNUM);
+    addLoopContInitReg(contLoop, initReg);
+    while (contingLoop && contingLoop->getParentLoop() != contLoop) {
+      addLoopBreakOnReg(contingLoop, initReg);  //not addLoopContOnReg
+      contingLoop = contingLoop->getParentLoop();
+    }
+    assert(contingLoop && contingLoop->getParentLoop() == contLoop);
+    addLoopContOnReg(contingLoop, initReg);
+  }
+
+  settleLoopcontBlock(contingBlk, contBlk, initReg);
+  //contingBlk->removeSuccessor(loopHeader);
+} //handleLoopcontBlock
+
+// mergeSerialBlock - Fold srcBlk into dstBlk for the serial pattern
+// dstBlk -> srcBlk: move srcBlk's instructions to the end of dstBlk,
+// transfer srcBlk's successor edges to dstBlk, then retire srcBlk.
+template<class PassT>
+void CFGStructurizer<PassT>::mergeSerialBlock(BlockT *dstBlk, BlockT *srcBlk) {
+  if (DEBUGME) {
+    errs() << "serialPattern BB" << dstBlk->getNumber()
+           << " <= BB" << srcBlk->getNumber() << "\n";
+  }
+  //removeUnconditionalBranch(dstBlk);
+  dstBlk->splice(dstBlk->end(), srcBlk, FirstNonDebugInstr(srcBlk), srcBlk->end());
+
+  dstBlk->removeSuccessor(srcBlk);
+  CFGTraits::cloneSuccessorList(dstBlk, srcBlk);
+
+  removeSuccessor(srcBlk);
+  retireBlock(dstBlk, srcBlk);
+} //mergeSerialBlock
+
+// mergeIfthenelseBlock - Collapse an if-then-else pattern rooted at curBlk
+// (cond branch branchInstr; arms trueBlk/falseBlk; merge point landBlk) into
+// curBlk as structured IF / ELSE / ENDIF code.  trueBlk, falseBlk and landBlk
+// may each be NULL when the corresponding part of the pattern is absent.
+template<class PassT>
+void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
+                                                  BlockT *curBlk,
+                                                  BlockT *trueBlk,
+                                                  BlockT *falseBlk,
+                                                  BlockT *landBlk) {
+  if (DEBUGME) {
+    errs() << "ifPattern BB" << curBlk->getNumber();
+    errs() << "{  ";
+    if (trueBlk) {
+      errs() << "BB" << trueBlk->getNumber();
+    }
+    errs() << "  } else ";
+    errs() << "{  ";
+    if (falseBlk) {
+      errs() << "BB" << falseBlk->getNumber();
+    }
+    errs() << "  }\n ";
+    errs() << "landBlock: ";
+    if (landBlk == NULL) {
+      errs() << "NULL";
+    } else {
+      errs() << "BB" << landBlk->getNumber();
+    }
+    errs() << "\n";
+  }
+
+  int oldOpcode = branchInstr->getOpcode();
+  DebugLoc branchDL = branchInstr->getDebugLoc();
+
+//    transform to
+//    if cond
+//       trueBlk
+//    else
+//       falseBlk
+//    endif
+//    landBlk
+
+  typename BlockT::iterator branchInstrPos =
+    CFGTraits::getInstrPos(curBlk, branchInstr);
+  CFGTraits::insertCondBranchBefore(branchInstrPos,
+                                    CFGTraits::getBranchNzeroOpcode(oldOpcode),
+                                    passRep,
+                                    branchDL);
+
+  if (trueBlk) {
+    curBlk->splice(branchInstrPos, trueBlk, FirstNonDebugInstr(trueBlk), trueBlk->end());
+    curBlk->removeSuccessor(trueBlk);
+    if (landBlk && trueBlk->succ_size() != 0) {
+      trueBlk->removeSuccessor(landBlk);
+    }
+    retireBlock(curBlk, trueBlk);
+  }
+  CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ELSE, passRep);
+
+  if (falseBlk) {
+    curBlk->splice(branchInstrPos, falseBlk, FirstNonDebugInstr(falseBlk),
+                   falseBlk->end());
+    curBlk->removeSuccessor(falseBlk);
+    if (landBlk && falseBlk->succ_size() != 0) {
+      falseBlk->removeSuccessor(landBlk);
+    }
+    retireBlock(curBlk, falseBlk);
+  }
+  CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep);
+
+  //curBlk->remove(branchInstrPos);
+  branchInstr->eraseFromParent();
+
+  if (landBlk && trueBlk && falseBlk) {
+    curBlk->addSuccessor(landBlk);
+  }
+
+} //mergeIfthenelseBlock
+
+// mergeLooplandBlock - Collapse a structured loop into dstBlk: wrap the loop
+// body in WHILELOOP/ENDLOOP, materialize the break/continue control registers
+// recorded in loopLand, then absorb the land block into dstBlk and retire it.
+template<class PassT>
+void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk,
+                                                LoopLandInfo *loopLand) {
+  BlockT *landBlk = loopLand->landBlk;
+
+  if (DEBUGME) {
+    errs() << "loopPattern header = BB" << dstBlk->getNumber()
+           << " land = BB" << landBlk->getNumber() << "\n";
+  }
+
+  // Loop contInitRegs are init at the beginning of the loop.
+  for (typename std::set<RegiT>::const_iterator iter =
+         loopLand->contInitRegs.begin(),
+       iterEnd = loopLand->contInitRegs.end(); iter != iterEnd; ++iter) {
+    CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
+  }
+
+  /* we last inserterd the DebugLoc in the
+   * BREAK_LOGICALZ_i32 or AMDIL::BREAK_LOGICALNZ statement in the current dstBlk.
+   * search for the DebugLoc in the that statement.
+   * if not found, we have to insert the empty/default DebugLoc */
+  InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk);
+  DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc();
+
+  CFGTraits::insertInstrBefore(dstBlk, AMDIL::WHILELOOP, passRep, DLBreak);
+  // Loop breakInitRegs are init before entering the loop.
+  for (typename std::set<RegiT>::const_iterator iter =
+         loopLand->breakInitRegs.begin(),
+       iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter) {
+    CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
+  }
+  // Loop endbranchInitRegs are init before entering the loop.
+  for (typename std::set<RegiT>::const_iterator iter =
+         loopLand->endbranchInitRegs.begin(),
+       iterEnd = loopLand->endbranchInitRegs.end(); iter != iterEnd; ++iter) {
+    CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
+  }
+
+  /* we last inserterd the DebugLoc in the continue statement in the current dstBlk
+   * search for the DebugLoc in the continue statement.
+   * if not found, we have to insert the empty/default DebugLoc */
+  InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk);
+  DebugLoc DLContinue = (continueInstr) ? continueInstr->getDebugLoc() : DebugLoc();
+
+  CFGTraits::insertInstrEnd(dstBlk, AMDIL::ENDLOOP, passRep, DLContinue);
+  // Loop breakOnRegs are check after the ENDLOOP: break the loop outside this
+  // loop.
+  for (typename std::set<RegiT>::const_iterator iter =
+         loopLand->breakOnRegs.begin(),
+       iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) {
+    CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::BREAK_LOGICALNZ_i32, passRep,
+                                   *iter);
+  }
+
+  // Loop contOnRegs are check after the ENDLOOP: cont the loop outside this
+  // loop.  ('typename' added for consistency with the loops above; required
+  // by conforming compilers for this dependent iterator type.)
+  for (typename std::set<RegiT>::const_iterator iter =
+         loopLand->contOnRegs.begin(),
+       iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) {
+    CFGTraits::insertCondBranchEnd(dstBlk, AMDIL::CONTINUE_LOGICALNZ_i32,
+                                   passRep, *iter);
+  }
+
+  // Absorb the land block's instructions and successor edges, then retire it.
+  dstBlk->splice(dstBlk->end(), landBlk, landBlk->begin(), landBlk->end());
+
+  for (typename BlockT::succ_iterator iter = landBlk->succ_begin(),
+       iterEnd = landBlk->succ_end(); iter != iterEnd; ++iter) {
+    dstBlk->addSuccessor(*iter);  // *iter's predecessor is also taken care of.
+  }
+
+  removeSuccessor(landBlk);
+  retireBlock(dstBlk, landBlk);
+} //mergeLooplandBlock
+
+// mergeLoopbreakBlock - Rewrite exitingBlk (a loop-exiting block branching to
+// exitBlk, which single-path-reaches exitLandBlk) into structured break code:
+// either a single BREAK_LOGICAL* conditional break, or an if-guarded region
+// that inlines exitBlk, optionally sets setReg = 1 (for multi-level breaks),
+// and then BREAKs.
+template<class PassT>
+void CFGStructurizer<PassT>::mergeLoopbreakBlock(BlockT *exitingBlk,
+                                                 BlockT *exitBlk,
+                                                 BlockT *exitLandBlk,
+                                                 RegiT  setReg) {
+  if (DEBUGME) {
+    errs() << "loopbreakPattern exiting = BB" << exitingBlk->getNumber()
+           << " exit = BB" << exitBlk->getNumber()
+           << " land = BB" << exitLandBlk->getNumber() << "\n";
+  }
+
+  InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(exitingBlk);
+  assert(branchInstr && CFGTraits::isCondBranch(branchInstr));
+
+  DebugLoc DL = branchInstr->getDebugLoc();
+
+  BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
+  int oldOpcode = branchInstr->getOpcode();
+
+  //    transform exitingBlk to
+  //    if ( ) {
+  //       exitBlk (if exitBlk != exitLandBlk)
+  //       setReg = 1
+  //       break
+  //    }endif
+  //    successor = {orgSuccessor(exitingBlk) - exitBlk}
+
+  typename BlockT::iterator branchInstrPos =
+    CFGTraits::getInstrPos(exitingBlk, branchInstr);
+
+  if (exitBlk == exitLandBlk && setReg == INVALIDREGNUM) {
+    //break_logical
+    // Simple case: nothing to inline and no break register needed, so one
+    // conditional BREAK_LOGICAL(N)Z instruction suffices.
+    int newOpcode =
+    (trueBranch == exitBlk) ? CFGTraits::getBreakNzeroOpcode(oldOpcode)
+                            : CFGTraits::getBreakZeroOpcode(oldOpcode);
+    CFGTraits::insertCondBranchBefore(branchInstrPos, newOpcode, passRep, DL);
+  } else {
+    int newOpcode =
+    (trueBranch == exitBlk) ? CFGTraits::getBranchNzeroOpcode(oldOpcode)
+                            : CFGTraits::getBranchZeroOpcode(oldOpcode);
+    CFGTraits::insertCondBranchBefore(branchInstrPos, newOpcode, passRep, DL);
+    if (exitBlk != exitLandBlk) {
+      //splice is insert-before ...
+      exitingBlk->splice(branchInstrPos, exitBlk, exitBlk->begin(),
+                         exitBlk->end());
+    }
+    if (setReg != INVALIDREGNUM) {
+      CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
+    }
+    CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::BREAK, passRep);
+    CFGTraits::insertInstrBefore(branchInstrPos, AMDIL::ENDIF, passRep);
+  } //if_logical
+
+  //now branchInst can be erase safely
+  //exitingBlk->eraseFromParent(branchInstr);
+  branchInstr->eraseFromParent();
+
+  //now take care of successors, retire blocks
+  exitingBlk->removeSuccessor(exitBlk);
+  if (exitBlk != exitLandBlk) {
+    //splice is insert-before ...
+    exitBlk->removeSuccessor(exitLandBlk);
+    retireBlock(exitingBlk, exitBlk);
+  }
+
+} //mergeLoopbreakBlock
+
+// settleLoopcontBlock - Rewrite the continue block contingBlk of a loop whose
+// header-side block is contBlk.  The conditional branch to contBlk is turned
+// into structured CONTINUE / BREAK control flow; when setReg is valid the
+// block instead sets setReg = 1 and BREAKs so an outer loop level can
+// re-dispatch on it.
+template<class PassT>
+void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk,
+                                                 BlockT *contBlk,
+                                                 RegiT   setReg) {
+  if (DEBUGME) {
+    errs() << "settleLoopcontBlock conting = BB"
+           << contingBlk->getNumber()
+           << ", cont = BB" << contBlk->getNumber() << "\n";
+  }
+
+  InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(contingBlk);
+  if (branchInstr) {
+    assert(CFGTraits::isCondBranch(branchInstr));
+    typename BlockT::iterator branchInstrPos =
+      CFGTraits::getInstrPos(contingBlk, branchInstr);
+    BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
+    int oldOpcode = branchInstr->getOpcode();
+    DebugLoc DL = branchInstr->getDebugLoc();
+
+    //    transform contingBlk to
+    //     if () {
+    //          move instr after branchInstr
+    //          continue
+    //        or
+    //          setReg = 1
+    //          break
+    //     }endif
+    //     successor = {orgSuccessor(contingBlk) - loopHeader}
+
+    // CONTINUE_LOGICAL* can be used directly only when no setReg assignment
+    // is needed and the branch is the last instruction of the block.
+    bool useContinueLogical =
+      (setReg == INVALIDREGNUM && (&*contingBlk->rbegin()) == branchInstr);
+
+    if (!useContinueLogical) {
+      int branchOpcode =
+        trueBranch == contBlk ? CFGTraits::getBranchNzeroOpcode(oldOpcode)
+                              : CFGTraits::getBranchZeroOpcode(oldOpcode);
+
+      CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
+
+      if (setReg != INVALIDREGNUM) {
+        CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
+        // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
+        CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, DL);
+      } else {
+        // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
+        CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, DL);
+      }
+
+      CFGTraits::insertInstrEnd(contingBlk, AMDIL::ENDIF, passRep, DL);
+    } else {
+      int branchOpcode =
+        trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode)
+                              : CFGTraits::getContinueZeroOpcode(oldOpcode);
+
+      CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
+    }
+
+    //contingBlk->eraseFromParent(branchInstr);
+    branchInstr->eraseFromParent();
+  } else {
+    // The branch instruction was already erased.  Reuse the last DebugLoc
+    // still present in the block as a representative source location.
+    if (setReg != INVALIDREGNUM) {
+      CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1);
+      // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
+      CFGTraits::insertInstrEnd(contingBlk, AMDIL::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
+    } else {
+      // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
+      CFGTraits::insertInstrEnd(contingBlk, AMDIL::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
+    }
+  } //else
+
+} //settleLoopcontBlock
+
+// BBs in exitBlkSet are determined as in break-path for loopRep,
+// before we can put code for BBs as inside loop-body for loopRep
+// check whether those BBs are determined as cont-BB for parentLoopRep
+// earlier.
+// If so, generate a new BB newBlk
+//    (1) set newBlk common successor of BBs in exitBlkSet
+//    (2) change the continue-instr in BBs in exitBlkSet to break-instr
+//    (3) generate continue-instr in newBlk
+//
+// Returns the new continue block, or NULL when some exit path does not end
+// in a continue-instr (relocation is not applicable).
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::relocateLoopcontBlock(LoopT *parentLoopRep,
+                                              LoopT *loopRep,
+                                              std::set<BlockT *> &exitBlkSet,
+                                              BlockT *exitLandBlk) {
+  std::set<BlockT *> endBlkSet;
+
+//  BlockT *parentLoopHead = parentLoopRep->getHeader();
+
+
+  // Collect the single-path end block of every exit block; bail out unless
+  // each one terminates in a continue-instr.
+  for (typename std::set<BlockT *>::const_iterator iter = exitBlkSet.begin(),
+       iterEnd = exitBlkSet.end();
+       iter != iterEnd; ++iter) {
+    BlockT *exitBlk = *iter;
+    BlockT *endBlk = singlePathEnd(exitBlk, exitLandBlk);
+
+    if (endBlk == NULL || CFGTraits::getContinueInstr(endBlk) == NULL)
+      return NULL;
+
+    endBlkSet.insert(endBlk);
+  }
+
+  BlockT *newBlk = funcRep->CreateMachineBasicBlock();
+  funcRep->push_back(newBlk);  //insert to function
+  CFGTraits::insertInstrEnd(newBlk, AMDIL::CONTINUE, passRep);
+  SHOWNEWBLK(newBlk, "New continue block: ");
+
+  // Replace each end block's continue-instr with an edge to newBlk, which
+  // now holds the single continue for the relocated paths.
+  for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(),
+       iterEnd = endBlkSet.end();
+       iter != iterEnd; ++iter) {
+      BlockT *endBlk = *iter;
+      InstrT *contInstr = CFGTraits::getContinueInstr(endBlk);
+      if (contInstr) {
+        contInstr->eraseFromParent();
+      }
+      endBlk->addSuccessor(newBlk);
+      if (DEBUGME) {
+        errs() << "Add new continue Block to BB"
+               << endBlk->getNumber() << " successors\n";
+      }
+  }
+
+  return newBlk;
+} //relocateLoopcontBlock
+
+
+// LoopEndbranchBlock is a BB created by the CFGStructurizer to use as
+// LoopLandBlock. This BB branch on the loop endBranchInit register to the
+// pathes corresponding to the loop exiting branches.
+
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep,
+                                              BlockTSmallerVector &exitingBlks,
+                                              BlockTSmallerVector &exitBlks) {
+  const AMDILInstrInfo *tii =
+             static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
+
+  RegiT endBranchReg = static_cast<int>
+    (funcRep->getRegInfo().createVirtualRegister(I32RC));
+  assert(endBranchReg >= 0);
+
+  // reg = 0 before entering the loop
+  addLoopEndbranchInitReg(loopRep, endBranchReg);
+
+  uint32_t numBlks = static_cast<uint32_t>(exitingBlks.size());
+  assert(numBlks >=2 && numBlks == exitBlks.size());
+
+  BlockT *preExitingBlk = exitingBlks[0];
+  BlockT *preExitBlk = exitBlks[0];
+  BlockT *preBranchBlk = funcRep->CreateMachineBasicBlock();
+  funcRep->push_back(preBranchBlk);  //insert to function
+  SHOWNEWBLK(preBranchBlk, "New loopEndbranch block: ");
+
+  BlockT *newLandBlk = preBranchBlk;
+
+      CFGTraits::replaceInstrUseOfBlockWith(preExitingBlk, preExitBlk,
+        newLandBlk);
+  preExitingBlk->removeSuccessor(preExitBlk);
+  preExitingBlk->addSuccessor(newLandBlk);
+
+  //it is redundant to add reg = 0 to exitingBlks[0]
+
+  // For 1..n th exiting path (the last iteration handles two pathes) create the
+  // branch to the previous path and the current path.
+  for (uint32_t i = 1; i < numBlks; ++i) {
+    BlockT *curExitingBlk = exitingBlks[i];
+    BlockT *curExitBlk = exitBlks[i];
+    BlockT *curBranchBlk;
+
+    if (i == numBlks - 1) {
+      curBranchBlk = curExitBlk;
+    } else {
+      curBranchBlk = funcRep->CreateMachineBasicBlock();
+      funcRep->push_back(curBranchBlk);  //insert to function
+      SHOWNEWBLK(curBranchBlk, "New loopEndbranch block: ");
+    }
+
+    // Add reg = i to exitingBlks[i].
+    CFGTraits::insertAssignInstrBefore(curExitingBlk, passRep,
+                                       endBranchReg, i);
+
+    // Remove the edge (exitingBlks[i] exitBlks[i]) add new edge
+    // (exitingBlks[i], newLandBlk).
+    CFGTraits::replaceInstrUseOfBlockWith(curExitingBlk, curExitBlk,
+                                          newLandBlk);
+    curExitingBlk->removeSuccessor(curExitBlk);
+    curExitingBlk->addSuccessor(newLandBlk);
+
+    // add to preBranchBlk the branch instruction:
+    // if (endBranchReg == preVal)
+    //    preExitBlk
+    // else
+    //    curBranchBlk
+    //
+    // preValReg = i - 1
+
+  DebugLoc DL;
+  RegiT preValReg = static_cast<int>
+    (funcRep->getRegInfo().createVirtualRegister(I32RC));
+
+  preBranchBlk->insert(preBranchBlk->begin(),
+                       tii->getMovImmInstr(preBranchBlk->getParent(), preValReg,
+                       i - 1));
+
+  // condResReg = (endBranchReg == preValReg)
+    RegiT condResReg = static_cast<int>
+      (funcRep->getRegInfo().createVirtualRegister(I32RC));
+    BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg)
+      .addReg(endBranchReg).addReg(preValReg);
+
+    BuildMI(preBranchBlk, DL, tii->get(AMDIL::BRANCH_COND_i32))
+      .addMBB(preExitBlk).addReg(condResReg);
+
+    preBranchBlk->addSuccessor(preExitBlk);
+    preBranchBlk->addSuccessor(curBranchBlk);
+
+    // Update preExitingBlk, preExitBlk, preBranchBlk.
+    preExitingBlk = curExitingBlk;
+    preExitBlk = curExitBlk;
+    preBranchBlk = curBranchBlk;
+
+  }  //end for 1 .. n blocks
+
+  return newLandBlk;
+} //addLoopEndbranchBlock
+
+// singlePathTo - Classify the path from srcBlk: SinglePath_InPath when a
+// single-successor chain from srcBlk reaches dstBlk, SinglePath_NotInPath
+// when the chain dead-ends without meeting dstBlk, Not_SinglePath otherwise.
+// Unless allowSideEntry is set, a block with multiple predecessors on the
+// chain disqualifies it.
+template<class PassT>
+typename CFGStructurizer<PassT>::PathToKind
+CFGStructurizer<PassT>::singlePathTo(BlockT *srcBlk, BlockT *dstBlk,
+                                     bool allowSideEntry) {
+  assert(dstBlk);
+
+  BlockT *curBlk = srcBlk;
+  if (curBlk == dstBlk) {
+    return SinglePath_InPath;
+  }
+
+  // Follow the chain of single-successor blocks.
+  while (curBlk != NULL && curBlk->succ_size() == 1) {
+    curBlk = *curBlk->succ_begin();
+    if (curBlk == dstBlk) {
+      return SinglePath_InPath;
+    }
+    if (!allowSideEntry && curBlk->pred_size() > 1) {
+      return Not_SinglePath;  // side entry into the path
+    }
+  }
+
+  // Chain ended without meeting dstBlk; a dead end is still a single path.
+  if (curBlk != NULL && curBlk->succ_size() == 0) {
+    return SinglePath_NotInPath;
+  }
+
+  return Not_SinglePath;
+} //singlePathTo
+
+// If there is a single path from srcBlk to dstBlk, return the last block before
+// dstBlk If there is a single path from srcBlk->end without dstBlk, return the
+// last block in the path Otherwise, return NULL
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::singlePathEnd(BlockT *srcBlk, BlockT *dstBlk,
+                                      bool allowSideEntry) {
+  assert(dstBlk);
+
+  if (srcBlk == dstBlk) {
+    return srcBlk;
+  }
+
+  if (srcBlk->succ_size() == 0) {
+    return srcBlk;
+  }
+
+  // Follow the chain of single-successor blocks.
+  while (srcBlk && srcBlk->succ_size() == 1) {
+    BlockT *preBlk = srcBlk;
+
+    srcBlk = *srcBlk->succ_begin();
+    // defensive: successor entries are not expected to be NULL
+    if (srcBlk == NULL) {
+      return preBlk;
+    }
+
+    if (!allowSideEntry && srcBlk->pred_size() > 1) {
+      return NULL;
+    }
+  }
+
+  // Dead end without meeting dstBlk: srcBlk is the last block of the path.
+  if (srcBlk && srcBlk->succ_size()==0) {
+    return srcBlk;
+  }
+
+  return NULL;
+
+} //singlePathEnd
+
+// cloneOnSideEntryTo - Walk the single-successor chain from srcBlk to dstBlk,
+// cloning every block that has a side entry (more than one predecessor) so
+// that the path preBlk -> ... -> dstBlk becomes private to preBlk.  Returns
+// the number of blocks cloned.
+template<class PassT>
+int CFGStructurizer<PassT>::cloneOnSideEntryTo(BlockT *preBlk, BlockT *srcBlk,
+                                               BlockT *dstBlk) {
+  assert(preBlk->isSuccessor(srcBlk));
+
+  int numCloned = 0;
+  BlockT *prevBlk = preBlk;
+  BlockT *curBlk = srcBlk;
+  while (curBlk != NULL && curBlk != dstBlk) {
+    assert(curBlk->succ_size() == 1);
+    if (curBlk->pred_size() > 1) {
+      // Detach a private copy of curBlk for prevBlk.
+      curBlk = cloneBlockForPredecessor(curBlk, prevBlk);
+      ++numCloned;
+    }
+
+    prevBlk = curBlk;
+    curBlk = *curBlk->succ_begin();
+  }
+
+  return numCloned;
+} //cloneOnSideEntryTo
+
+// cloneBlockForPredecessor - Give predBlk its own private copy of curBlk:
+// clone curBlk's instructions, redirect predBlk's branch/edge to the clone,
+// and copy curBlk's successor list onto the clone.  Returns the clone.
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::cloneBlockForPredecessor(BlockT *curBlk,
+                                                 BlockT *predBlk) {
+  assert(predBlk->isSuccessor(curBlk) &&
+         "succBlk is not a prececessor of curBlk");
+
+  BlockT *cloneBlk = CFGTraits::clone(curBlk);  //clone instructions
+  CFGTraits::replaceInstrUseOfBlockWith(predBlk, curBlk, cloneBlk);
+  //srcBlk, oldBlk, newBlk
+
+  predBlk->removeSuccessor(curBlk);
+  predBlk->addSuccessor(cloneBlk);
+
+  // add all successor to cloneBlk
+  CFGTraits::cloneSuccessorList(cloneBlk, curBlk);
+
+  numClonedInstr += curBlk->size();
+
+  if (DEBUGME) {
+    errs() << "Cloned block: " << "BB"
+           << curBlk->getNumber() << "size " << curBlk->size() << "\n";
+  }
+
+  SHOWNEWBLK(cloneBlk, "result of Cloned block: ");
+
+  return cloneBlk;
+} //cloneBlockForPredecessor
+
+// exitingBlock2ExitBlock - Return the unique successor of exitingBlk that
+// lies outside loopRep (asserts that exactly one such successor exists).
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::exitingBlock2ExitBlock(LoopT *loopRep,
+                                               BlockT *exitingBlk) {
+  BlockT *outsideBlk = NULL;
+
+  typename BlockT::succ_iterator succIter = exitingBlk->succ_begin();
+  typename BlockT::succ_iterator succIterEnd = exitingBlk->succ_end();
+  while (succIter != succIterEnd) {
+    BlockT *succBlk = *succIter;
+    if (!loopRep->contains(succBlk)) {
+      // only one successor may leave the loop
+      assert(outsideBlk == NULL);
+      outsideBlk = succBlk;
+    }
+    ++succIter;
+  }
+
+  assert(outsideBlk != NULL);
+
+  return outsideBlk;
+} //exitingBlock2ExitBlock
+
+// Move all of srcBlk's instructions that precede its terminating branch (or
+// the whole block if it has no branch) into dstBlk, inserting them before
+// insertPos.  srcBlk keeps only its branch, if any.
+template<class PassT>
+void CFGStructurizer<PassT>::migrateInstruction(BlockT *srcBlk,
+                                                BlockT *dstBlk,
+                                                InstrIterator insertPos) {
+  InstrIterator spliceEnd;
+  //look for the input branchinstr, not the AMDIL branchinstr
+  InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
+  if (branchInstr == NULL) {
+    if (DEBUGME) {
+      errs() << "migrateInstruction don't see branch instr\n" ;
+    }
+    // No branch: migrate the entire block.
+    spliceEnd = srcBlk->end();
+  } else {
+    if (DEBUGME) {
+      errs() << "migrateInstruction see branch instr\n" ;
+      branchInstr->dump();
+    }
+    // Migrate everything up to (but not including) the branch.
+    spliceEnd = CFGTraits::getInstrPos(srcBlk, branchInstr);
+  }
+  if (DEBUGME) {
+    errs() << "migrateInstruction before splice dstSize = " << dstBlk->size()
+      << "srcSize = " << srcBlk->size() << "\n";
+  }
+
+  //splice insert before insertPos
+  dstBlk->splice(insertPos, srcBlk, srcBlk->begin(), spliceEnd);
+
+  if (DEBUGME) {
+    errs() << "migrateInstruction after splice dstSize = " << dstBlk->size()
+      << "srcSize = " << srcBlk->size() << "\n";
+  }
+} //migrateInstruction
+
+// normalizeInfiniteLoopExit change
+//   B1:
+//        uncond_br LoopHeader
+//
+// to
+//   B1:
+//        cond_br 1 LoopHeader dummyExit
+// and return the newly added dummy exit block
+// 
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::normalizeInfiniteLoopExit(LoopT* LoopRep) {
+  BlockT *loopHeader;
+  BlockT *loopLatch;
+  loopHeader = LoopRep->getHeader();
+  loopLatch = LoopRep->getLoopLatch();
+  BlockT *dummyExitBlk = NULL;
+  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
+  // Only rewrite when the latch ends in an unconditional back-branch, i.e.
+  // the loop has no exit at all.
+  if (loopHeader!=NULL && loopLatch!=NULL) {
+    InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(loopLatch);
+    if (branchInstr!=NULL && CFGTraits::isUncondBranch(branchInstr)) {
+      dummyExitBlk = funcRep->CreateMachineBasicBlock();
+      funcRep->push_back(dummyExitBlk);  //insert to function
+      SHOWNEWBLK(dummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
+
+      if (DEBUGME) errs() << "Old branch instr: " << *branchInstr << "\n";
+
+      typename BlockT::iterator insertPos =
+        CFGTraits::getInstrPos(loopLatch, branchInstr);
+      // Materialize a constant-1 condition so the new branch is always taken,
+      // preserving runtime behavior while giving the CFG a formal exit edge.
+      unsigned immReg =
+        funcRep->getRegInfo().createVirtualRegister(I32RC);
+      CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1);
+      InstrT *newInstr = 
+        CFGTraits::insertInstrBefore(insertPos, AMDIL::BRANCH_COND_i32, passRep);
+      MachineInstrBuilder(newInstr).addMBB(loopHeader).addReg(immReg, false);
+
+      SHOWNEWINSTR(newInstr);
+
+      branchInstr->eraseFromParent();
+      loopLatch->addSuccessor(dummyExitBlk);
+    }
+  }
+
+  // NULL when no rewrite was needed (loop already had an exit).
+  return dummyExitBlk;
+} //normalizeInfiniteLoopExit
+
+// Erase every trailing unconditional branch from srcBlk.  A block may carry
+// more than one (observed in test_fc_do_while_or.c), hence the loop.
+template<class PassT>
+void CFGStructurizer<PassT>::removeUnconditionalBranch(BlockT *srcBlk) {
+  while (true) {
+    InstrT *brInstr = CFGTraits::getLoopendBlockBranchInstr(srcBlk);
+    if (brInstr == NULL || !CFGTraits::isUncondBranch(brInstr)) {
+      break;
+    }
+    if (DEBUGME) {
+      errs() << "Removing unconditional branch instruction" ;
+      brInstr->dump();
+    }
+    brInstr->eraseFromParent();
+  }
+} //removeUnconditionalBranch
+
+// A conditional branch whose two successor edges target the same block is
+// pointless: erase the branch and drop the duplicated successor edge.
+template<class PassT>
+void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) {
+  if (srcBlk->succ_size() != 2) {
+    return;
+  }
+
+  typename BlockT::succ_iterator succIt = srcBlk->succ_begin();
+  BlockT *firstSucc = *succIt;
+  BlockT *secondSucc = *(++succIt);
+  if (firstSucc != secondSucc) {
+    return;
+  }
+
+  InstrT *brInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
+  assert(brInstr && CFGTraits::isCondBranch(brInstr));
+  if (DEBUGME) {
+    errs() << "Removing unneeded conditional branch instruction" ;
+    brInstr->dump();
+  }
+  brInstr->eraseFromParent();
+  SHOWNEWBLK(firstSucc, "Removing redundant successor");
+  srcBlk->removeSuccessor(firstSucc);
+} //removeRedundantConditionalBranch
+
+// Create a single synthetic exit block ending in RETURN and make every block
+// in retBlks branch to it, stripping their own RETURN instructions.  This
+// gives the function a unique exit for the structurizer to work with.
+template<class PassT>
+void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*,
+                                               DEFAULT_VEC_SLOTS> &retBlks) {
+  BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
+  funcRep->push_back(dummyExitBlk);  //insert to function
+  CFGTraits::insertInstrEnd(dummyExitBlk, AMDIL::RETURN, passRep);
+
+  for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter =
+         retBlks.begin(),
+       iterEnd = retBlks.end(); iter != iterEnd; ++iter) {
+    BlockT *curBlk = *iter;
+    // The RETURN moves to the dummy exit; remove the original.
+    InstrT *curInstr = CFGTraits::getReturnInstr(curBlk);
+    if (curInstr) {
+      curInstr->eraseFromParent();
+    }
+#if 0
+    if (curBlk->size()==0 && curBlk->pred_size() == 1) {
+      if (DEBUGME) {
+        errs() << "Replace empty block BB" <<  curBlk->getNumber()
+          << " with dummyExitBlock\n";
+      }
+      BlockT *predb = *curBlk->pred_begin();
+      predb->removeSuccessor(curBlk);
+      curBlk = predb;
+    } //handle empty curBlk
+#endif
+    curBlk->addSuccessor(dummyExitBlk);
+    if (DEBUGME) {
+      errs() << "Add dummyExitBlock to BB" << curBlk->getNumber()
+             << " successors\n";
+    }
+  } //for
+
+  SHOWNEWBLK(dummyExitBlk, "DummyExitBlock: ");
+} //addDummyExitBlock
+
+// Detach srcBlk from every successor edge it still owns.
+template<class PassT>
+void CFGStructurizer<PassT>::removeSuccessor(BlockT *srcBlk) {
+  for (;;) {
+    if (srcBlk->succ_size() == 0) {
+      break;
+    }
+    srcBlk->removeSuccessor(*srcBlk->succ_begin());
+  }
+}
+
+// Record srcBlk's SCC number in the per-block side table, creating the
+// table entry on first touch.
+template<class PassT>
+void CFGStructurizer<PassT>::recordSccnum(BlockT *srcBlk, int sccNum) {
+  BlockInfo *&entry = blockInfoMap[srcBlk];
+  if (entry == NULL) {
+    entry = new BlockInfo();
+  }
+  entry->sccNum = sccNum;
+}
+
+// Return srcBlk's recorded SCC number, or INVALIDSCCNUM if never recorded.
+template<class PassT>
+int CFGStructurizer<PassT>::getSCCNum(BlockT *srcBlk) {
+  BlockInfo *entry = blockInfoMap[srcBlk];
+  if (entry == NULL) {
+    return INVALIDSCCNUM;
+  }
+  return entry->sccNum;
+}
+
+// Mark srcBlk as retired.  The block must already be fully unlinked from the
+// CFG (no predecessors or successors), which is asserted.
+template<class PassT>
+void CFGStructurizer<PassT>::retireBlock(BlockT *dstBlk, BlockT *srcBlk) {
+  if (DEBUGME) {
+    errs() << "Retiring BB" << srcBlk->getNumber() << "\n";
+  }
+
+  BlockInfo *&entry = blockInfoMap[srcBlk];
+  if (entry == NULL) {
+    entry = new BlockInfo();
+  }
+  entry->isRetired = true;
+
+  assert(srcBlk->succ_size() == 0 && srcBlk->pred_size() == 0
+         && "can't retire block yet");
+}
+
+// A block with no side-table entry has never been retired.
+template<class PassT>
+bool CFGStructurizer<PassT>::isRetiredBlock(BlockT *srcBlk) {
+  BlockInfo *entry = blockInfoMap[srcBlk];
+  if (entry == NULL) {
+    return false;
+  }
+  return entry->isRetired;
+}
+
+// curBlk is an "active" loop head if any loop it heads (walking outward
+// through parent loops) still lacks landing info or has a landing block that
+// is not yet retired.
+template<class PassT>
+bool CFGStructurizer<PassT>::isActiveLoophead(BlockT *curBlk) {
+  for (LoopT *loopRep = loopInfo->getLoopFor(curBlk);
+       loopRep != NULL && loopRep->getHeader() == curBlk;
+       loopRep = loopRep->getParentLoop()) {
+    LoopLandInfo *loopLand = getLoopLandInfo(loopRep);
+    if (loopLand == NULL) {
+      return true;
+    }
+
+    BlockT *landBlk = loopLand->landBlk;
+    assert(landBlk);
+    if (!isRetiredBlock(landBlk)) {
+      return true;
+    }
+  }
+
+  return false;
+} //isActiveLoophead
+
+// Heuristic: migrating pays off only for a shared block (2+ predecessors)
+// that is both large on its own and expensive to duplicate into each
+// predecessor.
+template<class PassT>
+bool CFGStructurizer<PassT>::needMigrateBlock(BlockT *blk) {
+  const unsigned blockSizeThreshold = 30;
+  const unsigned cloneInstrThreshold = 100;
+
+  if (blk == NULL || blk->pred_size() <= 1) {
+    return false;
+  }
+
+  unsigned blkSize = blk->size();
+  if (blkSize <= blockSizeThreshold) {
+    return false;
+  }
+  return blkSize * (blk->pred_size() - 1) > cloneInstrThreshold;
+} //needMigrateBlock
+
+// Ensure the loop's landing block is entered only from inside the loop or
+// from its exit blocks.  If landBlk has other predecessors, interpose a new
+// block: in-path predecessors are redirected to it and it falls through to
+// landBlk.  Records and returns the (possibly new) landing block.
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::recordLoopLandBlock(LoopT *loopRep, BlockT *landBlk,
+                                            BlockTSmallerVector &exitBlks,
+                                            std::set<BlockT *> &exitBlkSet) {
+  SmallVector<BlockT *, DEFAULT_VEC_SLOTS> inpathBlks;  //in exit path blocks
+
+  for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
+       predIterEnd = landBlk->pred_end();
+       predIter != predIterEnd; ++predIter) {
+    BlockT *curBlk = *predIter;
+    if (loopRep->contains(curBlk) || exitBlkSet.count(curBlk)) {
+      inpathBlks.push_back(curBlk);
+    }
+  } //for
+
+  //if landBlk has predecessors that are not in the given loop,
+  //create a new block
+  BlockT *newLandBlk = landBlk;
+  if (inpathBlks.size() != landBlk->pred_size()) {
+    newLandBlk = funcRep->CreateMachineBasicBlock();
+    funcRep->push_back(newLandBlk);  //insert to function
+    newLandBlk->addSuccessor(landBlk);
+    for (typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::iterator iter =
+         inpathBlks.begin(),
+         iterEnd = inpathBlks.end(); iter != iterEnd; ++iter) {
+      BlockT *curBlk = *iter;
+      CFGTraits::replaceInstrUseOfBlockWith(curBlk, landBlk, newLandBlk);
+      //srcBlk, oldBlk, newBlk
+      curBlk->removeSuccessor(landBlk);
+      curBlk->addSuccessor(newLandBlk);
+    }
+    // Keep the exit-block list consistent with the rewired CFG.
+    for (size_t i = 0, tot = exitBlks.size(); i < tot; ++i) {
+      if (exitBlks[i] == landBlk) {
+        exitBlks[i] = newLandBlk;
+      }
+    }
+    SHOWNEWBLK(newLandBlk, "NewLandingBlock: ");
+  }
+
+  setLoopLandBlock(loopRep, newLandBlk);
+
+  return newLandBlk;
+} // recordLoopbreakLand
+
+// Record blk as the landing block for loopRep.  A NULL blk means the loop
+// has no break, so an empty dummy landing block is synthesized.  A landing
+// block may be set only once per loop (asserted).
+template<class PassT>
+void CFGStructurizer<PassT>::setLoopLandBlock(LoopT *loopRep, BlockT *blk) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+  assert(entry->landBlk == NULL);
+
+  if (blk == NULL) {
+    blk = funcRep->CreateMachineBasicBlock();
+    funcRep->push_back(blk);  //insert to function
+    SHOWNEWBLK(blk, "DummyLandingBlock for loop without break: ");
+  }
+
+  entry->landBlk = blk;
+
+  if (DEBUGME) {
+    errs() << "setLoopLandBlock loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  landing-block = BB" << blk->getNumber() << "\n";
+  }
+} // setLoopLandBlock
+
+// Remember that regNum carries a break condition for loopRep.
+template<class PassT>
+void CFGStructurizer<PassT>::addLoopBreakOnReg(LoopT *loopRep, RegiT regNum) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+
+  entry->breakOnRegs.insert(regNum);
+
+  if (DEBUGME) {
+    errs() << "addLoopBreakOnReg loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  regNum = " << regNum << "\n";
+  }
+} // addLoopBreakOnReg
+
+// Remember that regNum carries a continue condition for loopRep.
+template<class PassT>
+void CFGStructurizer<PassT>::addLoopContOnReg(LoopT *loopRep, RegiT regNum) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+
+  entry->contOnRegs.insert(regNum);
+
+  if (DEBUGME) {
+    errs() << "addLoopContOnReg loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  regNum = " << regNum << "\n";
+  }
+} // addLoopContOnReg
+
+// Remember that regNum must be initialized before entering loopRep for its
+// break logic.
+template<class PassT>
+void CFGStructurizer<PassT>::addLoopBreakInitReg(LoopT *loopRep, RegiT regNum) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+
+  entry->breakInitRegs.insert(regNum);
+
+  if (DEBUGME) {
+    errs() << "addLoopBreakInitReg loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  regNum = " << regNum << "\n";
+  }
+} // addLoopBreakInitReg
+
+// Remember that regNum must be initialized before entering loopRep for its
+// continue logic.
+template<class PassT>
+void CFGStructurizer<PassT>::addLoopContInitReg(LoopT *loopRep, RegiT regNum) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+
+  entry->contInitRegs.insert(regNum);
+
+  if (DEBUGME) {
+    errs() << "addLoopContInitReg loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  regNum = " << regNum << "\n";
+  }
+} // addLoopContInitReg
+
+// Remember that regNum must be initialized for loopRep's end-branch logic.
+template<class PassT>
+void CFGStructurizer<PassT>::addLoopEndbranchInitReg(LoopT *loopRep,
+                                                     RegiT regNum) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    entry = new LoopLandInfo();
+  }
+
+  entry->endbranchInitRegs.insert(regNum);
+
+  if (DEBUGME) {
+    errs() << "addLoopEndbranchInitReg loop-header = BB"
+           << loopRep->getHeader()->getNumber()
+           << "  regNum = " << regNum << "\n";
+  }
+} // addLoopEndbranchInitReg
+
+// Look up the landing info for loopRep.  Note: map operator[] creates a NULL
+// entry for an unseen loop, matching the original behavior.
+template<class PassT>
+typename CFGStructurizer<PassT>::LoopLandInfo *
+CFGStructurizer<PassT>::getLoopLandInfo(LoopT *loopRep) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  return entry;
+} // getLoopLandInfo
+
+// Return loopRep's landing block, or NULL when no landing info is recorded.
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::getLoopLandBlock(LoopT *loopRep) {
+  LoopLandInfo *&entry = loopLandInfoMap[loopRep];
+  if (entry == NULL) {
+    return NULL;
+  }
+  return entry->landBlk;
+} // getLoopLandBlock
+
+
+// True when curBlk branches back to the header of its enclosing loop.
+template<class PassT>
+bool CFGStructurizer<PassT>::hasBackEdge(BlockT *curBlk) {
+  LoopT *loopRep = loopInfo->getLoopFor(curBlk);
+  return loopRep != NULL && curBlk->isSuccessor(loopRep->getHeader());
+} //hasBackEdge
+
+// Depth 0 stands for "not inside any loop".
+template<class PassT>
+unsigned CFGStructurizer<PassT>::getLoopDepth(LoopT *loopRep) {
+  if (loopRep == NULL) {
+    return 0;
+  }
+  return loopRep->getLoopDepth();
+} //getLoopDepth
+
+// Count the blocks in [iterStart, iterEnd) that have not been retired.
+template<class PassT>
+int CFGStructurizer<PassT>::countActiveBlock
+(typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterStart,
+ typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterEnd) {
+  int count = 0;
+  for (; iterStart != iterEnd; ++iterStart) {
+    if (!isRetiredBlock(*iterStart)) {
+      ++count;
+    }
+  }
+
+  return count;
+} //countActiveBlock
+
+// Workaround: findNearestCommonDominator is not available for post-dominator
+// trees; a proper fix should go into Dominators.h.
+
+// Return the nearest common post-dominator of blk1 and blk2, or NULL if
+// none exists.
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT*
+CFGStructurizer<PassT>::findNearestCommonPostDom(BlockT *blk1, BlockT *blk2) {
+
+  // Trivial cases: one block post-dominates the other.
+  if (postDomTree->dominates(blk1, blk2)) {
+    return blk1;
+  }
+  if (postDomTree->dominates(blk2, blk1)) {
+    return blk2;
+  }
+
+  DomTreeNodeT *node1 = postDomTree->getNode(blk1);
+  DomTreeNodeT *node2 = postDomTree->getNode(blk2);
+
+  // Handle newly cloned node.
+  // A clone is not in the (stale) post-dom tree; step through its unique
+  // successor and retry.
+  if (node1 == NULL && blk1->succ_size() == 1) {
+    return findNearestCommonPostDom(*blk1->succ_begin(), blk2);
+  }
+  if (node2 == NULL && blk2->succ_size() == 1) {
+    return findNearestCommonPostDom(blk1, *blk2->succ_begin());
+  }
+
+  if (node1 == NULL || node2 == NULL) {
+    return NULL;
+  }
+
+  // Walk blk1's immediate post-dominator chain until a node that also
+  // post-dominates blk2 is found.
+  node1 = node1->getIDom();
+  while (node1) {
+    if (postDomTree->dominates(node1, node2)) {
+      return node1->getBlock();
+    }
+    node1 = node1->getIDom();
+  }
+
+  return NULL;
+}
+
+// Fold the pairwise nearest-common-post-dominator computation over the whole
+// set.  Returns NULL when the set is empty or no common post-dominator
+// exists.
+//
+// Fix: the original dereferenced blks.begin() before checking for an empty
+// set, which is undefined behavior; guard that case explicitly.
+template<class PassT>
+typename CFGStructurizer<PassT>::BlockT *
+CFGStructurizer<PassT>::findNearestCommonPostDom
+(typename std::set<BlockT *> &blks) {
+  if (blks.empty()) {
+    return NULL;
+  }
+
+  BlockT *commonDom;
+  typename std::set<BlockT *>::const_iterator iter = blks.begin();
+  typename std::set<BlockT *>::const_iterator iterEnd = blks.end();
+  for (commonDom = *iter; iter != iterEnd && commonDom != NULL; ++iter) {
+    BlockT *curBlk = *iter;
+    if (curBlk != commonDom) {
+      commonDom = findNearestCommonPostDom(curBlk, commonDom);
+    }
+  }
+
+  if (DEBUGME) {
+    errs() << "Common post dominator for exit blocks is ";
+    if (commonDom) {
+          errs() << "BB" << commonDom->getNumber() << "\n";
+    } else {
+      errs() << "NULL\n";
+    }
+  }
+
+  return commonDom;
+} //findNearestCommonPostDom
+
+} //end namespace llvm
+
+//todo: move-end
+
+
+//===----------------------------------------------------------------------===//
+//
+// CFGStructurizer for AMDIL
+//
+//===----------------------------------------------------------------------===//
+
+
+using namespace llvmCFGStruct;
+
+namespace llvm
+{
+// Common base for the AMDIL CFG passes (AMDILCFGPrepare / AMDILCFGPerform
+// below).  Carries the target hooks that CFGStructTraits<AMDILCFGStructurizer>
+// accesses via getTargetInstrInfo(), and the typedefs that instantiate the
+// generic CFGStructurizer template for MachineFunctions.
+class AMDILCFGStructurizer : public MachineFunctionPass
+{
+public:
+  typedef MachineInstr              InstructionType;
+  typedef MachineFunction           FunctionType;
+  typedef MachineBasicBlock         BlockType;
+  typedef MachineLoopInfo           LoopinfoType;
+  typedef MachineDominatorTree      DominatortreeType;
+  typedef MachinePostDominatorTree  PostDominatortreeType;
+  typedef MachineDomTreeNode        DomTreeNodeType;
+  typedef MachineLoop               LoopType;
+
+protected:
+  TargetMachine &TM;
+  const TargetInstrInfo *TII;
+  const AMDILRegisterInfo *TRI;
+
+public:
+  // 'pid' is the concrete subclass's static pass ID.
+  AMDILCFGStructurizer(char &pid, TargetMachine &tm AMDIL_OPT_LEVEL_DECL);
+  const TargetInstrInfo *getTargetInstrInfo() const;
+  //bool runOnMachineFunction(MachineFunction &F);
+
+private:
+
+};   //end of class AMDILCFGStructurizer
+
+//char AMDILCFGStructurizer::ID = 0;
+} //end of namespace llvm
+// Cache the target's instruction and register info from the TargetMachine;
+// 'pid' is forwarded as the MachineFunctionPass ID of the concrete subclass.
+AMDILCFGStructurizer::AMDILCFGStructurizer(char &pid, TargetMachine &tm
+                                           AMDIL_OPT_LEVEL_DECL)
+: MachineFunctionPass(pid), TM(tm), TII(tm.getInstrInfo()),
+  TRI(static_cast<const AMDILRegisterInfo *>(tm.getRegisterInfo())
+  ) {
+}
+
+// Accessor used by CFGStructTraits<AMDILCFGStructurizer> to reach the target
+// instruction info cached in the constructor.
+const TargetInstrInfo *AMDILCFGStructurizer::getTargetInstrInfo() const {
+  return TII;
+}
+//===----------------------------------------------------------------------===//
+//
+// CFGPrepare
+//
+//===----------------------------------------------------------------------===//
+
+
+using namespace llvmCFGStruct;
+
+namespace llvm
+{
+// Pass that runs the CFG *preparation* phase of the structurizer (see
+// runOnMachineFunction definition elsewhere in this file).
+class AMDILCFGPrepare : public AMDILCFGStructurizer
+{
+public:
+  static char ID;
+
+public:
+  AMDILCFGPrepare(TargetMachine &tm AMDIL_OPT_LEVEL_DECL);
+
+  virtual const char *getPassName() const;
+  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+  bool runOnMachineFunction(MachineFunction &F);
+
+private:
+
+};   //end of class AMDILCFGPrepare
+
+char AMDILCFGPrepare::ID = 0;
+} //end of namespace llvm
+
+// Delegates to the AMDILCFGStructurizer base with this pass's own ID.
+AMDILCFGPrepare::AMDILCFGPrepare(TargetMachine &tm AMDIL_OPT_LEVEL_DECL)
+  : AMDILCFGStructurizer(ID, tm  AMDIL_OPT_LEVEL_VAR) 
+{
+}
+// Human-readable pass name shown by -debug-pass and friends.
+const char *AMDILCFGPrepare::getPassName() const {
+  return "AMD IL Control Flow Graph Preparation Pass";
+}
+
+// Declare the analyses the structurizer consumes (dominators, post-dominators,
+// loop info) and preserve the MachineFunction analysis.
+void AMDILCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addPreserved<MachineFunctionAnalysis>();
+  AU.addRequired<MachineFunctionAnalysis>();
+  AU.addRequired<MachineDominatorTree>();
+  AU.addRequired<MachinePostDominatorTree>();
+  AU.addRequired<MachineLoopInfo>();
+}
+
+//===----------------------------------------------------------------------===//
+//
+// CFGPerform
+//
+//===----------------------------------------------------------------------===//
+
+
+using namespace llvmCFGStruct;
+
+namespace llvm
+{
+// Pass that runs the CFG *structurizing* phase proper (see
+// runOnMachineFunction definition elsewhere in this file).
+class AMDILCFGPerform : public AMDILCFGStructurizer
+{
+public:
+  static char ID;
+
+public:
+  AMDILCFGPerform(TargetMachine &tm AMDIL_OPT_LEVEL_DECL);
+  virtual const char *getPassName() const;
+  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+  bool runOnMachineFunction(MachineFunction &F);
+
+private:
+
+};   //end of class AMDILCFGPerform
+
+char AMDILCFGPerform::ID = 0;
+} //end of namespace llvm
+
+// Delegates to the AMDILCFGStructurizer base with this pass's own ID.
+AMDILCFGPerform::AMDILCFGPerform(TargetMachine &tm AMDIL_OPT_LEVEL_DECL)
+  : AMDILCFGStructurizer(ID, tm AMDIL_OPT_LEVEL_VAR) {
+}
+
+// Human-readable pass name shown by -debug-pass and friends.
+const char *AMDILCFGPerform::getPassName() const {
+  return "AMD IL Control Flow Graph structurizer Pass";
+}
+
+// Same analysis requirements as AMDILCFGPrepare: dominators, post-dominators
+// and loop info, preserving the MachineFunction analysis.
+void AMDILCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.addPreserved<MachineFunctionAnalysis>();
+  AU.addRequired<MachineFunctionAnalysis>();
+  AU.addRequired<MachineDominatorTree>();
+  AU.addRequired<MachinePostDominatorTree>();
+  AU.addRequired<MachineLoopInfo>();
+}
+
+//===----------------------------------------------------------------------===//
+//
+// CFGStructTraits<AMDILCFGStructurizer>
+//
+//===----------------------------------------------------------------------===//
+
+namespace llvmCFGStruct
+{
+// this class is tailor to the AMDIL backend
+template<>
+struct CFGStructTraits<AMDILCFGStructurizer>
+{
+  typedef int RegiT;
+
+  // Map a scalar BRANCH_COND_* opcode to the matching BREAK_LOGICALNZ_* form.
+  // The macro emits one 'case: return' per scalar type; reaching 'default'
+  // means an unexpected opcode was passed in.
+  static int getBreakNzeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALNZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+  // Map a scalar BRANCH_COND_* opcode to the matching BREAK_LOGICALZ_* form.
+  static int getBreakZeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::BREAK_LOGICALZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+  // Map a scalar BRANCH_COND_* opcode to the matching IF_LOGICALNZ_* form.
+  static int getBranchNzeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALNZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+  // Map a scalar BRANCH_COND_* opcode to the matching IF_LOGICALZ_* form.
+  static int getBranchZeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::IF_LOGICALZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+  // Map a scalar BRANCH_COND_* opcode to the matching CONTINUE_LOGICALNZ_*
+  // form.  Brace style normalized to match the sibling mappers.
+  static int getContinueNzeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALNZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+  // Map a scalar BRANCH_COND_* opcode to the matching CONTINUE_LOGICALZ_*
+  // form.
+  static int getContinueZeroOpcode(int oldOpcode) {
+    switch(oldOpcode) {
+      ExpandCaseToAllScalarReturn(AMDIL::BRANCH_COND, AMDIL::CONTINUE_LOGICALZ);
+    default:
+      assert(0 && "internal error");
+    };
+    return -1;
+  }
+
+// the explicitly represented branch target is the true branch target
+#define getExplicitBranch getTrueBranch
+#define setExplicitBranch setTrueBranch
+
+  // Operand 0 of a branch instruction is its (true) target basic block.
+  static MachineBasicBlock *getTrueBranch(MachineInstr *instr) {
+    return instr->getOperand(0).getMBB();
+  }
+
+  // Redirect the branch's (true) target to blk by rewriting operand 0.
+  static void setTrueBranch(MachineInstr *instr, MachineBasicBlock *blk) {
+    instr->getOperand(0).setMBB(blk);
+  }
+
+  // Given a conditional-branch block with exactly two successors, return the
+  // successor that is NOT the explicit (true) branch target of instr.
+  static MachineBasicBlock *
+  getFalseBranch(MachineBasicBlock *blk, MachineInstr *instr) {
+    assert(blk->succ_size() == 2);
+    MachineBasicBlock::succ_iterator succIt = blk->succ_begin();
+    MachineBasicBlock *firstSucc = *succIt;
+    ++succIt;
+    MachineBasicBlock *secondSucc = *succIt;
+
+    if (firstSucc == getTrueBranch(instr)) {
+      return secondSucc;
+    }
+    return firstSucc;
+  }
+
+  // True for any scalar-typed BRANCH_COND_* opcode (the macro expands to one
+  // case label per scalar type).
+  static bool isCondBranch(MachineInstr *instr) {
+    switch (instr->getOpcode()) {
+      ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+      break;
+    default:
+      return false;
+    }
+    return true;
+  }
+
+  // An unconditional branch is exactly the AMDIL::BRANCH opcode.
+  static bool isUncondBranch(MachineInstr *instr) {
+    return instr->getOpcode() == AMDIL::BRANCH;
+  }
+
+  // Return the DebugLoc of the LAST instruction in blk that carries debug
+  // info (every match overwrites DL); an unknown DebugLoc if none does.
+  static DebugLoc getLastDebugLocInBB(MachineBasicBlock *blk) {
+    DebugLoc DL;
+    for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end();
+         ++iter) {
+      MachineInstr *instr = &(*iter);
+      if (!instr->getDebugLoc().isUnknown()) {
+        DL = instr->getDebugLoc();
+      }
+    }
+    return DL;
+  }
+
+  // Return the trailing branch instruction of blk (conditional or
+  // unconditional), or NULL when the block does not end in a branch.
+  //
+  // Fix: the original dereferenced blk->rbegin() unconditionally, which is
+  // invalid for an empty block; sibling helpers (getReturnInstr,
+  // getContinueInstr) already guard that case, so do the same here.
+  static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *blk) {
+    MachineBasicBlock::reverse_iterator iter = blk->rbegin();
+    if (iter == blk->rend()) {
+      return NULL;
+    }
+    MachineInstr *instr = &*iter;
+    if (instr && (isCondBranch(instr) || isUncondBranch(instr))) {
+      return instr;
+    }
+    return NULL;
+  }
+
+  // A more accurate name for this would be getPossibleLoopendBlockBranchInstr.
+  //
+  // A block with a backward edge may have move instructions after its branch
+  // instruction.  Such move instructions "belong to" the loop backward edge.
+  //
+  // Scan blk backwards for its branch instruction, skipping over trailing
+  // moves (which may legitimately follow a loop-end branch); stop at the
+  // first non-move, non-branch instruction.  NULL if no branch is found.
+  static MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *blk) {
+    const AMDILInstrInfo * TII = static_cast<const AMDILInstrInfo *>(
+                                  blk->getParent()->getTarget().getInstrInfo());
+
+    for (MachineBasicBlock::reverse_iterator iter = blk->rbegin(),
+         iterEnd = blk->rend(); iter != iterEnd; ++iter) {
+      // FIXME: Simplify
+      MachineInstr *instr = &*iter;
+      if (instr) {
+        if (isCondBranch(instr) || isUncondBranch(instr)) {
+          return instr;
+        } else if (!TII->isMov(instr->getOpcode())) {
+          break;
+        }
+      }
+    }
+    return NULL;
+  }
+
+  // Return blk's trailing RETURN instruction, or NULL if the block is empty
+  // or ends in something else.
+  static MachineInstr *getReturnInstr(MachineBasicBlock *blk) {
+    MachineBasicBlock::reverse_iterator iter = blk->rbegin();
+    if (iter == blk->rend()) {
+      return NULL;
+    }
+    MachineInstr *instr = &(*iter);
+    if (instr->getOpcode() != AMDIL::RETURN) {
+      return NULL;
+    }
+    return instr;
+  }
+
+  // Return blk's trailing CONTINUE instruction, or NULL if the block is
+  // empty or ends in something else.
+  static MachineInstr *getContinueInstr(MachineBasicBlock *blk) {
+    MachineBasicBlock::reverse_iterator iter = blk->rbegin();
+    if (iter == blk->rend()) {
+      return NULL;
+    }
+    MachineInstr *instr = &(*iter);
+    if (instr->getOpcode() != AMDIL::CONTINUE) {
+      return NULL;
+    }
+    return instr;
+  }
+
+  // Scan blk front-to-back for the first BREAK_LOGICALNZ_i32 /
+  // BREAK_LOGICALZ_i32 instruction; NULL if none is present.
+  static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) {
+    for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end();
+         ++iter) {
+      MachineInstr *instr = &(*iter);
+      int opcode = instr->getOpcode();
+      if (opcode == AMDIL::BREAK_LOGICALNZ_i32 ||
+          opcode == AMDIL::BREAK_LOGICALZ_i32) {
+        return instr;
+      }
+    }
+    return NULL;
+  }
+
+  // A return block is one with no successors.  Asserts that any block ending
+  // in RETURN is indeed successor-less, and diagnoses the (legal) case of a
+  // successor-less block without an explicit RETURN instruction.
+  static bool isReturnBlock(MachineBasicBlock *blk) {
+    MachineInstr *instr = getReturnInstr(blk);
+    bool isReturn = (blk->succ_size() == 0);
+    if (instr) {
+      assert(isReturn);
+      return isReturn;
+    }
+
+    if (isReturn && DEBUGME) {
+      errs() << "BB" << blk->getNumber()
+             <<" is return block without RETURN instr\n";
+    }
+    return isReturn;
+  }
+
+  // Return the forward iterator positioned at instr inside blk.  instr must
+  // belong to blk (asserted on entry and on exit).
+  //
+  // Fix: the original loop evaluated &(*iter) before comparing iter against
+  // iterEnd, so if instr were ever absent it would dereference the end
+  // iterator; test for the end of the block first.
+  static MachineBasicBlock::iterator
+  getInstrPos(MachineBasicBlock *blk, MachineInstr *instr) {
+    assert(instr->getParent() == blk && "instruction doesn't belong to block");
+    MachineBasicBlock::iterator iter = blk->begin();
+    MachineBasicBlock::iterator iterEnd = blk->end();
+    while (iter != iterEnd && &(*iter) != instr) {
+      ++iter;
+    }
+
+    assert(iter != iterEnd);
+    return iter;
+  }//getInstrPos
+
+  // Convenience overload: insert at the start of blk with an empty DebugLoc.
+  static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
+                                         AMDILCFGStructurizer *passRep) {
+    return insertInstrBefore(blk,newOpcode,passRep,DebugLoc());
+  } //insertInstrBefore
+
+  // Create an instruction with opcode newOpcode and debug location DL and
+  // insert it at the very beginning of blk.  Returns the new instruction.
+  static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
+                                         AMDILCFGStructurizer *passRep, DebugLoc DL) {
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL);
+
+    // Inserting before begin() degenerates to push_back on an empty block
+    // (begin() == end()), so a single insert covers both branches of the
+    // original if/else.
+    blk->insert(blk->begin(), newInstr);
+
+    SHOWNEWINSTR(newInstr);
+
+    return newInstr;
+  } //insertInstrBefore
+
+  // Convenience overload: append to blk with an empty DebugLoc.
+  static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
+                             AMDILCFGStructurizer *passRep) {
+    insertInstrEnd(blk,newOpcode,passRep,DebugLoc());
+  } //insertInstrEnd
+
+  // Create an instruction with opcode newOpcode and debug location DL and
+  // append it to blk.  The instruction is assumed to take no register
+  // operands.
+  static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
+                             AMDILCFGStructurizer *passRep, DebugLoc DL) {
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL);
+
+    blk->push_back(newInstr);
+
+    SHOWNEWINSTR(newInstr);
+  } //insertInstrEnd
+
+  // Create an instruction with opcode newOpcode (empty DebugLoc) and insert
+  // it immediately before instrPos.  Assumes no register operands.  Returns
+  // the new instruction.
+  static MachineInstr *insertInstrBefore(MachineBasicBlock::iterator instrPos,
+                                         int newOpcode, 
+                                         AMDILCFGStructurizer *passRep) {
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+    MachineBasicBlock *blk = (*instrPos).getParent();
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DebugLoc());
+
+    blk->insert(instrPos, newInstr);
+
+    SHOWNEWINSTR(newInstr);
+    return newInstr;
+  } //insertInstrBefore
+
+  // Insert a conditional branch with opcode newOpcode before instrPos,
+  // reusing the condition register (operand 1) of the instruction at
+  // instrPos.  The old instruction is NOT erased here (caller's job).
+  static void insertCondBranchBefore(MachineBasicBlock::iterator instrPos,
+                                     int newOpcode,
+                                     AMDILCFGStructurizer *passRep,
+									 DebugLoc DL) {
+    MachineInstr *oldInstr = &(*instrPos);
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+    MachineBasicBlock *blk = oldInstr->getParent();
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode),
+                                           DL);
+
+    blk->insert(instrPos, newInstr);
+    // Carry over the old branch's condition register.
+    MachineInstrBuilder(newInstr).addReg(oldInstr->getOperand(1).getReg(),
+                                         false);
+
+    SHOWNEWINSTR(newInstr);
+    //erase later oldInstr->eraseFromParent();
+  } //insertCondBranchBefore
+
+  // Insert a conditional branch with opcode newOpcode before insertPos in
+  // blk, using regNum as its condition register.
+  static void insertCondBranchBefore(MachineBasicBlock *blk,
+                                     MachineBasicBlock::iterator insertPos,
+                                     int newOpcode,
+                                     AMDILCFGStructurizer *passRep,
+                                     RegiT regNum,
+									 DebugLoc DL) {
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL);
+
+    //insert before
+    blk->insert(insertPos, newInstr);
+    MachineInstrBuilder(newInstr).addReg(regNum, false);
+
+    SHOWNEWINSTR(newInstr);
+  } //insertCondBranchBefore
+
+  // Append a conditional branch with opcode newOpcode to blk, using regNum
+  // as its condition register (empty DebugLoc).
+  static void insertCondBranchEnd(MachineBasicBlock *blk,
+                                  int newOpcode,
+                                  AMDILCFGStructurizer *passRep,
+                                  RegiT regNum) {
+    const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DebugLoc());
+
+    blk->push_back(newInstr);
+    MachineInstrBuilder(newInstr).addReg(regNum, false);
+
+    SHOWNEWINSTR(newInstr);
+  } //insertCondBranchEnd
+
+
+  // Build a "regNum = regVal" move-immediate and insert it immediately
+  // before instrPos.
+  static void insertAssignInstrBefore(MachineBasicBlock::iterator instrPos,
+                                      AMDILCFGStructurizer *passRep,
+                                      RegiT regNum, int regVal) {
+    MachineInstr *oldInstr = &(*instrPos);
+    const AMDILInstrInfo *tii =
+             static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+    MachineBasicBlock *blk = oldInstr->getParent();
+    MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
+                                                 regVal);
+    blk->insert(instrPos, newInstr);
+
+    SHOWNEWINSTR(newInstr);
+  } //insertAssignInstrBefore
+
+  // Build a "regNum = regVal" move-immediate and insert it at the start of
+  // blk.
+  static void insertAssignInstrBefore(MachineBasicBlock *blk,
+                                      AMDILCFGStructurizer *passRep,
+                                      RegiT regNum, int regVal) {
+    const AMDILInstrInfo *tii =
+             static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+
+    MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
+                                                 regVal);
+    // insert(begin()) also handles the empty-block case (begin() == end()),
+    // collapsing the original if/else.
+    blk->insert(blk->begin(), newInstr);
+
+    SHOWNEWINSTR(newInstr);
+  } //insertAssignInstrBefore
+
+  // Insert an integer-equality compare (dstReg = (src1Reg == src2Reg))
+  // before `instrPos` in `blk`.  The opcode comes from the target's
+  // getIEQOpcode hook.
+  static void insertCompareInstrBefore(MachineBasicBlock *blk,
+                                       MachineBasicBlock::iterator instrPos,
+                                       AMDILCFGStructurizer *passRep,
+                                       RegiT dstReg, RegiT src1Reg,
+                                       RegiT src2Reg) {
+    const AMDILInstrInfo *tii =
+             static_cast<const AMDILInstrInfo *>(passRep->getTargetInstrInfo());
+    MachineInstr *newInstr =
+      blk->getParent()->CreateMachineInstr(tii->get(tii->getIEQOpcode()), DebugLoc());
+
+    MachineInstrBuilder(newInstr).addReg(dstReg, RegState::Define); //set target
+    MachineInstrBuilder(newInstr).addReg(src1Reg); //set src value
+    MachineInstrBuilder(newInstr).addReg(src2Reg); //set src value
+
+    blk->insert(instrPos, newInstr);
+    SHOWNEWINSTR(newInstr);
+
+  } //insertCompareInstrBefore
+
+  // Make `dstBlk` branch to every successor of `srcBlk`.  addSuccessor
+  // updates the successor's predecessor list as well.
+  static void cloneSuccessorList(MachineBasicBlock *dstBlk,
+                                 MachineBasicBlock *srcBlk) {
+    for (MachineBasicBlock::succ_iterator iter = srcBlk->succ_begin(),
+         iterEnd = srcBlk->succ_end(); iter != iterEnd; ++iter) {
+      dstBlk->addSuccessor(*iter);  // *iter's predecessor is also taken care of
+    }
+  } //cloneSuccessorList
+
+  // Create a new basic block at the end of the function containing copies
+  // of all of `srcBlk`'s instructions.  CFG edges are NOT cloned here; see
+  // cloneSuccessorList for that.
+  static MachineBasicBlock *clone(MachineBasicBlock *srcBlk) {
+    MachineFunction *func = srcBlk->getParent();
+    MachineBasicBlock *newBlk = func->CreateMachineBasicBlock();
+    func->push_back(newBlk);  //insert to function
+    for (MachineBasicBlock::iterator iter = srcBlk->begin(),
+         iterEnd = srcBlk->end();
+         iter != iterEnd; ++iter) {
+      MachineInstr *instr = func->CloneMachineInstr(iter);
+      newBlk->push_back(instr);
+    }
+    return newBlk;
+  }
+
+  // Redirect srcBlk's conditional branch from `oldBlk` to `newBlk`.
+  // MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose
+  // here because the AMDIL branch instruction is not recognized as a
+  // terminator.  TODO: fix that and retire this routine.
+  static void replaceInstrUseOfBlockWith(MachineBasicBlock *srcBlk,
+                                         MachineBasicBlock *oldBlk,
+                                         MachineBasicBlock *newBlk) {
+    MachineInstr *branchInstr = getLoopendBlockBranchInstr(srcBlk);
+    if (branchInstr && isCondBranch(branchInstr) &&
+        getExplicitBranch(branchInstr) == oldBlk) {
+      setExplicitBranch(branchInstr, newBlk);
+    }
+  }
+
+  // Final cleanup of a structurized block: delete each CONTINUE that
+  // immediately precedes an ENDLOOP.  Jump tables are not handled by the
+  // structurizer yet, hence the assert.
+  static void wrapup(MachineBasicBlock *entryBlk) {
+    assert((!entryBlk->getParent()->getJumpTableInfo()
+            || entryBlk->getParent()->getJumpTableInfo()->isEmpty())
+           && "found a jump table");
+
+     //collect continue right before endloop
+     SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> contInstr;
+     MachineBasicBlock::iterator pre = entryBlk->begin();
+     MachineBasicBlock::iterator iterEnd = entryBlk->end();
+     MachineBasicBlock::iterator iter = pre;
+     while (iter != iterEnd) {
+       // `pre` trails `iter` by exactly one instruction.
+       if (pre->getOpcode() == AMDIL::CONTINUE
+           && iter->getOpcode() == AMDIL::ENDLOOP) {
+         contInstr.push_back(pre);
+       }
+       pre = iter;
+       ++iter;
+     } //end while
+
+     //delete continue right before endloop
+     for (unsigned i = 0; i < contInstr.size(); ++i) {
+        contInstr[i]->eraseFromParent();
+     }
+
+     // TODO: fix up the jump table so a later phase won't be confused.
+     // If jumpTableInfo->isEmpty() is false we would need to clean the jump
+     // table, but there isn't such an interface yet; alternatively, replace
+     // all the other blocks in the jump table with the entryBlk.
+
+  } //wrapup
+
+  // Thin accessors for the analyses the structurizer pass depends on.
+  static MachineDominatorTree *getDominatorTree(AMDILCFGStructurizer &pass) {
+    return &pass.getAnalysis<MachineDominatorTree>();
+  }
+
+  static MachinePostDominatorTree*
+  getPostDominatorTree(AMDILCFGStructurizer &pass) {
+    return &pass.getAnalysis<MachinePostDominatorTree>();
+  }
+
+  static MachineLoopInfo *getLoopInfo(AMDILCFGStructurizer &pass) {
+    return &pass.getAnalysis<MachineLoopInfo>();
+  }
+}; // template class CFGStructTraits
+} //end of namespace llvm
+
+// createAMDILCFGPreparationPass - Returns the CFG-preparation pass.
+// AMDIL_OPT_LEVEL_DECL expands to an optional trailing parameter (macro
+// defined elsewhere), which is why there is no comma before it.
+FunctionPass *llvm::createAMDILCFGPreparationPass(TargetMachine &tm
+                                                  AMDIL_OPT_LEVEL_DECL) {
+  return new AMDILCFGPrepare(tm  AMDIL_OPT_LEVEL_VAR);
+}
+
+// Run the "prepare" phase of the CFG structurizer over `func`.
+bool AMDILCFGPrepare::runOnMachineFunction(MachineFunction &func) {
+  return llvmCFGStruct::CFGStructurizer<AMDILCFGStructurizer>().prepare(func,
+                                                                        *this,
+                                                                        TRI);
+}
+
+// createAMDILCFGStructurizerPass - Returns the CFG-structurization pass.
+// AMDIL_OPT_LEVEL_DECL expands to an optional trailing parameter (macro
+// defined elsewhere), which is why there is no comma before it.
+FunctionPass *llvm::createAMDILCFGStructurizerPass(TargetMachine &tm
+                                                   AMDIL_OPT_LEVEL_DECL) {
+  return new AMDILCFGPerform(tm  AMDIL_OPT_LEVEL_VAR);
+}
+
+// Run the main structurization phase of the CFG structurizer over `func`.
+bool AMDILCFGPerform::runOnMachineFunction(MachineFunction &func) {
+  return llvmCFGStruct::CFGStructurizer<AMDILCFGStructurizer>().run(func,
+                                                                    *this,
+                                                                    TRI);
+}
+
+//end of file newline goes below
+
diff --git a/lib/Target/AMDGPU/AMDILCallingConv.td b/lib/Target/AMDGPU/AMDILCallingConv.td
new file mode 100644
index 0000000..371d02a
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILCallingConv.td
@@ -0,0 +1,42 @@
+//===- AMDILCallingConv.td - Calling Conventions AMDIL -----*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the AMDIL architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// AMDIL 32-bit C return-value convention.
+def RetCC_AMDIL32 : CallingConv<[
+ // Since IL has no return values, all values can be emulated on the stack;
+ // the stack can then be mapped to a number of sequential virtual registers
+ // in IL.
+
+ // Integer and FP scalar values get put on the stack at 16-byte alignment
+ // but with a size of 4 bytes.
+ CCIfType<[i32, f32], CCAssignToReg<
+ [
+ R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
+]> >, CCAssignToStack<16, 16>]>;
+
+// AMDIL 32-bit C calling convention.
+def CC_AMDIL32 : CallingConv<[
+  // As with return values, parameters are emulated on the stack, which is
+ // then mapped to a number of sequential virtual registers in IL.
+ // (NOTE(review): the original comment read "Since IL has parameter
+ // values" — presumably a copy-edit of the RetCC comment; confirm intent.)
+ // Integer and FP scalar values get put on the stack at 16-byte alignment
+ // but with a size of 4 bytes.
+ CCIfType<[i32, f32], CCAssignToReg<
+[R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
+]> >, CCAssignToStack<16, 16>]>;
diff --git a/lib/Target/AMDGPU/AMDILCodeEmitter.h b/lib/Target/AMDGPU/AMDILCodeEmitter.h
new file mode 100644
index 0000000..0c7ae59
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILCodeEmitter.h
@@ -0,0 +1,48 @@
+//===-- AMDILCodeEmitter.h - AMDIL Code Emitter interface -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CodeEmitter interface for R600 and SI codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDILCODEEMITTER_H
+#define AMDILCODEEMITTER_H
+
+namespace llvm {
+
+  // Common code-emitter interface shared by the R600 and SI backends.
+  // getBinaryCodeForInstr is generated by TableGen from the instruction
+  // definitions; the virtual hooks let each target override how individual
+  // operands are encoded.  Every default hook encodes to 0, except
+  // VOPPostEncode which passes the value through unchanged.
+  class AMDILCodeEmitter {
+  public:
+    // This class is used polymorphically (virtual members, target
+    // subclasses); a virtual destructor makes deletion through a base
+    // pointer well-defined (C++ Core Guidelines C.35).
+    virtual ~AMDILCodeEmitter() {}
+
+    // TableGen-generated: full binary encoding for MI.
+    uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
+    // Encode a single machine operand; targets override for real encodings.
+    virtual uint64_t getMachineOpValue(const MachineInstr &MI,
+                                   const MachineOperand &MO) const { return 0; }
+    virtual unsigned GPR4AlignEncode(const MachineInstr  &MI,
+                                     unsigned OpNo) const {
+      return 0;
+    }
+    virtual unsigned GPR2AlignEncode(const MachineInstr &MI,
+                                     unsigned OpNo) const {
+      return 0;
+    }
+    // Post-process a fully encoded VOP instruction; default is identity.
+    virtual uint64_t VOPPostEncode(const MachineInstr &MI,
+                                   uint64_t Value) const {
+      return Value;
+    }
+    virtual uint64_t i32LiteralEncode(const MachineInstr &MI,
+                                      unsigned OpNo) const {
+      return 0;
+    }
+    virtual uint32_t SMRDmemriEncode(const MachineInstr &MI, unsigned OpNo)
+                                                                     const {
+      return 0;
+    }
+  };
+
+} // End namespace llvm
+
+#endif // AMDILCODEEMITTER_H
diff --git a/lib/Target/AMDGPU/AMDILDevice.cpp b/lib/Target/AMDGPU/AMDILDevice.cpp
new file mode 100644
index 0000000..4294a8b
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILDevice.cpp
@@ -0,0 +1,137 @@
+//===-- AMDILDevice.cpp - Base class for AMDIL Devices --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDILDevice.h"
+#include "AMDILSubtarget.h"
+
+using namespace llvm;
+// Default implementation for all of the classes.
+// Size both capability bit vectors, populate them, and default the device
+// flag to "all devices".
+// NOTE(review): setCaps() is virtual but is called from this constructor,
+// so only AMDILDevice::setCaps runs here (C++ rule); derived classes
+// presumably re-invoke their own setCaps — confirm in the subclasses.
+AMDILDevice::AMDILDevice(AMDILSubtarget *ST) : mSTM(ST)
+{
+  mHWBits.resize(AMDILDeviceInfo::MaxNumberCapabilities);
+  mSWBits.resize(AMDILDeviceInfo::MaxNumberCapabilities);
+  setCaps();
+  mDeviceFlag = OCL_DEVICE_ALL;
+}
+
+// NOTE(review): clearing the BitVectors is redundant — their destructors
+// release the storage anyway.
+AMDILDevice::~AMDILDevice()
+{
+    mHWBits.clear();
+    mSWBits.clear();
+}
+
+// Max GDS size in bytes; the base device exposes no GDS.
+size_t AMDILDevice::getMaxGDSSize() const
+{
+  return 0;
+}
+
+// Flag identifying which device(s) this object describes.
+uint32_t 
+AMDILDevice::getDeviceFlag() const
+{
+  return mDeviceFlag;
+}
+
+// Number of hardware constant buffers, or 0 when constant memory is
+// emulated in software.
+size_t AMDILDevice::getMaxNumCBs() const
+{
+  if (usesHardware(AMDILDeviceInfo::ConstantMem)) {
+    return HW_MAX_NUM_CB;
+  }
+
+  return 0;
+}
+
+// Size in bytes of a single hardware constant buffer (0 if unsupported).
+size_t AMDILDevice::getMaxCBSize() const
+{
+  if (usesHardware(AMDILDeviceInfo::ConstantMem)) {
+    return MAX_CB_SIZE;
+  }
+
+  return 0;
+}
+
+// Max scratch (private) memory size in bytes.
+size_t AMDILDevice::getMaxScratchSize() const
+{
+  return 65536;
+}
+
+// Default stack alignment in bytes.
+uint32_t AMDILDevice::getStackAlignment() const
+{
+  return 16;
+}
+
+// Populate the capability bit vectors.  A capability set in mSWBits is
+// emulated in software; one set in mHWBits is native.  A capability must
+// never appear in both (enforced by getExecutionMode's asserts).
+void AMDILDevice::setCaps()
+{
+  mSWBits.set(AMDILDeviceInfo::HalfOps);
+  mSWBits.set(AMDILDeviceInfo::ByteOps);
+  mSWBits.set(AMDILDeviceInfo::ShortOps);
+  mSWBits.set(AMDILDeviceInfo::HW64BitDivMod);
+  if (mSTM->isOverride(AMDILDeviceInfo::NoInline)) {
+    mSWBits.set(AMDILDeviceInfo::NoInline);
+  }
+  if (mSTM->isOverride(AMDILDeviceInfo::MacroDB)) {
+    mSWBits.set(AMDILDeviceInfo::MacroDB);
+  }
+  // Debug override forces constant and private memory onto the software
+  // path; otherwise both are handled in hardware.  (NOTE(review): the two
+  // identical Debug tests could be merged.)
+  if (mSTM->isOverride(AMDILDeviceInfo::Debug)) {
+    mSWBits.set(AMDILDeviceInfo::ConstantMem);
+  } else {
+    mHWBits.set(AMDILDeviceInfo::ConstantMem);
+  }
+  if (mSTM->isOverride(AMDILDeviceInfo::Debug)) {
+    mSWBits.set(AMDILDeviceInfo::PrivateMem);
+  } else {
+    mHWBits.set(AMDILDeviceInfo::PrivateMem);
+  }
+  if (mSTM->isOverride(AMDILDeviceInfo::BarrierDetect)) {
+    mSWBits.set(AMDILDeviceInfo::BarrierDetect);
+  }
+  mSWBits.set(AMDILDeviceInfo::ByteLDSOps);
+  mSWBits.set(AMDILDeviceInfo::LongOps);
+}
+
+// Classify how a capability is provided: Hardware, Software, or
+// Unsupported.  The HW and SW bit vectors are mutually exclusive per
+// capability; the asserts enforce that invariant.
+AMDILDeviceInfo::ExecutionMode
+AMDILDevice::getExecutionMode(AMDILDeviceInfo::Caps Caps) const
+{
+  if (mHWBits[Caps]) {
+    assert(!mSWBits[Caps] && "Cannot set both SW and HW caps");
+    return AMDILDeviceInfo::Hardware;
+  }
+
+  if (mSWBits[Caps]) {
+    assert(!mHWBits[Caps] && "Cannot set both SW and HW caps");
+    return AMDILDeviceInfo::Software;
+  }
+
+  return AMDILDeviceInfo::Unsupported;
+
+}
+
+// True if the capability is available at all (hardware or software).
+bool AMDILDevice::isSupported(AMDILDeviceInfo::Caps Mode) const
+{
+  return getExecutionMode(Mode) != AMDILDeviceInfo::Unsupported;
+}
+
+// True if the capability is implemented natively in hardware.
+bool AMDILDevice::usesHardware(AMDILDeviceInfo::Caps Mode) const
+{
+  return getExecutionMode(Mode) == AMDILDeviceInfo::Hardware;
+}
+
+// True if the capability is emulated in software.
+bool AMDILDevice::usesSoftware(AMDILDeviceInfo::Caps Mode) const
+{
+  return getExecutionMode(Mode) == AMDILDeviceInfo::Software;
+}
+
+// LLVM data-layout string for AMDIL: little-endian ("e"), 32-bit pointers,
+// plus per-type alignment entries (see LLVM's DataLayout documentation for
+// the field syntax).
+std::string
+AMDILDevice::getDataLayout() const
+{
+    return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
+      "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
+      "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
+      "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
+      "-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+      "-n8:16:32:64");
+}
diff --git a/lib/Target/AMDGPU/AMDILDevice.h b/lib/Target/AMDGPU/AMDILDevice.h
new file mode 100644
index 0000000..706dd82
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILDevice.h
@@ -0,0 +1,116 @@
+//===---- AMDILDevice.h - Define Device Data for AMDIL -----*- C++ -*------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface for the subtarget data classes.
+//
+//===----------------------------------------------------------------------===//
+// This file will define the interface that each generation needs to
+// implement in order to correctly answer queries on the capabilities of the
+// specific hardware.
+//===----------------------------------------------------------------------===//
+#ifndef _AMDILDEVICEIMPL_H_
+#define _AMDILDEVICEIMPL_H_
+#include "AMDIL.h"
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+  class AMDILSubtarget;
+  class MCStreamer;
+//===----------------------------------------------------------------------===//
+// Interface for data that is specific to a single device.  Each hardware
+// generation subclasses AMDILDevice and answers the capability queries.
+//===----------------------------------------------------------------------===//
+class AMDILDevice {
+public:
+  AMDILDevice(AMDILSubtarget *ST);
+  virtual ~AMDILDevice();
+
+  // Enum values for the various memory types.
+  // NOTE(review): the trailing name makes this declare a *member variable*
+  // IO_TYPE_IDS of anonymous enum type rather than a named enum type —
+  // presumably only the enumerators are used; confirm before relying on it.
+  enum {
+    RAW_UAV_ID   = 0,
+    ARENA_UAV_ID = 1,
+    LDS_ID       = 2,
+    GDS_ID       = 3,
+    SCRATCH_ID   = 4,
+    CONSTANT_ID  = 5,
+    GLOBAL_ID    = 6,
+    MAX_IDS      = 7
+  } IO_TYPE_IDS;
+
+  // Returns the max LDS size that the hardware supports.  Size is in
+  // bytes.
+  virtual size_t getMaxLDSSize() const = 0;
+
+  // Returns the max GDS size that the hardware supports if the GDS is
+  // supported by the hardware.  Size is in bytes.
+  virtual size_t getMaxGDSSize() const;
+
+  // Returns the max number of hardware constant address spaces that
+  // are supported by this device.
+  virtual size_t getMaxNumCBs() const;
+
+  // Returns the max number of bytes a single hardware constant buffer
+  // can support.  Size is in bytes.
+  virtual size_t getMaxCBSize() const;
+
+  // Returns the max number of bytes allowed by the hardware scratch
+  // buffer.  Size is in bytes.
+  virtual size_t getMaxScratchSize() const;
+
+  // Get the flag that corresponds to the device.
+  virtual uint32_t getDeviceFlag() const;
+
+  // Returns the number of work-items that exist in a single hardware
+  // wavefront.
+  virtual size_t getWavefrontSize() const = 0;
+
+  // Get the generational name of this specific device.
+  virtual uint32_t getGeneration() const = 0;
+
+  // Get the stack alignment of this specific device.
+  virtual uint32_t getStackAlignment() const;
+
+  // Get the resource ID for this specific device.
+  virtual uint32_t getResourceID(uint32_t DeviceID) const = 0;
+
+  // Get the max number of UAV's for this device.
+  virtual uint32_t getMaxNumUAVs() const = 0;
+
+
+  // API utilizing more detailed capabilities of each family of
+  // cards. If a capability is supported, then either usesHardware or
+  // usesSoftware returned true.  If usesHardware returned true, then
+  // usesSoftware must return false for the same capability.  Hardware
+  // execution means that the feature is done natively by the hardware
+  // and is not emulated by the software.  Software execution means
+  // that the feature could be done in the hardware, but there is
+  // software that emulates it with possibly using the hardware for
+  // support since the hardware does not fully comply with OpenCL
+  // specs.
+  bool isSupported(AMDILDeviceInfo::Caps Mode) const;
+  bool usesHardware(AMDILDeviceInfo::Caps Mode) const;
+  bool usesSoftware(AMDILDeviceInfo::Caps Mode) const;
+  virtual std::string getDataLayout() const;
+  static const unsigned int MAX_LDS_SIZE_700 = 16384;
+  static const unsigned int MAX_LDS_SIZE_800 = 32768;
+  static const unsigned int WavefrontSize = 64;
+  static const unsigned int HalfWavefrontSize = 32;
+  static const unsigned int QuarterWavefrontSize = 16;
+protected:
+  virtual void setCaps();
+  llvm::BitVector mHWBits;   // Capabilities implemented natively in hardware.
+  llvm::BitVector mSWBits;   // Capabilities emulated in software.
+  AMDILSubtarget *mSTM;      // Owning subtarget (not owned by this class).
+  uint32_t mDeviceFlag;
+private:
+  AMDILDeviceInfo::ExecutionMode
+  getExecutionMode(AMDILDeviceInfo::Caps Caps) const;
+}; // AMDILDevice
+
+} // namespace llvm
+#endif // _AMDILDEVICEIMPL_H_
diff --git a/lib/Target/AMDGPU/AMDILDeviceInfo.cpp b/lib/Target/AMDGPU/AMDILDeviceInfo.cpp
new file mode 100644
index 0000000..cbf5b51
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILDeviceInfo.cpp
@@ -0,0 +1,93 @@
+//===-- AMDILDeviceInfo.cpp - AMDILDeviceInfo class -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Function that creates DeviceInfo from a device name and other information.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDILDevices.h"
+#include "AMDILSubtarget.h"
+
+#include <cassert>
+
+using namespace llvm;
+namespace llvm {
+namespace AMDILDeviceInfo {
+
+// Assert that a device lacking 64-bit pointer support was not requested
+// with 64-bit (or 64-on-32-bit) pointers.  The original wrapped each of
+// these checks in "#if DEBUG", which compiled them out unless the DEBUG
+// macro happened to be defined; a plain assert already disappears in
+// NDEBUG builds, so the extra guard only hid the checks.
+static void assertNot64BitPointers(bool is64bit, bool is64on32bit) {
+  assert(!is64bit && "This device does not support 64bit pointers!");
+  assert(!is64on32bit && "This device does not support 64bit"
+      " on 32bit pointers!");
+  (void)is64bit;
+  (void)is64on32bit;
+}
+
+// Factory: map a device name (e.g. "rv710", "rv770", "cypress", "SI") to a
+// newly allocated device-description object.  The caller owns the result.
+// Unrecognized names fall back to the 7XX device.
+AMDILDevice*
+getDeviceFromName(const std::string &deviceName, AMDILSubtarget *ptr,
+                  bool is64bit, bool is64on32bit)
+{
+    // Guard the character probes with a length check: the original indexed
+    // c_str()[2] and [3] unconditionally, which is out of bounds for names
+    // shorter than that.
+    if (deviceName.size() >= 4 && deviceName[2] == '7') {
+        switch (deviceName[3]) {
+            case '1':
+                return new AMDIL710Device(ptr);
+            case '7':
+                return new AMDIL770Device(ptr);
+            default:
+                return new AMDIL7XXDevice(ptr);
+        }
+    } else if (deviceName == "cypress") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILCypressDevice(ptr);
+    } else if (deviceName == "juniper") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILEvergreenDevice(ptr);
+    } else if (deviceName == "redwood") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILRedwoodDevice(ptr);
+    } else if (deviceName == "cedar") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILCedarDevice(ptr);
+    } else if (deviceName == "barts" || deviceName == "turks") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILNIDevice(ptr);
+    } else if (deviceName == "cayman") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILCaymanDevice(ptr);
+    } else if (deviceName == "caicos") {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDILNIDevice(ptr);
+    } else if (deviceName == "SI") {
+        return new AMDILSIDevice(ptr);
+    } else {
+        assertNot64BitPointers(is64bit, is64on32bit);
+        return new AMDIL7XXDevice(ptr);
+    }
+}
+} // End namespace AMDILDeviceInfo
+} // End namespace llvm
diff --git a/lib/Target/AMDGPU/AMDILDeviceInfo.h b/lib/Target/AMDGPU/AMDILDeviceInfo.h
new file mode 100644
index 0000000..06ac432
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILDeviceInfo.h
@@ -0,0 +1,89 @@
+//===-- AMDILDeviceInfo.h - Constants for describing devices --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#ifndef _AMDILDEVICEINFO_H_
+#define _AMDILDEVICEINFO_H_
+
+
+#include <string>
+
+namespace llvm
+{
+  class AMDILDevice;
+  class AMDILSubtarget;
+  namespace AMDILDeviceInfo
+  {
+    // Each capability can be executed using a hardware instruction,
+    // emulated with a sequence of software instructions, or not
+    // supported at all.
+    enum ExecutionMode {
+      Unsupported = 0, // Unsupported feature on the card (default value).
+      Software, // This is the execution mode that is set if the
+      // feature is emulated in software.
+      Hardware  // This execution mode is set if the feature exists
+        // natively in hardware.
+    };
+
+    // Any changes to this need to have a corresponding update to the
+    // twiki page GPUMetadataABI.
+    enum Caps {
+      HalfOps          = 0x1,  // Half float is supported or not.
+      DoubleOps        = 0x2,  // Double is supported or not.
+      ByteOps          = 0x3,  // Byte (char) is supported or not.
+      ShortOps         = 0x4,  // Short is supported or not.
+      LongOps          = 0x5,  // Long is supported or not.
+      Images           = 0x6,  // Images are supported or not.
+      ByteStores       = 0x7,  // ByteStores available (!HD4XXX).
+      ConstantMem      = 0x8,  // Constant/CB memory.
+      LocalMem         = 0x9,  // Local/LDS memory.
+      PrivateMem       = 0xA,  // Scratch/Private/Stack memory.
+      RegionMem        = 0xB,  // OCL GDS Memory Extension.
+      FMA              = 0xC,  // Use HW FMA or SW FMA.
+      ArenaSegment     = 0xD,  // Use for Arena UAV per pointer 12-1023.
+      MultiUAV         = 0xE,  // Use for UAV per Pointer 0-7.
+      Reserved0        = 0xF,  // Reserved flag.
+      NoAlias          = 0x10, // Cached loads.
+      Signed24BitOps   = 0x11, // Peephole Optimization.
+      // Debug mode implies that no hardware features or optimizations
+      // are performed and that all memory access goes through a single
+      // uav (Arena on HD5XXX/HD6XXX and Raw on HD4XXX).
+      Debug            = 0x12, // Debug mode is enabled.
+      CachedMem        = 0x13, // Cached mem is available or not.
+      BarrierDetect    = 0x14, // Detect duplicate barriers.
+      Reserved1        = 0x15, // Reserved flag.
+      ByteLDSOps       = 0x16, // Flag to specify if byte LDS ops are available.
+      ArenaVectors     = 0x17, // Flag to specify if vector loads from arena work.
+      TmrReg           = 0x18, // Flag to specify if Tmr register is supported.
+      NoInline         = 0x19, // Flag to specify that no inlining should occur.
+      MacroDB          = 0x1A, // Flag to specify that backend handles macrodb.
+      HW64BitDivMod    = 0x1B, // Flag for backend to generate 64bit div/mod.
+      ArenaUAV         = 0x1C, // Flag to specify that arena uav is supported.
+      PrivateUAV       = 0x1D, // Flag to specify that private memory uses uav's.
+      // If more capabilities are required, then
+      // this number needs to be increased.
+      // All capabilities must come before this
+      // number.
+      MaxNumberCapabilities = 0x20
+    };
+    // These have to be in order with the older generations
+    // having the lower number enumerations.
+    enum Generation {
+      HD4XXX = 0, // 7XX based devices.
+      HD5XXX, // Evergreen based devices.
+      HD6XXX, // NI/Evergreen+ based devices.
+      HD7XXX,
+      HDTEST, // Experimental feature testing device.
+      HDNUMGEN
+    };
+
+  // Factory: map a device name (e.g. "rv770", "cypress", "SI") to a newly
+  // allocated device-description object.  The caller owns the result.
+  AMDILDevice*
+    getDeviceFromName(const std::string &name, AMDILSubtarget *ptr, bool is64bit = false, bool is64on32bit = false);
+  } // namespace AMDILDeviceInfo
+} // namespace llvm
+#endif // _AMDILDEVICEINFO_H_
diff --git a/lib/Target/AMDGPU/AMDILDevices.h b/lib/Target/AMDGPU/AMDILDevices.h
new file mode 100644
index 0000000..cfcc330
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILDevices.h
@@ -0,0 +1,19 @@
+//===-- AMDILDevices.h - Consolidate AMDIL Device headers -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#ifndef __AMDIL_DEVICES_H_
+#define __AMDIL_DEVICES_H_
+// Include all of the device specific header files
+// This file is for Internal use only!
+#include "AMDIL7XXDevice.h"
+#include "AMDILDevice.h"
+#include "AMDILEvergreenDevice.h"
+#include "AMDILNIDevice.h"
+#include "AMDILSIDevice.h"
+
+#endif // __AMDIL_DEVICES_H_
diff --git a/lib/Target/AMDGPU/AMDILEnumeratedTypes.td b/lib/Target/AMDGPU/AMDILEnumeratedTypes.td
new file mode 100644
index 0000000..f10936b
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILEnumeratedTypes.td
@@ -0,0 +1,522 @@
+//===-- AMDILEnumeratedTypes.td - IL Type definitions --*- tablegen -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+// ILEnumeratedTypes.td - The IL Enumerated Types
+//===--------------------------------------------------------------------===//
+
+// Section 5.1  IL Shader
+// (Section/Table numbers throughout this file refer to the AMD IL spec.)
+// Which shader type an IL program declares itself as.
+class ILShader<bits<8> val> {
+    bits<8> Value = val;
+}
+// Table 5-1
+def IL_SHADER_PIXEL : ILShader<0>;
+def IL_SHADER_COMPUTE : ILShader<1>;
+
+// Section 5.2 IL RegType
+// Kinds of register operands an IL instruction may reference.
+class ILRegType<bits<6> val> {
+    bits<6> Value = val;
+}
+// Table 5-2
+def IL_REGTYPE_TEMP      : ILRegType<0>;
+def IL_REGTYPE_WINCOORD  : ILRegType<1>;
+def IL_REGTYPE_CONST_BUF : ILRegType<2>;
+def IL_REGTYPE_LITERAL   : ILRegType<3>;
+def IL_REGTYPE_ITEMP     : ILRegType<4>;
+def IL_REGTYPE_GLOBAL    : ILRegType<5>;
+
+// Section 5.3 IL Component Select
+// Source swizzle selector: one of the four components, or constant 0/1.
+class ILComponentSelect<bits<3> val, string text> {
+     bits<3> Value = val;
+     string Text = text;
+}
+// Table 5-3
+def IL_COMPSEL_X : ILComponentSelect<0, "x">;
+def IL_COMPSEL_Y : ILComponentSelect<1, "y">;
+def IL_COMPSEL_Z : ILComponentSelect<2, "z">;
+def IL_COMPSEL_W : ILComponentSelect<3, "w">;
+def IL_COMPSEL_0 : ILComponentSelect<4, "0">;
+def IL_COMPSEL_1 : ILComponentSelect<5, "1">;
+
+// Section 5.4 IL Mod Dst Comp
+// Per-component destination write modifier.  All four WRITE_* defs share
+// value 1 -- presumably each dest component carries its own 2-bit code and
+// the component is implied by field position; confirm against the IL spec.
+// NOTE(review): the lowercase y/z/w in the def names is inconsistent with
+// WRITE_X, but renaming would break references elsewhere in the backend.
+class ILModDstComp<bits<2> val, string text> {
+    bits<2> Value = val;
+    string Text = text;
+}
+// Table 5-4
+def IL_MODCOMP_NOWRITE : ILModDstComp<0, "_">;
+def IL_MODCOMP_WRITE_X : ILModDstComp<1, "x">;
+def IL_MODCOMP_WRITE_y : ILModDstComp<1, "y">;
+def IL_MODCOMP_WRITE_z : ILModDstComp<1, "z">;
+def IL_MODCOMP_WRITE_w : ILModDstComp<1, "w">;
+def IL_MODCOMP_0       : ILModDstComp<2, "0">;
+def IL_MODCOMP_1       : ILModDstComp<3, "1">;
+
+// Section 5.5 IL Import Usage
+// Usage annotation text attached to an input import declaration.
+class ILImportUsage<bits<1> val, string usage> {
+    bits<1> Value = val;
+    string Text = usage;
+}
+// Table 5-5
+def IL_IMPORTUSAGE_WINCOORD : ILImportUsage<0, "_usage(wincoord)">;
+
+// Section 5.6 Il Shift Scale
+// Result scale modifiers: _x2/_x4/_x8 multiply, _d2/_d4/_d8 divide.
+class ILShiftScale<bits<4> val, string scale> {
+    bits<4> Value = val;
+    string Text = scale;
+}
+
+// Table 5-6
+def IL_SHIFT_NONE   : ILShiftScale<0, "">;
+def IL_SHIFT_X2     : ILShiftScale<1, "_x2">;
+def IL_SHIFT_X4     : ILShiftScale<2, "_x4">;
+def IL_SHIFT_X8     : ILShiftScale<3, "_x8">;
+def IL_SHIFT_D2     : ILShiftScale<4, "_d2">;
+def IL_SHIFT_D4     : ILShiftScale<5, "_d4">;
+def IL_SHIFT_D8     : ILShiftScale<6, "_d8">;
+
+// Section 5.7 IL Divide Component
+// Divide-by-component modifier appended to an instruction's mnemonic.
+class ILDivComp<bits<3> val, string divcomp> {
+    bits<3> Value = val;
+    string Text = divcomp;
+}
+
+// Table 5-7
+def IL_DIVCOMP_NONE : ILDivComp<0, "_divcomp(none)">;
+def IL_DIVCOMP_Y    : ILDivComp<1, "_divcomp(y)">;
+def IL_DIVCOMP_Z    : ILDivComp<2, "_divcomp(z)">;
+def IL_DIVCOMP_W    : ILDivComp<3, "_divcomp(w)">;
+//def IL_DIVCOMP_UNKNOWN : ILDivComp<4, "_divcomp(unknown)">;
+
+// Section 5.8 IL Relational Op
+// Relational comparison modifier (_relop(...)).
+class ILRelOp<bits<3> val, string op> {
+    bits<3> Value = val;
+    string Text = op;
+}
+
+// Table 5-8
+def IL_RELOP_EQ : ILRelOp<0, "_relop(eq)">;
+def IL_RELOP_NE : ILRelOp<1, "_relop(ne)">;
+def IL_RELOP_GT : ILRelOp<2, "_relop(gt)">;
+def IL_RELOP_GE : ILRelOp<3, "_relop(ge)">;
+def IL_RELOP_LT : ILRelOp<4, "_relop(lt)">;
+def IL_RELOP_LE : ILRelOp<5, "_relop(le)">;
+
+// Section 5.9 IL Zero Op
+// Divide-by-zero behavior modifier (_zeroop(...)).
+class ILZeroOp<bits<3> val, string behavior> {
+    bits<3> Value = val;
+    string Text = behavior;
+}
+
+// Table 5-9
+def IL_ZEROOP_FLTMAX    : ILZeroOp<0, "_zeroop(fltmax)">;
+def IL_ZEROOP_0         : ILZeroOp<1, "_zeroop(zero)">;
+def IL_ZEROOP_INFINITY  : ILZeroOp<2, "_zeroop(infinity)">;
+def IL_ZEROOP_INF_ELSE_MAX : ILZeroOp<3, "_zeroop(inf_else_max)">;
+
+// Section 5.10 IL Cmp Value
+// Constant compare values available to cmp-style instructions.
+class ILCmpValue<bits<3> val, string num> {
+    bits<3> Value = val;
+    string Text = num;
+}
+
+// Table 5-10
+def IL_CMPVAL_0_0     : ILCmpValue<0, "0.0">;
+def IL_CMPVAL_0_5     : ILCmpValue<1, "0.5">;
+def IL_CMPVAL_1_0     : ILCmpValue<2, "1.0">;
+def IL_CMPVAL_NEG_0_5 : ILCmpValue<3, "-0.5">;
+def IL_CMPVAL_NEG_1_0 : ILCmpValue<4, "-1.0">;
+
+// Section 5.11 IL Addressing
+// Operand addressing modes.
+class ILAddressing<bits<3> val> {
+    bits<3> Value = val;
+}
+
+// Table 5-11
+def IL_ADDR_ABSOLUTE     : ILAddressing<0>;
+def IL_ADDR_RELATIVE     : ILAddressing<1>;
+def IL_ADDR_REG_RELATIVE : ILAddressing<2>;
+
+// Section 5.11 IL Element Format
+// NOTE(review): section/table number "5-11" repeats the previous block's --
+// likely a copy-paste slip in the spec reference; confirm the real number.
+// Resource element data formats.
+class ILElementFormat<bits<5> val> {
+    bits<5> Value = val;
+}
+
+// Table 5-11
+def IL_ELEMENTFORMAT_UNKNOWN : ILElementFormat<0>;
+def IL_ELEMENTFORMAT_SNORM   : ILElementFormat<1>;
+def IL_ELEMENTFORMAT_UNORM   : ILElementFormat<2>;
+def IL_ELEMENTFORMAT_SINT    : ILElementFormat<3>;
+def IL_ELEMENTFORMAT_UINT    : ILElementFormat<4>;
+def IL_ELEMENTFORMAT_FLOAT   : ILElementFormat<5>;
+def IL_ELEMENTFORMAT_SRGB    : ILElementFormat<6>;
+def IL_ELEMENTFORMAT_MIXED   : ILElementFormat<7>;
+def IL_ELEMENTFORMAT_Last    : ILElementFormat<8>;
+
+// Section 5.12 IL Op Code
+// Generic IL opcode: a 16-bit encoding plus its textual mnemonic.
+// NOTE(review): the default 'val = -1' truncates to all-ones (0xFFFF) in a
+// bits<16> field -- presumably an "invalid opcode" marker; confirm.
+class ILOpCode<bits<16> val = -1, string cmd> {
+    bits<16> Value = val;
+    string Text = cmd;
+}
+
+// Table 5-12
+def IL_DCL_CONST_BUFFER         : ILOpCode<0, "dcl_cb">;
+def IL_DCL_INDEXED_TEMP_ARRAY   : ILOpCode<1, "dcl_index_temp_array">;
+def IL_DCL_INPUT                : ILOpCode<2, "dcl_input">;
+def IL_DCL_LITERAL              : ILOpCode<3, "dcl_literal">;
+def IL_DCL_OUTPUT               : ILOpCode<4, "dcl_output">;
+def IL_DCL_RESOURCE             : ILOpCode<5, "dcl_resource">;
+def IL_OP_ABS                   : ILOpCode<6, "abs">;
+def IL_OP_ADD                   : ILOpCode<7, "add">;
+def IL_OP_AND                   : ILOpCode<8, "iand">;
+def IL_OP_BREAK                 : ILOpCode<9, "break">;
+def IL_OP_BREAK_LOGICALNZ       : ILOpCode<10, "break_logicalnz">;
+def IL_OP_BREAK_LOGICALZ        : ILOpCode<11, "break_logicalz">;
+def IL_OP_BREAKC                : ILOpCode<12, "breakc">;
+def IL_OP_CALL                  : ILOpCode<13, "call">;
+def IL_OP_CALL_LOGICALNZ        : ILOpCode<14, "call_logicalnz">;
+def IL_OP_CALL_LOGICALZ         : ILOpCode<15, "call_logicalz">;
+def IL_OP_CASE                  : ILOpCode<16, "case">;
+def IL_OP_CLG                   : ILOpCode<17, "clg">;
+def IL_OP_CMOV                  : ILOpCode<18, "cmov">;
+def IL_OP_CMOV_LOGICAL          : ILOpCode<19, "cmov_logical">;
+def IL_OP_CMP                   : ILOpCode<20, "cmp">;
+def IL_OP_CONTINUE              : ILOpCode<21, "continue">;
+def IL_OP_CONTINUE_LOGICALNZ    : ILOpCode<22, "continue_logicalnz">;
+def IL_OP_CONTINUE_LOGICALZ     : ILOpCode<23, "continue_logicalz">;
+def IL_OP_CONTINUEC             : ILOpCode<24, "continuec">;
+def IL_OP_COS                   : ILOpCode<25, "cos">;
+def IL_OP_COS_VEC               : ILOpCode<26, "cos_vec">;
+def IL_OP_D_2_F                 : ILOpCode<27, "d2f">;
+def IL_OP_D_ADD                 : ILOpCode<28, "dadd">;
+def IL_OP_D_EQ                  : ILOpCode<29, "deq">;
+def IL_OP_D_FRC                 : ILOpCode<30, "dfrac">;
+def IL_OP_D_FREXP               : ILOpCode<31, "dfrexp">;
+def IL_OP_D_GE                  : ILOpCode<32, "dge">;
+def IL_OP_D_LDEXP               : ILOpCode<33, "dldexp">;
+def IL_OP_D_LT                  : ILOpCode<34, "dlt">;
+def IL_OP_D_MAD                 : ILOpCode<35, "dmad">;
+def IL_OP_D_MUL                 : ILOpCode<36, "dmul">;
+def IL_OP_D_NE                  : ILOpCode<37, "dne">;
+def IL_OP_DEFAULT               : ILOpCode<38, "default">;
+def IL_OP_DISCARD_LOGICALNZ     : ILOpCode<39, "discard_logicalnz">;
+def IL_OP_DISCARD_LOGICALZ      : ILOpCode<40, "discard_logicalz">;
+def IL_OP_DIV                   : ILOpCode<41, "div_zeroop(infinity)">;
+def IL_OP_DP2                   : ILOpCode<42, "dp2">;
+def IL_OP_DP3                   : ILOpCode<43, "dp3">;
+def IL_OP_DP4                   : ILOpCode<44, "dp4">;
+def IL_OP_ELSE                  : ILOpCode<45, "else">;
+def IL_OP_END                   : ILOpCode<46, "end">;
+def IL_OP_ENDFUNC               : ILOpCode<47, "endfunc">;
+def IL_OP_ENDIF                 : ILOpCode<48, "endif">;
+def IL_OP_ENDLOOP               : ILOpCode<49, "endloop">;
+def IL_OP_ENDMAIN               : ILOpCode<50, "endmain">;
+def IL_OP_ENDSWITCH             : ILOpCode<51, "endswitch">;
+def IL_OP_EQ                    : ILOpCode<52, "eq">;
+def IL_OP_EXP                   : ILOpCode<53, "exp">;
+def IL_OP_EXP_VEC               : ILOpCode<54, "exp_vec">;
+def IL_OP_F_2_D                 : ILOpCode<55, "f2d">;
+def IL_OP_FLR                   : ILOpCode<56, "flr">;
+def IL_OP_FRC                   : ILOpCode<57, "frc">;
+def IL_OP_FTOI                  : ILOpCode<58, "ftoi">;
+def IL_OP_FTOU                  : ILOpCode<59, "ftou">;
+def IL_OP_FUNC                  : ILOpCode<60, "func">;
+def IL_OP_GE                    : ILOpCode<61, "ge">;
+def IL_OP_I_ADD                 : ILOpCode<62, "iadd">;
+def IL_OP_I_EQ                  : ILOpCode<63, "ieq">;
+def IL_OP_I_GE                  : ILOpCode<64, "ige">;
+def IL_OP_I_LT                  : ILOpCode<65, "ilt">;
+def IL_OP_I_MAD                 : ILOpCode<66, "imad">;
+def IL_OP_I_MAX                 : ILOpCode<67, "imax">;
+def IL_OP_I_MIN                 : ILOpCode<68, "imin">;
+def IL_OP_I_MUL                 : ILOpCode<69, "imul">;
+def IL_OP_I_MUL_HIGH            : ILOpCode<70, "imul_high">;
+def IL_OP_I_NE                  : ILOpCode<71, "ine">;
+def IL_OP_I_NEGATE              : ILOpCode<72, "inegate">;
+def IL_OP_I_NOT                 : ILOpCode<73, "inot">;
+def IL_OP_I_OR                  : ILOpCode<74, "ior">;
+def IL_OP_I_SHL                 : ILOpCode<75, "ishl">;
+def IL_OP_I_SHR                 : ILOpCode<76, "ishr">;
+def IL_OP_I_XOR                 : ILOpCode<77, "ixor">;
+def IL_OP_IF_LOGICALNZ          : ILOpCode<78, "if_logicalnz">;
+def IL_OP_IF_LOGICALZ           : ILOpCode<79, "if_logicalz">;
+def IL_OP_IFC                   : ILOpCode<80, "ifc">;
+def IL_OP_ITOF                  : ILOpCode<81, "itof">;
+def IL_OP_LN                    : ILOpCode<82, "ln">;
+def IL_OP_LOG                   : ILOpCode<83, "log">;
+def IL_OP_LOG_VEC               : ILOpCode<84, "log_vec">;
+def IL_OP_LOOP                  : ILOpCode<85, "loop">;
+def IL_OP_LT                    : ILOpCode<86, "lt">;
+def IL_OP_MAD                   : ILOpCode<87, "mad_ieee">;
+def IL_OP_MAX                   : ILOpCode<88, "max_ieee">;
+def IL_OP_MIN                   : ILOpCode<89, "min_ieee">;
+def IL_OP_MOD                   : ILOpCode<90, "mod_ieee">;
+def IL_OP_MOV                   : ILOpCode<91, "mov">;
+def IL_OP_MUL_IEEE              : ILOpCode<92, "mul_ieee">;
+def IL_OP_NE                    : ILOpCode<93, "ne">;
+def IL_OP_NRM                   : ILOpCode<94, "nrm_nrm4_zeroop(zero)">;
+def IL_OP_POW                   : ILOpCode<95, "pow">;
+def IL_OP_RCP                   : ILOpCode<96, "rcp">;
+def IL_OP_RET                   : ILOpCode<97, "ret">;
+def IL_OP_RET_DYN               : ILOpCode<98, "ret_dyn">;
+def IL_OP_RET_LOGICALNZ         : ILOpCode<99, "ret_logicalnz">;
+def IL_OP_RET_LOGICALZ          : ILOpCode<100, "ret_logicalz">;
+def IL_OP_RND                   : ILOpCode<101, "rnd">;
+def IL_OP_ROUND_NEAR            : ILOpCode<102, "round_nearest">;
+def IL_OP_ROUND_NEG_INF         : ILOpCode<103, "round_neginf">;
+def IL_OP_ROUND_POS_INF         : ILOpCode<104, "round_plusinf">;
+def IL_OP_ROUND_ZERO            : ILOpCode<105, "round_z">;
+def IL_OP_RSQ                   : ILOpCode<106, "rsq">;
+def IL_OP_RSQ_VEC               : ILOpCode<107, "rsq_vec">;
+def IL_OP_SAMPLE                : ILOpCode<108, "sample">;
+def IL_OP_SAMPLE_L              : ILOpCode<109, "sample_l">;
+def IL_OP_SET                   : ILOpCode<110, "set">;
+def IL_OP_SGN                   : ILOpCode<111, "sgn">;
+def IL_OP_SIN                   : ILOpCode<112, "sin">;
+def IL_OP_SIN_VEC               : ILOpCode<113, "sin_vec">;
+def IL_OP_SUB                   : ILOpCode<114, "sub">;
+def IL_OP_SWITCH                : ILOpCode<115, "switch">;
+def IL_OP_TRC                   : ILOpCode<116, "trc">;
+def IL_OP_U_DIV                 : ILOpCode<117, "udiv">;
+def IL_OP_U_GE                  : ILOpCode<118, "uge">;
+def IL_OP_U_LT                  : ILOpCode<119, "ult">;
+def IL_OP_U_MAD                 : ILOpCode<120, "umad">;
+def IL_OP_U_MAX                 : ILOpCode<121, "umax">;
+def IL_OP_U_MIN                 : ILOpCode<122, "umin">;
+def IL_OP_U_MOD                 : ILOpCode<123, "umod">;
+def IL_OP_U_MUL                 : ILOpCode<124, "umul">;
+def IL_OP_U_MUL_HIGH            : ILOpCode<125, "umul_high">;
+def IL_OP_U_SHR                 : ILOpCode<126, "ushr">;
+def IL_OP_UTOF                  : ILOpCode<127, "utof">;
+def IL_OP_WHILE                 : ILOpCode<128, "whileloop">;
+// SC IL instructions that are not in CAL IL
+def IL_OP_ACOS                  : ILOpCode<129, "acos">;
+def IL_OP_ASIN                  : ILOpCode<130, "asin">;
+def IL_OP_EXN                   : ILOpCode<131, "exn">;
+def IL_OP_UBIT_REVERSE          : ILOpCode<132, "ubit_reverse">;
+def IL_OP_UBIT_EXTRACT          : ILOpCode<133, "ubit_extract">;
+def IL_OP_IBIT_EXTRACT          : ILOpCode<134, "ibit_extract">;
+def IL_OP_SQRT                  : ILOpCode<135, "sqrt">;
+def IL_OP_SQRT_VEC              : ILOpCode<136, "sqrt_vec">;
+def IL_OP_ATAN                  : ILOpCode<137, "atan">;
+// NOTE(review): IL_OP_TAN reuses value 137 (same as IL_OP_ATAN above) while
+// 138 is already taken by IL_OP_D_DIV below -- probable encoding collision;
+// confirm before relying on Value to distinguish these ops.
+def IL_OP_TAN                   : ILOpCode<137, "tan">;
+def IL_OP_D_DIV                 : ILOpCode<138, "ddiv">;
+// NOTE(review): F_NEG emits "mov" -- presumably negation is applied via a
+// source negate modifier on the operand; confirm.
+def IL_OP_F_NEG                 : ILOpCode<139, "mov">;
+def IL_OP_GT                    : ILOpCode<140, "gt">;
+// NOTE(review): IL_OP_LE emits "lt" -- presumably lowered with swapped
+// operands; confirm that this is intentional and not a typo for "le".
+def IL_OP_LE                    : ILOpCode<141, "lt">;
+def IL_OP_DIST                  : ILOpCode<142, "dist">;
+def IL_OP_LEN                   : ILOpCode<143, "len">;
+def IL_OP_MACRO                 : ILOpCode<144, "mcall">;
+def IL_OP_INTR                  : ILOpCode<145, "call">;
+def IL_OP_I_FFB_HI              : ILOpCode<146, "ffb_hi">;
+def IL_OP_I_FFB_LO              : ILOpCode<147, "ffb_lo">;
+def IL_OP_BARRIER               : ILOpCode<148, "fence_threads_memory_lds">;
+def IL_OP_BARRIER_LOCAL         : ILOpCode<149, "fence_threads_lds">;
+def IL_OP_BARRIER_GLOBAL        : ILOpCode<150, "fence_threads_memory">;
+def IL_OP_FENCE                 : ILOpCode<151, "fence_lds_memory">;
+def IL_OP_FENCE_READ_ONLY       : ILOpCode<152, "fence_lds_mem_read_only">;
+def IL_OP_FENCE_WRITE_ONLY      : ILOpCode<153, "fence_lds_mem_write_only">;
+def IL_PSEUDO_INST              : ILOpCode<154, ";Pseudo Op">;
+def IL_OP_UNPACK_0              : ILOpCode<155, "unpack0">;
+def IL_OP_UNPACK_1              : ILOpCode<156, "unpack1">;
+def IL_OP_UNPACK_2              : ILOpCode<157, "unpack2">;
+def IL_OP_UNPACK_3              : ILOpCode<158, "unpack3">;
+def IL_OP_PI_REDUCE             : ILOpCode<159, "pireduce">;
+def IL_OP_IBIT_COUNT            : ILOpCode<160, "icbits">;
+def IL_OP_I_FFB_SGN             : ILOpCode<161, "ffb_shi">;
+def IL_OP_F2U4                  : ILOpCode<162, "f_2_u4">;
+def IL_OP_BIT_ALIGN             : ILOpCode<163, "bitalign">;
+def IL_OP_BYTE_ALIGN            : ILOpCode<164, "bytealign">;
+def IL_OP_U4_LERP               : ILOpCode<165, "u4lerp">;
+def IL_OP_SAD                   : ILOpCode<166, "sad">;
+def IL_OP_SAD_HI                : ILOpCode<167, "sadhi">;
+def IL_OP_SAD4                  : ILOpCode<168, "sad4">;
+def IL_OP_UBIT_INSERT           : ILOpCode<169, "ubit_insert">;
+def IL_OP_I_CARRY               : ILOpCode<170, "icarry">;
+def IL_OP_I_BORROW              : ILOpCode<171, "iborrow">;
+def IL_OP_U_MAD24               : ILOpCode<172, "umad24">;
+def IL_OP_U_MUL24               : ILOpCode<173, "umul24">;
+def IL_OP_I_MAD24               : ILOpCode<174, "imad24">;
+def IL_OP_I_MUL24               : ILOpCode<175, "imul24">;
+def IL_OP_CLAMP                 : ILOpCode<176, "clamp">;
+def IL_OP_LERP                  : ILOpCode<177, "lrp">;
+def IL_OP_FMA                   : ILOpCode<178, "fma">;
+def IL_OP_D_MIN                 : ILOpCode<179, "dmin">;
+def IL_OP_D_MAX                 : ILOpCode<180, "dmax">;
+def IL_OP_D_SQRT                : ILOpCode<181, "dsqrt">;
+def IL_OP_DP2_ADD               : ILOpCode<182, "dp2add">;
+def IL_OP_F16_TO_F32            : ILOpCode<183, "f162f">;
+def IL_OP_F32_TO_F16            : ILOpCode<184, "f2f16">;
+def IL_REG_LOCAL_ID_FLAT        : ILOpCode<185, "vTidInGrpFlat">;
+def IL_REG_LOCAL_ID             : ILOpCode<186, "vTidInGrp">;
+def IL_REG_GLOBAL_ID_FLAT       : ILOpCode<187, "vAbsTidFlag">;
+def IL_REG_GLOBAL_ID            : ILOpCode<188, "vAbsTid">;
+def IL_REG_GROUP_ID_FLAT        : ILOpCode<189, "vThreadGrpIDFlat">;
+def IL_REG_GROUP_ID             : ILOpCode<190, "vThreadGrpID">;
+def IL_OP_D_RCP                 : ILOpCode<191, "drcp_zeroop(infinity)">;
+def IL_OP_D_RSQ                 : ILOpCode<192, "drsq_zeroop(infinity)">;
+def IL_OP_D_MOV                 : ILOpCode<193, "dmov">;
+def IL_OP_D_MOVC                : ILOpCode<194, "dmovc">;
+def IL_OP_NOP                   : ILOpCode<195, "nop">;
+def IL_OP_UAV_ADD               : ILOpCode<196, "uav_add">;
+def IL_OP_UAV_AND               : ILOpCode<197, "uav_and">;
+def IL_OP_UAV_MAX               : ILOpCode<198, "uav_max">;
+def IL_OP_UAV_MIN               : ILOpCode<199, "uav_min">;
+def IL_OP_UAV_OR                : ILOpCode<200, "uav_or">;
+def IL_OP_UAV_RSUB              : ILOpCode<201, "uav_rsub">;
+def IL_OP_UAV_SUB               : ILOpCode<202, "uav_sub">;
+def IL_OP_UAV_UMAX              : ILOpCode<203, "uav_umax">;
+def IL_OP_UAV_UMIN              : ILOpCode<204, "uav_umin">;
+def IL_OP_UAV_XOR               : ILOpCode<205, "uav_xor">;
+def IL_OP_UAV_INC               : ILOpCode<206, "uav_uinc">;
+def IL_OP_UAV_DEC               : ILOpCode<207, "uav_udec">;
+def IL_OP_UAV_CMP               : ILOpCode<208, "uav_cmp">;
+def IL_OP_UAV_READ_ADD          : ILOpCode<209, "uav_read_add">;
+def IL_OP_UAV_READ_AND          : ILOpCode<210, "uav_read_and">;
+def IL_OP_UAV_READ_MAX          : ILOpCode<211, "uav_read_max">;
+def IL_OP_UAV_READ_MIN          : ILOpCode<212, "uav_read_min">;
+def IL_OP_UAV_READ_OR           : ILOpCode<213, "uav_read_or">;
+def IL_OP_UAV_READ_RSUB         : ILOpCode<214, "uav_read_rsub">;
+def IL_OP_UAV_READ_SUB          : ILOpCode<215, "uav_read_sub">;
+def IL_OP_UAV_READ_UMAX         : ILOpCode<216, "uav_read_umax">;
+def IL_OP_UAV_READ_UMIN         : ILOpCode<217, "uav_read_umin">;
+def IL_OP_UAV_READ_XOR          : ILOpCode<218, "uav_read_xor">;
+def IL_OP_UAV_READ_INC          : ILOpCode<219, "uav_read_uinc">;
+def IL_OP_UAV_READ_DEC          : ILOpCode<220, "uav_read_udec">;
+def IL_OP_UAV_READ_XCHG         : ILOpCode<221, "uav_read_xchg">;
+def IL_OP_UAV_READ_CMPXCHG      : ILOpCode<222, "uav_read_cmp_xchg">;
+def IL_OP_LDS_ADD               : ILOpCode<223, "lds_add">;
+def IL_OP_LDS_AND               : ILOpCode<224, "lds_and">;
+def IL_OP_LDS_MAX               : ILOpCode<225, "lds_max">;
+def IL_OP_LDS_MIN               : ILOpCode<226, "lds_min">;
+def IL_OP_LDS_OR                : ILOpCode<227, "lds_or">;
+def IL_OP_LDS_RSUB              : ILOpCode<228, "lds_rsub">;
+def IL_OP_LDS_SUB               : ILOpCode<229, "lds_sub">;
+def IL_OP_LDS_UMAX              : ILOpCode<230, "lds_umax">;
+def IL_OP_LDS_UMIN              : ILOpCode<231, "lds_umin">;
+def IL_OP_LDS_XOR               : ILOpCode<232, "lds_xor">;
+def IL_OP_LDS_INC               : ILOpCode<233, "lds_inc">;
+def IL_OP_LDS_DEC               : ILOpCode<234, "lds_dec">;
+def IL_OP_LDS_CMP               : ILOpCode<235, "lds_cmp">;
+def IL_OP_LDS_READ_ADD          : ILOpCode<236, "lds_read_add">;
+def IL_OP_LDS_READ_AND          : ILOpCode<237, "lds_read_and">;
+def IL_OP_LDS_READ_MAX          : ILOpCode<238, "lds_read_max">;
+def IL_OP_LDS_READ_MIN          : ILOpCode<239, "lds_read_min">;
+def IL_OP_LDS_READ_OR           : ILOpCode<240, "lds_read_or">;
+def IL_OP_LDS_READ_RSUB         : ILOpCode<241, "lds_read_rsub">;
+def IL_OP_LDS_READ_SUB          : ILOpCode<242, "lds_read_sub">;
+def IL_OP_LDS_READ_UMAX         : ILOpCode<243, "lds_read_umax">;
+def IL_OP_LDS_READ_UMIN         : ILOpCode<244, "lds_read_umin">;
+def IL_OP_LDS_READ_XOR          : ILOpCode<245, "lds_read_xor">;
+def IL_OP_LDS_READ_INC          : ILOpCode<246, "lds_read_inc">;
+def IL_OP_LDS_READ_DEC          : ILOpCode<247, "lds_read_dec">;
+def IL_OP_LDS_READ_XCHG         : ILOpCode<248, "lds_read_xchg">;
+def IL_OP_LDS_READ_CMPXCHG      : ILOpCode<249, "lds_read_cmp_xchg">;
+def IL_OP_GDS_ADD               : ILOpCode<250, "gds_add">;
+def IL_OP_GDS_AND               : ILOpCode<251, "gds_and">;
+def IL_OP_GDS_MAX               : ILOpCode<252, "gds_max">;
+def IL_OP_GDS_MIN               : ILOpCode<253, "gds_min">;
+def IL_OP_GDS_OR                : ILOpCode<254, "gds_or">;
+def IL_OP_GDS_RSUB              : ILOpCode<255, "gds_rsub">;
+def IL_OP_GDS_SUB               : ILOpCode<256, "gds_sub">;
+def IL_OP_GDS_UMAX              : ILOpCode<257, "gds_umax">;
+def IL_OP_GDS_UMIN              : ILOpCode<258, "gds_umin">;
+def IL_OP_GDS_MSKOR             : ILOpCode<259, "gds_mskor">;
+def IL_OP_GDS_XOR               : ILOpCode<260, "gds_xor">;
+def IL_OP_GDS_INC               : ILOpCode<261, "gds_inc">;
+def IL_OP_GDS_DEC               : ILOpCode<262, "gds_dec">;
+def IL_OP_GDS_CMP               : ILOpCode<263, "gds_cmp">;
+def IL_OP_GDS_READ_ADD          : ILOpCode<264, "gds_read_add">;
+def IL_OP_GDS_READ_AND          : ILOpCode<265, "gds_read_and">;
+def IL_OP_GDS_READ_MAX          : ILOpCode<266, "gds_read_max">;
+def IL_OP_GDS_READ_MIN          : ILOpCode<267, "gds_read_min">;
+def IL_OP_GDS_READ_OR           : ILOpCode<268, "gds_read_or">;
+def IL_OP_GDS_READ_RSUB         : ILOpCode<269, "gds_read_rsub">;
+def IL_OP_GDS_READ_SUB          : ILOpCode<270, "gds_read_sub">;
+def IL_OP_GDS_READ_UMAX         : ILOpCode<271, "gds_read_umax">;
+def IL_OP_GDS_READ_UMIN         : ILOpCode<272, "gds_read_umin">;
+def IL_OP_GDS_READ_MSKOR        : ILOpCode<273, "gds_read_mskor">;
+def IL_OP_GDS_READ_XOR          : ILOpCode<274, "gds_read_xor">;
+def IL_OP_GDS_READ_INC          : ILOpCode<275, "gds_read_inc">;
+def IL_OP_GDS_READ_DEC          : ILOpCode<276, "gds_read_dec">;
+def IL_OP_GDS_READ_XCHG         : ILOpCode<277, "gds_read_xchg">;
+def IL_OP_GDS_READ_CMPXCHG      : ILOpCode<278, "gds_read_cmp_xchg">;
+def IL_OP_APPEND_BUF_ALLOC      : ILOpCode<279, "append_buf_alloc">;
+def IL_OP_APPEND_BUF_CONSUME    : ILOpCode<280, "append_buf_consume">;
+def IL_OP_I64_ADD               : ILOpCode<281, "i64add">;
+def IL_OP_I64_MAX               : ILOpCode<282, "i64max">;
+def IL_OP_U64_MAX               : ILOpCode<283, "u64max">;
+def IL_OP_I64_MIN               : ILOpCode<284, "i64min">;
+def IL_OP_U64_MIN               : ILOpCode<285, "u64min">;
+def IL_OP_I64_NEGATE            : ILOpCode<286, "i64negate">;
+def IL_OP_I64_SHL               : ILOpCode<287, "i64shl">;
+def IL_OP_I64_SHR               : ILOpCode<288, "i64shr">;
+def IL_OP_U64_SHR               : ILOpCode<289, "u64shr">;
+def IL_OP_I64_EQ                : ILOpCode<290, "i64eq">;
+def IL_OP_I64_GE                : ILOpCode<291, "i64ge">;
+def IL_OP_U64_GE                : ILOpCode<292, "u64ge">;
+def IL_OP_I64_LT                : ILOpCode<293, "i64lt">;
+def IL_OP_U64_LT                : ILOpCode<294, "u64lt">;
+def IL_OP_I64_NE                : ILOpCode<295, "i64ne">;
+def IL_OP_U_MULHI24             : ILOpCode<296, "umul24_high">;
+def IL_OP_I_MULHI24             : ILOpCode<297, "imul24_high">;
+def IL_OP_GDS_LOAD              : ILOpCode<298, "gds_load">;
+def IL_OP_GDS_STORE             : ILOpCode<299, "gds_store">;
+def IL_OP_LDS_LOAD              : ILOpCode<300, "lds_load">;
+def IL_OP_LDS_LOAD_VEC          : ILOpCode<301, "lds_load_vec">;
+def IL_OP_LDS_LOAD_BYTE         : ILOpCode<302, "lds_load_byte">;
+def IL_OP_LDS_LOAD_UBYTE        : ILOpCode<303, "lds_load_ubyte">;
+def IL_OP_LDS_LOAD_SHORT        : ILOpCode<304, "lds_load_short">;
+def IL_OP_LDS_LOAD_USHORT       : ILOpCode<305, "lds_load_ushort">;
+def IL_OP_LDS_STORE             : ILOpCode<306, "lds_store">;
+def IL_OP_LDS_STORE_VEC         : ILOpCode<307, "lds_store_vec">;
+def IL_OP_LDS_STORE_BYTE        : ILOpCode<308, "lds_store_byte">;
+def IL_OP_LDS_STORE_SHORT       : ILOpCode<309, "lds_store_short">;
+def IL_OP_RAW_UAV_LOAD          : ILOpCode<310, "uav_raw_load">;
+def IL_OP_RAW_UAV_STORE         : ILOpCode<311, "uav_raw_store">;
+def IL_OP_ARENA_UAV_LOAD        : ILOpCode<312, "uav_arena_load">;
+def IL_OP_ARENA_UAV_STORE       : ILOpCode<313, "uav_arena_store">;
+def IL_OP_LDS_MSKOR             : ILOpCode<314, "lds_mskor">;
+def IL_OP_LDS_READ_MSKOR        : ILOpCode<315, "lds_read_mskor">;
+def IL_OP_UAV_BYTE_LOAD         : ILOpCode<316, "uav_byte_load">;
+def IL_OP_UAV_UBYTE_LOAD        : ILOpCode<317, "uav_ubyte_load">;
+def IL_OP_UAV_SHORT_LOAD        : ILOpCode<318, "uav_short_load">;
+def IL_OP_UAV_USHORT_LOAD       : ILOpCode<319, "uav_ushort_load">;
+def IL_OP_UAV_BYTE_STORE        : ILOpCode<320, "uav_byte_store">;
+// NOTE(review): value 320 is repeated from the previous def -- probable
+// off-by-one duplicate (321 and 322 below are also in use); confirm the
+// intended encoding before relying on Value for these ops.
+def IL_OP_UAV_SHORT_STORE       : ILOpCode<320, "uav_short_store">;
+def IL_OP_UAV_STORE             : ILOpCode<321, "uav_store">;
+def IL_OP_UAV_LOAD              : ILOpCode<322, "uav_load">;
+def IL_OP_MUL                   : ILOpCode<323, "mul">;
+def IL_OP_DIV_INF               : ILOpCode<324, "div_zeroop(infinity)">;
+def IL_OP_DIV_FLTMAX            : ILOpCode<325, "div_zeroop(fltmax)">;
+def IL_OP_DIV_ZERO              : ILOpCode<326, "div_zeroop(zero)">;
+def IL_OP_DIV_INFELSEMAX        : ILOpCode<327, "div_zeroop(inf_else_max)">;
+def IL_OP_FTOI_FLR              : ILOpCode<328, "ftoi_flr">;
+def IL_OP_FTOI_RPI              : ILOpCode<329, "ftoi_rpi">;
+def IL_OP_F32_TO_F16_NEAR       : ILOpCode<330, "f2f16_near">;
+def IL_OP_F32_TO_F16_NEG_INF    : ILOpCode<331, "f2f16_neg_inf">;
+def IL_OP_F32_TO_F16_PLUS_INF   : ILOpCode<332, "f2f16_plus_inf">;
+def IL_OP_I64_MUL               : ILOpCode<333, "i64mul">;
+def IL_OP_U64_MUL               : ILOpCode<334, "u64mul">;
+// NOTE(review): values 335-354 (and the later gaps 367-376, 378-393) are
+// unassigned here -- presumably reserved by the IL spec; confirm.
+def IL_OP_CU_ID                 : ILOpCode<355, "cu_id">;
+def IL_OP_WAVE_ID               : ILOpCode<356, "wave_id">;
+def IL_OP_I64_SUB               : ILOpCode<357, "i64sub">;
+def IL_OP_I64_DIV               : ILOpCode<358, "i64div">;
+def IL_OP_U64_DIV               : ILOpCode<359, "u64div">;
+def IL_OP_I64_MOD               : ILOpCode<360, "i64mod">;
+def IL_OP_U64_MOD               : ILOpCode<361, "u64mod">;
+def IL_DCL_GWS_THREAD_COUNT     : ILOpCode<362, "dcl_gws_thread_count">;
+def IL_DCL_SEMAPHORE            : ILOpCode<363, "dcl_semaphore">;
+def IL_OP_SEMAPHORE_INIT        : ILOpCode<364, "init_semaphore">;
+def IL_OP_SEMAPHORE_WAIT        : ILOpCode<365, "semaphore_wait">;
+def IL_OP_SEMAPHORE_SIGNAL      : ILOpCode<366, "semaphore_signal">;
+def IL_OP_BARRIER_REGION        : ILOpCode<377, "fence_threads_gds">;
+def IL_OP_BFI                   : ILOpCode<394, "bfi">;
+def IL_OP_BFM                   : ILOpCode<395, "bfm">;
+def IL_DBG_STRING               : ILOpCode<396, "dbg_string">;
+def IL_DBG_LINE                 : ILOpCode<397, "dbg_line">;
+def IL_DBG_TEMPLOC              : ILOpCode<398, "dbg_temploc">;
diff --git a/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
new file mode 100644
index 0000000..6652c74
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILEvergreenDevice.cpp
@@ -0,0 +1,183 @@
+//===-- AMDILEvergreenDevice.cpp - Device Info for Evergreen --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDILEvergreenDevice.h"
+
+using namespace llvm;
+
+// Construct the Evergreen device: configure capability bits, then map the
+// subtarget's device name onto the matching OCL device flag.  Unrecognized
+// names fall back to the Juniper flag.
+AMDILEvergreenDevice::AMDILEvergreenDevice(AMDILSubtarget *ST)
+: AMDILDevice(ST) {
+  setCaps();
+  const std::string deviceName = ST->getDeviceName();
+  mDeviceFlag = (deviceName == "cedar")   ? OCL_DEVICE_CEDAR
+              : (deviceName == "redwood") ? OCL_DEVICE_REDWOOD
+              : (deviceName == "cypress") ? OCL_DEVICE_CYPRESS
+                                          : OCL_DEVICE_JUNIPER;
+}
+
+// Out-of-line destructor; nothing to release beyond the base class.
+AMDILEvergreenDevice::~AMDILEvergreenDevice() {
+}
+
+// Maximum local (LDS) memory in bytes: the full hardware size when LDS is
+// implemented in hardware, otherwise none is available.
+size_t AMDILEvergreenDevice::getMaxLDSSize() const {
+  return usesHardware(AMDILDeviceInfo::LocalMem) ? MAX_LDS_SIZE_800 : 0;
+}
+// Maximum region (GDS) memory in bytes: same limit as LDS when region
+// memory is implemented in hardware, otherwise none is available.
+size_t AMDILEvergreenDevice::getMaxGDSSize() const {
+  return usesHardware(AMDILDeviceInfo::RegionMem) ? MAX_LDS_SIZE_800 : 0;
+}
+// Number of UAVs exposed by Evergreen-class devices.
+uint32_t AMDILEvergreenDevice::getMaxNumUAVs() const {
+  return 12;
+}
+
+// Map an abstract resource class to the concrete resource ID used when
+// emitting IL for this device.  Unknown IDs assert in debug builds and
+// fall through to return 0 in release builds.
+uint32_t AMDILEvergreenDevice::getResourceID(uint32_t id) const {
+  switch(id) {
+  default:
+    assert(0 && "ID type passed in is unknown!");
+    break;
+  case CONSTANT_ID:
+  case RAW_UAV_ID:
+    // Newer CAL runtimes route raw UAV traffic through the global return
+    // buffer UAV; older ones use the default raw UAV.
+    if (mSTM->calVersion() >= CAL_VERSION_GLOBAL_RETURN_BUFFER) {
+      return GLOBAL_RETURN_RAW_UAV_ID;
+    } else {
+      return DEFAULT_RAW_UAV_ID;
+    }
+  case GLOBAL_ID:
+  case ARENA_UAV_ID:
+    return DEFAULT_ARENA_UAV_ID;
+  case LDS_ID:
+    // Software-emulated LDS/GDS/scratch all fall back to the arena UAV.
+    if (usesHardware(AMDILDeviceInfo::LocalMem)) {
+      return DEFAULT_LDS_ID;
+    } else {
+      return DEFAULT_ARENA_UAV_ID;
+    }
+  case GDS_ID:
+    if (usesHardware(AMDILDeviceInfo::RegionMem)) {
+      return DEFAULT_GDS_ID;
+    } else {
+      return DEFAULT_ARENA_UAV_ID;
+    }
+  case SCRATCH_ID:
+    if (usesHardware(AMDILDeviceInfo::PrivateMem)) {
+      return DEFAULT_SCRATCH_ID;
+    } else {
+      return DEFAULT_ARENA_UAV_ID;
+    }
+  }  // (stray ';' that followed this brace removed -- it was an empty statement)
+  return 0;
+}
+
+// Evergreen runs full-size wavefronts (Cedar/Redwood override this).
+size_t AMDILEvergreenDevice::getWavefrontSize() const {
+  return AMDILDevice::WavefrontSize;
+}
+
+// Report this device's hardware generation (HD5XXX = Evergreen).
+uint32_t AMDILEvergreenDevice::getGeneration() const {
+  return AMDILDeviceInfo::HD5XXX;
+}
+
+// Configure the capability bit vectors for Evergreen-class devices
+// (mHWBits: feature done in hardware; mSWBits: feature emulated in
+// software).  Which bits are set depends on the CAL runtime version and
+// on subtarget override flags.
+void AMDILEvergreenDevice::setCaps() {
+  mSWBits.set(AMDILDeviceInfo::ArenaSegment);
+  mHWBits.set(AMDILDeviceInfo::ArenaUAV);
+  // NOTE(review): presumably CAL >= SC_140 gained native 64-bit div/mod
+  // support -- confirm against the CAL release notes.
+  if (mSTM->calVersion() >= CAL_VERSION_SC_140) {
+    mHWBits.set(AMDILDeviceInfo::HW64BitDivMod);
+    mSWBits.reset(AMDILDeviceInfo::HW64BitDivMod);
+  } 
+  mSWBits.set(AMDILDeviceInfo::Signed24BitOps);
+  if (mSTM->isOverride(AMDILDeviceInfo::ByteStores)) {
+    mHWBits.set(AMDILDeviceInfo::ByteStores);
+  }
+  // The Debug override forces LDS/GDS onto the software-emulated path.
+  if (mSTM->isOverride(AMDILDeviceInfo::Debug)) {
+    mSWBits.set(AMDILDeviceInfo::LocalMem);
+    mSWBits.set(AMDILDeviceInfo::RegionMem);
+  } else {
+    mHWBits.set(AMDILDeviceInfo::LocalMem);
+    mHWBits.set(AMDILDeviceInfo::RegionMem);
+  }
+  mHWBits.set(AMDILDeviceInfo::Images);
+  if (mSTM->isOverride(AMDILDeviceInfo::NoAlias)) {
+    mHWBits.set(AMDILDeviceInfo::NoAlias);
+  }
+  if (mSTM->calVersion() > CAL_VERSION_GLOBAL_RETURN_BUFFER) {
+    mHWBits.set(AMDILDeviceInfo::CachedMem);
+  }
+  if (mSTM->isOverride(AMDILDeviceInfo::MultiUAV)) {
+    mHWBits.set(AMDILDeviceInfo::MultiUAV);
+  }
+  // Byte-granular LDS ops and arena vectors moved from software emulation
+  // to hardware after SC_136.
+  if (mSTM->calVersion() > CAL_VERSION_SC_136) {
+    mHWBits.set(AMDILDeviceInfo::ByteLDSOps);
+    mSWBits.reset(AMDILDeviceInfo::ByteLDSOps);
+    mHWBits.set(AMDILDeviceInfo::ArenaVectors);
+  } else {
+    mSWBits.set(AMDILDeviceInfo::ArenaVectors);
+  }
+  if (mSTM->calVersion() > CAL_VERSION_SC_137) {
+    mHWBits.set(AMDILDeviceInfo::LongOps);
+    mSWBits.reset(AMDILDeviceInfo::LongOps);
+  }
+  mHWBits.set(AMDILDeviceInfo::TmrReg);
+}
+
+// Cypress variant of the Evergreen device.
+AMDILCypressDevice::AMDILCypressDevice(AMDILSubtarget *ST)
+  : AMDILEvergreenDevice(ST) {
+  // The base constructor's setCaps() call cannot reach this override
+  // (virtual dispatch does not reach derived classes during base
+  // construction), so re-run capability setup here.
+  setCaps();
+}
+
+AMDILCypressDevice::~AMDILCypressDevice() {
+}
+
+void AMDILCypressDevice::setCaps() {
+  // Double-precision ops (and hardware FMA) are only advertised when the
+  // subtarget override enables DoubleOps.
+  if (mSTM->isOverride(AMDILDeviceInfo::DoubleOps)) {
+    mHWBits.set(AMDILDeviceInfo::DoubleOps);
+    mHWBits.set(AMDILDeviceInfo::FMA);
+  }
+}
+
+
+// Cedar (low-end Evergreen) variant.
+AMDILCedarDevice::AMDILCedarDevice(AMDILSubtarget *ST)
+  : AMDILEvergreenDevice(ST) {
+  // Re-run capability setup: the base constructor's setCaps() call does
+  // not virtually dispatch to this class during base construction.
+  setCaps();
+}
+
+AMDILCedarDevice::~AMDILCedarDevice() {
+}
+
+void AMDILCedarDevice::setCaps() {
+  // FMA is provided in software on Cedar.
+  mSWBits.set(AMDILDeviceInfo::FMA);
+}
+
+size_t AMDILCedarDevice::getWavefrontSize() const {
+  // Cedar runs quarter-size wavefronts.
+  return AMDILDevice::QuarterWavefrontSize;
+}
+
+// Redwood (mid-range Evergreen) variant.
+AMDILRedwoodDevice::AMDILRedwoodDevice(AMDILSubtarget *ST)
+  : AMDILEvergreenDevice(ST) {
+  // Re-run capability setup: the base constructor's setCaps() call does
+  // not virtually dispatch to this class during base construction.
+  setCaps();
+}
+
+AMDILRedwoodDevice::~AMDILRedwoodDevice()
+{
+}
+
+void AMDILRedwoodDevice::setCaps() {
+  // FMA is provided in software on Redwood.
+  mSWBits.set(AMDILDeviceInfo::FMA);
+}
+
+size_t AMDILRedwoodDevice::getWavefrontSize() const {
+  // Redwood runs half-size wavefronts.
+  return AMDILDevice::HalfWavefrontSize;
+}
diff --git a/lib/Target/AMDGPU/AMDILEvergreenDevice.h b/lib/Target/AMDGPU/AMDILEvergreenDevice.h
new file mode 100644
index 0000000..2639ab8
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILEvergreenDevice.h
@@ -0,0 +1,87 @@
+//==- AMDILEvergreenDevice.h - Define Evergreen Device for AMDIL -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface for the subtarget data classes.
+//
+//===----------------------------------------------------------------------===//
+// This file will define the interface that each generation needs to
+// implement in order to correctly answer queries on the capabilities of the
+// specific hardware.
+//===----------------------------------------------------------------------===//
+#ifndef _AMDILEVERGREENDEVICE_H_
+#define _AMDILEVERGREENDEVICE_H_
+#include "AMDILDevice.h"
+#include "AMDILSubtarget.h"
+
+namespace llvm {
+  class AMDILSubtarget;
+//===----------------------------------------------------------------------===//
+// Evergreen generation of devices and their respective sub classes
+//===----------------------------------------------------------------------===//
+
+
+// The AMDILEvergreenDevice is the base device class for all of the Evergreen
+// series of cards. This class contains information required to differentiate
+// the Evergreen device from the generic AMDILDevice. This device represents
+// the capabilities of the 'Juniper' cards, also known as the HD57XX.
+class AMDILEvergreenDevice : public AMDILDevice {
+public:
+  AMDILEvergreenDevice(AMDILSubtarget *ST);
+  virtual ~AMDILEvergreenDevice();
+  virtual size_t getMaxLDSSize() const;
+  virtual size_t getMaxGDSSize() const;
+  virtual size_t getWavefrontSize() const;
+  virtual uint32_t getGeneration() const;
+  virtual uint32_t getMaxNumUAVs() const;
+  virtual uint32_t getResourceID(uint32_t) const;
+protected:
+  virtual void setCaps();
+}; // AMDILEvergreenDevice
+
+// The AMDILCypressDevice is similar to the AMDILEvergreenDevice, except it has
+// support for double precision operations. This device is used to represent
+// both the Cypress and Hemlock cards, which are commercially known as HD58XX
+// and HD59XX cards.
+class AMDILCypressDevice : public AMDILEvergreenDevice {
+public:
+  AMDILCypressDevice(AMDILSubtarget *ST);
+  virtual ~AMDILCypressDevice();
+private:
+  virtual void setCaps();
+}; // AMDILCypressDevice
+
+
+// The AMDILCedarDevice is the class that represents all of the 'Cedar' based
+// devices. This class differs from the base AMDILEvergreenDevice in that the
+// device is a ~quarter of the 'Juniper'. These are commercially known as the
+// HD54XX and HD53XX series of cards.
+class AMDILCedarDevice : public AMDILEvergreenDevice {
+public:
+  AMDILCedarDevice(AMDILSubtarget *ST);
+  virtual ~AMDILCedarDevice();
+  virtual size_t getWavefrontSize() const;
+private:
+  virtual void setCaps();
+}; // AMDILCedarDevice
+
+// The AMDILRedwoodDevice is the class that represents all of the 'Redwood' based
+// devices. This class differs from the base class, in that these devices are
+// considered about half of a 'Juniper' device. These are commercially known as
+// the HD55XX and HD56XX series of cards.
+class AMDILRedwoodDevice : public AMDILEvergreenDevice {
+public:
+  AMDILRedwoodDevice(AMDILSubtarget *ST);
+  virtual ~AMDILRedwoodDevice();
+  virtual size_t getWavefrontSize() const;
+private:
+  virtual void setCaps();
+}; // AMDILRedwoodDevice
+  
+} // namespace llvm
+#endif // _AMDILEVERGREENDEVICE_H_
diff --git a/lib/Target/AMDGPU/AMDILFormats.td b/lib/Target/AMDGPU/AMDILFormats.td
new file mode 100644
index 0000000..25ca9a0
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILFormats.td
@@ -0,0 +1,174 @@
+//==- AMDILFormats.td - AMDIL Instruction Formats ----*- tablegen -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+//===--------------------------------------------------------------------===//
+include "AMDILTokenDesc.td"
+
+//===--------------------------------------------------------------------===//
+// The parent IL instruction class that inherits the Instruction class. This
+// class sets the corresponding namespace, the out and input dag lists the
+// pattern to match to and the string to print out for the assembly printer.
+//===--------------------------------------------------------------------===//
+class ILFormat<ILOpCode op, dag outs, dag ins, string asmstr, list<dag> pattern>
+: Instruction {
+
+     let Namespace = "AMDIL";
+     dag OutOperandList = outs;
+     dag InOperandList = ins;
+     ILOpCode operation = op;
+     let Pattern = pattern;
+     let AsmString = !strconcat(asmstr, "\n");
+     let isPseudo = 1;
+     bit hasIEEEFlag = 0;
+     bit hasZeroOpFlag = 0;
+}
+
+//===--------------------------------------------------------------------===//
+// Class that has one input parameter and one output parameter.
+// The basic pattern for this class is "Opcode Dst, Src0" and
+// handles the unary math operators.
+// It sets the binary token ILSrc, ILSrcMod, ILRelAddr and ILSrc and ILSrcMod
+// if the addressing is register relative for input and output register 0.
+//===--------------------------------------------------------------------===//
+class OneInOneOut<ILOpCode op, dag outs, dag ins,
+      string asmstr, list<dag> pattern>
+      : ILFormat<op, outs, ins, asmstr, pattern>
+{
+     ILDst       dst_reg;
+     ILDstMod    dst_mod;
+     ILRelAddr   dst_rel;
+     ILSrc       dst_reg_rel;
+     ILSrcMod    dst_reg_rel_mod;
+     ILSrc       src0_reg;
+     ILSrcMod    src0_mod;
+     ILRelAddr   src0_rel;
+     ILSrc       src0_reg_rel;
+     ILSrcMod    src0_reg_rel_mod;
+}
+
+//===--------------------------------------------------------------------===//
+// This class is similar to the UnaryOp class; however, there is no
+// result value to assign.
+//===--------------------------------------------------------------------===//
+class UnaryOpNoRet<ILOpCode op, dag outs, dag ins,
+      string asmstr, list<dag> pattern>
+      : ILFormat<op, outs, ins, asmstr, pattern>
+{
+     ILSrc       src0_reg;
+     ILSrcMod    src0_mod;
+     ILRelAddr   src0_rel;
+     ILSrc       src0_reg_rel;
+     ILSrcMod    src0_reg_rel_mod;
+}
+
+//===--------------------------------------------------------------------===//
+// Set of classes that have two input parameters and one output parameter.
+// The basic pattern for this class is "Opcode Dst, Src0, Src1" and
+// handles the binary math operators and comparison operations.
+// It sets the binary token ILSrc, ILSrcMod, ILRelAddr and ILSrc and ILSrcMod
+// if the addressing is register relative for input register 1.
+//===--------------------------------------------------------------------===//
+class TwoInOneOut<ILOpCode op, dag outs, dag ins,
+      string asmstr, list<dag> pattern>
+      : OneInOneOut<op, outs, ins, asmstr, pattern>
+{
+     ILSrc       src1_reg;
+     ILSrcMod    src1_mod;
+     ILRelAddr   src1_rel;
+     ILSrc       src1_reg_rel;
+     ILSrcMod    src1_reg_rel_mod;
+}
+
+//===--------------------------------------------------------------------===//
+// Similar to the UnaryOpNoRet class, but takes as arguments two input
+// operands. Used mainly for barrier instructions on PC platform.
+//===--------------------------------------------------------------------===//
+class BinaryOpNoRet<ILOpCode op, dag outs, dag ins,
+      string asmstr, list<dag> pattern>
+      : UnaryOpNoRet<op, outs, ins, asmstr, pattern>
+{
+     ILSrc       src1_reg;
+     ILSrcMod    src1_mod;
+     ILRelAddr   src1_rel;
+     ILSrc       src1_reg_rel;
+     ILSrcMod    src1_reg_rel_mod;
+}
+
+//===--------------------------------------------------------------------===//
+// Set of classes that have three input parameters and one output parameter.
+// The basic pattern for this class is "Opcode Dst, Src0, Src1, Src2" and
+// handles the mad and conditional mov instruction.
+// It sets the binary token ILSrc, ILSrcMod, ILRelAddr and ILSrc and ILSrcMod
+// if the addressing is register relative.
+// This class is the parent class of TernaryOp
+//===--------------------------------------------------------------------===//
+class ThreeInOneOut<ILOpCode op, dag outs, dag ins,
+      string asmstr, list<dag> pattern>
+      : TwoInOneOut<op, outs, ins, asmstr, pattern> {
+           ILSrc       src2_reg;
+           ILSrcMod    src2_mod;
+           ILRelAddr   src2_rel;
+           ILSrc       src2_reg_rel;
+           ILSrcMod    src2_reg_rel_mod;
+      }
+
+//===--------------------------------------------------------------------===//
+// Intrinsic classes
+// Generic versions of the above classes but for Target specific intrinsics
+// instead of SDNode patterns.
+//===--------------------------------------------------------------------===//
+let TargetPrefix = "AMDIL", isTarget = 1 in {
+     class VoidIntLong :
+          Intrinsic<[llvm_i64_ty], [], []>;
+     class VoidIntInt :
+          Intrinsic<[llvm_i32_ty], [], []>;
+     class VoidIntBool :
+          Intrinsic<[llvm_i32_ty], [], []>;
+     class UnaryIntInt :
+          Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+     class UnaryIntFloat :
+          Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+     class ConvertIntFTOI :
+          Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+     class ConvertIntITOF :
+          Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty], [IntrNoMem]>;
+     class UnaryIntNoRetInt :
+          Intrinsic<[], [llvm_anyint_ty], []>;
+     class UnaryIntNoRetFloat :
+          Intrinsic<[], [llvm_anyfloat_ty], []>;
+     class BinaryIntInt :
+          Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+     class BinaryIntFloat :
+          Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+     class BinaryIntNoRetInt :
+          Intrinsic<[], [llvm_anyint_ty, LLVMMatchType<0>], []>;
+     class BinaryIntNoRetFloat :
+          Intrinsic<[], [llvm_anyfloat_ty, LLVMMatchType<0>], []>;
+     class TernaryIntInt :
+          Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+          LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+     class TernaryIntFloat :
+          Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>,
+          LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+     class QuaternaryIntInt :
+          Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+          LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+     class UnaryAtomicInt :
+          Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+     class BinaryAtomicInt :
+          Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+     class TernaryAtomicInt :
+          Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
+     class UnaryAtomicIntNoRet :
+          Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+     class BinaryAtomicIntNoRet :
+          Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+     class TernaryAtomicIntNoRet :
+          Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+}
diff --git a/lib/Target/AMDGPU/AMDILFrameLowering.cpp b/lib/Target/AMDGPU/AMDILFrameLowering.cpp
new file mode 100644
index 0000000..87eca87
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILFrameLowering.cpp
@@ -0,0 +1,53 @@
+//===----------------------- AMDILFrameLowering.cpp -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface to describe a layout of a stack frame on an AMDIL target machine
+//
+//===----------------------------------------------------------------------===//
+#include "AMDILFrameLowering.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+
+using namespace llvm;
+AMDILFrameLowering::AMDILFrameLowering(StackDirection D, unsigned StackAl,
+    int LAO, unsigned TransAl)
+  : TargetFrameLowering(D, StackAl, LAO, TransAl)
+{
+}
+
+AMDILFrameLowering::~AMDILFrameLowering()
+{
+}
+
+/// getFrameIndexOffset - Returns the displacement from the frame register to
+/// the stack frame of the specified index.
+int AMDILFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+                                         int FI) const {
+  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  return MFI->getObjectOffset(FI);
+}
+
+const TargetFrameLowering::SpillSlot *
+AMDILFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const
+{
+  NumEntries = 0;
+  return 0;
+}
+void
+AMDILFrameLowering::emitPrologue(MachineFunction &MF) const
+{
+}
+void
+AMDILFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
+{
+}
+bool
+AMDILFrameLowering::hasFP(const MachineFunction &MF) const
+{
+  return false;
+}
diff --git a/lib/Target/AMDGPU/AMDILFrameLowering.h b/lib/Target/AMDGPU/AMDILFrameLowering.h
new file mode 100644
index 0000000..b1d919e
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILFrameLowering.h
@@ -0,0 +1,46 @@
+//===--------------------- AMDILFrameLowering.h -----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe a layout of a stack frame on an AMDIL target machine
+//
+//===----------------------------------------------------------------------===//
+#ifndef _AMDILFRAME_LOWERING_H_
+#define _AMDILFRAME_LOWERING_H_
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+/// Information about the stack frame layout on the AMDIL targets. It holds
+/// the direction of the stack growth, the known stack alignment on entry to
+/// each function, and the offset to the locals area.
+/// See TargetFrameInfo for more comments.
+
+namespace llvm {
+  class AMDILFrameLowering : public TargetFrameLowering {
+    public:
+      AMDILFrameLowering(StackDirection D, unsigned StackAl, int LAO, unsigned
+          TransAl = 1);
+      virtual ~AMDILFrameLowering();
+      virtual int getFrameIndexOffset(const MachineFunction &MF,
+                                         int FI) const;
+      virtual const SpillSlot *
+        getCalleeSavedSpillSlots(unsigned &NumEntries) const;
+      virtual void emitPrologue(MachineFunction &MF) const;
+      virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+      virtual bool hasFP(const MachineFunction &MF) const;
+  }; // class AMDILFrameLowering
+} // namespace llvm
+#endif // _AMDILFRAME_LOWERING_H_
diff --git a/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
new file mode 100644
index 0000000..c3212fb
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILISelDAGToDAG.cpp
@@ -0,0 +1,393 @@
+//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the AMDIL target.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDGPUISelLowering.h" // For AMDGPUISD
+#include "AMDILDevices.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include <list>
+#include <queue>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// AMDILDAGToDAGISel - AMDIL specific code to select AMDIL machine instructions
+// for SelectionDAG operations.
+//
+namespace {
+class AMDILDAGToDAGISel : public SelectionDAGISel {
+  // Subtarget - Keep a pointer to the AMDIL Subtarget around so that we can
+  // make the right decision when generating code for different targets.
+  const AMDILSubtarget &Subtarget;
+public:
+  AMDILDAGToDAGISel(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
+  virtual ~AMDILDAGToDAGISel();
+
+  SDNode *Select(SDNode *N);
+  virtual const char *getPassName() const;
+
+private:
+  inline SDValue getSmallIPtrImm(unsigned Imm);
+
+  // Complex pattern selectors
+  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
+  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
+  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+
+  static bool checkType(const Value *ptr, unsigned int addrspace);
+  static const Value *getBasePointerValue(const Value *V);
+
+  static bool isGlobalStore(const StoreSDNode *N);
+  static bool isPrivateStore(const StoreSDNode *N);
+  static bool isLocalStore(const StoreSDNode *N);
+  static bool isRegionStore(const StoreSDNode *N);
+
+  static bool isCPLoad(const LoadSDNode *N);
+  static bool isConstantLoad(const LoadSDNode *N, int cbID);
+  static bool isGlobalLoad(const LoadSDNode *N);
+  static bool isPrivateLoad(const LoadSDNode *N);
+  static bool isLocalLoad(const LoadSDNode *N);
+  static bool isRegionLoad(const LoadSDNode *N);
+
+  bool SelectADDR8BitOffset(SDValue Addr, SDValue& Base, SDValue& Offset);
+  bool SelectADDRReg(SDValue Addr, SDValue& Base, SDValue& Offset);
+  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
+
+  // Include the pieces autogenerated from the target description.
+#include "AMDGPUGenDAGISel.inc"
+};
+}  // end anonymous namespace
+
+// createAMDILISelDag - This pass converts a legalized DAG into a AMDIL-specific
+// DAG, ready for instruction scheduling.
+//
+FunctionPass *llvm::createAMDILISelDag(TargetMachine &TM
+                                        AMDIL_OPT_LEVEL_DECL) {
+  return new AMDILDAGToDAGISel(TM AMDIL_OPT_LEVEL_VAR);
+}
+
+AMDILDAGToDAGISel::AMDILDAGToDAGISel(TargetMachine &TM
+                                      AMDIL_OPT_LEVEL_DECL)
+  : SelectionDAGISel(TM AMDIL_OPT_LEVEL_VAR), Subtarget(TM.getSubtarget<AMDILSubtarget>())
+{
+}
+
+AMDILDAGToDAGISel::~AMDILDAGToDAGISel() {
+}
+
+SDValue AMDILDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
+  return CurDAG->getTargetConstant(Imm, MVT::i32);
+}
+
+bool AMDILDAGToDAGISel::SelectADDRParam(
+    SDValue Addr, SDValue& R1, SDValue& R2) {
+
+  if (Addr.getOpcode() == ISD::FrameIndex) {
+    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+      R2 = CurDAG->getTargetConstant(0, MVT::i32);
+    } else {
+      R1 = Addr;
+      R2 = CurDAG->getTargetConstant(0, MVT::i32);
+    }
+  } else if (Addr.getOpcode() == ISD::ADD) {
+    R1 = Addr.getOperand(0);
+    R2 = Addr.getOperand(1);
+  } else {
+    R1 = Addr;
+    R2 = CurDAG->getTargetConstant(0, MVT::i32);
+  }
+  return true;
+}
+
+bool AMDILDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress) {
+    return false;
+  }
+  return SelectADDRParam(Addr, R1, R2);
+}
+
+
+bool AMDILDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress) {
+    return false;
+  }
+
+  if (Addr.getOpcode() == ISD::FrameIndex) {
+    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+      R2 = CurDAG->getTargetConstant(0, MVT::i64);
+    } else {
+      R1 = Addr;
+      R2 = CurDAG->getTargetConstant(0, MVT::i64);
+    }
+  } else if (Addr.getOpcode() == ISD::ADD) {
+    R1 = Addr.getOperand(0);
+    R2 = Addr.getOperand(1);
+  } else {
+    R1 = Addr;
+    R2 = CurDAG->getTargetConstant(0, MVT::i64);
+  }
+  return true;
+}
+
+SDNode *AMDILDAGToDAGISel::Select(SDNode *N) {
+  unsigned int Opc = N->getOpcode();
+  if (N->isMachineOpcode()) {
+    return NULL;   // Already selected.
+  }
+  switch (Opc) {
+  default: break;
+  case ISD::FrameIndex:
+    {
+      if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
+        unsigned int FI = FIN->getIndex();
+        EVT OpVT = N->getValueType(0);
+        unsigned int NewOpc = AMDIL::COPY;
+        SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
+        return CurDAG->SelectNodeTo(N, NewOpc, OpVT, TFI);
+      }
+    }
+    break;
+  }
+  return SelectCode(N);
+}
+
+bool AMDILDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
+  if (!ptr) {
+    return false;
+  }
+  Type *ptrType = ptr->getType();
+  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+}
+
+const Value * AMDILDAGToDAGISel::getBasePointerValue(const Value *V)
+{
+  if (!V) {
+    return NULL;
+  }
+  const Value *ret = NULL;
+  ValueMap<const Value *, bool> ValueBitMap;
+  std::queue<const Value *, std::list<const Value *> > ValueQueue;
+  ValueQueue.push(V);
+  while (!ValueQueue.empty()) {
+    V = ValueQueue.front();
+    if (ValueBitMap.find(V) == ValueBitMap.end()) {
+      ValueBitMap[V] = true;
+      if (dyn_cast<Argument>(V) && dyn_cast<PointerType>(V->getType())) {
+        ret = V;
+        break;
+      } else if (dyn_cast<GlobalVariable>(V)) {
+        ret = V;
+        break;
+      } else if (dyn_cast<Constant>(V)) {
+        const ConstantExpr *CE = dyn_cast<ConstantExpr>(V);
+        if (CE) {
+          ValueQueue.push(CE->getOperand(0));
+        }
+      } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+        ret = AI;
+        break;
+      } else if (const Instruction *I = dyn_cast<Instruction>(V)) {
+        uint32_t numOps = I->getNumOperands();
+        for (uint32_t x = 0; x < numOps; ++x) {
+          ValueQueue.push(I->getOperand(x));
+        }
+      } else {
+        // assert(0 && "Found a Value that we didn't know how to handle!");
+      }
+    }
+    ValueQueue.pop();
+  }
+  return ret;
+}
+
+bool AMDILDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::GLOBAL_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
+  return (!checkType(N->getSrcValue(), AMDILAS::LOCAL_ADDRESS)
+          && !checkType(N->getSrcValue(), AMDILAS::GLOBAL_ADDRESS)
+          && !checkType(N->getSrcValue(), AMDILAS::REGION_ADDRESS));
+}
+
+bool AMDILDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::LOCAL_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::REGION_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) {
+  if (checkType(N->getSrcValue(), AMDILAS::CONSTANT_ADDRESS)) {
+    return true;
+  }
+  MachineMemOperand *MMO = N->getMemOperand();
+  const Value *V = MMO->getValue();
+  const Value *BV = getBasePointerValue(V);
+  if (MMO
+      && MMO->getValue()
+      && ((V && dyn_cast<GlobalValue>(V))
+          || (BV && dyn_cast<GlobalValue>(
+                        getBasePointerValue(MMO->getValue()))))) {
+    return checkType(N->getSrcValue(), AMDILAS::PRIVATE_ADDRESS);
+  } else {
+    return false;
+  }
+}
+
+bool AMDILDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::GLOBAL_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isLocalLoad(const  LoadSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::LOCAL_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isRegionLoad(const  LoadSDNode *N) {
+  return checkType(N->getSrcValue(), AMDILAS::REGION_ADDRESS);
+}
+
+bool AMDILDAGToDAGISel::isCPLoad(const LoadSDNode *N) {
+  MachineMemOperand *MMO = N->getMemOperand();
+  if (checkType(N->getSrcValue(), AMDILAS::PRIVATE_ADDRESS)) {
+    if (MMO) {
+      const Value *V = MMO->getValue();
+      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool AMDILDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
+  if (checkType(N->getSrcValue(), AMDILAS::PRIVATE_ADDRESS)) {
+    // Check to make sure we are not a constant pool load or a constant load
+    // that is marked as a private load
+    if (isCPLoad(N) || isConstantLoad(N, -1)) {
+      return false;
+    }
+  }
+  if (!checkType(N->getSrcValue(), AMDILAS::LOCAL_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDILAS::GLOBAL_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDILAS::REGION_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDILAS::CONSTANT_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDILAS::PARAM_D_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDILAS::PARAM_I_ADDRESS))
+  {
+    return true;
+  }
+  return false;
+}
+
+const char *AMDILDAGToDAGISel::getPassName() const {
+  return "AMDIL DAG->DAG Pattern Instruction Selection";
+}
+
+#ifdef DEBUGTMP
+#undef INT64_C
+#endif
+#undef DEBUGTMP
+
+///==== AMDGPU Functions ====///
+
+bool AMDILDAGToDAGISel::SelectADDR8BitOffset(SDValue Addr, SDValue& Base,
+                                             SDValue& Offset) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress) {
+    return false;
+  }
+
+
+  if (Addr.getOpcode() == ISD::ADD) {
+    bool Match = false;
+
+    // Find the base ptr and the offset
+    for (unsigned i = 0; i < Addr.getNumOperands(); i++) {
+      SDValue Arg = Addr.getOperand(i);
+      ConstantSDNode * OffsetNode = dyn_cast<ConstantSDNode>(Arg);
+      // This arg isn't a constant so it must be the base PTR.
+      if (!OffsetNode) {
+        Base = Addr.getOperand(i);
+        continue;
+      }
+      // Check if the constant argument fits in 8-bits.  The offset is in bytes
+      // so we need to convert it to dwords.
+      if (isInt<8>(OffsetNode->getZExtValue() >> 2)) {
+        Match = true;
+        Offset = CurDAG->getTargetConstant(OffsetNode->getZExtValue() >> 2,
+                                           MVT::i32);
+      }
+    }
+    return Match;
+  }
+
+  // Default case, no offset
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  return true;
+}
+
+bool AMDILDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
+                                           SDValue &Offset)
+{
+  ConstantSDNode * IMMOffset;
+
+  if (Addr.getOpcode() == ISD::ADD
+      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+      && isInt<16>(IMMOffset->getZExtValue())) {
+
+      Base = Addr.getOperand(0);
+      Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+      return true;
+  // If the pointer address is constant, we can move it to the offset field.
+  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
+             && isInt<16>(IMMOffset->getZExtValue())) {
+    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+                                  CurDAG->getEntryNode().getDebugLoc(),
+                                  AMDIL::ZERO, MVT::i32);
+    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+    return true;
+  }
+
+  // Default case, no offset
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  return true;
+}
+
+bool AMDILDAGToDAGISel::SelectADDRReg(SDValue Addr, SDValue& Base,
+                                      SDValue& Offset) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress  ||
+      Addr.getOpcode() != ISD::ADD) {
+    return false;
+  }
+
+  Base = Addr.getOperand(0);
+  Offset = Addr.getOperand(1);
+
+  return false;
+}
diff --git a/lib/Target/AMDGPU/AMDILISelLowering.cpp b/lib/Target/AMDGPU/AMDILISelLowering.cpp
new file mode 100644
index 0000000..f0c3c88
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILISelLowering.cpp
@@ -0,0 +1,1851 @@
+//===-- AMDILISelLowering.cpp - AMDIL DAG Lowering Implementation ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interfaces that AMDIL uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDILISelLowering.h"
+#include "AMDILDevices.h"
+#include "AMDILIntrinsicInfo.h"
+#include "AMDILRegisterInfo.h"
+#include "AMDILSubtarget.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+#define ISDBITCAST  ISD::BITCAST
+#define MVTGLUE     MVT::Glue
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+#include "AMDGPUGenCallingConv.inc"
+
+//===----------------------------------------------------------------------===//
+// TargetLowering Implementation Help Functions Begin
+//===----------------------------------------------------------------------===//
+// getConversionNode - Emit the DAG nodes that convert Src's scalar type to
+// Dst's scalar type and return the converted value (Src is also updated in
+// place).  When asType is true, int<->float conversions are reinterpreting
+// bitcasts; otherwise they are value conversions (SINT_TO_FP/FP_TO_SINT).
+// Only 32- and 64-bit floating point types are supported.
+  static SDValue
+getConversionNode(SelectionDAG &DAG, SDValue& Src, SDValue& Dst, bool asType)
+{
+  DebugLoc DL = Src.getDebugLoc();
+  EVT svt = Src.getValueType().getScalarType();
+  EVT dvt = Dst.getValueType().getScalarType();
+  if (svt.isFloatingPoint() && dvt.isFloatingPoint()) {
+    // float -> float: extend or round to the destination width.
+    if (dvt.bitsGT(svt)) {
+      Src = DAG.getNode(ISD::FP_EXTEND, DL, dvt, Src);
+    } else if (dvt.bitsLT(svt)) {
+      // BUGFIX: was 'svt.bitsLT(svt)' (a type compared against itself,
+      // always false), so narrowing float conversions were silently
+      // dropped.
+      Src = DAG.getNode(ISD::FP_ROUND, DL, dvt, Src,
+          DAG.getConstant(1, MVT::i32));
+    }
+  } else if (svt.isInteger() && dvt.isInteger()) {
+    // int -> int: sign-extend or truncate to the destination width.
+    if (!svt.bitsEq(dvt)) {
+      Src = DAG.getSExtOrTrunc(Src, DL, dvt);
+    }
+  } else if (svt.isInteger()) {
+    // int -> float: first resize the integer to the float's width, then
+    // either bitcast or convert.
+    unsigned opcode = (asType) ? ISDBITCAST : ISD::SINT_TO_FP;
+    if (!svt.bitsEq(dvt)) {
+      if (dvt.getSimpleVT().SimpleTy == MVT::f32) {
+        Src = DAG.getSExtOrTrunc(Src, DL, MVT::i32);
+      } else if (dvt.getSimpleVT().SimpleTy == MVT::f64) {
+        Src = DAG.getSExtOrTrunc(Src, DL, MVT::i64);
+      } else {
+        assert(0 && "We only support 32 and 64bit fp types");
+      }
+    }
+    Src = DAG.getNode(opcode, DL, dvt, Src);
+  } else if (dvt.isInteger()) {
+    // float -> int: bitcast or convert at the float's width, then resize
+    // the integer result to the destination width.
+    unsigned opcode = (asType) ? ISDBITCAST : ISD::FP_TO_SINT;
+    if (svt.getSimpleVT().SimpleTy == MVT::f32) {
+      Src = DAG.getNode(opcode, DL, MVT::i32, Src);
+    } else if (svt.getSimpleVT().SimpleTy == MVT::f64) {
+      Src = DAG.getNode(opcode, DL, MVT::i64, Src);
+    } else {
+      assert(0 && "We only support 32 and 64bit fp types");
+    }
+    Src = DAG.getSExtOrTrunc(Src, DL, dvt);
+  }
+  return Src;
+}
+// CondCCodeToCC - Convert a DAG condition code to an AMDIL CC condition,
+// dispatching on the comparison's operand type: i1/i8/i16/i32 share the
+// 32-bit integer codes, while i64, f32 and f64 each have their own set.
+// Asserts (and returns COND_ERROR in release builds) on any combination
+// the instruction selector should never produce, e.g. ordered float
+// comparisons on integer types.
+  static AMDILCC::CondCodes
+CondCCodeToCC(ISD::CondCode CC, const MVT::SimpleValueType& type)
+{
+  switch (CC) {
+    default:
+      {
+        errs()<<"Condition Code: "<< (unsigned int)CC<<"\n";
+        assert(0 && "Unknown condition code!");
+      }
+    case ISD::SETO:
+      switch(type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_O;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_O;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETUO:
+      switch(type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_UO;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_UO;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETGT:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_GT;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_GT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_GT;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_GT;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETGE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_GE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_GE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_GE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_GE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETLT:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_LT;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_LT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_LT;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_LT;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETLE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_LE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_LE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_LE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_LE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETNE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_NE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_NE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_NE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_NE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETEQ:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_I_EQ;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_EQ;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_EQ;
+        case MVT::i64:
+          return AMDILCC::IL_CC_L_EQ;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETUGT:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_GT;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_UGT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_UGT;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_GT;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETUGE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_GE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_UGE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_UGE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_GE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETULT:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_LT;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_ULT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_ULT;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_LT;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETULE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_LE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_ULE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_ULE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_LE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETUNE:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_NE;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_UNE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_UNE;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_NE;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETUEQ:
+      switch (type) {
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+          return AMDILCC::IL_CC_U_EQ;
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_UEQ;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_UEQ;
+        case MVT::i64:
+          return AMDILCC::IL_CC_UL_EQ;
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETOGT:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_OGT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_OGT;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETOGE:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_OGE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_OGE;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETOLT:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_OLT;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_OLT;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETOLE:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_OLE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_OLE;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETONE:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_ONE;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_ONE;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+    case ISD::SETOEQ:
+      switch (type) {
+        case MVT::f32:
+          return AMDILCC::IL_CC_F_OEQ;
+        case MVT::f64:
+          return AMDILCC::IL_CC_D_OEQ;
+        case MVT::i1:
+        case MVT::i8:
+        case MVT::i16:
+        case MVT::i32:
+        case MVT::i64:
+        default:
+          assert(0 && "Opcode combination not generated correctly!");
+          return AMDILCC::COND_ERROR;
+      };
+  };
+}
+
+// LowerMemArgument - Lower a single stack-passed formal argument: create a
+// fixed frame-index object for the parameter slot and load from it.  Byval
+// arguments get the address of their slot instead of a load.
+SDValue
+AMDILTargetLowering::LowerMemArgument(
+    SDValue Chain,
+    CallingConv::ID CallConv,
+    const SmallVectorImpl<ISD::InputArg> &Ins,
+    DebugLoc dl, SelectionDAG &DAG,
+    const CCValAssign &VA,
+    MachineFrameInfo *MFI,
+    unsigned i) const
+{
+  ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+
+  // Under fastcc with guaranteed tail calls, argument slots may be
+  // overwritten while lowering a tail call, so they must stay mutable.
+  // FIXME: byval objects are conservatively mutable too; refine with more
+  // analysis.
+  bool ForceMutable = (CallConv == CallingConv::Fast) &&
+    getTargetMachine().Options.GuaranteedTailCallOpt;
+  bool Immutable = !ForceMutable && !ArgFlags.isByVal();
+
+  int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits() / 8,
+                                  VA.getLocMemOffset(), Immutable);
+  SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+
+  // Byval arguments are handed to the function as the slot's address.
+  if (ArgFlags.isByVal())
+    return FIN;
+
+  return DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+                     MachinePointerInfo::getFixedStack(FI),
+                     false, false, false, 0);
+}
+//===----------------------------------------------------------------------===//
+// TargetLowering Implementation Help Functions End
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// TargetLowering Class Implementation Begins
+//===----------------------------------------------------------------------===//
+// AMDILTargetLowering - Configure how every (operation, value type) pair is
+// legalized for AMDIL, plus scheduling preferences and codegen-tuning
+// parameters.
+  AMDILTargetLowering::AMDILTargetLowering(TargetMachine &TM)
+: TargetLowering(TM, new TargetLoweringObjectFileELF())
+{
+  // All value types the target handles.
+  int types[] =
+  {
+    (int)MVT::i8,
+    (int)MVT::i16,
+    (int)MVT::i32,
+    (int)MVT::f32,
+    (int)MVT::f64,
+    (int)MVT::i64,
+    (int)MVT::v2i8,
+    (int)MVT::v4i8,
+    (int)MVT::v2i16,
+    (int)MVT::v4i16,
+    (int)MVT::v4f32,
+    (int)MVT::v4i32,
+    (int)MVT::v2f32,
+    (int)MVT::v2i32,
+    (int)MVT::v2f64,
+    (int)MVT::v2i64
+  };
+
+  int IntTypes[] =
+  {
+    (int)MVT::i8,
+    (int)MVT::i16,
+    (int)MVT::i32,
+    (int)MVT::i64
+  };
+
+  int FloatTypes[] =
+  {
+    (int)MVT::f32,
+    (int)MVT::f64
+  };
+
+  int VectorTypes[] =
+  {
+    (int)MVT::v2i8,
+    (int)MVT::v4i8,
+    (int)MVT::v2i16,
+    (int)MVT::v4i16,
+    (int)MVT::v4f32,
+    (int)MVT::v4i32,
+    (int)MVT::v2f32,
+    (int)MVT::v2i32,
+    (int)MVT::v2f64,
+    (int)MVT::v2i64
+  };
+  size_t numTypes = sizeof(types) / sizeof(*types);
+  size_t numFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes);
+  size_t numIntTypes = sizeof(IntTypes) / sizeof(*IntTypes);
+  size_t numVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes);
+
+  const AMDILSubtarget &STM = getTargetMachine().getSubtarget<AMDILSubtarget>();
+  // These are the current register classes that are
+  // supported
+
+  // Actions common to every supported type.
+  for (unsigned int x  = 0; x < numTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
+
+    //FIXME: SIGN_EXTEND_INREG is not meaningful for floating point types
+    // We cannot sextinreg, expand to shifts
+    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
+    setOperationAction(ISD::SUBE, VT, Expand);
+    setOperationAction(ISD::SUBC, VT, Expand);
+    setOperationAction(ISD::ADDE, VT, Expand);
+    setOperationAction(ISD::ADDC, VT, Expand);
+    setOperationAction(ISD::SETCC, VT, Custom);
+    setOperationAction(ISD::BRCOND, VT, Custom);
+    setOperationAction(ISD::BR_CC, VT, Custom);
+    setOperationAction(ISD::BR_JT, VT, Expand);
+    setOperationAction(ISD::BRIND, VT, Expand);
+    // TODO: Implement custom UREM/SREM routines
+    setOperationAction(ISD::SREM, VT, Expand);
+    setOperationAction(ISD::GlobalAddress, VT, Custom);
+    setOperationAction(ISD::JumpTable, VT, Custom);
+    setOperationAction(ISD::ConstantPool, VT, Custom);
+    setOperationAction(ISD::SELECT, VT, Custom);
+    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+    if (VT != MVT::i64 && VT != MVT::v2i64) {
+      setOperationAction(ISD::SDIV, VT, Custom);
+    }
+  }
+  // Actions for floating point types only.
+  // NOTE(review): SETOLT..SETULE below are ISD condition codes, not node
+  // opcodes; passing them to setOperationAction looks suspicious — confirm
+  // whether setCondCodeAction was intended.
+  for (unsigned int x = 0; x < numFloatTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
+
+    // IL does not have these operations for floating point types
+    setOperationAction(ISD::FP_ROUND_INREG, VT, Expand);
+    setOperationAction(ISD::SETOLT, VT, Expand);
+    setOperationAction(ISD::SETOGE, VT, Expand);
+    setOperationAction(ISD::SETOGT, VT, Expand);
+    setOperationAction(ISD::SETOLE, VT, Expand);
+    setOperationAction(ISD::SETULT, VT, Expand);
+    setOperationAction(ISD::SETUGE, VT, Expand);
+    setOperationAction(ISD::SETUGT, VT, Expand);
+    setOperationAction(ISD::SETULE, VT, Expand);
+  }
+
+  // Actions for integer types only.
+  for (unsigned int x = 0; x < numIntTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
+
+    // GPU also does not have divrem function for signed or unsigned
+    setOperationAction(ISD::SDIVREM, VT, Expand);
+
+    // GPU does not have [S|U]MUL_LOHI functions as a single instruction
+    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+
+    // GPU doesn't have a rotl, rotr, or byteswap instruction
+    setOperationAction(ISD::ROTR, VT, Expand);
+    setOperationAction(ISD::ROTL, VT, Expand);
+    setOperationAction(ISD::BSWAP, VT, Expand);
+
+    // GPU doesn't have any counting operators
+    setOperationAction(ISD::CTPOP, VT, Expand);
+    setOperationAction(ISD::CTTZ, VT, Expand);
+    setOperationAction(ISD::CTLZ, VT, Expand);
+  }
+
+  // Actions for vector types only.
+  for ( unsigned int ii = 0; ii < numVectorTypes; ++ii )
+  {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)VectorTypes[ii];
+
+    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
+    setOperationAction(ISD::SDIVREM, VT, Expand);
+    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+    // setOperationAction(ISD::VSETCC, VT, Expand);
+    setOperationAction(ISD::SETCC, VT, Expand);
+    setOperationAction(ISD::SELECT_CC, VT, Expand);
+    setOperationAction(ISD::SELECT, VT, Expand);
+
+  }
+  // 64-bit integer support, gated on the subtarget.
+  if (STM.device()->isSupported(AMDILDeviceInfo::LongOps)) {
+    setOperationAction(ISD::MULHU, MVT::i64, Expand);
+    setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
+    setOperationAction(ISD::MULHS, MVT::i64, Expand);
+    setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
+    setOperationAction(ISD::ADD, MVT::v2i64, Expand);
+    setOperationAction(ISD::SREM, MVT::v2i64, Expand);
+    setOperationAction(ISD::Constant          , MVT::i64  , Legal);
+    setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
+    setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
+    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
+    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
+    setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
+  }
+  // Double-precision support, gated on the subtarget.
+  if (STM.device()->isSupported(AMDILDeviceInfo::DoubleOps)) {
+    // we support loading/storing v2f64 but not operations on the type
+    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
+    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
+    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
+    setOperationAction(ISD::FP_ROUND_INREG, MVT::v2f64, Expand);
+    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
+    setOperationAction(ISD::ConstantFP        , MVT::f64  , Legal);
+    // We want to expand vector conversions into their scalar
+    // counterparts.
+    setOperationAction(ISD::TRUNCATE, MVT::v2f64, Expand);
+    setOperationAction(ISD::SIGN_EXTEND, MVT::v2f64, Expand);
+    setOperationAction(ISD::ZERO_EXTEND, MVT::v2f64, Expand);
+    setOperationAction(ISD::ANY_EXTEND, MVT::v2f64, Expand);
+    setOperationAction(ISD::FABS, MVT::f64, Expand);
+    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
+  }
+  // TODO: Fix the UDIV24 algorithm so it works for these
+  // types correctly. This needs vector comparisons
+  // for this to work correctly.
+  setOperationAction(ISD::UDIV, MVT::v2i8, Expand);
+  setOperationAction(ISD::UDIV, MVT::v4i8, Expand);
+  setOperationAction(ISD::UDIV, MVT::v2i16, Expand);
+  setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
+  setOperationAction(ISD::SUBC, MVT::Other, Expand);
+  setOperationAction(ISD::ADDE, MVT::Other, Expand);
+  setOperationAction(ISD::ADDC, MVT::Other, Expand);
+  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
+  setOperationAction(ISD::BR_CC, MVT::Other, Custom);
+  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+  setOperationAction(ISD::BRIND, MVT::Other, Expand);
+  setOperationAction(ISD::SETCC, MVT::Other, Custom);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
+
+  setOperationAction(ISD::BUILD_VECTOR, MVT::Other, Custom);
+  // Use the default implementation.
+  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
+  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
+  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
+  setOperationAction(ISD::ConstantFP        , MVT::f32    , Legal);
+  setOperationAction(ISD::Constant          , MVT::i32    , Legal);
+  setOperationAction(ISD::TRAP              , MVT::Other  , Legal);
+
+  setStackPointerRegisterToSaveRestore(AMDIL::SP);
+  setSchedulingPreference(Sched::RegPressure);
+  setPow2DivIsCheap(false);
+  setPrefLoopAlignment(16);
+  setSelectIsExpensive(true);
+  setJumpIsExpensive(true);
+
+  maxStoresPerMemcpy  = 4096;
+  maxStoresPerMemmove = 4096;
+  maxStoresPerMemset  = 4096;
+}
+
+// getTargetNodeName - Return the debug-dump name for a target-specific DAG
+// opcode, or 0 when the opcode is not an AMDIL node.
+const char *
+AMDILTargetLowering::getTargetNodeName(unsigned Opcode) const
+{
+  switch (Opcode) {
+  case AMDILISD::CMOVLOG:     return "AMDILISD::CMOVLOG";
+  case AMDILISD::MAD:         return "AMDILISD::MAD";
+  case AMDILISD::CALL:        return "AMDILISD::CALL";
+  case AMDILISD::SELECT_CC:   return "AMDILISD::SELECT_CC";
+  case AMDILISD::UMUL:        return "AMDILISD::UMUL";
+  case AMDILISD::DIV_INF:     return "AMDILISD::DIV_INF";
+  case AMDILISD::VBUILD:      return "AMDILISD::VBUILD";
+  case AMDILISD::CMP:         return "AMDILISD::CMP";
+  case AMDILISD::IL_CC_I_LT:  return "AMDILISD::IL_CC_I_LT";
+  case AMDILISD::IL_CC_I_LE:  return "AMDILISD::IL_CC_I_LE";
+  case AMDILISD::IL_CC_I_GT:  return "AMDILISD::IL_CC_I_GT";
+  case AMDILISD::IL_CC_I_GE:  return "AMDILISD::IL_CC_I_GE";
+  case AMDILISD::IL_CC_I_EQ:  return "AMDILISD::IL_CC_I_EQ";
+  case AMDILISD::IL_CC_I_NE:  return "AMDILISD::IL_CC_I_NE";
+  case AMDILISD::RET_FLAG:    return "AMDILISD::RET_FLAG";
+  case AMDILISD::BRANCH_COND: return "AMDILISD::BRANCH_COND";
+  default:                    return 0;
+  }
+}
+// getTgtMemIntrinsic - AMDIL declares no target intrinsics that touch
+// memory, so Info is never populated and every query returns false.
+bool
+AMDILTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+    const CallInst &I, unsigned Intrinsic) const
+{
+  return false;
+}
+
+// isFPImmLegal - The backend supports 32 and 64 bit floating point
+// immediates natively; every other FP type must be materialized another
+// way.  The immediate's value (Imm) is irrelevant, only its type matters.
+bool
+AMDILTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const
+{
+  MVT::SimpleValueType ScalarVT = VT.getScalarType().getSimpleVT().SimpleTy;
+  return ScalarVT == MVT::f32 || ScalarVT == MVT::f64;
+}
+
+// ShouldShrinkFPConstant - f32 and f64 immediates are both legal (see
+// isFPImmLegal above), so there is nothing to gain by shrinking a wider
+// constant to a narrower float; only shrink other FP types.
+bool
+AMDILTargetLowering::ShouldShrinkFPConstant(EVT VT) const
+{
+  MVT::SimpleValueType ScalarVT = VT.getScalarType().getSimpleVT().SimpleTy;
+  return ScalarVT != MVT::f32 && ScalarVT != MVT::f64;
+}
+
+
+// computeMaskedBitsForTargetNode - Determine which bits of Op are known to
+// be zero or one for target-specific nodes.  Used by the DAG combiner.
+
+void
+AMDILTargetLowering::computeMaskedBitsForTargetNode(
+    const SDValue Op,
+    APInt &KnownZero,
+    APInt &KnownOne,
+    const SelectionDAG &DAG,
+    unsigned Depth) const
+{
+  APInt KnownZero2;
+  APInt KnownOne2;
+  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything
+  switch (Op.getOpcode()) {
+    default: break;
+    case AMDILISD::SELECT_CC:
+             DAG.ComputeMaskedBits(
+                 Op.getOperand(1),
+                 KnownZero,
+                 KnownOne,
+                 Depth + 1
+                 );
+             // BUGFIX: the second query previously omitted Depth + 1, so it
+             // defaulted to depth 0 and lost the recursion limit.
+             DAG.ComputeMaskedBits(
+                 Op.getOperand(0),
+                 KnownZero2,
+                 KnownOne2,
+                 Depth + 1
+                 );
+             assert((KnownZero & KnownOne) == 0
+                 && "Bits known to be one AND zero?");
+             assert((KnownZero2 & KnownOne2) == 0
+                 && "Bits known to be one AND zero?");
+             // A bit is only known if it is known in both operands.
+             KnownOne &= KnownOne2;
+             KnownZero &= KnownZero2;
+             break;
+  };
+}
+
+// CCAssignFnForNode - Pick the CCAssignFn for a call or formal-argument
+// node.  Currently there is only one calling convention, so the Op
+// argument is ignored and CC_AMDIL32 is always returned.
+CCAssignFn*
+AMDILTargetLowering::CCAssignFnForNode(unsigned int Op) const
+{
+  //uint64_t CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+  return CC_AMDIL32;
+}
+
+// LowerCallResult - Lower the result values of an ISD::CALL into the
+// appropriate copies out of appropriate physical registers.  This assumes
+// that Chain/InFlag are the input chain/flag to use, and that TheCall is
+// the call being lowered.  The returns a SDNode with the same number of
+// values as the ISD::CALL.
+SDValue
+AMDILTargetLowering::LowerCallResult(
+    SDValue Chain,
+    SDValue InFlag,
+    CallingConv::ID CallConv,
+    bool isVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins,
+    DebugLoc dl,
+    SelectionDAG &DAG,
+    SmallVectorImpl<SDValue> &InVals) const
+{
+  // Work out where each returned value lives.
+  SmallVector<CCValAssign, 16> ResultLocs;
+  CCState Info(CallConv, isVarArg, DAG.getMachineFunction(),
+               getTargetMachine(), ResultLocs, *DAG.getContext());
+  Info.AnalyzeCallResult(Ins, RetCC_AMDIL32);
+
+  // Copy each register-resident result out of its physreg, threading the
+  // chain and glue through the successive copies.
+  for (unsigned Idx = 0, NumLocs = ResultLocs.size(); Idx != NumLocs; ++Idx) {
+    const CCValAssign &Loc = ResultLocs[Idx];
+    if (!Loc.isRegLoc())
+      continue;
+    SDValue Copy = DAG.getCopyFromReg(Chain, dl, Loc.getLocReg(),
+                                      Loc.getValVT(), InFlag);
+    Chain = Copy.getValue(1);
+    InFlag = Copy.getValue(2);
+    InVals.push_back(Copy.getValue(0));
+  }
+
+  return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+//                           Other Lowering Hooks
+//===----------------------------------------------------------------------===//
+
+// Recursively assign SDNodeOrdering to any unordered nodes.
+// This is necessary to maintain source ordering of instructions
+// under -O0 to avoid odd-looking "skipping around" issues.
+  static const SDValue
+Ordered( SelectionDAG &DAG, unsigned order, const SDValue New )
+{
+  // Order 0 means "no ordering"; also leave already-ordered nodes alone.
+  if (order == 0 || DAG.GetOrdering(New.getNode()) != 0)
+    return New;
+
+  DAG.AssignOrdering(New.getNode(), order);
+
+  // Propagate the same ordering to every (still unordered) operand.
+  for (unsigned Idx = 0, NumOps = New.getNumOperands(); Idx < NumOps; ++Idx)
+    Ordered(DAG, order, New.getOperand(Idx));
+
+  return New;
+}
+
+// LOWER(A) - expand to a case that dispatches ISD::A to LowerA and tags the
+// resulting nodes with the original node's ordering (see Ordered above).
+#define LOWER(A) \
+  case ISD:: A: \
+return Ordered( DAG, DAG.GetOrdering( Op.getNode() ), Lower##A(Op, DAG) )
+
+// LowerOperation - dispatch each custom-lowered opcode to its Lower*
+// implementation; dumps the node and asserts on any opcode that was marked
+// Custom but has no handler here.
+SDValue
+AMDILTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
+{
+  switch (Op.getOpcode()) {
+    default:
+      Op.getNode()->dump();
+      // NOTE(review): the adjacent string literals below concatenate to
+      // "...thisinstruction" — looks like a missing space; confirm before
+      // changing the message.
+      assert(0 && "Custom lowering code for this"
+          "instruction is not implemented yet!");
+      break;
+      LOWER(GlobalAddress);
+      LOWER(JumpTable);
+      LOWER(ConstantPool);
+      LOWER(ExternalSymbol);
+      LOWER(SDIV);
+      LOWER(SREM);
+      LOWER(BUILD_VECTOR);
+      LOWER(SELECT);
+      LOWER(SETCC);
+      LOWER(SIGN_EXTEND_INREG);
+      LOWER(DYNAMIC_STACKALLOC);
+      LOWER(BRCOND);
+      LOWER(BR_CC);
+  }
+  return Op;
+}
+
+#undef LOWER
+
+// LowerGlobalAddress - Lower a global address reference.  Global variables
+// whose initializer is a simple scalar constant are folded directly to
+// that constant; everything else becomes a TargetGlobalAddress node.
+SDValue
+AMDILTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
+{
+  SDValue DST = Op;
+  const GlobalAddressSDNode *GADN = cast<GlobalAddressSDNode>(Op);
+  const GlobalValue *G = GADN->getGlobal();
+  DebugLoc DL = Op.getDebugLoc();
+  const GlobalVariable *GV = dyn_cast<GlobalVariable>(G);
+  if (!GV) {
+    // Not a GlobalVariable (e.g. a function): emit the address of the
+    // global itself.  BUGFIX: the original passed the null GV here.
+    DST = DAG.getTargetGlobalAddress(G, DL, MVT::i32);
+  } else {
+    if (GV->hasInitializer()) {
+      // getInitializer() already returns a Constant; no cast needed.
+      const Constant *C = GV->getInitializer();
+      if (const ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+        DST = DAG.getConstant(CI->getValue(), Op.getValueType());
+      } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(C)) {
+        DST = DAG.getConstantFP(CF->getValueAPF(),
+            Op.getValueType());
+      } else if (isa<ConstantAggregateZero>(C)) {
+        EVT VT = Op.getValueType();
+        if (VT.isInteger()) {
+          DST = DAG.getConstant(0, VT);
+        } else {
+          DST = DAG.getConstantFP(0, VT);
+        }
+      } else {
+        // Dump the unhandled initializer before asserting so the
+        // diagnostic is actually visible in debug builds (the dump was
+        // previously unreachable, placed after the assert).
+        C->dump();
+        assert(!"lowering this type of Global Address "
+            "not implemented yet!");
+        DST = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
+      }
+    } else {
+      // No initializer visible: fall back to the symbolic address.
+      DST = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
+    }
+  }
+  return DST;
+}
+
+// LowerJumpTable - wrap the jump-table index in a TargetJumpTable node so
+// the selector leaves it alone.
+SDValue
+AMDILTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
+{
+  const JumpTableSDNode *Table = cast<JumpTableSDNode>(Op);
+  return DAG.getTargetJumpTable(Table->getIndex(), MVT::i32);
+}
+// LowerConstantPool - rewrite a ConstantPool node as a TargetConstantPool
+// node, preserving alignment, offset and target flags.
+SDValue
+AMDILTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
+{
+  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+  EVT PtrVT = Op.getValueType();
+  // Machine constant-pool entries carry their value differently.
+  if (CP->isMachineConstantPoolEntry()) {
+    return DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
+                                     CP->getAlignment(), CP->getOffset(),
+                                     CP->getTargetFlags());
+  }
+  return DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
+                                   CP->getAlignment(), CP->getOffset(),
+                                   CP->getTargetFlags());
+}
+
+// LowerExternalSymbol - wrap the symbol name in a TargetExternalSymbol
+// node so the selector leaves it alone.
+SDValue
+AMDILTargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const
+{
+  const char *SymName = cast<ExternalSymbolSDNode>(Op)->getSymbol();
+  return DAG.getTargetExternalSymbol(SymName, MVT::i32);
+}
+
+/// LowerFormalArguments - Transform physical registers into virtual
+/// registers and generate load operations for arguments placed on the
+/// stack. The lowered argument values are appended to InVals in argument
+/// order; the (possibly updated) chain is returned.
+/// TODO: isVarArg, hasStructRet, isMemReg
+  SDValue
+AMDILTargetLowering::LowerFormalArguments(SDValue Chain,
+    CallingConv::ID CallConv,
+    bool isVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins,
+    DebugLoc dl,
+    SelectionDAG &DAG,
+    SmallVectorImpl<SDValue> &InVals)
+const
+{
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  //const Function *Fn = MF.getFunction();
+  //MachineRegisterInfo &RegInfo = MF.getRegInfo();
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  // NOTE(review): the CallConv parameter is ignored in favor of the
+  // function's own calling convention — confirm this is intended.
+  CallingConv::ID CC = MF.getFunction()->getCallingConv();
+  //bool hasStructRet = MF.getFunction()->hasStructRetAttr();
+
+  CCState CCInfo(CC, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
+
+  // When more calling conventions are added, they need to be chosen here
+  CCInfo.AnalyzeFormalArguments(Ins, CC_AMDIL32);
+  SDValue StackPtr;
+
+  //unsigned int FirstStackArgLoc = 0;
+
+  for (unsigned int i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    if (VA.isRegLoc()) {
+      EVT RegVT = VA.getLocVT();
+      const TargetRegisterClass *RC = getRegClassFor(
+          RegVT.getSimpleVT().SimpleTy);
+
+      // Mark the physical register live-in and read it through a vreg.
+      unsigned int Reg = MF.addLiveIn(VA.getLocReg(), RC);
+      SDValue ArgValue = DAG.getCopyFromReg(
+          Chain,
+          dl,
+          Reg,
+          RegVT);
+      // If this is an 8 or 16-bit value, it is really passed
+      // promoted to 32 bits.  Insert an assert[sz]ext to capture
+      // this, then truncate to the right size.
+
+      if (VA.getLocInfo() == CCValAssign::SExt) {
+        ArgValue = DAG.getNode(
+            ISD::AssertSext,
+            dl,
+            RegVT,
+            ArgValue,
+            DAG.getValueType(VA.getValVT()));
+      } else if (VA.getLocInfo() == CCValAssign::ZExt) {
+        ArgValue = DAG.getNode(
+            ISD::AssertZext,
+            dl,
+            RegVT,
+            ArgValue,
+            DAG.getValueType(VA.getValVT()));
+      }
+      if (VA.getLocInfo() != CCValAssign::Full) {
+        ArgValue = DAG.getNode(
+            ISD::TRUNCATE,
+            dl,
+            VA.getValVT(),
+            ArgValue);
+      }
+      // Add the value to the list of arguments
+      // to be passed in registers
+      InVals.push_back(ArgValue);
+      if (isVarArg) {
+        assert(0 && "Variable arguments are not yet supported");
+        // See MipsISelLowering.cpp for ideas on how to implement
+      }
+    } else if(VA.isMemLoc()) {
+      // Stack-passed argument: emit the load from the fixed stack slot.
+      InVals.push_back(LowerMemArgument(Chain, CallConv, Ins,
+            dl, DAG, VA, MFI, i));
+    } else {
+      assert(0 && "found a Value Assign that is "
+          "neither a register or a memory location");
+    }
+  }
+  /*if (hasStructRet) {
+    assert(0 && "Has struct return is not yet implemented");
+  // See MipsISelLowering.cpp for ideas on how to implement
+  }*/
+
+  if (isVarArg) {
+    assert(0 && "Variable arguments are not yet supported");
+    // See X86/PPC/CellSPU ISelLowering.cpp for ideas on how to implement
+  }
+  // This needs to be changed to non-zero if the return function needs
+  // to pop bytes
+  return Chain;
+}
+/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
+/// by "Src" to address "Dst" with size and alignment information specified by
+/// the specific parameter attribute. The copy will be passed as a byval
+/// function parameter.
+static SDValue
+CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
+    ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
+  // Placeholder: the assert below fires before the memcpy is ever emitted,
+  // so byval arguments are effectively unsupported for now.
+  assert(0 && "MemCopy does not exist yet");
+  SDValue SizeNode     = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+
+  return DAG.getMemcpy(Chain,
+      Src.getDebugLoc(),
+      Dst, Src, SizeNode, Flags.getByValAlign(),
+      /*IsVol=*/false, /*AlwaysInline=*/true, 
+      MachinePointerInfo(), MachinePointerInfo());
+}
+
+SDValue
+AMDILTargetLowering::LowerMemOpCallTo(SDValue Chain,
+    SDValue StackPtr, SDValue Arg,
+    DebugLoc dl, SelectionDAG &DAG,
+    const CCValAssign &VA,
+    ISD::ArgFlagsTy Flags) const
+{
+  // Compute the outgoing argument's stack address (StackPtr + offset) and
+  // either copy a byval aggregate there or store the value directly.
+  unsigned int Offset = VA.getLocMemOffset();
+  SDValue Addr = DAG.getNode(ISD::ADD,
+      dl,
+      getPointerTy(), StackPtr, DAG.getIntPtrConstant(Offset));
+  if (Flags.isByVal())
+    return CreateCopyOfByValArgument(Arg, Addr, Chain, Flags, DAG);
+  return DAG.getStore(Chain, dl, Arg, Addr,
+      MachinePointerInfo::getStack(Offset),
+      false, false, 0);
+}
+/// LowerCall - Function arguments are copied from virtual regs to
+/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are
+/// emitted around the target CALL node.
+/// TODO: isVarArg, isTailCall, hasStructRet
+SDValue
+AMDILTargetLowering::LowerCall(CallLoweringInfo &CLI,
+    SmallVectorImpl<SDValue> &InVals) const
+
+// (Pre-CLI signature kept for reference.)
+#if 0
+    SDValue Chain, SDValue Callee,
+    CallingConv::ID CallConv, bool isVarArg, bool doesNotRet,
+    bool& isTailCall,
+    const SmallVectorImpl<ISD::OutputArg> &Outs,
+    const SmallVectorImpl<SDValue> &OutVals,
+    const SmallVectorImpl<ISD::InputArg> &Ins,
+    DebugLoc dl, SelectionDAG &DAG,
+#endif
+{
+  // Tail calls are not supported; force the non-tail path below.
+  CLI.IsTailCall = false;
+  MachineFunction& MF = CLI.DAG.getMachineFunction();
+  // FIXME: DO we need to handle fast calling conventions and tail call
+  // optimizations?? X86/PPC ISelLowering
+  /*bool hasStructRet = (TheCall->getNumArgs())
+    ? TheCall->getArgFlags(0).device()->isSRet()
+    : false;*/
+
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+
+  // Analyze operands of the call, assigning locations to each operand
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, CLI.DAG.getMachineFunction(),
+                 getTargetMachine(), ArgLocs, *CLI.DAG.getContext());
+  // Analyze the calling operands, but need to change
+  // if we have more than one calling convention
+  CCInfo.AnalyzeCallOperands(CLI.Outs, CCAssignFnForNode(CLI.CallConv));
+
+  unsigned int NumBytes = CCInfo.getNextStackOffset();
+  if (CLI.IsTailCall) {
+    assert(CLI.IsTailCall && "Tail Call not handled yet!");
+    // See X86/PPC ISelLowering
+  }
+
+  CLI.Chain = CLI.DAG.getCALLSEQ_START(CLI.Chain,
+                                   CLI.DAG.getIntPtrConstant(NumBytes, true));
+
+  SmallVector<std::pair<unsigned int, SDValue>, 8> RegsToPass;
+  SmallVector<SDValue, 8> MemOpChains;
+  SDValue StackPtr;
+  //unsigned int FirstStacArgLoc = 0;
+  //int LastArgStackLoc = 0;
+
+  // Walk the register/memloc assignments, insert copies/loads
+  for (unsigned int i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    //bool isByVal = Flags.isByVal(); // handle byval/bypointer registers
+    // Arguments start after the 5 first operands of ISD::CALL
+    SDValue Arg = CLI.OutVals[i];
+    //Promote the value if needed
+    switch(VA.getLocInfo()) {
+      default: assert(0 && "Unknown loc info!");
+      case CCValAssign::Full:
+               break;
+      case CCValAssign::SExt:
+               Arg = CLI.DAG.getNode(ISD::SIGN_EXTEND,
+                   CLI.DL,
+                   VA.getLocVT(), Arg);
+               break;
+      case CCValAssign::ZExt:
+               Arg = CLI.DAG.getNode(ISD::ZERO_EXTEND,
+                   CLI.DL,
+                   VA.getLocVT(), Arg);
+               break;
+      case CCValAssign::AExt:
+               Arg = CLI.DAG.getNode(ISD::ANY_EXTEND,
+                   CLI.DL,
+                   VA.getLocVT(), Arg);
+               break;
+    }
+
+    if (VA.isRegLoc()) {
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+    } else if (VA.isMemLoc()) {
+      // Create the frame index object for this incoming parameter
+      int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
+          VA.getLocMemOffset(), true);
+      SDValue PtrOff = CLI.DAG.getFrameIndex(FI,getPointerTy());
+
+      // emit ISD::STORE which stores the
+      // parameter value to a stack Location
+      MemOpChains.push_back(CLI.DAG.getStore(CLI.Chain, CLI.DL, Arg, PtrOff,
+            MachinePointerInfo::getFixedStack(FI),
+            false, false, 0));
+    } else {
+      assert(0 && "Not a Reg/Mem Loc, major error!");
+    }
+  }
+  // Join all argument stores into one token so the call depends on each.
+  if (!MemOpChains.empty()) {
+    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor,
+        CLI.DL,
+        MVT::Other,
+        &MemOpChains[0],
+        MemOpChains.size());
+  }
+  SDValue InFlag;
+  if (!CLI.IsTailCall) {
+    // Glue the register copies together so they stay with the call.
+    for (unsigned int i = 0, e = RegsToPass.size(); i != e; ++i) {
+      CLI.Chain = CLI.DAG.getCopyToReg(CLI.Chain,
+          CLI.DL,
+          RegsToPass[i].first,
+          RegsToPass[i].second,
+          InFlag);
+      InFlag = CLI.Chain.getValue(1);
+    }
+  }
+
+  // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
+  // every direct call is) turn it into a TargetGlobalAddress/
+  // TargetExternalSymbol
+  // node so that legalize doesn't hack it.
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee))  {
+    CLI.Callee = CLI.DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, getPointerTy());
+  }
+  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
+    CLI.Callee = CLI.DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+  }
+  else if (CLI.IsTailCall) {
+    assert(0 && "Tail calls are not handled yet");
+    // see X86 ISelLowering for ideas on implementation: 1708
+  }
+
+  SDVTList NodeTys = CLI.DAG.getVTList(MVT::Other, MVTGLUE);
+  SmallVector<SDValue, 8> Ops;
+
+  if (CLI.IsTailCall) {
+    assert(0 && "Tail calls are not handled yet");
+    // see X86 ISelLowering for ideas on implementation: 1721
+  }
+  // If this is a direct call, pass the chain and the callee
+  if (CLI.Callee.getNode()) {
+    Ops.push_back(CLI.Chain);
+    Ops.push_back(CLI.Callee);
+  }
+
+  if (CLI.IsTailCall) {
+    assert(0 && "Tail calls are not handled yet");
+    // see X86 ISelLowering for ideas on implementation: 1739
+  }
+
+  // Add argument registers to the end of the list so that they are known
+  // live into the call
+  for (unsigned int i = 0, e = RegsToPass.size(); i != e; ++i) {
+    Ops.push_back(CLI.DAG.getRegister(
+          RegsToPass[i].first,
+          RegsToPass[i].second.getValueType()));
+  }
+  if (InFlag.getNode()) {
+    Ops.push_back(InFlag);
+  }
+
+  // Emit Tail Call
+  if (CLI.IsTailCall) {
+    assert(0 && "Tail calls are not handled yet");
+    // see X86 ISelLowering for ideas on implementation: 1762
+  }
+
+  CLI.Chain = CLI.DAG.getNode(AMDILISD::CALL,
+      CLI.DL,
+      NodeTys, &Ops[0], Ops.size());
+  InFlag = CLI.Chain.getValue(1);
+
+  // Create the CALLSEQ_END node
+  CLI.Chain = CLI.DAG.getCALLSEQ_END(
+      CLI.Chain,
+      CLI.DAG.getIntPtrConstant(NumBytes, true),
+      CLI.DAG.getIntPtrConstant(0, true),
+      InFlag);
+  InFlag = CLI.Chain.getValue(1);
+  // Handle result values, copying them out of physregs into vregs that
+  // we return
+  return LowerCallResult(CLI.Chain, InFlag, CLI.CallConv, CLI.IsVarArg, CLI.Ins, CLI.DL, CLI.DAG,
+      InVals);
+}
+
+SDValue
+AMDILTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const
+{
+  // Dispatch signed division to the routine for the operand's scalar
+  // bit width; unhandled types are returned unchanged.
+  EVT EltVT = Op.getValueType().getScalarType();
+  if (EltVT == MVT::i64)
+    return LowerSDIV64(Op, DAG);
+  if (EltVT == MVT::i32)
+    return LowerSDIV32(Op, DAG);
+  if (EltVT == MVT::i16 || EltVT == MVT::i8)
+    return LowerSDIV24(Op, DAG);
+  return SDValue(Op.getNode(), 0);
+}
+
+SDValue
+AMDILTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const
+{
+  // Dispatch signed remainder to the routine for the operand's scalar
+  // bit width; unhandled types are returned unchanged.
+  EVT EltVT = Op.getValueType().getScalarType();
+  if (EltVT == MVT::i64)
+    return LowerSREM64(Op, DAG);
+  if (EltVT == MVT::i32)
+    return LowerSREM32(Op, DAG);
+  if (EltVT == MVT::i16)
+    return LowerSREM16(Op, DAG);
+  if (EltVT == MVT::i8)
+    return LowerSREM8(Op, DAG);
+  return SDValue(Op.getNode(), 0);
+}
+
+SDValue
+AMDILTargetLowering::LowerBUILD_VECTOR( SDValue Op, SelectionDAG &DAG ) const
+{
+  // Lower build_vector by splatting operand 0 with VBUILD, then
+  // overwriting lanes 1..3 with insert_vector_elt for any operand that is
+  // not undef.
+  EVT VT = Op.getValueType();
+  SDValue Nodes1;
+  SDValue second;
+  SDValue third;
+  SDValue fourth;
+  DebugLoc DL = Op.getDebugLoc();
+  Nodes1 = DAG.getNode(AMDILISD::VBUILD,
+      DL,
+      VT, Op.getOperand(0));
+#if 0
+  bool allEqual = true;
+  for (unsigned x = 1, y = Op.getNumOperands(); x < y; ++x) {
+    if (Op.getOperand(0) != Op.getOperand(x)) {
+      allEqual = false;
+      break;
+    }
+  }
+  if (allEqual) {
+    return Nodes1;
+  }
+#endif
+  // Cases fall through intentionally: a 4-operand vector also inserts
+  // elements 3 and 2. Note that operand counts other than 1-4 hit
+  // `default` and receive only the splat.
+  switch(Op.getNumOperands()) {
+    default:
+    case 1:
+      break;
+    case 4:
+      fourth = Op.getOperand(3);
+      if (fourth.getOpcode() != ISD::UNDEF) {
+        // NOTE(review): insert indices 7/6/5 look like a target-specific
+        // lane encoding rather than plain element indices — confirm
+        // against the INSERT_VECTOR_ELT handling.
+        Nodes1 = DAG.getNode(
+            ISD::INSERT_VECTOR_ELT,
+            DL,
+            Op.getValueType(),
+            Nodes1,
+            fourth,
+            DAG.getConstant(7, MVT::i32));
+      }
+    case 3:
+      third = Op.getOperand(2);
+      if (third.getOpcode() != ISD::UNDEF) {
+        Nodes1 = DAG.getNode(
+            ISD::INSERT_VECTOR_ELT,
+            DL,
+            Op.getValueType(),
+            Nodes1,
+            third,
+            DAG.getConstant(6, MVT::i32));
+      }
+    case 2:
+      second = Op.getOperand(1);
+      if (second.getOpcode() != ISD::UNDEF) {
+        Nodes1 = DAG.getNode(
+            ISD::INSERT_VECTOR_ELT,
+            DL,
+            Op.getValueType(),
+            Nodes1,
+            second,
+            DAG.getConstant(5, MVT::i32));
+      }
+      break;
+  };
+  return Nodes1;
+}
+
+SDValue
+AMDILTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower select into the target conditional-move node; the condition is
+  // first normalized through getConversionNode.
+  DebugLoc DL = Op.getDebugLoc();
+  SDValue Pred = getConversionNode(DAG, Op.getOperand(0), Op, true);
+  return DAG.getNode(AMDILISD::CMOVLOG, DL, Op.getValueType(),
+                     Pred, Op.getOperand(1), Op.getOperand(2));
+}
+SDValue
+AMDILTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower setcc as a select_cc producing an all-ones/zero mask, convert it
+  // through getConversionNode, then AND with 1 so the result is 0 or 1.
+  SDValue Cond;
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  SDValue CC  = Op.getOperand(2);
+  DebugLoc DL = Op.getDebugLoc();
+  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+  // Validate that the condition code is representable for this type.
+  // NOTE(review): AMDILCC is only consumed by the assert below.
+  unsigned int AMDILCC = CondCCodeToCC(
+      SetCCOpcode,
+      LHS.getValueType().getSimpleVT().SimpleTy);
+  assert((AMDILCC != AMDILCC::COND_ERROR) && "Invalid SetCC!");
+  Cond = DAG.getNode(
+      ISD::SELECT_CC,
+      Op.getDebugLoc(),
+      LHS.getValueType(),
+      LHS, RHS,
+      DAG.getConstant(-1, MVT::i32),
+      DAG.getConstant(0, MVT::i32),
+      CC);
+  Cond = getConversionNode(DAG, Cond, Op, true);
+  Cond = DAG.getNode(
+      ISD::AND,
+      DL,
+      Cond.getValueType(),
+      DAG.getConstant(1, Cond.getValueType()),
+      Cond);
+  return Cond;
+}
+
+SDValue
+AMDILTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower sign_extend_inreg as shl + sra by (srcBits - baseBits); sub-32-bit
+  // source types are widened to 32 bits first and narrowed back afterwards.
+  SDValue Data = Op.getOperand(0);
+  VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
+  DebugLoc DL = Op.getDebugLoc();
+  EVT DVT = Data.getValueType();
+  EVT BVT = BaseType->getVT();
+  unsigned baseBits = BVT.getScalarType().getSizeInBits();
+  unsigned srcBits = DVT.isSimple() ? DVT.getScalarType().getSizeInBits() : 1;
+  unsigned shiftBits = srcBits - baseBits;
+  if (srcBits < 32) {
+    // If the op is less than 32 bits, then it needs to extend to 32bits
+    // so it can properly keep the upper bits valid.
+    EVT IVT = genIntType(32, DVT.isVector() ? DVT.getVectorNumElements() : 1);
+    Data = DAG.getNode(ISD::ZERO_EXTEND, DL, IVT, Data);
+    shiftBits = 32 - baseBits;
+    DVT = IVT;
+  }
+  SDValue Shift = DAG.getConstant(shiftBits, DVT);
+  // Shift left by 'Shift' bits.
+  Data = DAG.getNode(ISD::SHL, DL, DVT, Data, Shift);
+  // Signed shift Right by 'Shift' bits.
+  Data = DAG.getNode(ISD::SRA, DL, DVT, Data, Shift);
+  if (srcBits < 32) {
+    // Once the sign extension is done, the op needs to be converted to
+    // its original type.
+    Data = DAG.getSExtOrTrunc(Data, DL, Op.getOperand(0).getValueType());
+  }
+  return Data;
+}
+EVT
+AMDILTargetLowering::genIntType(uint32_t size, uint32_t numEle) const
+{
+  // Pack `numEle` elements of `size` bits into 32-bit (or 64-bit when
+  // size == 64) integer lanes, always producing at least one lane.
+  int totalBits = (int)(size * numEle);
+  int lanes = totalBits >> ((size == 64) ? 6 : 5);
+  if (lanes == 0) {
+    lanes = 1;
+  }
+  MVT EltTy = (size == 64) ? MVT::i64 : MVT::i32;
+  if (lanes == 1) {
+    return EVT(EltTy);
+  }
+  return EVT(MVT::getVectorVT(EltTy, lanes));
+}
+
+SDValue
+AMDILTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+    SelectionDAG &DAG) const
+{
+  // Bump the AMDIL stack-pointer register by Size and merge the results:
+  // the returned node's values are {NewSP, updated chain}.
+  SDValue Chain = Op.getOperand(0);
+  SDValue Size = Op.getOperand(1);
+  unsigned int SPReg = AMDIL::SP;
+  DebugLoc DL = Op.getDebugLoc();
+  SDValue SP = DAG.getCopyFromReg(Chain,
+      DL,
+      SPReg, MVT::i32);
+  SDValue NewSP = DAG.getNode(ISD::ADD,
+      DL,
+      MVT::i32, SP, Size);
+  // Write the incremented pointer back to SP, chained after the read.
+  Chain = DAG.getCopyToReg(SP.getValue(1),
+      DL,
+      SPReg, NewSP);
+  SDValue Ops[2] = {NewSP, Chain};
+  Chain = DAG.getMergeValues(Ops, 2 ,DL);
+  return Chain;
+}
+SDValue
+AMDILTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
+{
+  // brcond(chain, cond, dest) -> BRANCH_COND(chain, dest, cond).
+  SDValue Chain = Op.getOperand(0);
+  SDValue Pred  = Op.getOperand(1);
+  SDValue Dest  = Op.getOperand(2);
+  return DAG.getNode(AMDILISD::BRANCH_COND, Op.getDebugLoc(),
+                     Op.getValueType(), Chain, Dest, Pred);
+}
+
+SDValue
+AMDILTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower br_cc by materializing the comparison as a select_cc producing
+  // an all-ones/zero mask, then branching on that mask via BRANCH_COND.
+  SDValue Chain = Op.getOperand(0);
+  SDValue CC = Op.getOperand(1);
+  SDValue LHS   = Op.getOperand(2);
+  SDValue RHS   = Op.getOperand(3);
+  SDValue JumpT  = Op.getOperand(4);
+  SDValue CmpValue;
+  SDValue Result;
+  CmpValue = DAG.getNode(
+      ISD::SELECT_CC,
+      Op.getDebugLoc(),
+      LHS.getValueType(),
+      LHS, RHS,
+      DAG.getConstant(-1, MVT::i32),
+      DAG.getConstant(0, MVT::i32),
+      CC);
+  Result = DAG.getNode(
+      AMDILISD::BRANCH_COND,
+      CmpValue.getDebugLoc(),
+      MVT::Other, Chain,
+      JumpT, CmpValue);
+  return Result;
+}
+
+// LowerReturn - Lower an ISD::RET node: copy each return value into its
+// assigned physical register (glued together) and emit RET_FLAG.
+SDValue
+AMDILTargetLowering::LowerReturn(SDValue Chain,
+    CallingConv::ID CallConv, bool isVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs,
+    const SmallVectorImpl<SDValue> &OutVals,
+    DebugLoc dl, SelectionDAG &DAG)
+const
+{
+  //MachineFunction& MF = DAG.getMachineFunction();
+  // CCValAssign - represent the assignment of the return value
+  // to a location
+  SmallVector<CCValAssign, 16> RVLocs;
+
+  // CCState - Info about the registers and stack slot
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), RVLocs, *DAG.getContext());
+
+  // Analyze return values of ISD::RET
+  CCInfo.AnalyzeReturn(Outs, RetCC_AMDIL32);
+  // If this is the first return lowered for this function, add
+  // the regs to the liveout set for the function
+  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+  for (unsigned int i = 0, e = RVLocs.size(); i != e; ++i) {
+    if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) {
+      MRI.addLiveOut(RVLocs[i].getLocReg());
+    }
+  }
+  // FIXME: implement this when tail call is implemented
+  // Chain = GetPossiblePreceedingTailCall(Chain, AMDILISD::TAILCALL);
+  // both x86 and ppc implement this in ISelLowering
+
+  // Regular return here
+  SDValue Flag;
+  SmallVector<SDValue, 6> RetOps;
+  RetOps.push_back(Chain);
+  RetOps.push_back(DAG.getConstant(0/*getBytesToPopOnReturn()*/, MVT::i32));
+  for (unsigned int i = 0, e = RVLocs.size(); i != e; ++i) {
+    CCValAssign &VA = RVLocs[i];
+    SDValue ValToCopy = OutVals[i];
+    assert(VA.isRegLoc() && "Can only return in registers!");
+    // Copy the value into its assigned physical return register.
+    Chain = DAG.getCopyToReg(Chain,
+        dl,
+        VA.getLocReg(),
+        ValToCopy,
+        Flag);
+    // Glue all emitted copies together so the scheduler keeps them
+    // adjacent to the return.
+    Flag = Chain.getValue(1);
+  }
+  /*if (MF.getFunction()->hasStructRetAttr()) {
+    assert(0 && "Struct returns are not yet implemented!");
+  // Both MIPS and X86 have this
+  }*/
+  RetOps[0] = Chain;
+  if (Flag.getNode())
+    RetOps.push_back(Flag);
+
+  Flag = DAG.getNode(AMDILISD::RET_FLAG,
+      dl,
+      MVT::Other, &RetOps[0], RetOps.size());
+  return Flag;
+}
+
+unsigned int
+AMDILTargetLowering::getFunctionAlignment(const Function *) const
+{
+  // Log2 alignment: 0 means no extra alignment is requested for functions.
+  return 0;
+}
+
+SDValue
+AMDILTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower signed division of 8/16-bit (scalar or vector) values through
+  // 32-bit floating point: q = trunc(fa / fb), then add a +/-1 correction
+  // with the quotient's sign when |mad(-q, fb, fa)| >= |fb|, i.e. when the
+  // truncation dropped a whole divisor.
+  DebugLoc DL = Op.getDebugLoc();
+  EVT OVT = Op.getValueType();
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  // Default to the scalar types so INTTY/FLTTY are always initialized;
+  // the previous code left them indeterminate for unhandled vector widths.
+  MVT INTTY = MVT::i32;
+  MVT FLTTY = MVT::f32;
+  if (OVT.isVector() && OVT.getVectorNumElements() == 2) {
+    INTTY = MVT::v2i32;
+    FLTTY = MVT::v2f32;
+  } else if (OVT.isVector() && OVT.getVectorNumElements() == 4) {
+    INTTY = MVT::v4i32;
+    FLTTY = MVT::v4f32;
+  }
+  unsigned bitsize = OVT.getScalarType().getSizeInBits();
+  // char|short jq = ia ^ ib;  (sign of the quotient)
+  SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);
+
+  // jq = jq >> (bitsize - 2)
+  jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));
+
+  // jq = jq | 0x1  (correction is +/-1 with the quotient's sign)
+  jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));
+
+  // jq = (int)jq
+  jq = DAG.getSExtOrTrunc(jq, DL, INTTY);
+
+  // int ia = (int)LHS;
+  SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);
+
+  // int ib, (int)RHS;
+  SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);
+
+  // float fa = (float)ia;
+  SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);
+
+  // float fb = (float)ib;
+  SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);
+
+  // float fq = native_divide(fa, fb);
+  SDValue fq = DAG.getNode(AMDILISD::DIV_INF, DL, FLTTY, fa, fb);
+
+  // fq = trunc(fq);
+  fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);
+
+  // float fqneg = -fq;
+  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);
+
+  // float fr = mad(fqneg, fb, fa);  (exact remainder of the truncation)
+  SDValue fr = DAG.getNode(AMDILISD::MAD, DL, FLTTY, fqneg, fb, fa);
+
+  // int iq = (int)fq;
+  SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);
+
+  // fr = fabs(fr);
+  fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);
+
+  // fb = fabs(fb);
+  fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);
+
+  // int cv = fr >= fb;
+  // (The previous if/else emitted the identical setcc on both branches,
+  // so a single call suffices.)
+  SDValue cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
+
+  // jq = (cv ? jq : 0);
+  jq = DAG.getNode(AMDILISD::CMOVLOG, DL, OVT, cv, jq,
+      DAG.getConstant(0, OVT));
+  // dst = iq + jq;
+  iq = DAG.getSExtOrTrunc(iq, DL, OVT);
+  iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
+  return iq;
+}
+
+SDValue
+AMDILTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower 32-bit signed division via unsigned division:
+  //   |x| = (x + s) ^ s  where s is an all-ones mask when x < 0,
+  //   q = |x| u/ |y|, and the result is (q + s') ^ s' with
+  //   s' = sign(x) ^ sign(y) (negate iff signs differ).
+  DebugLoc DL = Op.getDebugLoc();
+  EVT OVT = Op.getValueType();
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  // The LowerSDIV32 function generates equivalent to the following IL.
+  // mov r0, LHS
+  // mov r1, RHS
+  // ilt r10, r0, 0
+  // ilt r11, r1, 0
+  // iadd r0, r0, r10
+  // iadd r1, r1, r11
+  // ixor r0, r0, r10
+  // ixor r1, r1, r11
+  // udiv r0, r0, r1
+  // ixor r10, r10, r11
+  // iadd r0, r0, r10
+  // ixor DST, r0, r10
+
+  // mov r0, LHS
+  SDValue r0 = LHS;
+
+  // mov r1, RHS
+  SDValue r1 = RHS;
+
+  // ilt r10, r0, 0
+  // NOTE(review): the select values are typed MVT::i32 even when OVT is a
+  // vector type — confirm getSelectCC accepts this for vector SDIV.
+  SDValue r10 = DAG.getSelectCC(DL,
+      r0, DAG.getConstant(0, OVT),
+      DAG.getConstant(-1, MVT::i32),
+      DAG.getConstant(0, MVT::i32),
+      ISD::SETLT);
+
+  // ilt r11, r1, 0
+  SDValue r11 = DAG.getSelectCC(DL,
+      r1, DAG.getConstant(0, OVT),
+      DAG.getConstant(-1, MVT::i32),
+      DAG.getConstant(0, MVT::i32),
+      ISD::SETLT);
+
+  // iadd r0, r0, r10
+  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+  // iadd r1, r1, r11
+  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+  // ixor r0, r0, r10
+  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+  // ixor r1, r1, r11
+  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+  // udiv r0, r0, r1
+  r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
+
+  // ixor r10, r10, r11
+  r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);
+
+  // iadd r0, r0, r10
+  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+  // ixor DST, r0, r10
+  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10); 
+  return DST;
+}
+
+SDValue
+AMDILTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const
+{
+  // 64-bit signed division is not custom-lowered yet; return the original
+  // node unchanged.
+  return SDValue(Op.getNode(), 0);
+}
+
+SDValue
+AMDILTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const
+{
+  // Promote i8 (scalar or 2/4-wide vector) operands to 32 bits, perform
+  // the signed remainder there, and truncate back to the original type.
+  DebugLoc DL = Op.getDebugLoc();
+  EVT OVT = Op.getValueType();
+  MVT WideTy = MVT::i32;
+  if (OVT == MVT::v2i8) {
+    WideTy = MVT::v2i32;
+  } else if (OVT == MVT::v4i8) {
+    WideTy = MVT::v4i32;
+  }
+  SDValue Lo = DAG.getSExtOrTrunc(Op.getOperand(0), DL, WideTy);
+  SDValue Ro = DAG.getSExtOrTrunc(Op.getOperand(1), DL, WideTy);
+  SDValue Rem = DAG.getNode(ISD::SREM, DL, WideTy, Lo, Ro);
+  return DAG.getSExtOrTrunc(Rem, DL, OVT);
+}
+
+SDValue
+AMDILTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const
+{
+  // Promote i16 (scalar or 2/4-wide vector) operands to 32 bits, perform
+  // the signed remainder there, and truncate back to the original type.
+  DebugLoc DL = Op.getDebugLoc();
+  EVT OVT = Op.getValueType();
+  MVT WideTy = MVT::i32;
+  if (OVT == MVT::v2i16) {
+    WideTy = MVT::v2i32;
+  } else if (OVT == MVT::v4i16) {
+    WideTy = MVT::v4i32;
+  }
+  SDValue Lo = DAG.getSExtOrTrunc(Op.getOperand(0), DL, WideTy);
+  SDValue Ro = DAG.getSExtOrTrunc(Op.getOperand(1), DL, WideTy);
+  SDValue Rem = DAG.getNode(ISD::SREM, DL, WideTy, Lo, Ro);
+  return DAG.getSExtOrTrunc(Rem, DL, OVT);
+}
+
+SDValue
+AMDILTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const
+{
+  // Lower 32-bit signed remainder via unsigned arithmetic:
+  //   |x| = (x + s) ^ s  (s is an all-ones mask when x < 0)
+  //   r = |x| - (|x| u/ |y|) * |y|
+  //   result = (r + s) ^ s  (the remainder takes the dividend's sign)
+  DebugLoc DL = Op.getDebugLoc();
+  EVT OVT = Op.getValueType();
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  // The LowerSREM32 function generates equivalent to the following IL.
+  // mov r0, LHS
+  // mov r1, RHS
+  // ilt r10, r0, 0
+  // ilt r11, r1, 0
+  // iadd r0, r0, r10
+  // iadd r1, r1, r11
+  // ixor r0, r0, r10
+  // ixor r1, r1, r11
+  // udiv r20, r0, r1
+  // umul r20, r20, r1
+  // sub r0, r0, r20
+  // iadd r0, r0, r10
+  // ixor DST, r0, r10
+
+  // mov r0, LHS
+  SDValue r0 = LHS;
+
+  // mov r1, RHS
+  SDValue r1 = RHS;
+
+  // ilt r10, r0, 0  -- all-ones mask if LHS is negative
+  SDValue r10 = DAG.getNode(AMDILISD::CMP, DL, OVT,
+      DAG.getConstant(CondCCodeToCC(ISD::SETLT, MVT::i32), MVT::i32),
+      r0, DAG.getConstant(0, OVT));
+
+  // ilt r11, r1, 0  -- all-ones mask if RHS is negative
+  SDValue r11 = DAG.getNode(AMDILISD::CMP, DL, OVT, 
+      DAG.getConstant(CondCCodeToCC(ISD::SETLT, MVT::i32), MVT::i32),
+      r1, DAG.getConstant(0, OVT));
+
+  // iadd r0, r0, r10
+  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+  // iadd r1, r1, r11
+  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);
+
+  // ixor r0, r0, r10  -- r0 is now |LHS|
+  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+
+  // ixor r1, r1, r11  -- r1 is now |RHS|
+  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);
+
+  // udiv r20, r0, r1
+  // BUGFIX: this was ISD::UREM, which combined with the multiply/subtract
+  // below computed r0 - (r0 % r1) * r1 instead of the remainder. The IL
+  // comments above document the intended udiv; the unsigned quotient is
+  // what must be multiplied back out.
+  SDValue r20 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);
+
+  // umul r20, r20, r1
+  r20 = DAG.getNode(AMDILISD::UMUL, DL, OVT, r20, r1);
+
+  // sub r0, r0, r20  -- unsigned remainder |LHS| % |RHS|
+  r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);
+
+  // iadd r0, r0, r10  -- reapply the dividend's sign
+  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);
+
+  // ixor DST, r0, r10
+  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
+  return DST;
+}
+
+SDValue
+AMDILTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const
+{
+  // 64-bit signed remainder is not custom-lowered yet; return the original
+  // node unchanged.
+  return SDValue(Op.getNode(), 0);
+}
diff --git a/lib/Target/AMDGPU/AMDILISelLowering.h b/lib/Target/AMDGPU/AMDILISelLowering.h
new file mode 100644
index 0000000..817aaf5
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILISelLowering.h
@@ -0,0 +1,203 @@
+//===-- AMDILISelLowering.h - AMDIL DAG Lowering Interface ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that AMDIL uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDIL_ISELLOWERING_H_
+#define AMDIL_ISELLOWERING_H_
+#include "AMDIL.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm
+{
+  namespace AMDILISD
+  {
+    // AMDIL-specific SelectionDAG node opcodes, numbered after the
+    // target-independent ISD opcodes.
+    enum
+    {
+      FIRST_NUMBER = ISD::BUILTIN_OP_END,
+      CMOVLOG,     // 32bit FP Conditional move logical instruction
+      MAD,         // 32bit Fused Multiply Add instruction
+      VBUILD,      // scalar to vector mov instruction
+      CALL,        // Function call based on a single integer
+      SELECT_CC,   // Select the correct conditional instruction
+      UMUL,        // 32bit unsigned multiplication
+      DIV_INF,      // Divide with infinity returned on zero divisor
+      CMP,         // Compare; the condition code is the first operand
+      IL_CC_I_GT,  // Integer condition codes; presumably map onto the IL
+      IL_CC_I_LT,  // compare conditions (gt/lt/ge/le/eq/ne) —
+      IL_CC_I_GE,  // NOTE(review): confirm against the instruction
+      IL_CC_I_LE,  // definitions.
+      IL_CC_I_EQ,
+      IL_CC_I_NE,
+      RET_FLAG,    // Return, optionally glued to preceding reg copies
+      BRANCH_COND, // Conditional branch: (chain, target, condition)
+      LAST_ISD_NUMBER
+    };
+  } // AMDILISD
+
+  class MachineBasicBlock;
+  class MachineInstr;
+  class DebugLoc;
+  class TargetInstrInfo;
+
+  /// AMDILTargetLowering - DAG-lowering implementation for the AMDIL
+  /// target: declares the custom lowering hooks and the per-opcode helper
+  /// routines defined in AMDILISelLowering.cpp.
+  class AMDILTargetLowering : public TargetLowering
+  {
+    public:
+      AMDILTargetLowering(TargetMachine &TM);
+
+      virtual SDValue
+        LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+      /// computeMaskedBitsForTargetNode - Determine which of the bits
+      /// specified in Mask are known to be either zero or one and return
+      /// them in the KnownZero/KnownOne bitsets.
+      virtual void
+        computeMaskedBitsForTargetNode(
+            const SDValue Op,
+            APInt &KnownZero,
+            APInt &KnownOne,
+            const SelectionDAG &DAG,
+            unsigned Depth = 0
+            ) const;
+
+      virtual bool 
+        getTgtMemIntrinsic(IntrinsicInfo &Info,
+                                  const CallInst &I, unsigned Intrinsic) const;
+      virtual const char*
+        getTargetNodeName(
+            unsigned Opcode
+            ) const;
+      // We want to mark f32/f64 floating point values as
+      // legal
+      bool
+        isFPImmLegal(const APFloat &Imm, EVT VT) const;
+      // We don't want to shrink f64/f32 constants because
+      // they both take up the same amount of space and
+      // we don't want to use a f2d instruction.
+      bool ShouldShrinkFPConstant(EVT VT) const;
+
+      /// getFunctionAlignment - Return the Log2 alignment of this
+      /// function.
+      virtual unsigned int
+        getFunctionAlignment(const Function *F) const;
+
+    private:
+      // Select the CCAssignFn for the given calling convention.
+      CCAssignFn*
+        CCAssignFnForNode(unsigned int CC) const;
+
+      SDValue LowerCallResult(SDValue Chain,
+          SDValue InFlag,
+          CallingConv::ID CallConv,
+          bool isVarArg,
+          const SmallVectorImpl<ISD::InputArg> &Ins,
+          DebugLoc dl,
+          SelectionDAG &DAG,
+          SmallVectorImpl<SDValue> &InVals) const;
+
+      SDValue LowerMemArgument(SDValue Chain,
+          CallingConv::ID CallConv,
+          const SmallVectorImpl<ISD::InputArg> &ArgInfo,
+          DebugLoc dl, SelectionDAG &DAG,
+          const CCValAssign &VA,  MachineFrameInfo *MFI,
+          unsigned i) const;
+
+      SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+          SDValue Arg,
+          DebugLoc dl, SelectionDAG &DAG,
+          const CCValAssign &VA,
+          ISD::ArgFlagsTy Flags) const;
+
+      // TargetLowering overrides for calls, formal arguments and returns.
+      virtual SDValue
+        LowerFormalArguments(SDValue Chain,
+            CallingConv::ID CallConv, bool isVarArg,
+            const SmallVectorImpl<ISD::InputArg> &Ins,
+            DebugLoc dl, SelectionDAG &DAG,
+            SmallVectorImpl<SDValue> &InVals) const;
+
+      virtual SDValue
+        LowerCall(CallLoweringInfo &CLI,
+        SmallVectorImpl<SDValue> &InVals) const;
+
+      virtual SDValue
+        LowerReturn(SDValue Chain,
+            CallingConv::ID CallConv, bool isVarArg,
+            const SmallVectorImpl<ISD::OutputArg> &Outs,
+            const SmallVectorImpl<SDValue> &OutVals,
+            DebugLoc dl, SelectionDAG &DAG) const;
+
+      // Per-opcode custom lowering helpers dispatched from LowerOperation.
+      SDValue
+        LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+
+      // Signed remainder/division, split out by scalar bit width.
+      SDValue
+        LowerSREM(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSREM8(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSREM16(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSREM32(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSREM64(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSDIV24(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSDIV32(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerSDIV64(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+
+      // Build a 32/64-bit integer EVT (scalar or vector) covering
+      // size*numEle bits.
+      EVT
+        genIntType(uint32_t size = 32, uint32_t numEle = 1) const;
+
+      SDValue
+        LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+
+      SDValue
+        LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+      SDValue
+        LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
+
+  }; // AMDILTargetLowering
+} // end namespace llvm
+
+#endif    // AMDIL_ISELLOWERING_H_
diff --git a/lib/Target/AMDGPU/AMDILInstrInfo.cpp b/lib/Target/AMDGPU/AMDILInstrInfo.cpp
new file mode 100644
index 0000000..c7259d8
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILInstrInfo.cpp
@@ -0,0 +1,509 @@
+//===- AMDILInstrInfo.cpp - AMDIL Instruction Information -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDILInstrInfo.h"
+#include "AMDIL.h"
+#include "AMDILISelLowering.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Instructions.h"
+
+#define GET_INSTRINFO_CTOR
+#include "AMDGPUGenInstrInfo.inc"
+
+using namespace llvm;
+
+// Construct the AMDIL instruction-info object.  The register-info member RI
+// is built from the target machine and this instruction info; a reference to
+// the TargetMachine is retained for later queries.
+AMDILInstrInfo::AMDILInstrInfo(TargetMachine &tm)
+  : AMDILGenInstrInfo(),
+    RI(tm, *this),
+    TM(tm) {
+}
+
+// Accessor for the target's register info (TargetInstrInfo convention: any
+// client holding instruction info can reach register info through it).
+const AMDILRegisterInfo &AMDILInstrInfo::getRegisterInfo() const {
+  return RI;
+}
+
+// The following TargetInstrInfo overrides are unimplemented stubs.  Each
+// returns the conservative "don't know / not applicable" value (false or 0),
+// which is always safe for these queries.
+bool AMDILInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
+                                           unsigned &SrcReg, unsigned &DstReg,
+                                           unsigned &SubIdx) const {
+// TODO: Implement this function
+  return false;
+}
+
+unsigned AMDILInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+                                             int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+
+unsigned AMDILInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+                                                   int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+
+bool AMDILInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+                                          const MachineMemOperand *&MMO,
+                                          int &FrameIndex) const {
+// TODO: Implement this function
+  return false;
+}
+unsigned AMDILInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
+                                              int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+unsigned AMDILInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
+                                                    int &FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+bool AMDILInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
+                                           const MachineMemOperand *&MMO,
+                                           int &FrameIndex) const {
+// TODO: Implement this function
+  return false;
+}
+
+// Stub: three-address conversion is not supported; returning NULL tells the
+// caller no conversion was performed.
+MachineInstr *
+AMDILInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
+                                      MachineBasicBlock::iterator &MBBI,
+                                      LiveVariables *LV) const {
+// TODO: Implement this function
+  return NULL;
+}
+// Advance 'iter' forward until it points at a branch instruction in MBB:
+// either an unconditional BRANCH or any BRANCH_COND variant (the
+// ExpandCaseToAllScalarTypes macro expands to one case label per scalar
+// type, falling through to the 'return true').  Returns true with 'iter' on
+// the branch, or false if the end of the block is reached without one.
+bool AMDILInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
+                                        MachineBasicBlock &MBB) const {
+  while (iter != MBB.end()) {
+    switch (iter->getOpcode()) {
+    default:
+      break;
+      ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+    case AMDIL::BRANCH:
+      return true;
+    };
+    ++iter;
+  }
+  return false;
+}
+
+// Analyze the branch structure at the end of MBB (TargetInstrInfo contract:
+// return false on successful analysis, true if the block cannot be
+// analyzed).
+//
+// NOTE(review): the unconditional 'return retVal;' immediately below makes
+// everything after it dead code — branch analysis is effectively disabled
+// and this function always reports "cannot analyze".  The dead code appears
+// to be a partial implementation kept for later; it also contains a
+// debugging viewCFG() call and erases branches even when AllowModify is
+// false, so it should be reviewed before being re-enabled.
+bool AMDILInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+                                   MachineBasicBlock *&TBB,
+                                   MachineBasicBlock *&FBB,
+                                   SmallVectorImpl<MachineOperand> &Cond,
+                                   bool AllowModify) const {
+  bool retVal = true;
+  return retVal;
+  MachineBasicBlock::iterator iter = MBB.begin();
+  if (!getNextBranchInstr(iter, MBB)) {
+    // No branch at all: fall-through block.
+    retVal = false;
+  } else {
+    MachineInstr *firstBranch = iter;
+    if (!getNextBranchInstr(++iter, MBB)) {
+      // Exactly one branch in the block.
+      if (firstBranch->getOpcode() == AMDIL::BRANCH) {
+        TBB = firstBranch->getOperand(0).getMBB();
+        firstBranch->eraseFromParent();
+        retVal = false;
+      } else {
+        // Single conditional branch; derive FBB from the successor list.
+        TBB = firstBranch->getOperand(0).getMBB();
+        FBB = *(++MBB.succ_begin());
+        if (FBB == TBB) {
+          FBB = *(MBB.succ_begin());
+        }
+        Cond.push_back(firstBranch->getOperand(1));
+        retVal = false;
+      }
+    } else {
+      MachineInstr *secondBranch = iter;
+      if (!getNextBranchInstr(++iter, MBB)) {
+        // Conditional branch followed by an unconditional branch.
+        if (secondBranch->getOpcode() == AMDIL::BRANCH) {
+          TBB = firstBranch->getOperand(0).getMBB();
+          Cond.push_back(firstBranch->getOperand(1));
+          FBB = secondBranch->getOperand(0).getMBB();
+          secondBranch->eraseFromParent();
+          retVal = false;
+        } else {
+          assert(0 && "Should not have two consecutive conditional branches");
+        }
+      } else {
+        // Three or more branches: malformed block; dump the CFG for debug.
+        MBB.getParent()->viewCFG();
+        assert(0 && "Should not have three branch instructions in"
+               " a single basic block");
+        retVal = false;
+      }
+    }
+  }
+  return retVal;
+}
+
+// Select the conditional-branch opcode matching the register class of the
+// condition operand: i32 conditions use BRANCH_COND_i32, f32 conditions use
+// BRANCH_COND_f32.  Unknown register classes deliberately fall through to
+// the i32 case (see FIXME).
+unsigned int AMDILInstrInfo::getBranchInstr(const MachineOperand &op) const {
+  const MachineInstr *MI = op.getParent();
+  
+  switch (MI->getDesc().OpInfo->RegClass) {
+  default: // FIXME: fallthrough??
+  case AMDIL::GPRI32RegClassID: return AMDIL::BRANCH_COND_i32;
+  case AMDIL::GPRF32RegClassID: return AMDIL::BRANCH_COND_f32;
+  };
+}
+
+// Insert branch instructions at the end of MBB and return the number of
+// instructions inserted (TargetInstrInfo contract).
+//
+// FBB == 0: insert a single branch to TBB — unconditional if Cond is empty,
+// otherwise a conditional branch whose opcode is derived from the condition
+// operand's register class.  Returns 1.
+//
+// FBB != 0: insert a conditional branch to TBB followed by an unconditional
+// branch to FBB.  Returns 2.  (The previous code built both instructions but
+// then hit 'assert(0)' and returned 0 — a no-op assert in release builds,
+// so callers were told nothing was inserted; it also dumped every condition
+// operand to stderr on every call, which has been removed.)
+unsigned int
+AMDILInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+                             MachineBasicBlock *TBB,
+                             MachineBasicBlock *FBB,
+                             const SmallVectorImpl<MachineOperand> &Cond,
+                             DebugLoc DL) const
+{
+  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+  if (FBB == 0) {
+    if (Cond.empty()) {
+      BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(TBB);
+    } else {
+      BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+        .addMBB(TBB).addReg(Cond[0].getReg());
+    }
+    return 1;
+  }
+  // Two-way branch: conditional to TBB, then unconditional to FBB.
+  BuildMI(&MBB, DL, get(getBranchInstr(Cond[0])))
+    .addMBB(TBB).addReg(Cond[0].getReg());
+  BuildMI(&MBB, DL, get(AMDIL::BRANCH)).addMBB(FBB);
+  return 2;
+}
+
+// Remove up to two branch instructions from the end of MBB and return how
+// many were removed (0, 1, or 2).  The first pass erases a trailing BRANCH
+// or BRANCH_COND; the second pass looks at the new last instruction and
+// erases a preceding BRANCH_COND if present (the erase in the second switch
+// is reached only through the ExpandCaseToAllScalarTypes case labels, since
+// 'default' returns early — see FIXME).
+unsigned int AMDILInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+  MachineBasicBlock::iterator I = MBB.end();
+  if (I == MBB.begin()) {
+    return 0;
+  }
+  --I;
+  switch (I->getOpcode()) {
+  default:
+    return 0;
+    ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+  case AMDIL::BRANCH:
+    I->eraseFromParent();
+    break;
+  }
+  I = MBB.end();
+  
+  if (I == MBB.begin()) {
+    return 1;
+  }
+  --I;
+  switch (I->getOpcode()) {
+    // FIXME: only one case??
+  default:
+    return 1;
+    ExpandCaseToAllScalarTypes(AMDIL::BRANCH_COND);
+    I->eraseFromParent();
+    break;
+  }
+  return 2;
+}
+
+// Walk backwards from the end of MBB, skipping trailing flow-control
+// closers (ENDLOOP / ENDIF / ELSE), and return an iterator to the first
+// position after the last non-flow-control instruction (or MBB->end() /
+// MBB->begin() at the boundaries).
+// NOTE(review): 'while (--tmp)' relies on the iterator converting to a
+// boolean — legal only with the old ilist iterator's implicit pointer
+// conversion; confirm against the LLVM version this targets.
+MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
+  MachineBasicBlock::iterator tmp = MBB->end();
+  if (!MBB->size()) {
+    return MBB->end();
+  }
+  while (--tmp) {
+    if (tmp->getOpcode() == AMDIL::ENDLOOP
+        || tmp->getOpcode() == AMDIL::ENDIF
+        || tmp->getOpcode() == AMDIL::ELSE) {
+      if (tmp == MBB->begin()) {
+        return tmp;
+      } else {
+        continue;
+      }
+    }  else {
+      return ++tmp;
+    }
+  }
+  return MBB->end();
+}
+
+// Spill SrcReg to stack slot FrameIndex by emitting a private-memory store
+// immediately before MI.  Only the 32-bit float and integer register
+// classes are handled; any other class leaves Opc == 0 (TODO: extend or
+// assert).  The DebugLoc is taken from MI when MI is a real instruction.
+void
+AMDILInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator MI,
+                                    unsigned SrcReg, bool isKill,
+                                    int FrameIndex,
+                                    const TargetRegisterClass *RC,
+                                    const TargetRegisterInfo *TRI) const {
+  unsigned int Opc = 0;
+  MachineFunction &MF = *(MBB.getParent());
+  MachineFrameInfo &MFI = *MF.getFrameInfo();
+  DebugLoc DL;
+  switch (RC->getID()) {
+  case AMDIL::GPRF32RegClassID:
+    Opc = AMDIL::PRIVATESTORE_f32;
+    break;
+  case AMDIL::GPRI32RegClassID:
+    Opc = AMDIL::PRIVATESTORE_i32;
+    break;
+  }
+  if (MI != MBB.end()) {
+    DL = MI->getDebugLoc();
+  }
+  // This instruction writes memory, so the memory operand must carry the
+  // MOStore flag (the previous code incorrectly used MOLoad, which would
+  // mislead alias analysis and scheduling).
+  MachineMemOperand *MMO =
+    new MachineMemOperand(
+        MachinePointerInfo::getFixedStack(FrameIndex),
+                          MachineMemOperand::MOStore,
+                          MFI.getObjectSize(FrameIndex),
+                          MFI.getObjectAlignment(FrameIndex));
+  BuildMI(MBB, MI, DL, get(Opc))
+    .addReg(SrcReg, getKillRegState(isKill))
+    .addFrameIndex(FrameIndex)
+    .addMemOperand(MMO)
+    .addImm(0);
+}
+
+// Reload DestReg from stack slot FrameIndex by emitting a private-memory
+// load immediately before MI.  Only the 32-bit float and integer register
+// classes are handled; any other class leaves Opc == 0 (TODO: extend or
+// assert).  The memory operand is correctly flagged MOLoad here.
+void
+AMDILInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator MI,
+                                     unsigned DestReg, int FrameIndex,
+                                     const TargetRegisterClass *RC,
+                                     const TargetRegisterInfo *TRI) const {
+  unsigned int Opc = 0;
+  MachineFunction &MF = *(MBB.getParent());
+  MachineFrameInfo &MFI = *MF.getFrameInfo();
+  DebugLoc DL;
+  switch (RC->getID()) {
+  case AMDIL::GPRF32RegClassID:
+    Opc = AMDIL::PRIVATELOAD_f32;
+    break;
+  case AMDIL::GPRI32RegClassID:
+    Opc = AMDIL::PRIVATELOAD_i32;
+    break;
+  }
+
+  MachineMemOperand *MMO =
+    new MachineMemOperand(
+        MachinePointerInfo::getFixedStack(FrameIndex),
+                          MachineMemOperand::MOLoad,
+                          MFI.getObjectSize(FrameIndex),
+                          MFI.getObjectAlignment(FrameIndex));
+  // Use the debug location of the insertion point when it is a real
+  // instruction.
+  if (MI != MBB.end()) {
+    DL = MI->getDebugLoc();
+  }
+  BuildMI(MBB, MI, DL, get(Opc))
+    .addReg(DestReg, RegState::Define)
+    .addFrameIndex(FrameIndex)
+    .addMemOperand(MMO)
+    .addImm(0);
+}
+// Memory-operand folding/unfolding is not implemented.  Each stub returns
+// the conservative value (0 / false) meaning "no folding performed /
+// possible", which is always safe for these TargetInstrInfo hooks.
+MachineInstr *
+AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr *MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      int FrameIndex) const {
+// TODO: Implement this function
+  return 0;
+}
+MachineInstr*
+AMDILInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr *MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      MachineInstr *LoadMI) const {
+  // TODO: Implement this function
+  return 0;
+}
+bool
+AMDILInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                     const SmallVectorImpl<unsigned> &Ops) const
+{
+  // TODO: Implement this function
+  return false;
+}
+bool
+AMDILInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+                                 unsigned Reg, bool UnfoldLoad,
+                                 bool UnfoldStore,
+                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
+  // TODO: Implement this function
+  return false;
+}
+
+bool
+AMDILInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                                    SmallVectorImpl<SDNode*> &NewNodes) const {
+  // TODO: Implement this function
+  return false;
+}
+
+unsigned
+AMDILInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
+                                           bool UnfoldLoad, bool UnfoldStore,
+                                           unsigned *LoadRegIndex) const {
+  // TODO: Implement this function
+  return 0;
+}
+
+// Scheduler hint: cluster two loads when fewer than 16 loads are already in
+// the run and their offsets are within 16 bytes of each other.  Requires
+// Offset2 > Offset1 (asserted).
+bool AMDILInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                             int64_t Offset1, int64_t Offset2,
+                                             unsigned NumLoads) const {
+  assert(Offset2 > Offset1
+         && "Second offset should be larger than first offset!");
+  // If we have less than 16 loads in a row, and the offsets are within 16,
+  // then schedule together.
+  // TODO: Make the loads schedule near if it fits in a cacheline
+  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
+}
+
+// Predication support is unimplemented.  ReverseBranchCondition returns
+// true ("cannot reverse"); the remaining predicates report "not predicated
+// / nothing defined", and isPredicable defers to the instruction
+// description.  All of these are conservative, safe defaults.
+bool
+AMDILInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
+  const {
+  // TODO: Implement this function
+  return true;
+}
+// Stub: no NOP instruction is emitted.
+void AMDILInstrInfo::insertNoop(MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const {
+  // TODO: Implement this function
+}
+
+bool AMDILInstrInfo::isPredicated(const MachineInstr *MI) const {
+  // TODO: Implement this function
+  return false;
+}
+bool
+AMDILInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+                                  const SmallVectorImpl<MachineOperand> &Pred2)
+  const {
+  // TODO: Implement this function
+  return false;
+}
+
+bool AMDILInstrInfo::DefinesPredicate(MachineInstr *MI,
+                                      std::vector<MachineOperand> &Pred) const {
+  // TODO: Implement this function
+  return false;
+}
+
+bool AMDILInstrInfo::isPredicable(MachineInstr *MI) const {
+  // TODO: Implement this function
+  return MI->getDesc().isPredicable();
+}
+
+bool
+AMDILInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+  // TODO: Implement this function
+  return true;
+}
+
+// Opcode classification by substring match on the generated opcode name.
+// isLoadInst explicitly excludes LOADCONST opcodes (whose names also
+// contain "LOAD") before matching "LOAD".
+bool AMDILInstrInfo::isLoadInst(MachineInstr *MI) const {
+  if (strstr(getName(MI->getOpcode()), "LOADCONST")) {
+    return false;
+  }
+  return strstr(getName(MI->getOpcode()), "LOAD");
+}
+
+// Software sign-extending loads are never used on this target.
+bool AMDILInstrInfo::isSWSExtLoadInst(MachineInstr *MI) const
+{
+  return false;
+}
+
+bool AMDILInstrInfo::isExtLoadInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "EXTLOAD");
+}
+
+bool AMDILInstrInfo::isSExtLoadInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "SEXTLOAD");
+}
+
+bool AMDILInstrInfo::isAExtLoadInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "AEXTLOAD");
+}
+
+bool AMDILInstrInfo::isZExtLoadInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "ZEXTLOAD");
+}
+
+// More opcode classifiers, all keyed on substrings of the generated opcode
+// name (see the naming convention in AMDILInstructions.td).  isVolatileInst
+// is the exception: it inspects the machine memory operands directly.
+bool AMDILInstrInfo::isStoreInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "STORE");
+}
+
+bool AMDILInstrInfo::isTruncStoreInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "TRUNCSTORE");
+}
+
+bool AMDILInstrInfo::isAtomicInst(MachineInstr *MI) const {
+  return strstr(getName(MI->getOpcode()), "ATOM");
+}
+
+// An instruction is volatile if any of its memory operands is volatile.
+bool AMDILInstrInfo::isVolatileInst(MachineInstr *MI) const {
+  if (!MI->memoperands_empty()) {
+    for (MachineInstr::mmo_iterator mob = MI->memoperands_begin(),
+        moe = MI->memoperands_end(); mob != moe; ++mob) {
+      // If there is a volatile mem operand, this is a volatile instruction.
+      if ((*mob)->isVolatile()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+bool AMDILInstrInfo::isGlobalInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "GLOBAL");
+}
+bool AMDILInstrInfo::isPrivateInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "PRIVATE");
+}
+bool AMDILInstrInfo::isConstantInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "CONSTANT")
+    || strstr(getName(MI->getOpcode()), "CPOOL");
+}
+bool AMDILInstrInfo::isRegionInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "REGION");
+}
+bool AMDILInstrInfo::isLocalInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "LOCAL");
+}
+bool AMDILInstrInfo::isImageInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "IMAGE");
+}
+bool AMDILInstrInfo::isAppendInst(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "APPEND");
+}
+bool AMDILInstrInfo::isRegionAtomic(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "ATOM_R");
+}
+bool AMDILInstrInfo::isLocalAtomic(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "ATOM_L");
+}
+// Arena atomics (ATOM_A) count as global atomics.
+bool AMDILInstrInfo::isGlobalAtomic(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "ATOM_G")
+    || isArenaAtomic(MI);
+}
+bool AMDILInstrInfo::isArenaAtomic(llvm::MachineInstr *MI) const
+{
+  return strstr(getName(MI->getOpcode()), "ATOM_A");
+}
diff --git a/lib/Target/AMDGPU/AMDILInstrInfo.h b/lib/Target/AMDGPU/AMDILInstrInfo.h
new file mode 100644
index 0000000..bff729b
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILInstrInfo.h
@@ -0,0 +1,161 @@
+//===- AMDILInstrInfo.h - AMDIL Instruction Information ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDILINSTRUCTIONINFO_H_
+#define AMDILINSTRUCTIONINFO_H_
+
+#include "AMDILRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "AMDGPUGenInstrInfo.inc"
+
+namespace llvm {
+  // AMDIL - This namespace holds all of the target specific flags that
+  // instruction info tracks.
+  //
+  //class AMDILTargetMachine;
+// Target-specific instruction information for AMDIL.  Abstract: the pure
+// virtual members at the bottom (copyPhysReg, getMovImmInstr, getIEQOpcode,
+// isMov) must be provided by a concrete subtarget subclass.
+class AMDILInstrInfo : public AMDILGenInstrInfo {
+private:
+  const AMDILRegisterInfo RI;
+  TargetMachine &TM;
+  // Scan forward from 'iter' to the next BRANCH/BRANCH_COND in MBB.
+  bool getNextBranchInstr(MachineBasicBlock::iterator &iter,
+                          MachineBasicBlock &MBB) const;
+  // Pick the BRANCH_COND opcode matching the condition operand's class.
+  unsigned int getBranchInstr(const MachineOperand &op) const;
+public:
+  explicit AMDILInstrInfo(TargetMachine &tm);
+
+  // getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
+  // such, whenever a client has an instance of instruction info, it should
+  // always be able to get register info as well (through this method).
+  const AMDILRegisterInfo &getRegisterInfo() const;
+
+  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
+                             unsigned &DstReg, unsigned &SubIdx) const;
+
+  // Stack-slot load/store recognition hooks (currently unimplemented stubs
+  // in the .cpp; they return conservative values).
+  unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+                                     int &FrameIndex) const;
+  bool hasLoadFromStackSlot(const MachineInstr *MI,
+                            const MachineMemOperand *&MMO,
+                            int &FrameIndex) const;
+  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
+                                      int &FrameIndex) const;
+  bool hasStoreFromStackSlot(const MachineInstr *MI,
+                             const MachineMemOperand *&MMO,
+                             int &FrameIndex) const;
+
+  MachineInstr *
+  convertToThreeAddress(MachineFunction::iterator &MFI,
+                        MachineBasicBlock::iterator &MBBI,
+                        LiveVariables *LV) const;
+
+  // Branch analysis / insertion / removal (TargetInstrInfo interface).
+  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify) const;
+
+  unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+
+  unsigned
+  InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+               MachineBasicBlock *FBB,
+               const SmallVectorImpl<MachineOperand> &Cond,
+               DebugLoc DL) const;
+
+  // Pure virtual: register-to-register copies are subtarget specific.
+  virtual void copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const = 0;
+
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI,
+                           unsigned SrcReg, bool isKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const;
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            unsigned DestReg, int FrameIndex,
+                            const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const;
+
+protected:
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr *MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      int FrameIndex) const;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr *MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      MachineInstr *LoadMI) const;
+public:
+  bool canFoldMemoryOperand(const MachineInstr *MI,
+                            const SmallVectorImpl<unsigned> &Ops) const;
+  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+                           SmallVectorImpl<MachineInstr *> &NewMIs) const;
+  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                           SmallVectorImpl<SDNode *> &NewNodes) const;
+  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+                                      bool UnfoldLoad, bool UnfoldStore,
+                                      unsigned *LoadRegIndex = 0) const;
+  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                               int64_t Offset1, int64_t Offset2,
+                               unsigned NumLoads) const;
+
+  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+  void insertNoop(MachineBasicBlock &MBB,
+                  MachineBasicBlock::iterator MI) const;
+  bool isPredicated(const MachineInstr *MI) const;
+  bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+                         const SmallVectorImpl<MachineOperand> &Pred2) const;
+  bool DefinesPredicate(MachineInstr *MI,
+                        std::vector<MachineOperand> &Pred) const;
+  bool isPredicable(MachineInstr *MI) const;
+  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+
+  // Helper functions that check the opcode for status information
+  // (implemented in the .cpp via substring matches on the opcode name).
+  bool isLoadInst(llvm::MachineInstr *MI) const;
+  bool isExtLoadInst(llvm::MachineInstr *MI) const;
+  bool isSWSExtLoadInst(llvm::MachineInstr *MI) const;
+  bool isSExtLoadInst(llvm::MachineInstr *MI) const;
+  bool isZExtLoadInst(llvm::MachineInstr *MI) const;
+  bool isAExtLoadInst(llvm::MachineInstr *MI) const;
+  bool isStoreInst(llvm::MachineInstr *MI) const;
+  bool isTruncStoreInst(llvm::MachineInstr *MI) const;
+  bool isAtomicInst(llvm::MachineInstr *MI) const;
+  bool isVolatileInst(llvm::MachineInstr *MI) const;
+  bool isGlobalInst(llvm::MachineInstr *MI) const;
+  bool isPrivateInst(llvm::MachineInstr *MI) const;
+  bool isConstantInst(llvm::MachineInstr *MI) const;
+  bool isRegionInst(llvm::MachineInstr *MI) const;
+  bool isLocalInst(llvm::MachineInstr *MI) const;
+  bool isImageInst(llvm::MachineInstr *MI) const;
+  bool isAppendInst(llvm::MachineInstr *MI) const;
+  bool isRegionAtomic(llvm::MachineInstr *MI) const;
+  bool isLocalAtomic(llvm::MachineInstr *MI) const;
+  bool isGlobalAtomic(llvm::MachineInstr *MI) const;
+  bool isArenaAtomic(llvm::MachineInstr *MI) const;
+
+  // Pure virtual: emit a move-immediate of Imm into DstReg.
+  virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
+                                        int64_t Imm) const = 0;
+
+  // Pure virtual: opcode for integer equality compare.
+  virtual unsigned getIEQOpcode() const = 0;
+
+  // Pure virtual: is Opcode a register move on this subtarget?
+  virtual bool isMov(unsigned Opcode) const = 0;
+};
+
+}
+
+#endif // AMDILINSTRINFO_H_
diff --git a/lib/Target/AMDGPU/AMDILInstrInfo.td b/lib/Target/AMDGPU/AMDILInstrInfo.td
new file mode 100644
index 0000000..969618e
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILInstrInfo.td
@@ -0,0 +1,108 @@
+//===------------ AMDILInstrInfo.td - AMDIL Target ------*-tablegen-*------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file describes the AMDIL instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+// AMDIL Instruction Predicate Definitions
+// Predicate that is set to true if the hardware supports double precision
+// divide
+// Each predicate below concatenates string fragments into a single C++
+// expression evaluated against the Subtarget at instruction-selection time.
+def HasHWDDiv                 : Predicate<"Subtarget.device()"
+                           "->getGeneration() > AMDILDeviceInfo::HD4XXX && "
+              "Subtarget.device()->usesHardware(AMDILDeviceInfo::DoubleOps)">;
+
+// Predicate that is set to true if the hardware supports double, but not double
+// precision divide in hardware
+def HasSWDDiv             : Predicate<"Subtarget.device()"
+                           "->getGeneration() == AMDILDeviceInfo::HD4XXX &&"
+              "Subtarget.device()->usesHardware(AMDILDeviceInfo::DoubleOps)">;
+
+// Predicate that is set to true if the hardware support 24bit signed
+// math ops. Otherwise a software expansion to 32bit math ops is used instead.
+// NOTE(review): this requires generation strictly greater than HD5XXX —
+// confirm that excluding HD5XXX (Evergreen) itself is intentional.
+def HasHWSign24Bit          : Predicate<"Subtarget.device()"
+                            "->getGeneration() > AMDILDeviceInfo::HD5XXX">;
+
+// Predicate that is set to true if 64bit operations are supported or not
+def HasHW64Bit              : Predicate<"Subtarget.device()"
+                            "->usesHardware(AMDILDeviceInfo::LongOps)">;
+def HasSW64Bit              : Predicate<"Subtarget.device()"
+                            "->usesSoftware(AMDILDeviceInfo::LongOps)">;
+
+// Predicate that is set to true if the timer register is supported
+def HasTmrRegister          : Predicate<"Subtarget.device()"
+                            "->isSupported(AMDILDeviceInfo::TmrReg)">;
+// Predicate that is true if we are at least evergreen series
+def HasDeviceIDInst         : Predicate<"Subtarget.device()"
+                            "->getGeneration() >= AMDILDeviceInfo::HD5XXX">;
+
+// Predicate that is true if we have region address space.
+def hasRegionAS             : Predicate<"Subtarget.device()"
+                            "->usesHardware(AMDILDeviceInfo::RegionMem)">;
+
+// Predicate that is false if we don't have region address space.
+def noRegionAS             : Predicate<"!Subtarget.device()"
+                            "->isSupported(AMDILDeviceInfo::RegionMem)">;
+
+
+// Predicate that is set to true if 64bit Mul is supported in the IL or not
+def HasHW64Mul              : Predicate<"Subtarget.calVersion()" 
+                                          ">= CAL_VERSION_SC_139"
+                                          "&& Subtarget.device()"
+                                          "->getGeneration() >="
+                                          "AMDILDeviceInfo::HD5XXX">;
+def HasSW64Mul              : Predicate<"Subtarget.calVersion()" 
+                                          "< CAL_VERSION_SC_139">;
+// Predicate that is set to true if 64bit Div/Mod is supported in the IL or not
+def HasHW64DivMod           : Predicate<"Subtarget.device()"
+                            "->usesHardware(AMDILDeviceInfo::HW64BitDivMod)">;
+def HasSW64DivMod           : Predicate<"Subtarget.device()"
+                            "->usesSoftware(AMDILDeviceInfo::HW64BitDivMod)">;
+
+// Predicate that is set to true if 64bit pointer are used.
+def Has64BitPtr             : Predicate<"Subtarget.is64bit()">;
+def Has32BitPtr             : Predicate<"!Subtarget.is64bit()">;
+//===--------------------------------------------------------------------===//
+// Custom Operands
+//===--------------------------------------------------------------------===//
+include "AMDILOperands.td"
+
+//===--------------------------------------------------------------------===//
+// Custom Selection DAG Type Profiles
+//===--------------------------------------------------------------------===//
+include "AMDILProfiles.td"
+
+//===--------------------------------------------------------------------===//
+// Custom Selection DAG Nodes
+//===--------------------------------------------------------------------===//
+include "AMDILNodes.td"
+
+//===--------------------------------------------------------------------===//
+// Custom Pattern DAG Nodes
+//===--------------------------------------------------------------------===//
+include "AMDILPatterns.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction format classes
+//===----------------------------------------------------------------------===//
+include "AMDILFormats.td"
+
+//===--------------------------------------------------------------------===//
+// Multiclass Instruction formats
+//===--------------------------------------------------------------------===//
+include "AMDILMultiClass.td"
+
+//===--------------------------------------------------------------------===//
+// Intrinsics support
+//===--------------------------------------------------------------------===//
+include "AMDILIntrinsics.td"
+
+//===--------------------------------------------------------------------===//
+// Instructions support
+//===--------------------------------------------------------------------===//
+include "AMDILInstructions.td"
diff --git a/lib/Target/AMDGPU/AMDILInstructions.td b/lib/Target/AMDGPU/AMDILInstructions.td
new file mode 100644
index 0000000..ff0e2c1
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILInstructions.td
@@ -0,0 +1,143 @@
+//===-- AMDILInstructions.td - AMDIL Instruction definitions --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+// Pseudo load/store definitions, selected only for 32-bit pointer targets.
+// Marked isCodeGenOnly: they never appear in emitted IL and are rewritten
+// before final emission.
+let Predicates = [Has32BitPtr] in {
+  let isCodeGenOnly=1 in {
+    //===----------------------------------------------------------------------===//
+    // Store Memory Operations
+    //===----------------------------------------------------------------------===//
+    defm GLOBALTRUNCSTORE   : GTRUNCSTORE<"!global trunc store">;
+    defm LOCALTRUNCSTORE    : LTRUNCSTORE<"!local trunc store">;
+    defm LOCALSTORE         : STORE<"!local store"          , local_store>;
+    defm PRIVATETRUNCSTORE  : PTRUNCSTORE<"!private trunc store">;
+    defm PRIVATESTORE       : STORE<"!private store"        , private_store>;
+    defm REGIONTRUNCSTORE   : RTRUNCSTORE<"!region trunc store">;
+    defm REGIONSTORE        : STORE<"!region hw store"      , region_store>;
+
+
+    //===---------------------------------------------------------------------===//
+    // Load Memory Operations
+    //===---------------------------------------------------------------------===//
+    defm GLOBALZEXTLOAD     : LOAD<"!global zext load"       , global_zext_load>;
+    defm GLOBALSEXTLOAD     : LOAD<"!global sext load"       , global_sext_load>;
+    defm GLOBALAEXTLOAD     : LOAD<"!global aext load"       , global_aext_load>;
+    defm PRIVATELOAD        : LOAD<"!private load"           , private_load>;
+    defm PRIVATEZEXTLOAD    : LOAD<"!private zext load"      , private_zext_load>;
+    defm PRIVATESEXTLOAD    : LOAD<"!private sext load"      , private_sext_load>;
+    defm PRIVATEAEXTLOAD    : LOAD<"!private aext load"      , private_aext_load>;
+    defm CPOOLLOAD          : LOAD<"!constant pool load"     , cp_load>;
+    defm CPOOLZEXTLOAD      : LOAD<"!constant pool zext load", cp_zext_load>;
+    defm CPOOLSEXTLOAD      : LOAD<"!constant pool sext load", cp_sext_load>;
+    defm CPOOLAEXTLOAD      : LOAD<"!constant aext pool load", cp_aext_load>;
+    defm CONSTANTLOAD       : LOAD<"!constant load"          , constant_load>;
+    defm CONSTANTZEXTLOAD   : LOAD<"!constant zext load"     , constant_zext_load>;
+    defm CONSTANTSEXTLOAD   : LOAD<"!constant sext load"     , constant_sext_load>;
+    defm CONSTANTAEXTLOAD   : LOAD<"!constant aext load"     , constant_aext_load>;
+    defm LOCALLOAD          : LOAD<"!local load"             , local_load>;
+    defm LOCALZEXTLOAD      : LOAD<"!local zext load"        , local_zext_load>;
+    defm LOCALSEXTLOAD      : LOAD<"!local sext load"        , local_sext_load>;
+    defm LOCALAEXTLOAD      : LOAD<"!local aext load"        , local_aext_load>;
+    defm REGIONLOAD         : LOAD<"!region load"            , region_load>;
+    defm REGIONZEXTLOAD     : LOAD<"!region zext load"       , region_zext_load>;
+    defm REGIONSEXTLOAD     : LOAD<"!region sext load"       , region_sext_load>;
+    defm REGIONAEXTLOAD     : LOAD<"!region aext load"       , region_aext_load>;
+  }
+}
+
+//===---------------------------------------------------------------------===//
+// Custom Inserter for Branches and returns, this eventually will be a
+// separate pass
+//===---------------------------------------------------------------------===//
+// Unconditional and conditional branch pseudo instructions; BRANCH matches
+// the generic (br bb) DAG node directly.
+let isTerminator = 1 in {
+  def BRANCH : ILFormat<IL_PSEUDO_INST, (outs), (ins brtarget:$target),
+      "; Pseudo unconditional branch instruction",
+      [(br bb:$target)]>;
+  defm BRANCH_COND : BranchConditional<IL_brcond>;
+}
+//===---------------------------------------------------------------------===//
+// return instructions
+//===---------------------------------------------------------------------===//
+// RETURN matches the IL_retflag DAG node; marked isBarrier so nothing is
+// scheduled past it.
+let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1 in {
+  def RETURN          : ILFormat<IL_OP_RET,(outs), (ins variable_ops),
+      IL_OP_RET.Text, [(IL_retflag)]>;
+}
+
+//===---------------------------------------------------------------------===//
+// Handle a function call
+//===---------------------------------------------------------------------===//
+// Function call: declares R1-R10 clobbered and R11-R20 read.
+// NOTE(review): these register lists look hard-coded — confirm they agree
+// with the convention in AMDILCallingConv.td.
+let isCall = 1,
+    Defs = [
+    R1, R2, R3, R4, R5, R6, R7, R8, R9, R10
+    ]
+    ,
+    Uses = [
+    R11, R12, R13, R14, R15, R16, R17, R18, R19, R20
+    ]
+    in {
+      def CALL : UnaryOpNoRet<IL_OP_CALL, (outs),
+      (ins calltarget:$dst, variable_ops), 
+      !strconcat(IL_OP_CALL.Text, " $dst"), []>;
+    }
+
+
+//===---------------------------------------------------------------------===//
+// Flow and Program control Instructions
+//===---------------------------------------------------------------------===//
+// Structured control-flow opcodes (if/else/endif, loops, switch/case) emitted
+// by the CFG structurizer; none of them have selection patterns.
+let isTerminator=1 in {
+  def SWITCH      : ILFormat<IL_OP_SWITCH, (outs), (ins GPRI32:$src),
+  !strconcat(IL_OP_SWITCH.Text, " $src"), []>;
+  def CASE        : ILFormat<IL_OP_CASE, (outs), (ins GPRI32:$src),
+      !strconcat(IL_OP_CASE.Text, " $src"), []>;
+  def BREAK       : ILFormat<IL_OP_BREAK, (outs), (ins),
+      IL_OP_BREAK.Text, []>;
+  def CONTINUE    : ILFormat<IL_OP_CONTINUE, (outs), (ins),
+      IL_OP_CONTINUE.Text, []>;
+  def DEFAULT     : ILFormat<IL_OP_DEFAULT, (outs), (ins),
+      IL_OP_DEFAULT.Text, []>;
+  def ELSE        : ILFormat<IL_OP_ELSE, (outs), (ins),
+      IL_OP_ELSE.Text, []>;
+  def ENDSWITCH   : ILFormat<IL_OP_ENDSWITCH, (outs), (ins),
+      IL_OP_ENDSWITCH.Text, []>;
+  def ENDMAIN     : ILFormat<IL_OP_ENDMAIN, (outs), (ins),
+      IL_OP_ENDMAIN.Text, []>;
+  def END         : ILFormat<IL_OP_END, (outs), (ins),
+      IL_OP_END.Text, []>;
+  def ENDFUNC     : ILFormat<IL_OP_ENDFUNC, (outs), (ins),
+      IL_OP_ENDFUNC.Text, []>;
+  def ENDIF       : ILFormat<IL_OP_ENDIF, (outs), (ins),
+      IL_OP_ENDIF.Text, []>;
+  def WHILELOOP   : ILFormat<IL_OP_WHILE, (outs), (ins),
+      IL_OP_WHILE.Text, []>;
+  def ENDLOOP     : ILFormat<IL_OP_ENDLOOP, (outs), (ins),
+      IL_OP_ENDLOOP.Text, []>;
+  def FUNC        : ILFormat<IL_OP_FUNC, (outs), (ins),
+      IL_OP_FUNC.Text, []>;
+  def RETDYN      : ILFormat<IL_OP_RET_DYN, (outs), (ins),
+      IL_OP_RET_DYN.Text, []>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm IF_LOGICALNZ  : BranchInstr<IL_OP_IF_LOGICALNZ>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm IF_LOGICALZ   : BranchInstr<IL_OP_IF_LOGICALZ>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm BREAK_LOGICALNZ : BranchInstr<IL_OP_BREAK_LOGICALNZ>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm BREAK_LOGICALZ : BranchInstr<IL_OP_BREAK_LOGICALZ>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm CONTINUE_LOGICALNZ : BranchInstr<IL_OP_CONTINUE_LOGICALNZ>;
+  // This opcode has custom swizzle pattern encoded in Swizzle Encoder
+  defm CONTINUE_LOGICALZ : BranchInstr<IL_OP_CONTINUE_LOGICALZ>;
+  defm IFC         : BranchInstr2<IL_OP_IFC>;
+  defm BREAKC      : BranchInstr2<IL_OP_BREAKC>;
+  defm CONTINUEC   : BranchInstr2<IL_OP_CONTINUEC>;
+}
+// TRAP lowers the generic (trap) node to an IL no-op; it exists so trap has
+// a selection pattern and acts as a scheduling barrier.
+let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
+  def TRAP : ILFormat<IL_OP_NOP, (outs), (ins),
+      IL_OP_NOP.Text, [(trap)]>;
+}
+
diff --git a/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp b/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
new file mode 100644
index 0000000..678e32e
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILIntrinsicInfo.cpp
@@ -0,0 +1,171 @@
+//===- AMDILIntrinsicInfo.cpp - AMDIL Intrinsic Information ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL Implementation of the IntrinsicInfo class.
+//
+//===-----------------------------------------------------------------------===//
+
+#include "AMDILIntrinsicInfo.h"
+#include "AMDIL.h"
+#include "AMDILSubtarget.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+
+using namespace llvm;
+
+#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
+
+// Construct intrinsic info bound to the given target machine; the machine
+// pointer is retained so isValidIntrinsic() can query the subtarget.
+AMDILIntrinsicInfo::AMDILIntrinsicInfo(TargetMachine *tm) 
+  : TargetIntrinsicInfo(), mTM(tm)
+{
+}
+
+// Return the name of the AMDIL intrinsic with the given ID, or an empty
+// string when the ID is in the generic LLVM intrinsic range.  Tys/numTys
+// are unused: AMDIL intrinsics are not overloaded.
+std::string 
+AMDILIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
+    unsigned int numTys) const 
+{
+  static const char* const names[] = {
+#define GET_INTRINSIC_NAME_TABLE
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_INTRINSIC_NAME_TABLE
+  };
+
+  //assert(!isOverloaded(IntrID)
+  //&& "AMDIL Intrinsics are not overloaded");
+  if (IntrID < Intrinsic::num_intrinsics) {
+    // Not a target intrinsic.  The previous 'return 0;' constructed a
+    // std::string from a null const char*, which is undefined behavior.
+    return std::string();
+  }
+  assert(IntrID < AMDGPUIntrinsic::num_AMDIL_intrinsics
+      && "Invalid intrinsic ID");
+
+  // Table is indexed from the first target-specific ID.
+  std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
+  return Result;
+}
+
+// Decide whether the builtin name (Len characters of Name) ends in a type
+// suffix (i32/u32/i64/u64/f32/f64/i16/u16/i8/u8) that should be stripped
+// before intrinsic lookup.  On a suffix match, Len is shortened in place to
+// exclude the trailing "_<type>".  Atomic builtins ("__atom...") return true
+// WITHOUT truncating so lookupName() still routes them through the
+// OpenCL 1.0 -> 1.1 rename.
+  static bool
+checkTruncation(const char *Name, unsigned int& Len)
+{
+  // Scan backwards to the last '_' in the name.
+  const char *ptr = Name + (Len - 1);
+  while(ptr != Name && *ptr != '_') {
+    --ptr;
+  }
+  // We don't want to truncate on atomic instructions
+  // but we do want to enter the check Truncation
+  // section so that we can translate the atomic
+  // instructions if we need to.
+  if (!strncmp(Name, "__atom", 6)) {
+    return true;
+  }
+  if (strstr(ptr, "i32")
+      || strstr(ptr, "u32")
+      || strstr(ptr, "i64")
+      || strstr(ptr, "u64")
+      || strstr(ptr, "f32")
+      || strstr(ptr, "f64")
+      || strstr(ptr, "i16")
+      || strstr(ptr, "u16")
+      || strstr(ptr, "i8")
+      || strstr(ptr, "u8")) {
+    Len = (unsigned int)(ptr - Name);
+    return true;
+  }
+  return false;
+}
+
+// We don't want to support both the OpenCL 1.0 atomics
+// and the 1.1 atomics with different names, so we translate
+// the 1.0 atomics to the 1.1 naming here if needed.
+// Returns a heap-allocated, NUL-terminated copy of the (possibly renamed)
+// first Len characters of Name; the caller owns the buffer and must release
+// it with delete[].
+static char*
+atomTranslateIfNeeded(const char *Name, unsigned int Len) 
+{
+  char *buffer = NULL;
+  if (strncmp(Name, "__atom_", 7))  {
+    // If we are not starting with __atom_, then
+    // go ahead and continue on with the allocation.
+    buffer = new char[Len + 1];
+    memcpy(buffer, Name, Len);
+  } else {
+    // "__atom_" (7 chars) becomes "__atomic_" (9 chars): the copy grows by
+    // 2 bytes, plus 1 for the terminator, hence Len + 3.
+    buffer = new char[Len + 3];
+    memcpy(buffer, "__atomic_", 9);
+    memcpy(buffer + 9, Name + 7, Len - 7);
+    Len += 2;
+  }
+  buffer[Len] = '\0';
+  return buffer;
+}
+
+// Map a builtin function name (Len characters of Name) to an AMDGPU
+// intrinsic ID; returns 0 when the name is not a recognized intrinsic or
+// the intrinsic is not valid for the current subtarget.
+unsigned int
+AMDILIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const 
+{
+#define GET_FUNCTION_RECOGNIZER
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_FUNCTION_RECOGNIZER
+  AMDGPUIntrinsic::ID IntrinsicID
+    = (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic;
+  if (checkTruncation(Name, Len)) {
+    // Atomic names may need the OpenCL 1.0 -> 1.1 rename before lookup;
+    // the returned buffer is owned here and freed after the lookup.
+    char *buffer = atomTranslateIfNeeded(Name, Len);
+    IntrinsicID = getIntrinsicForGCCBuiltin("AMDIL", buffer);
+    delete [] buffer;
+  } else {
+    IntrinsicID = getIntrinsicForGCCBuiltin("AMDIL", Name);
+  }
+  if (!isValidIntrinsic(IntrinsicID)) {
+    return 0;
+  }
+  if (IntrinsicID != (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic) {
+    return IntrinsicID;
+  }
+  return 0;
+}
+
+// Returns true if the intrinsic with the given ID is overloaded.  The entire
+// body, including the return statement, is supplied by the tablegen-generated
+// overload table.
+bool 
+AMDILIntrinsicInfo::isOverloaded(unsigned id) const 
+{
+  // Overload Table
+#define GET_INTRINSIC_OVERLOAD_TABLE
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_INTRINSIC_OVERLOAD_TABLE
+}
+
+/// This defines the "getAttributes(ID id)" method.
+#define GET_INTRINSIC_ATTRIBUTES
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_INTRINSIC_ATTRIBUTES
+
+// Materializing declarations for AMDIL intrinsics is not supported yet.
+// Trap in debug builds; the explicit return keeps release (NDEBUG) builds
+// from falling off the end of a non-void function, which is undefined
+// behavior.
+Function*
+AMDILIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
+    Type **Tys,
+    unsigned numTys) const 
+{
+  assert(!"Not implemented");
+  return NULL;
+}
+
+/// Because the code generator has to support different SC versions, 
+/// this function is added to check that the intrinsic being used
+/// is actually valid. In the case where it isn't valid, the 
+/// function call is not translated into an intrinsic and the
+/// fall back software emulated path should pick up the result.
+// Check whether the given intrinsic is usable with the current subtarget.
+// The float<->int/half conversion intrinsics require CAL SC version 139 or
+// newer; everything else is accepted.
+bool
+AMDILIntrinsicInfo::isValidIntrinsic(unsigned int IntrID) const
+{
+  const AMDILSubtarget &STM = mTM->getSubtarget<AMDILSubtarget>();
+  switch (IntrID) {
+    default:
+      return true;
+    case AMDGPUIntrinsic::AMDIL_convert_f32_i32_rpi:
+    case AMDGPUIntrinsic::AMDIL_convert_f32_i32_flr:
+    case AMDGPUIntrinsic::AMDIL_convert_f32_f16_near:
+    case AMDGPUIntrinsic::AMDIL_convert_f32_f16_neg_inf:
+    case AMDGPUIntrinsic::AMDIL_convert_f32_f16_plus_inf:
+        return STM.calVersion() >= CAL_VERSION_SC_139;
+  }  // stray ';' after the switch brace removed (empty statement)
+}
diff --git a/lib/Target/AMDGPU/AMDILIntrinsicInfo.h b/lib/Target/AMDGPU/AMDILIntrinsicInfo.h
new file mode 100644
index 0000000..072c265
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILIntrinsicInfo.h
@@ -0,0 +1,49 @@
+//===- AMDILIntrinsicInfo.h - AMDIL Intrinsic Information ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+//   Interface for the AMDIL Implementation of the Intrinsic Info class.
+//
+//===-----------------------------------------------------------------------===//
+// Guard renamed: identifiers beginning with an underscore followed by an
+// uppercase letter are reserved to the implementation in C++.
+#ifndef AMDIL_INTRINSICS_H
+#define AMDIL_INTRINSICS_H
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
+
+namespace llvm {
+  class TargetMachine;
+  namespace AMDGPUIntrinsic {
+    // AMDGPU intrinsic IDs begin immediately after the generic LLVM
+    // intrinsics; the enumerators themselves are tablegen-generated.
+    enum ID {
+      last_non_AMDIL_intrinsic = Intrinsic::num_intrinsics - 1,
+#define GET_INTRINSIC_ENUM_VALUES
+#include "AMDGPUGenIntrinsics.inc"
+#undef GET_INTRINSIC_ENUM_VALUES
+      , num_AMDIL_intrinsics
+    };
+
+  }
+
+  // AMDIL implementation of TargetIntrinsicInfo: resolves builtin names to
+  // AMDGPU intrinsic IDs and validates them against the subtarget.
+  class AMDILIntrinsicInfo : public TargetIntrinsicInfo {
+    TargetMachine *mTM;  // Queried for the subtarget in isValidIntrinsic().
+    public:
+      AMDILIntrinsicInfo(TargetMachine *tm);
+      // Name of the target intrinsic with the given ID.
+      std::string getName(unsigned int IntrId, Type **Tys = 0,
+          unsigned int numTys = 0) const;
+      // Builtin-name -> intrinsic ID; 0 when not recognized/valid.
+      unsigned int lookupName(const char *Name, unsigned int Len) const;
+      bool isOverloaded(unsigned int IID) const;
+      // Not implemented yet; asserts when called.
+      Function *getDeclaration(Module *M, unsigned int ID,
+          Type **Tys = 0,
+          unsigned int numTys = 0) const;
+      // True when the current CAL version supports the intrinsic.
+      bool isValidIntrinsic(unsigned int) const;
+  }; // AMDILIntrinsicInfo
+}
+
+#endif // AMDIL_INTRINSICS_H
+
diff --git a/lib/Target/AMDGPU/AMDILIntrinsics.td b/lib/Target/AMDGPU/AMDILIntrinsics.td
new file mode 100644
index 0000000..ef361f4
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILIntrinsics.td
@@ -0,0 +1,705 @@
+//===- AMDILIntrinsics.td - Defines AMDIL Intrinsics -*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file defines all of the amdil-specific intrinsics
+//
+//===---------------------------------------------------------------===//
+
+let TargetPrefix = "AMDIL", isTarget = 1 in {
+//------------- Synchronization Functions - OpenCL 6.11.9 --------------------//
+  def int_AMDIL_fence   : GCCBuiltin<"mem_fence">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_global   : GCCBuiltin<"mem_fence_global">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_local   : GCCBuiltin<"mem_fence_local">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_region   : GCCBuiltin<"mem_fence_region">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_read_only   : GCCBuiltin<"read_mem_fence">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_read_only_global   : GCCBuiltin<"read_mem_fence_global">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_read_only_local   : GCCBuiltin<"read_mem_fence_local">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_read_only_region : GCCBuiltin<"read_mem_fence_region">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_write_only   : GCCBuiltin<"write_mem_fence">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_write_only_global   : GCCBuiltin<"write_mem_fence_global">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_write_only_local   : GCCBuiltin<"write_mem_fence_local">,
+        UnaryIntNoRetInt;
+  def int_AMDIL_fence_write_only_region : GCCBuiltin<"write_mem_fence_region">,
+        UnaryIntNoRetInt;
+
+  def int_AMDIL_early_exit : GCCBuiltin<"__amdil_early_exit">,
+        UnaryIntNoRetInt;
+
+  def int_AMDIL_cmov_logical  : GCCBuiltin<"__amdil_cmov_logical">,
+          TernaryIntInt;
+  def int_AMDIL_fabs : GCCBuiltin<"__amdil_fabs">, UnaryIntFloat;
+  def int_AMDIL_abs : GCCBuiltin<"__amdil_abs">, UnaryIntInt;
+
+  def int_AMDIL_bit_extract_i32 : GCCBuiltin<"__amdil_ibit_extract">,
+          TernaryIntInt;
+  def int_AMDIL_bit_extract_u32 : GCCBuiltin<"__amdil_ubit_extract">,
+          TernaryIntInt;
+  def int_AMDIL_bit_reverse_u32 : GCCBuiltin<"__amdil_ubit_reverse">,
+          UnaryIntInt;
+  def int_AMDIL_bit_count_i32 : GCCBuiltin<"__amdil_count_bits">,
+          UnaryIntInt;
+  def int_AMDIL_bit_find_first_lo : GCCBuiltin<"__amdil_ffb_lo">,
+          UnaryIntInt;
+  def int_AMDIL_bit_find_first_hi : GCCBuiltin<"__amdil_ffb_hi">,
+          UnaryIntInt;
+  def int_AMDIL_bit_find_first_sgn : GCCBuiltin<"__amdil_ffb_signed">,
+          UnaryIntInt;
+  def int_AMDIL_media_bitalign : GCCBuiltin<"__amdil_bitalign">,
+                    TernaryIntInt;
+  def int_AMDIL_media_bytealign : GCCBuiltin<"__amdil_bytealign">,
+                    TernaryIntInt;
+  def int_AMDIL_bit_insert_u32 : GCCBuiltin<"__amdil_ubit_insert">,
+                    QuaternaryIntInt;
+  def int_AMDIL_bfi : GCCBuiltin<"__amdil_bfi">,
+      TernaryIntInt;
+  def int_AMDIL_bfm : GCCBuiltin<"__amdil_bfm">,
+      BinaryIntInt;
+  def int_AMDIL_mad_i32 : GCCBuiltin<"__amdil_imad">,
+          TernaryIntInt;
+  def int_AMDIL_mad_u32 : GCCBuiltin<"__amdil_umad">,
+          TernaryIntInt;
+  def int_AMDIL_mad     : GCCBuiltin<"__amdil_mad">,
+          TernaryIntFloat;
+  def int_AMDIL_mulhi_i32 : GCCBuiltin<"__amdil_imul_high">,
+          BinaryIntInt;
+  def int_AMDIL_mulhi_u32 : GCCBuiltin<"__amdil_umul_high">,
+          BinaryIntInt;
+  def int_AMDIL_mul24_i32 : GCCBuiltin<"__amdil_imul24">,
+          BinaryIntInt;
+  def int_AMDIL_mul24_u32 : GCCBuiltin<"__amdil_umul24">,
+          BinaryIntInt;
+  def int_AMDIL_mulhi24_i32 : GCCBuiltin<"__amdil_imul24_high">,
+          BinaryIntInt;
+  def int_AMDIL_mulhi24_u32 : GCCBuiltin<"__amdil_umul24_high">,
+          BinaryIntInt;
+  def int_AMDIL_mad24_i32 : GCCBuiltin<"__amdil_imad24">,
+          TernaryIntInt;
+  def int_AMDIL_mad24_u32 : GCCBuiltin<"__amdil_umad24">,
+          TernaryIntInt;
+  def int_AMDIL_carry_i32 : GCCBuiltin<"__amdil_carry">,
+          BinaryIntInt;
+  def int_AMDIL_borrow_i32 : GCCBuiltin<"__amdil_borrow">,
+          BinaryIntInt;
+  def int_AMDIL_min_i32 : GCCBuiltin<"__amdil_imin">,
+          BinaryIntInt;
+  def int_AMDIL_min_u32 : GCCBuiltin<"__amdil_umin">,
+          BinaryIntInt;
+  def int_AMDIL_min     : GCCBuiltin<"__amdil_min">,
+          BinaryIntFloat;
+  def int_AMDIL_max_i32 : GCCBuiltin<"__amdil_imax">,
+          BinaryIntInt;
+  def int_AMDIL_max_u32 : GCCBuiltin<"__amdil_umax">,
+          BinaryIntInt;
+  def int_AMDIL_max     : GCCBuiltin<"__amdil_max">,
+          BinaryIntFloat;
+  def int_AMDIL_media_lerp_u4 : GCCBuiltin<"__amdil_u4lerp">,
+          TernaryIntInt;
+  def int_AMDIL_media_sad : GCCBuiltin<"__amdil_sad">,
+          TernaryIntInt;
+  def int_AMDIL_media_sad_hi : GCCBuiltin<"__amdil_sadhi">,
+          TernaryIntInt;
+  def int_AMDIL_fraction : GCCBuiltin<"__amdil_fraction">,
+          UnaryIntFloat;
+  def int_AMDIL_clamp : GCCBuiltin<"__amdil_clamp">,
+          TernaryIntFloat;
+  def int_AMDIL_pireduce : GCCBuiltin<"__amdil_pireduce">,
+          UnaryIntFloat;
+  def int_AMDIL_round_nearest : GCCBuiltin<"__amdil_round_nearest">,
+          UnaryIntFloat;
+  def int_AMDIL_round_neginf : GCCBuiltin<"__amdil_round_neginf">,
+          UnaryIntFloat;
+  def int_AMDIL_round_posinf : GCCBuiltin<"__amdil_round_posinf">,
+          UnaryIntFloat;
+  def int_AMDIL_round_zero : GCCBuiltin<"__amdil_round_zero">,
+          UnaryIntFloat;
+  def int_AMDIL_acos : GCCBuiltin<"__amdil_acos">,
+          UnaryIntFloat;
+  def int_AMDIL_atan : GCCBuiltin<"__amdil_atan">,
+          UnaryIntFloat;
+  def int_AMDIL_asin : GCCBuiltin<"__amdil_asin">,
+          UnaryIntFloat;
+  def int_AMDIL_cos : GCCBuiltin<"__amdil_cos">,
+          UnaryIntFloat;
+  def int_AMDIL_cos_vec : GCCBuiltin<"__amdil_cos_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_tan : GCCBuiltin<"__amdil_tan">,
+          UnaryIntFloat;
+  def int_AMDIL_sin : GCCBuiltin<"__amdil_sin">,
+          UnaryIntFloat;
+  def int_AMDIL_sin_vec : GCCBuiltin<"__amdil_sin_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_pow : GCCBuiltin<"__amdil_pow">, BinaryIntFloat;
+  def int_AMDIL_div : GCCBuiltin<"__amdil_div">, BinaryIntFloat;
+  def int_AMDIL_udiv : GCCBuiltin<"__amdil_udiv">, BinaryIntInt;
+  def int_AMDIL_sqrt: GCCBuiltin<"__amdil_sqrt">,
+          UnaryIntFloat;
+  def int_AMDIL_sqrt_vec: GCCBuiltin<"__amdil_sqrt_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_exp : GCCBuiltin<"__amdil_exp">,
+          UnaryIntFloat;
+  def int_AMDIL_exp_vec : GCCBuiltin<"__amdil_exp_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_exn : GCCBuiltin<"__amdil_exn">,
+          UnaryIntFloat;
+  def int_AMDIL_log : GCCBuiltin<"__amdil_log">,
+          UnaryIntFloat;
+  def int_AMDIL_log_vec : GCCBuiltin<"__amdil_log_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_ln : GCCBuiltin<"__amdil_ln">,
+          UnaryIntFloat;
+  def int_AMDIL_sign: GCCBuiltin<"__amdil_sign">,
+          UnaryIntFloat;
+  def int_AMDIL_fma: GCCBuiltin<"__amdil_fma">,
+          TernaryIntFloat;
+  def int_AMDIL_rsq : GCCBuiltin<"__amdil_rsq">,
+          UnaryIntFloat;
+  def int_AMDIL_rsq_vec : GCCBuiltin<"__amdil_rsq_vec">,
+          UnaryIntFloat;
+  def int_AMDIL_length : GCCBuiltin<"__amdil_length">,
+          UnaryIntFloat;
+  def int_AMDIL_lerp : GCCBuiltin<"__amdil_lerp">,
+          TernaryIntFloat;
+  def int_AMDIL_media_sad4 : GCCBuiltin<"__amdil_sad4">,
+      Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty,
+           llvm_v4i32_ty, llvm_i32_ty], []>;
+
+  def int_AMDIL_frexp_f64 : GCCBuiltin<"__amdil_frexp">,
+        Intrinsic<[llvm_v2i64_ty], [llvm_double_ty], []>;
+ def int_AMDIL_ldexp : GCCBuiltin<"__amdil_ldexp">,
+    Intrinsic<[llvm_anyfloat_ty], [llvm_anyfloat_ty, llvm_anyint_ty], []>;
+  def int_AMDIL_drcp : GCCBuiltin<"__amdil_rcp">,
+      Intrinsic<[llvm_double_ty], [llvm_double_ty], []>;
+  def int_AMDIL_convert_f16_f32 : GCCBuiltin<"__amdil_half_to_float">,
+      ConvertIntITOF;
+  def int_AMDIL_convert_f32_f16 : GCCBuiltin<"__amdil_float_to_half">,
+      ConvertIntFTOI;
+  def int_AMDIL_convert_f32_i32_rpi : GCCBuiltin<"__amdil_float_to_int_rpi">,
+      ConvertIntFTOI;
+  def int_AMDIL_convert_f32_i32_flr : GCCBuiltin<"__amdil_float_to_int_flr">,
+      ConvertIntFTOI;
+  def int_AMDIL_convert_f32_f16_near : GCCBuiltin<"__amdil_float_to_half_near">,
+      ConvertIntFTOI;
+  def int_AMDIL_convert_f32_f16_neg_inf : GCCBuiltin<"__amdil_float_to_half_neg_inf">,
+      ConvertIntFTOI;
+  def int_AMDIL_convert_f32_f16_plus_inf : GCCBuiltin<"__amdil_float_to_half_plus_inf">,
+      ConvertIntFTOI;
+ def int_AMDIL_media_convert_f2v4u8 : GCCBuiltin<"__amdil_f_2_u4">,
+      Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], []>;
+  def int_AMDIL_media_unpack_byte_0 : GCCBuiltin<"__amdil_unpack_0">,
+      ConvertIntITOF;
+  def int_AMDIL_media_unpack_byte_1 : GCCBuiltin<"__amdil_unpack_1">,
+      ConvertIntITOF;
+  def int_AMDIL_media_unpack_byte_2 : GCCBuiltin<"__amdil_unpack_2">,
+      ConvertIntITOF;
+  def int_AMDIL_media_unpack_byte_3 : GCCBuiltin<"__amdil_unpack_3">,
+      ConvertIntITOF;
+  def int_AMDIL_dp2_add : GCCBuiltin<"__amdil_dp2_add">,
+        Intrinsic<[llvm_float_ty], [llvm_v2f32_ty,
+          llvm_v2f32_ty, llvm_float_ty], []>;
+  def int_AMDIL_dp2 : GCCBuiltin<"__amdil_dp2">,
+        Intrinsic<[llvm_float_ty], [llvm_v2f32_ty,
+          llvm_v2f32_ty], []>;
+  def int_AMDIL_dp3 : GCCBuiltin<"__amdil_dp3">,
+        Intrinsic<[llvm_float_ty], [llvm_v4f32_ty,
+          llvm_v4f32_ty], []>;
+  def int_AMDIL_dp4 : GCCBuiltin<"__amdil_dp4">,
+        Intrinsic<[llvm_float_ty], [llvm_v4f32_ty,
+          llvm_v4f32_ty], []>;
+//===---------------------- Image functions begin ------------------------===//
+  def int_AMDIL_image1d_write : GCCBuiltin<"__amdil_image1d_write">,
+      Intrinsic<[], [llvm_ptr_ty, llvm_v2i32_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_read_norm  : GCCBuiltin<"__amdil_image1d_read_norm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_read_unnorm  : GCCBuiltin<"__amdil_image1d_read_unnorm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_info0 : GCCBuiltin<"__amdil_image1d_info0">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image1d_info1 : GCCBuiltin<"__amdil_image1d_info1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+ def int_AMDIL_image1d_array_write : GCCBuiltin<"__amdil_image1d_array_write">,
+      Intrinsic<[], [llvm_ptr_ty, llvm_v2i32_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_array_read_norm  : GCCBuiltin<"__amdil_image1d_array_read_norm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_array_read_unnorm  : GCCBuiltin<"__amdil_image1d_array_read_unnorm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image1d_array_info0 : GCCBuiltin<"__amdil_image1d_array_info0">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image1d_array_info1 : GCCBuiltin<"__amdil_image1d_array_info1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+ def int_AMDIL_image2d_write : GCCBuiltin<"__amdil_image2d_write">,
+      Intrinsic<[], [llvm_ptr_ty, llvm_v2i32_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_read_norm  : GCCBuiltin<"__amdil_image2d_read_norm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_read_unnorm  : GCCBuiltin<"__amdil_image2d_read_unnorm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_info0 : GCCBuiltin<"__amdil_image2d_info0">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image2d_info1 : GCCBuiltin<"__amdil_image2d_info1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+ def int_AMDIL_image2d_array_write : GCCBuiltin<"__amdil_image2d_array_write">,
+      Intrinsic<[], [llvm_ptr_ty, llvm_v2i32_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_array_read_norm  : GCCBuiltin<"__amdil_image2d_array_read_norm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_array_read_unnorm  : GCCBuiltin<"__amdil_image2d_array_read_unnorm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image2d_array_info0 : GCCBuiltin<"__amdil_image2d_array_info0">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image2d_array_info1 : GCCBuiltin<"__amdil_image2d_array_info1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image3d_write : GCCBuiltin<"__amdil_image3d_write">,
+         Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image3d_read_norm  : GCCBuiltin<"__amdil_image3d_read_norm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image3d_read_unnorm  : GCCBuiltin<"__amdil_image3d_read_unnorm">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_image3d_info0 : GCCBuiltin<"__amdil_image3d_info0">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+  def int_AMDIL_image3d_info1 : GCCBuiltin<"__amdil_image3d_info1">,
+      Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], []>;
+
+//===---------------------- Image functions end --------------------------===//
+
+  def int_AMDIL_append_alloc_i32 : GCCBuiltin<"__amdil_append_alloc">,
+      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+  def int_AMDIL_append_consume_i32 : GCCBuiltin<"__amdil_append_consume">,
+      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+  def int_AMDIL_append_alloc_i32_noret : GCCBuiltin<"__amdil_append_alloc_noret">,
+      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+  def int_AMDIL_append_consume_i32_noret : GCCBuiltin<"__amdil_append_consume_noret">,
+      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+
+  def int_AMDIL_get_global_id : GCCBuiltin<"__amdil_get_global_id_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_local_id : GCCBuiltin<"__amdil_get_local_id_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_group_id : GCCBuiltin<"__amdil_get_group_id_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_num_groups : GCCBuiltin<"__amdil_get_num_groups_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_local_size : GCCBuiltin<"__amdil_get_local_size_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_global_size : GCCBuiltin<"__amdil_get_global_size_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_global_offset : GCCBuiltin<"__amdil_get_global_offset_int">,
+      Intrinsic<[llvm_v4i32_ty], [], []>;
+  def int_AMDIL_get_work_dim : GCCBuiltin<"get_work_dim">,
+      Intrinsic<[llvm_i32_ty], [], []>;
+  def int_AMDIL_get_printf_offset : GCCBuiltin<"__amdil_get_printf_offset">,
+      Intrinsic<[llvm_i32_ty], []>;
+  def int_AMDIL_get_printf_size : GCCBuiltin<"__amdil_get_printf_size">,
+      Intrinsic<[llvm_i32_ty], []>;
+
+/// Intrinsics for atomic instructions with no return value
+/// Signed 32 bit integer atomics for global address space
+///
+/// Naming key used throughout the atomic defs in this file: the letter
+/// before the bit width encodes the address space (g = global, l = local,
+/// r = region) and the element type (i = signed int, u = unsigned int,
+/// f = float); a "_noret" suffix marks the variant whose result is
+/// discarded.
+def int_AMDIL_atomic_add_gi32_noret : GCCBuiltin<"__atomic_add_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_gi32_noret : GCCBuiltin<"__atomic_sub_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_gi32_noret : GCCBuiltin<"__atomic_rsub_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_gi32_noret : GCCBuiltin<"__atomic_xchg_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_gi32_noret : GCCBuiltin<"__atomic_inc_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_gi32_noret : GCCBuiltin<"__atomic_dec_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_gi32_noret : GCCBuiltin<"__atomic_cmpxchg_gi32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_gi32_noret : GCCBuiltin<"__atomic_min_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_gi32_noret : GCCBuiltin<"__atomic_max_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_gi32_noret : GCCBuiltin<"__atomic_and_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_gi32_noret : GCCBuiltin<"__atomic_or_gi32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_gi32_noret : GCCBuiltin<"__atomic_xor_gi32_noret">,
+    BinaryAtomicIntNoRet;
+
+
+
+/// Unsigned 32 bit integer atomics for global address space
+def int_AMDIL_atomic_add_gu32_noret : GCCBuiltin<"__atomic_add_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_gu32_noret : GCCBuiltin<"__atomic_sub_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_gu32_noret : GCCBuiltin<"__atomic_rsub_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_gu32_noret : GCCBuiltin<"__atomic_xchg_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_gu32_noret : GCCBuiltin<"__atomic_inc_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_gu32_noret : GCCBuiltin<"__atomic_dec_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_gu32_noret : GCCBuiltin<"__atomic_cmpxchg_gu32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_gu32_noret : GCCBuiltin<"__atomic_min_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_gu32_noret : GCCBuiltin<"__atomic_max_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_gu32_noret : GCCBuiltin<"__atomic_and_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_gu32_noret : GCCBuiltin<"__atomic_or_gu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_gu32_noret : GCCBuiltin<"__atomic_xor_gu32_noret">,
+    BinaryAtomicIntNoRet;
+
+
+/// Intrinsics for atomic instructions with a return value
+/// NOTE(review): presumably each returns the value the location held
+/// before the update — confirm against the AMDIL atomic specification.
+/// Signed 32 bit integer atomics for global address space
+def int_AMDIL_atomic_add_gi32 : GCCBuiltin<"__atomic_add_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_gi32 : GCCBuiltin<"__atomic_sub_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_gi32 : GCCBuiltin<"__atomic_rsub_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_gi32 : GCCBuiltin<"__atomic_xchg_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_gi32 : GCCBuiltin<"__atomic_inc_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_gi32 : GCCBuiltin<"__atomic_dec_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_gi32 : GCCBuiltin<"__atomic_cmpxchg_gi32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_gi32 : GCCBuiltin<"__atomic_min_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_gi32 : GCCBuiltin<"__atomic_max_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_gi32 : GCCBuiltin<"__atomic_and_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_gi32 : GCCBuiltin<"__atomic_or_gi32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xor_gi32 : GCCBuiltin<"__atomic_xor_gi32">,
+    BinaryAtomicInt;
+
+/// 32 bit float atomics required by OpenCL (exchange only)
+def int_AMDIL_atomic_xchg_gf32 : GCCBuiltin<"__atomic_xchg_gf32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_gf32_noret : GCCBuiltin<"__atomic_xchg_gf32_noret">,
+    BinaryAtomicIntNoRet;
+
+/// Unsigned 32 bit integer atomics for global address space
+def int_AMDIL_atomic_add_gu32 : GCCBuiltin<"__atomic_add_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_gu32 : GCCBuiltin<"__atomic_sub_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_gu32 : GCCBuiltin<"__atomic_rsub_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_gu32 : GCCBuiltin<"__atomic_xchg_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_gu32 : GCCBuiltin<"__atomic_inc_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_gu32 : GCCBuiltin<"__atomic_dec_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_gu32 : GCCBuiltin<"__atomic_cmpxchg_gu32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_gu32 : GCCBuiltin<"__atomic_min_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_gu32 : GCCBuiltin<"__atomic_max_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_gu32 : GCCBuiltin<"__atomic_and_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_gu32 : GCCBuiltin<"__atomic_or_gu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xor_gu32 : GCCBuiltin<"__atomic_xor_gu32">,
+    BinaryAtomicInt;
+
+
+/// Intrinsics for atomic instructions with no return value
+/// Signed 32 bit integer atomics for local address space
+/// NOTE(review): unlike the global sets, the local/region sets also
+/// provide mskor (ternary; presumably a masked-or — confirm operand
+/// order against the AMDIL specification).
+def int_AMDIL_atomic_add_li32_noret : GCCBuiltin<"__atomic_add_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_li32_noret : GCCBuiltin<"__atomic_sub_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_li32_noret : GCCBuiltin<"__atomic_rsub_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_li32_noret : GCCBuiltin<"__atomic_xchg_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_li32_noret : GCCBuiltin<"__atomic_inc_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_li32_noret : GCCBuiltin<"__atomic_dec_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_li32_noret : GCCBuiltin<"__atomic_cmpxchg_li32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_li32_noret : GCCBuiltin<"__atomic_min_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_li32_noret : GCCBuiltin<"__atomic_max_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_li32_noret : GCCBuiltin<"__atomic_and_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_li32_noret : GCCBuiltin<"__atomic_or_li32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_mskor_li32_noret : GCCBuiltin<"__atomic_mskor_li32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_li32_noret : GCCBuiltin<"__atomic_xor_li32_noret">,
+    BinaryAtomicIntNoRet;
+
+/// Signed 32 bit integer atomics for region address space
+def int_AMDIL_atomic_add_ri32_noret : GCCBuiltin<"__atomic_add_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_ri32_noret : GCCBuiltin<"__atomic_sub_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_ri32_noret : GCCBuiltin<"__atomic_rsub_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_ri32_noret : GCCBuiltin<"__atomic_xchg_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_ri32_noret : GCCBuiltin<"__atomic_inc_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_ri32_noret : GCCBuiltin<"__atomic_dec_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_ri32_noret : GCCBuiltin<"__atomic_cmpxchg_ri32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_ri32_noret : GCCBuiltin<"__atomic_min_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_ri32_noret : GCCBuiltin<"__atomic_max_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_ri32_noret : GCCBuiltin<"__atomic_and_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_ri32_noret : GCCBuiltin<"__atomic_or_ri32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_mskor_ri32_noret : GCCBuiltin<"__atomic_mskor_ri32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_ri32_noret : GCCBuiltin<"__atomic_xor_ri32_noret">,
+    BinaryAtomicIntNoRet;
+
+
+
+/// Unsigned 32 bit integer atomics for local address space
+def int_AMDIL_atomic_add_lu32_noret : GCCBuiltin<"__atomic_add_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_lu32_noret : GCCBuiltin<"__atomic_sub_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_lu32_noret : GCCBuiltin<"__atomic_rsub_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_lu32_noret : GCCBuiltin<"__atomic_xchg_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_lu32_noret : GCCBuiltin<"__atomic_inc_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_lu32_noret : GCCBuiltin<"__atomic_dec_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_lu32_noret : GCCBuiltin<"__atomic_cmpxchg_lu32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_lu32_noret : GCCBuiltin<"__atomic_min_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_lu32_noret : GCCBuiltin<"__atomic_max_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_lu32_noret : GCCBuiltin<"__atomic_and_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_lu32_noret : GCCBuiltin<"__atomic_or_lu32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_mskor_lu32_noret : GCCBuiltin<"__atomic_mskor_lu32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_lu32_noret : GCCBuiltin<"__atomic_xor_lu32_noret">,
+    BinaryAtomicIntNoRet;
+
+/// Unsigned 32 bit integer atomics for region address space
+def int_AMDIL_atomic_add_ru32_noret : GCCBuiltin<"__atomic_add_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_sub_ru32_noret : GCCBuiltin<"__atomic_sub_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_rsub_ru32_noret : GCCBuiltin<"__atomic_rsub_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_ru32_noret : GCCBuiltin<"__atomic_xchg_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_inc_ru32_noret : GCCBuiltin<"__atomic_inc_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_dec_ru32_noret : GCCBuiltin<"__atomic_dec_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_cmpxchg_ru32_noret : GCCBuiltin<"__atomic_cmpxchg_ru32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_min_ru32_noret : GCCBuiltin<"__atomic_min_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_max_ru32_noret : GCCBuiltin<"__atomic_max_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_and_ru32_noret : GCCBuiltin<"__atomic_and_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_or_ru32_noret : GCCBuiltin<"__atomic_or_ru32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_mskor_ru32_noret : GCCBuiltin<"__atomic_mskor_ru32_noret">,
+    TernaryAtomicIntNoRet;
+def int_AMDIL_atomic_xor_ru32_noret : GCCBuiltin<"__atomic_xor_ru32_noret">,
+    BinaryAtomicIntNoRet;
+
+/// Profiling / hardware-identification queries.  VoidIntLong and
+/// VoidIntInt are intrinsic-property classes defined elsewhere in this
+/// target's .td files — TODO(review): confirm their exact signatures.
+def int_AMDIL_get_cycle_count : GCCBuiltin<"__amdil_get_cycle_count">,
+    VoidIntLong;
+
+def int_AMDIL_compute_unit_id : GCCBuiltin<"__amdil_compute_unit_id">,
+    VoidIntInt;
+
+def int_AMDIL_wavefront_id : GCCBuiltin<"__amdil_wavefront_id">,
+    VoidIntInt;
+
+
+/// Intrinsics for atomic instructions with a return value
+/// (presumably the pre-update value of the memory location — see the
+/// matching note on the global-address-space set above... confirm).
+/// Signed 32 bit integer atomics for local address space
+def int_AMDIL_atomic_add_li32 : GCCBuiltin<"__atomic_add_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_li32 : GCCBuiltin<"__atomic_sub_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_li32 : GCCBuiltin<"__atomic_rsub_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_li32 : GCCBuiltin<"__atomic_xchg_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_li32 : GCCBuiltin<"__atomic_inc_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_li32 : GCCBuiltin<"__atomic_dec_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_li32 : GCCBuiltin<"__atomic_cmpxchg_li32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_li32 : GCCBuiltin<"__atomic_min_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_li32 : GCCBuiltin<"__atomic_max_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_li32 : GCCBuiltin<"__atomic_and_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_li32 : GCCBuiltin<"__atomic_or_li32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_mskor_li32 : GCCBuiltin<"__atomic_mskor_li32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_xor_li32 : GCCBuiltin<"__atomic_xor_li32">,
+    BinaryAtomicInt;
+
+/// Signed 32 bit integer atomics for region address space
+def int_AMDIL_atomic_add_ri32 : GCCBuiltin<"__atomic_add_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_ri32 : GCCBuiltin<"__atomic_sub_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_ri32 : GCCBuiltin<"__atomic_rsub_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_ri32 : GCCBuiltin<"__atomic_xchg_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_ri32 : GCCBuiltin<"__atomic_inc_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_ri32 : GCCBuiltin<"__atomic_dec_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_ri32 : GCCBuiltin<"__atomic_cmpxchg_ri32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_ri32 : GCCBuiltin<"__atomic_min_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_ri32 : GCCBuiltin<"__atomic_max_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_ri32 : GCCBuiltin<"__atomic_and_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_ri32 : GCCBuiltin<"__atomic_or_ri32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_mskor_ri32 : GCCBuiltin<"__atomic_mskor_ri32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_xor_ri32 : GCCBuiltin<"__atomic_xor_ri32">,
+    BinaryAtomicInt;
+
+/// 32 bit float atomics required by OpenCL (exchange only)
+def int_AMDIL_atomic_xchg_lf32 : GCCBuiltin<"__atomic_xchg_lf32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_lf32_noret : GCCBuiltin<"__atomic_xchg_lf32_noret">,
+    BinaryAtomicIntNoRet;
+def int_AMDIL_atomic_xchg_rf32 : GCCBuiltin<"__atomic_xchg_rf32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_rf32_noret : GCCBuiltin<"__atomic_xchg_rf32_noret">,
+    BinaryAtomicIntNoRet;
+
+/// Unsigned 32 bit integer atomics for local address space
+def int_AMDIL_atomic_add_lu32 : GCCBuiltin<"__atomic_add_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_lu32 : GCCBuiltin<"__atomic_sub_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_lu32 : GCCBuiltin<"__atomic_rsub_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_lu32 : GCCBuiltin<"__atomic_xchg_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_lu32 : GCCBuiltin<"__atomic_inc_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_lu32 : GCCBuiltin<"__atomic_dec_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_lu32 : GCCBuiltin<"__atomic_cmpxchg_lu32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_lu32 : GCCBuiltin<"__atomic_min_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_lu32 : GCCBuiltin<"__atomic_max_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_lu32 : GCCBuiltin<"__atomic_and_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_lu32 : GCCBuiltin<"__atomic_or_lu32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_mskor_lu32 : GCCBuiltin<"__atomic_mskor_lu32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_xor_lu32 : GCCBuiltin<"__atomic_xor_lu32">,
+    BinaryAtomicInt;
+
+/// Unsigned 32 bit integer atomics for region address space
+def int_AMDIL_atomic_add_ru32 : GCCBuiltin<"__atomic_add_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_sub_ru32 : GCCBuiltin<"__atomic_sub_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_rsub_ru32 : GCCBuiltin<"__atomic_rsub_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_xchg_ru32 : GCCBuiltin<"__atomic_xchg_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_inc_ru32 : GCCBuiltin<"__atomic_inc_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_dec_ru32 : GCCBuiltin<"__atomic_dec_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_cmpxchg_ru32 : GCCBuiltin<"__atomic_cmpxchg_ru32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_min_ru32 : GCCBuiltin<"__atomic_min_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_max_ru32 : GCCBuiltin<"__atomic_max_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_and_ru32 : GCCBuiltin<"__atomic_and_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_or_ru32 : GCCBuiltin<"__atomic_or_ru32">,
+    BinaryAtomicInt;
+def int_AMDIL_atomic_mskor_ru32 : GCCBuiltin<"__atomic_mskor_ru32">,
+    TernaryAtomicInt;
+def int_AMDIL_atomic_xor_ru32 : GCCBuiltin<"__atomic_xor_ru32">,
+    BinaryAtomicInt;
+
+/// Semaphore signal/wait/init
+def int_AMDIL_semaphore_init : GCCBuiltin<"__amdil_semaphore_init">,
+    Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_AMDIL_semaphore_wait : GCCBuiltin<"__amdil_semaphore_wait">,
+    Intrinsic<[], [llvm_ptr_ty]>;
+def int_AMDIL_semaphore_signal : GCCBuiltin<"__amdil_semaphore_signal">,
+    Intrinsic<[], [llvm_ptr_ty]>;
+/// NOTE(review): the builtin is named "max_semaphore_size" but the
+/// intrinsic drops "max_" — confirm the mismatch is intentional.
+def int_AMDIL_semaphore_size   : GCCBuiltin<"__amdil_max_semaphore_size">,
+    Intrinsic<[llvm_i32_ty], []>;
+}
diff --git a/lib/Target/AMDGPU/AMDILMultiClass.td b/lib/Target/AMDGPU/AMDILMultiClass.td
new file mode 100644
index 0000000..12e92f5
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILMultiClass.td
@@ -0,0 +1,95 @@
+//===-- AMDILMultiClass.td - AMDIL Multiclass defs ---*- tablegen -*-------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+// Multiclass that handles conditional branch instructions: for each
+// scalar type that may drive flow control it instantiates a pseudo
+// branch taking a basic-block target and a condition operand.
+// (Continuation lines re-indented consistently; TableGen ignores the
+// whitespace, so the generated records are unchanged.)
+multiclass BranchConditional<SDNode Op> {
+    def _i32 : ILFormat<IL_OP_IFC, (outs),
+        (ins brtarget:$target, GPRI32:$src0),
+        "; i32 Pseudo branch instruction",
+        [(Op bb:$target, GPRI32:$src0)]>;
+    def _f32 : ILFormat<IL_OP_IFC, (outs),
+        (ins brtarget:$target, GPRF32:$src0),
+        "; f32 Pseudo branch instruction",
+        [(Op bb:$target, GPRF32:$src0)]>;
+}
+
+// Truncating i32 -> i8/i16 stores to the global address space
+multiclass GTRUNCSTORE<string asm> {
+  def _i32i8 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(global_i8trunc_store GPRI32:$val, ADDR:$ptr)]>;
+  def _i32i16 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(global_i16trunc_store GPRI32:$val, ADDR:$ptr)]>;
+}
+
+// Truncating i32 -> i8/i16 stores to the local address space
+multiclass LTRUNCSTORE<string asm> {
+  def _i32i8 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(local_i8trunc_store GPRI32:$val, ADDR:$ptr)]>;
+  def _i32i16 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(local_i16trunc_store GPRI32:$val, ADDR:$ptr)]>;
+}
+
+// Truncating i32 -> i8/i16 stores to the private address space
+multiclass PTRUNCSTORE<string asm> {
+  def _i32i8 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(private_i8trunc_store GPRI32:$val, ADDR:$ptr)]>;
+  def _i32i16 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(private_i16trunc_store GPRI32:$val, ADDR:$ptr)]>;
+}
+
+// Truncating i32 -> i8/i16 stores to the region address space
+multiclass RTRUNCSTORE<string asm> {
+  def _i32i8 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(region_i8trunc_store GPRI32:$val, ADDR:$ptr)]>;
+  def _i32i16 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(region_i16trunc_store GPRI32:$val, ADDR:$ptr)]>;
+}
+
+
+// Full-width (non-truncating) stores; OpNode selects the address space.
+multiclass STORE<string asm, PatFrag OpNode> {
+  def _i32 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRI32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(OpNode GPRI32:$val, ADDR:$ptr)]>;
+  def _f32 : OneInOneOut<IL_OP_MOV, (outs), (ins GPRF32:$val, MEMI32:$ptr),
+      !strconcat(asm, " $val $ptr"),
+      [(OpNode GPRF32:$val, ADDR:$ptr)]>;
+}
+
+// Loads mirroring STORE above; OpNode selects the address space.
+multiclass LOAD<string asm, PatFrag OpNode> {
+  def _i32 : OneInOneOut<IL_OP_MOV, (outs GPRI32:$dst), (ins MEMI32:$ptr),
+      !strconcat(asm, " $dst $ptr"),
+      [(set GPRI32:$dst, (OpNode ADDR:$ptr))]>;
+  def _f32 : OneInOneOut<IL_OP_MOV, (outs GPRF32:$dst), (ins MEMI32:$ptr),
+      !strconcat(asm, " $dst $ptr"),
+      [(set GPRF32:$dst, (OpNode ADDR:$ptr))]>;
+}
+
+// Only scalar types should generate flow control.  opc.Text is the
+// textual mnemonic of the opcode record.
+multiclass BranchInstr<ILOpCode opc> {
+  def _i32 : UnaryOpNoRet<opc, (outs), (ins GPRI32:$src),
+      !strconcat(opc.Text, " $src"), []>;
+  def _f32 : UnaryOpNoRet<opc, (outs), (ins GPRF32:$src),
+      !strconcat(opc.Text, " $src"), []>;
+}
+// Two-operand variant of BranchInstr; only scalar types should
+// generate flow control.
+multiclass BranchInstr2<ILOpCode opc> {
+  def _i32 : BinaryOpNoRet<opc, (outs), (ins GPRI32:$src0, GPRI32:$src1),
+      !strconcat(opc.Text, " $src0, $src1"), []>;
+  def _f32 : BinaryOpNoRet<opc, (outs), (ins GPRF32:$src0, GPRF32:$src1),
+      !strconcat(opc.Text, " $src0, $src1"), []>;
+}
diff --git a/lib/Target/AMDGPU/AMDILNIDevice.cpp b/lib/Target/AMDGPU/AMDILNIDevice.cpp
new file mode 100644
index 0000000..d4112cd
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILNIDevice.cpp
@@ -0,0 +1,71 @@
+//===-- AMDILNIDevice.cpp - Device Info for Northern Islands devices ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDILNIDevice.h"
+#include "AMDILEvergreenDevice.h"
+#include "AMDILSubtarget.h"
+
+using namespace llvm;
+
+// Map the subtarget's device name onto the matching NI device flag.
+// Any name other than caicos/turks/cayman falls through to BARTS, so
+// Barts doubles as the default for unrecognized NI device names.
+AMDILNIDevice::AMDILNIDevice(AMDILSubtarget *ST)
+  : AMDILEvergreenDevice(ST)
+{
+  std::string name = ST->getDeviceName();
+  if (name == "caicos") {
+    mDeviceFlag = OCL_DEVICE_CAICOS;
+  } else if (name == "turks") {
+    mDeviceFlag = OCL_DEVICE_TURKS;
+  } else if (name == "cayman") {
+    mDeviceFlag = OCL_DEVICE_CAYMAN;
+  } else {
+    mDeviceFlag = OCL_DEVICE_BARTS;
+  }
+}
+AMDILNIDevice::~AMDILNIDevice()
+{
+}
+
+// Local-data-share capacity available to a workgroup; zero when the
+// local-memory hardware path is not in use.
+size_t
+AMDILNIDevice::getMaxLDSSize() const
+{
+  return usesHardware(AMDILDeviceInfo::LocalMem) ? MAX_LDS_SIZE_900 : 0;
+}
+
+// Every NI part, Cayman included, reports the HD6XXX hardware generation.
+uint32_t
+AMDILNIDevice::getGeneration() const
+{
+  return AMDILDeviceInfo::HD6XXX;
+}
+
+
+// Cayman is the double-precision-capable NI part; the extra capability
+// bits are applied by setCaps() below.
+AMDILCaymanDevice::AMDILCaymanDevice(AMDILSubtarget *ST)
+  : AMDILNIDevice(ST)
+{
+  setCaps();
+}
+
+AMDILCaymanDevice::~AMDILCaymanDevice()
+{
+}
+
+void
+AMDILCaymanDevice::setCaps()
+{
+  // Double precision (and hardware FMA) is only enabled when the
+  // subtarget explicitly overrides DoubleOps on.
+  if (mSTM->isOverride(AMDILDeviceInfo::DoubleOps)) {
+    mHWBits.set(AMDILDeviceInfo::DoubleOps);
+    mHWBits.set(AMDILDeviceInfo::FMA);
+  }
+  // Signed 24-bit ops run in hardware on Cayman; clear the software
+  // bit that a base class may have set for them.
+  mHWBits.set(AMDILDeviceInfo::Signed24BitOps);
+  mSWBits.reset(AMDILDeviceInfo::Signed24BitOps);
+  // Arena segments stay a software feature on this device.
+  mSWBits.set(AMDILDeviceInfo::ArenaSegment);
+}
+
diff --git a/lib/Target/AMDGPU/AMDILNIDevice.h b/lib/Target/AMDGPU/AMDILNIDevice.h
new file mode 100644
index 0000000..556670a
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILNIDevice.h
@@ -0,0 +1,59 @@
+//===------- AMDILNIDevice.h - Define NI Device for AMDIL -*- C++ -*------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface for the subtarget data classes.
+//
+//===---------------------------------------------------------------------===//
+// This file will define the interface that each generation needs to
+// implement in order to correctly answer queries on the capabilities of the
+// specific hardware.
+//===---------------------------------------------------------------------===//
+#ifndef _AMDILNIDEVICE_H_
+#define _AMDILNIDEVICE_H_
+#include "AMDILEvergreenDevice.h"
+#include "AMDILSubtarget.h"
+
+namespace llvm {
+  class AMDILSubtarget;
+//===---------------------------------------------------------------------===//
+// NI generation of devices and their respective sub classes
+//===---------------------------------------------------------------------===//
+
+// The AMDILNIDevice is the base class for all Northern Island series of
+// cards. It is very similar to the AMDILEvergreenDevice, with the major
+// exception being differences in wavefront size and hardware capabilities.  The
+// NI devices are all 64 wide wavefronts and also add support for signed 24 bit
+// integer operations
+
+  class AMDILNIDevice : public AMDILEvergreenDevice {
+    public:
+      AMDILNIDevice(AMDILSubtarget*);
+      virtual ~AMDILNIDevice();
+      // Local-data-share size for NI parts (MAX_LDS_SIZE_900) when the
+      // local-memory hardware path is in use; zero otherwise.
+      virtual size_t getMaxLDSSize() const;
+      // Reports AMDILDeviceInfo::HD6XXX for the whole NI family.
+      virtual uint32_t getGeneration() const;
+  }; // AMDILNIDevice
+
+// Just as the AMDILCypressDevice is the double capable version of the
+// AMDILEvergreenDevice, the AMDILCaymanDevice is the double capable version of
+// the AMDILNIDevice.  The other major difference is that the Cayman device
+// has 4-wide ALUs, whereas the rest of the NI family has 5-wide ALUs.
+
+  class AMDILCaymanDevice: public AMDILNIDevice {
+    public:
+      AMDILCaymanDevice(AMDILSubtarget*);
+      virtual ~AMDILCaymanDevice();
+    private:
+      // Applies the Cayman-specific capability bits (double precision,
+      // FMA, hardware signed 24-bit ops); called from the constructor.
+      virtual void setCaps();
+  }; // AMDILCaymanDevice
+
+  // NI parts share the Evergreen (800-series) LDS size limit.
+  static const unsigned int MAX_LDS_SIZE_900 = AMDILDevice::MAX_LDS_SIZE_800;
+} // namespace llvm
+#endif // _AMDILNIDEVICE_H_
diff --git a/lib/Target/AMDGPU/AMDILNodes.td b/lib/Target/AMDGPU/AMDILNodes.td
new file mode 100644
index 0000000..699fdad
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILNodes.td
@@ -0,0 +1,47 @@
+//===- AMDILNodes.td - AMD IL nodes ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Flow Control DAG Nodes
+//===----------------------------------------------------------------------===//
+def IL_brcond      : SDNode<"AMDILISD::BRANCH_COND", SDTIL_BRCond, [SDNPHasChain]>;
+
+//===----------------------------------------------------------------------===//
+// Comparison DAG Nodes
+//===----------------------------------------------------------------------===//
+def IL_cmp       : SDNode<"AMDILISD::CMP", SDTIL_Cmp>;
+
+//===----------------------------------------------------------------------===//
+// Call/Return DAG Nodes
+//===----------------------------------------------------------------------===//
+def IL_call      : SDNode<"AMDILISD::CALL", SDTIL_Call,
+    [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
+def IL_retflag       : SDNode<"AMDILISD::RET_FLAG", SDTNone,
+    [SDNPHasChain, SDNPOptInGlue]>;
+
+//===--------------------------------------------------------------------===//
+// Instructions
+//===--------------------------------------------------------------------===//
+// Select / arithmetic helper nodes: conditional move (CMOVLOG), DIV_INF,
+// and multiply-add (MAD).  DIV_INF semantics are not evident from this
+// file — TODO(review): confirm against the AMDIL lowering code.
+def IL_cmov_logical : SDNode<"AMDILISD::CMOVLOG", SDTIL_GenTernaryOp>;
+def IL_div_inf      : SDNode<"AMDILISD::DIV_INF", SDTIL_GenBinaryOp>;
+def IL_mad          : SDNode<"AMDILISD::MAD", SDTIL_GenTernaryOp>;
+
+//===----------------------------------------------------------------------===//
+// Integer functions
+//===----------------------------------------------------------------------===//
+def IL_umul        : SDNode<"AMDILISD::UMUL"    , SDTIntBinOp,
+    [SDNPCommutative, SDNPAssociative]>;
+
+//===----------------------------------------------------------------------===//
+// Vector functions
+//===----------------------------------------------------------------------===//
+def IL_vbuild     : SDNode<"AMDILISD::VBUILD", SDTIL_GenVecBuild,
+    []>;
diff --git a/lib/Target/AMDGPU/AMDILOperands.td b/lib/Target/AMDGPU/AMDILOperands.td
new file mode 100644
index 0000000..1014f95
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILOperands.td
@@ -0,0 +1,32 @@
+//===- AMDILOperands.td - AMD IL Operands ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Custom memory operand
+//===----------------------------------------------------------------------===//
+
+// Memory operand printed via printMemOperand; carried as a pair of i32
+// registers — presumably (base, offset); TODO(review): confirm the role
+// of the second register against the AsmPrinter implementation.
+def MEMI32  : Operand<i32> {
+    let PrintMethod = "printMemOperand";
+    let MIOperandInfo = (ops GPRI32, GPRI32);
+}
+
+// Call target types
+def calltarget   : Operand<i32>;
+def brtarget   : Operand<OtherVT>;
+
+// def v2i8imm : Operand<v2i8>;
+// def v4i8imm : Operand<v4i8>;
+// def v2i16imm : Operand<v2i16>;
+// def v4i16imm : Operand<v4i16>;
+// def v2i32imm : Operand<v2i32>;
+// def v4i32imm : Operand<v4i32>;
+// def v2i64imm : Operand<v2i64>;
+// def v2f32imm : Operand<v2f32>;
+// def v4f32imm : Operand<v4f32>;
+// def v2f64imm : Operand<v2f64>;
+
diff --git a/lib/Target/AMDGPU/AMDILPatterns.td b/lib/Target/AMDGPU/AMDILPatterns.td
new file mode 100644
index 0000000..aa59bcb
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILPatterns.td
@@ -0,0 +1,504 @@
+//===- AMDILPatterns.td - AMDIL Target Patterns------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Store pattern fragments
+//===----------------------------------------------------------------------===//
+def truncstorei64 : PatFrag<(ops node:$val, node:$ptr),
+                           (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+def truncstorev2i8 : PatFrag<(ops node:$val, node:$ptr),
+                           (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i8;
+}]>;
+def truncstorev2i16 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i16;
+}]>;
+def truncstorev2i32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i32;
+}]>;
+def truncstorev2i64 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i64;
+}]>;
+def truncstorev2f32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2f32;
+}]>;
+def truncstorev2f64 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2f64;
+}]>;
+def truncstorev4i8 : PatFrag<(ops node:$val, node:$ptr),
+                           (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i8;
+}]>;
+def truncstorev4i16 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i16;
+}]>;
+def truncstorev4i32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i32;
+}]>;
+def truncstorev4f32 : PatFrag<(ops node:$val, node:$ptr),
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4f32;
+}]>;
+
+def global_store : PatFrag<(ops node:$val, node:$ptr),
+    (store node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_store : PatFrag<(ops node:$val, node:$ptr),
+    (store node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_store : PatFrag<(ops node:$val, node:$ptr),
+    (store node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_store : PatFrag<(ops node:$val, node:$ptr),
+    (store node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei8 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei16 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei64 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref64 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i8 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i16 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i64 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v2f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f64 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v4i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i8 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v4i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i16 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v4i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def global_v4f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4f32 node:$val, node:$ptr), [{
+        return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstore node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei8 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei16 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei64 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref64 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i8 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i16 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i64 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v2f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f64 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v4i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i8 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v4i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i16 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v4i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def private_v4f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4f32 node:$val, node:$ptr), [{
+        return isPrivateStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def local_trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstore node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei8 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei16 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei64 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref64 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i8 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i16 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i64 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v2f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f64 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v4i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i8 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v4i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i16 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v4i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def local_v4f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4f32 node:$val, node:$ptr), [{
+        return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def region_trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstore node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei8 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei16 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorei64 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstoref64 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i8 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i16 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2i64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2i64 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v2f64trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev2f64 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v4i8trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i8 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v4i16trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i16 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v4i32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4i32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+def region_v4f32trunc_store : PatFrag<(ops node:$val, node:$ptr),
+    (truncstorev4f32 node:$val, node:$ptr), [{
+        return isRegionStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Load pattern fragments: sext/zext/aext select sign-/zero-/any-extending loads
+//===----------------------------------------------------------------------===//
+// Global address space loads
+def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def global_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def global_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def global_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+// Private address space loads
+def private_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isPrivateLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def private_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+    return isPrivateLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def private_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+    return isPrivateLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def private_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+    return isPrivateLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+// Local address space loads
+def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def local_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+    return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def local_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+    return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def local_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+    return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+// Region address space loads
+def region_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isRegionLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def region_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+    return isRegionLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def region_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+    return isRegionLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def region_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+    return isRegionLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+// Constant address space loads
+def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+def constant_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+def constant_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+def constant_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+// Constant pool loads
+def cp_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return isCPLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def cp_sext_load : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return isCPLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def cp_zext_load : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return isCPLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+def cp_aext_load : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return isCPLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Complex addressing mode patterns
+//===----------------------------------------------------------------------===//
+def ADDR : ComplexPattern<i32, 2, "SelectADDR", [], []>;
+def ADDRF : ComplexPattern<i32, 2, "SelectADDR", [frameindex], []>;
+def ADDR64 : ComplexPattern<i64, 2, "SelectADDR64", [], []>;
+def ADDR64F : ComplexPattern<i64, 2, "SelectADDR64", [frameindex], []>;
+
+
+//===----------------------------------------------------------------------===//
+// Conditional Instruction Pattern Leafs
+//===----------------------------------------------------------------------===//
+class IL_CC_Op<int N> : PatLeaf<(i32 N)>;
+def IL_CC_D_EQ  : IL_CC_Op<0>;
+def IL_CC_D_GE  : IL_CC_Op<1>;
+def IL_CC_D_LT  : IL_CC_Op<2>;
+def IL_CC_D_NE  : IL_CC_Op<3>;
+def IL_CC_F_EQ  : IL_CC_Op<4>;
+def IL_CC_F_GE  : IL_CC_Op<5>;
+def IL_CC_F_LT  : IL_CC_Op<6>;
+def IL_CC_F_NE  : IL_CC_Op<7>;
+def IL_CC_I_EQ  : IL_CC_Op<8>;
+def IL_CC_I_GE  : IL_CC_Op<9>;
+def IL_CC_I_LT  : IL_CC_Op<10>;
+def IL_CC_I_NE  : IL_CC_Op<11>;
+def IL_CC_U_GE  : IL_CC_Op<12>;
+def IL_CC_U_LT  : IL_CC_Op<13>;
+// Pseudo IL comparison instructions that aren't natively supported
+def IL_CC_F_GT  : IL_CC_Op<14>;
+def IL_CC_U_GT  : IL_CC_Op<15>;
+def IL_CC_I_GT  : IL_CC_Op<16>;
+def IL_CC_D_GT  : IL_CC_Op<17>;
+def IL_CC_F_LE  : IL_CC_Op<18>;
+def IL_CC_U_LE  : IL_CC_Op<19>;
+def IL_CC_I_LE  : IL_CC_Op<20>;
+def IL_CC_D_LE  : IL_CC_Op<21>;
+def IL_CC_F_UNE : IL_CC_Op<22>;
+def IL_CC_F_UEQ : IL_CC_Op<23>;
+def IL_CC_F_ULT : IL_CC_Op<24>;
+def IL_CC_F_UGT : IL_CC_Op<25>;
+def IL_CC_F_ULE : IL_CC_Op<26>;
+def IL_CC_F_UGE : IL_CC_Op<27>;
+def IL_CC_F_ONE : IL_CC_Op<28>;
+def IL_CC_F_OEQ : IL_CC_Op<29>;
+def IL_CC_F_OLT : IL_CC_Op<30>;
+def IL_CC_F_OGT : IL_CC_Op<31>;
+def IL_CC_F_OLE : IL_CC_Op<32>;
+def IL_CC_F_OGE : IL_CC_Op<33>;
+def IL_CC_D_UNE : IL_CC_Op<34>;
+def IL_CC_D_UEQ : IL_CC_Op<35>;
+def IL_CC_D_ULT : IL_CC_Op<36>;
+def IL_CC_D_UGT : IL_CC_Op<37>;
+def IL_CC_D_ULE : IL_CC_Op<38>;
+def IL_CC_D_UGE : IL_CC_Op<39>;
+def IL_CC_D_ONE : IL_CC_Op<40>;
+def IL_CC_D_OEQ : IL_CC_Op<41>;
+def IL_CC_D_OLT : IL_CC_Op<42>;
+def IL_CC_D_OGT : IL_CC_Op<43>;
+def IL_CC_D_OLE : IL_CC_Op<44>;
+def IL_CC_D_OGE : IL_CC_Op<45>;
+def IL_CC_U_EQ  : IL_CC_Op<46>;
+def IL_CC_U_NE  : IL_CC_Op<47>;
+def IL_CC_F_O   : IL_CC_Op<48>;
+def IL_CC_D_O   : IL_CC_Op<49>;
+def IL_CC_F_UO  : IL_CC_Op<50>;
+def IL_CC_D_UO  : IL_CC_Op<51>;
+def IL_CC_L_LE  : IL_CC_Op<52>;
+def IL_CC_L_GE  : IL_CC_Op<53>;
+def IL_CC_L_EQ  : IL_CC_Op<54>;
+def IL_CC_L_NE  : IL_CC_Op<55>;
+def IL_CC_L_LT  : IL_CC_Op<56>;
+def IL_CC_L_GT  : IL_CC_Op<57>;
+def IL_CC_UL_LE  : IL_CC_Op<58>;
+def IL_CC_UL_GE  : IL_CC_Op<59>;
+def IL_CC_UL_EQ  : IL_CC_Op<60>;
+def IL_CC_UL_NE  : IL_CC_Op<61>;
+def IL_CC_UL_LT  : IL_CC_Op<62>;
+def IL_CC_UL_GT  : IL_CC_Op<63>;
diff --git a/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
new file mode 100644
index 0000000..ed016da
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILPeepholeOptimizer.cpp
@@ -0,0 +1,1258 @@
+//===-- AMDILPeepholeOptimizer.cpp - AMDIL Peephole optimizations ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "PeepholeOpt"
+#ifdef DEBUG
+#define DEBUGME (DebugFlag && isCurrentDebugType(DEBUG_TYPE))
+#else
+#define DEBUGME 0
+#endif
+
+#include "AMDILAlgorithms.tpp"
+#include "AMDILDevices.h"
+#include "AMDILInstrInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Constants.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <sstream>
+
+#if 0
+STATISTIC(PointerAssignments, "Number of dynamic pointer "
+    "assigments discovered");
+STATISTIC(PointerSubtract, "Number of pointer subtractions discovered");
+#endif
+
+using namespace llvm;
+// The Peephole optimization pass is used to do simple last minute optimizations
+// that are required for correct code or to remove redundant functions
+namespace {
+
+class OpaqueType;
+
+class LLVM_LIBRARY_VISIBILITY AMDILPeepholeOpt : public FunctionPass {
+public:
+  TargetMachine &TM;
+  static char ID;
+  AMDILPeepholeOpt(TargetMachine &tm AMDIL_OPT_LEVEL_DECL);
+  ~AMDILPeepholeOpt();
+  const char *getPassName() const;
+  bool runOnFunction(Function &F);
+  bool doInitialization(Module &M);
+  bool doFinalization(Module &M);
+  void getAnalysisUsage(AnalysisUsage &AU) const;
+protected:
+private:
+  // Function to initiate all of the instruction level optimizations.
+  bool instLevelOptimizations(BasicBlock::iterator *inst);
+  // Quick check to see if we need to dump all of the pointers into the
+  // arena. If this is correct, then we set all pointers to exist in arena. This
+  // is a workaround for aliasing of pointers in a struct/union.
+  bool dumpAllIntoArena(Function &F);
+  // Because I don't want to invalidate any pointers while in the
+  // safeNestedForEachFunction. I push atomic conversions to a vector and handle
+  // it later. This function does the conversions if required.
+  void doAtomicConversionIfNeeded(Function &F);
+  // Because __amdil_is_constant cannot be properly evaluated if
+  // optimizations are disabled, the call's are placed in a vector
+  // and evaluated after the __amdil_image* functions are evaluated
+  // which should allow the __amdil_is_constant function to be
+  // evaluated correctly.
+  void doIsConstCallConversionIfNeeded();
+  bool mChanged;
+  bool mDebug;
+  bool mConvertAtomics;
+  CodeGenOpt::Level optLevel;
+  // Run a series of tests to see if we can optimize a CALL instruction.
+  bool optimizeCallInst(BasicBlock::iterator *bbb);
+  // A peephole optimization to optimize bit extract sequences.
+  bool optimizeBitExtract(Instruction *inst);
+  // A peephole optimization to optimize bit insert sequences.
+  bool optimizeBitInsert(Instruction *inst);
+  bool setupBitInsert(Instruction *base, 
+                      Instruction *&src, 
+                      Constant *&mask, 
+                      Constant *&shift);
+  // Expand the bit field insert instruction on versions of OpenCL that
+  // don't support it.
+  bool expandBFI(CallInst *CI);
+  // Expand the bit field mask instruction on version of OpenCL that 
+  // don't support it.
+  bool expandBFM(CallInst *CI);
+  // On 7XX and 8XX operations, we do not have 24 bit signed operations. So in
+  // this case we need to expand them. These functions check for 24bit functions
+  // and then expand.
+  bool isSigned24BitOps(CallInst *CI);
+  void expandSigned24BitOps(CallInst *CI);
+  // One optimization that can occur is that if the required workgroup size is
+  // specified then the result of get_local_size is known at compile time and
+  // can be returned accordingly.
+  bool isRWGLocalOpt(CallInst *CI);
+  // On northern island cards, the division is slightly less accurate than on
+  // previous generations, so we need to utilize a more accurate division. So we
+  // can translate the accurate divide to a normal divide on all other cards.
+  bool convertAccurateDivide(CallInst *CI);
+  void expandAccurateDivide(CallInst *CI);
+  // If the alignment is set incorrectly, it can produce really inefficient
+  // code. This checks for this scenario and fixes it if possible.
+  bool correctMisalignedMemOp(Instruction *inst);
+
+  // If we are in no opt mode, then we need to make sure that
+  // local samplers are properly propagated as constant propagation 
+  // doesn't occur and we need to know the value of kernel defined
+  // samplers at compile time.
+  bool propagateSamplerInst(CallInst *CI);
+
+  // Helper functions
+
+  // Group of functions that recursively calculate the size of a structure based
+  // on it's sub-types.
+  size_t getTypeSize(Type * const T, bool dereferencePtr = false);
+  size_t getTypeSize(StructType * const ST, bool dereferencePtr = false);
+  size_t getTypeSize(IntegerType * const IT, bool dereferencePtr = false);
+  size_t getTypeSize(FunctionType * const FT,bool dereferencePtr = false);
+  size_t getTypeSize(ArrayType * const AT, bool dereferencePtr = false);
+  size_t getTypeSize(VectorType * const VT, bool dereferencePtr = false);
+  size_t getTypeSize(PointerType * const PT, bool dereferencePtr = false);
+  size_t getTypeSize(OpaqueType * const OT, bool dereferencePtr = false);
+
+  LLVMContext *mCTX;
+  Function *mF;
+  const AMDILSubtarget *mSTM;
+  SmallVector< std::pair<CallInst *, Function *>, 16> atomicFuncs;
+  SmallVector<CallInst *, 16> isConstVec;
+}; // class AMDILPeepholeOpt
+  char AMDILPeepholeOpt::ID = 0;
+} // anonymous namespace
+
+namespace llvm {
+  FunctionPass *
+  createAMDILPeepholeOpt(TargetMachine &tm AMDIL_OPT_LEVEL_DECL) 
+  {
+    return new AMDILPeepholeOpt(tm AMDIL_OPT_LEVEL_VAR);
+  }
+} // llvm namespace
+
+AMDILPeepholeOpt::AMDILPeepholeOpt(TargetMachine &tm AMDIL_OPT_LEVEL_DECL)
+  : FunctionPass(ID), TM(tm) 
+{
+  mDebug = DEBUGME;
+  optLevel = TM.getOptLevel();
+
+}
+
+AMDILPeepholeOpt::~AMDILPeepholeOpt() 
+{
+}
+
+const char *
+AMDILPeepholeOpt::getPassName() const 
+{
+  return "AMDIL PeepHole Optimization Pass";
+}
+// Returns true if Ty is, or transitively contains, a pointer type.
+bool
+containsPointerType(Type *Ty)
+{
+  if (!Ty) {
+    return false;
+  }
+  switch(Ty->getTypeID()) {
+  default:
+    return false;
+  case Type::StructTyID: {
+    // cast<> (not dyn_cast<>): the switch guarantees Ty is a StructType.
+    const StructType *ST = cast<StructType>(Ty);
+    for (StructType::element_iterator stb = ST->element_begin(),
+           ste = ST->element_end(); stb != ste; ++stb) {
+      if (!containsPointerType(*stb)) {
+        continue;
+      }
+      return true;
+    }
+    break;
+  }
+  case Type::VectorTyID:
+  case Type::ArrayTyID:
+    return containsPointerType(cast<SequentialType>(Ty)->getElementType());
+  case Type::PointerTyID:
+    return true;
+  }
+  return false;
+}
+
+bool
+AMDILPeepholeOpt::dumpAllIntoArena(Function &F)
+{
+  bool dumpAll = false;
+  for (Function::const_arg_iterator cab = F.arg_begin(),
+       cae = F.arg_end(); cab != cae; ++cab) {
+    const Argument *arg = cab;
+    const PointerType *PT = dyn_cast<PointerType>(arg->getType());
+    if (!PT) {
+      continue;
+    }
+    Type *DereferencedType = PT->getElementType();
+    // Only struct/union arguments can hide aliased pointers inside them.
+    if (!isa<StructType>(DereferencedType)) {
+      continue;
+    }
+    if (!containsPointerType(DereferencedType)) {
+      continue;
+    }
+    // FIXME: Because a pointer inside of a struct/union may be aliased to
+    // another pointer we need to take the conservative approach and place all
+    // pointers into the arena until more advanced detection is implemented.
+    dumpAll = true;
+  }
+  return dumpAll;
+}
+void
+AMDILPeepholeOpt::doIsConstCallConversionIfNeeded()
+{
+  if (isConstVec.empty()) {
+    return;
+  }
+  for (unsigned x = 0, y = isConstVec.size(); x < y; ++x) {
+    CallInst *CI = isConstVec[x];
+    Constant *CV = dyn_cast<Constant>(CI->getOperand(0));
+    Type *aType = Type::getInt32Ty(*mCTX);
+    Value *Val = (CV != NULL) ? ConstantInt::get(aType, 1)
+      : ConstantInt::get(aType, 0);
+    CI->replaceAllUsesWith(Val);
+    CI->eraseFromParent();
+  }
+  isConstVec.clear();
+}
+void 
+AMDILPeepholeOpt::doAtomicConversionIfNeeded(Function &F) 
+{
+  // Don't do anything if we don't have any atomic operations.
+  if (atomicFuncs.empty()) {
+    return;
+  }
+  // Change the function name for the atomic if it is required
+  uint32_t size = atomicFuncs.size();
+  for (uint32_t x = 0; x < size; ++x) {
+    atomicFuncs[x].first->setOperand(
+        atomicFuncs[x].first->getNumOperands()-1, 
+        atomicFuncs[x].second);
+
+  }
+  mChanged = true;
+  if (mConvertAtomics) {
+    return;
+  }
+}
+
+bool 
+AMDILPeepholeOpt::runOnFunction(Function &MF) 
+{
+  mChanged = false;
+  mF = &MF;
+  mSTM = &TM.getSubtarget<AMDILSubtarget>();
+  if (mDebug) {
+    MF.dump();
+  }
+  mCTX = &MF.getType()->getContext();
+  mConvertAtomics = true;
+  safeNestedForEach(MF.begin(), MF.end(), MF.begin()->begin(),
+     std::bind1st(std::mem_fun(&AMDILPeepholeOpt::instLevelOptimizations),
+                  this));
+
+  doAtomicConversionIfNeeded(MF);
+  doIsConstCallConversionIfNeeded();
+
+  if (mDebug) {
+    MF.dump();
+  }
+  return mChanged;
+}
+
+bool
+AMDILPeepholeOpt::optimizeCallInst(BasicBlock::iterator *bbb)
+{
+  Instruction *inst = (*bbb);
+  CallInst *CI = dyn_cast<CallInst>(inst);
+  if (!CI) {
+    return false;
+  }
+  if (isSigned24BitOps(CI)) {
+    expandSigned24BitOps(CI);
+    ++(*bbb);
+    CI->eraseFromParent();
+    return true;
+  }
+  if (propagateSamplerInst(CI)) {
+    return false;
+  }
+  if (expandBFI(CI) || expandBFM(CI)) {
+    ++(*bbb);
+    CI->eraseFromParent();
+    return true;
+  }
+  if (convertAccurateDivide(CI)) {
+    expandAccurateDivide(CI);
+    ++(*bbb);
+    CI->eraseFromParent();
+    return true;
+  }
+
+  StringRef calleeName = CI->getOperand(CI->getNumOperands()-1)->getName();
+  if (calleeName.startswith("__amdil_is_constant")) {
+    // If we do not have optimizations, then this
+    // cannot be properly evaluated, so we add the
+    // call instruction to a vector and process
+    // them at the end of processing after the
+    // samplers have been correctly handled.
+    if (optLevel == CodeGenOpt::None) {
+      isConstVec.push_back(CI);
+      return false;
+    } else {
+      Constant *CV = dyn_cast<Constant>(CI->getOperand(0));
+      Type *aType = Type::getInt32Ty(*mCTX);
+      Value *Val = (CV != NULL) ? ConstantInt::get(aType, 1)
+        : ConstantInt::get(aType, 0);
+      CI->replaceAllUsesWith(Val);
+      ++(*bbb);
+      CI->eraseFromParent();
+      return true;
+    }
+  }
+
+  if (calleeName.equals("__amdil_is_asic_id_i32")) {
+    ConstantInt *CV = dyn_cast<ConstantInt>(CI->getOperand(0));
+    Type *aType = Type::getInt32Ty(*mCTX);
+    Value *Val = CV;
+    if (Val) {
+      Val = ConstantInt::get(aType,
+          mSTM->device()->getDeviceFlag() & CV->getZExtValue());
+    } else {
+      Val = ConstantInt::get(aType, 0);
+    }
+    CI->replaceAllUsesWith(Val);
+    ++(*bbb);
+    CI->eraseFromParent();
+    return true;
+  }
+  Function *F = dyn_cast<Function>(CI->getOperand(CI->getNumOperands()-1));
+  if (!F) {
+    return false;
+  }
+  if (F->getName().startswith("__atom") && !CI->getNumUses()
+      && F->getName().find("_xchg") == StringRef::npos) {
+    std::string buffer(F->getName().str() + "_noret");
+    F = dyn_cast<Function>(
+          F->getParent()->getOrInsertFunction(buffer, F->getFunctionType()));
+    atomicFuncs.push_back(std::make_pair(CI, F)); // deduce types (C++11-safe)
+  }
+
+  if (!mSTM->device()->isSupported(AMDILDeviceInfo::ArenaSegment)
+      && !mSTM->device()->isSupported(AMDILDeviceInfo::MultiUAV)) {
+    return false;
+  }
+  if (!mConvertAtomics) {
+    return false;
+  }
+  StringRef name = F->getName();
+  if (name.startswith("__atom") && name.find("_g") != StringRef::npos) {
+    mConvertAtomics = false;
+  }
+  return false;
+}
+
+bool
+AMDILPeepholeOpt::setupBitInsert(Instruction *base, 
+    Instruction *&src, 
+    Constant *&mask, 
+    Constant *&shift)
+{
+  // Decomposes one operand of the OR examined by optimizeBitInsert into
+  // the shape (src & mask) << shift.  Results come back through the
+  // reference out-parameters; mask and/or shift remain NULL when that
+  // part of the pattern is absent.  Returns false when 'base' matches
+  // neither a Shl nor an And, or when no constant mask/shift was found.
+  if (!base) {
+    if (mDebug) {
+      dbgs() << "Null pointer passed into function.\n";
+    }
+    return false;
+  }
+  bool andOp = false;
+  if (base->getOpcode() == Instruction::Shl) {
+    shift = dyn_cast<Constant>(base->getOperand(1));
+  } else if (base->getOpcode() == Instruction::And) {
+    mask = dyn_cast<Constant>(base->getOperand(1));
+    andOp = true;
+  } else {
+    if (mDebug) {
+      dbgs() << "Failed setup with no Shl or And instruction on base opcode!\n";
+    }
+    // If the base is neither a Shl or a And, we don't fit any of the patterns above.
+    return false;
+  }
+  src = dyn_cast<Instruction>(base->getOperand(0));
+  if (!src) {
+    if (mDebug) {
+      dbgs() << "Failed setup since the base operand is not an instruction!\n";
+    }
+    return false;
+  }
+  // If we find an 'and' operation, then we don't need to
+  // find the next operation as we already know the
+  // bits that are valid at this point.
+  if (andOp) {
+    return true;
+  }
+  // Otherwise look one level deeper for the And (or nested Shl) feeding
+  // the shift we found above.
+  if (src->getOpcode() == Instruction::Shl && !shift) {
+    shift = dyn_cast<Constant>(src->getOperand(1));
+    src = dyn_cast<Instruction>(src->getOperand(0));
+  } else if (src->getOpcode() == Instruction::And && !mask) {
+    mask = dyn_cast<Constant>(src->getOperand(1));
+  }
+  if (!mask && !shift) {
+    if (mDebug) {
+      dbgs() << "Failed setup since both mask and shift are NULL!\n";
+    }
+    // Did not find a constant mask or a shift.
+    return false;
+  }
+  return true;
+}
+bool
+AMDILPeepholeOpt::optimizeBitInsert(Instruction *inst) 
+{
+  // Tries to fuse an OR of two masked/shifted i32 values into a single
+  // __amdil_ubit_insert(width, offset, LHSSrc, RHSSrc) call (supported on
+  // HD5XXX and later; scalar i32 only for now).  Returns true when the
+  // OR's uses were forwarded to the new call; the now-dead OR itself is
+  // left in place.
+  if (!inst) {
+    return false;
+  }
+  if (!inst->isBinaryOp()) {
+    return false;
+  }
+  if (inst->getOpcode() != Instruction::Or) {
+    return false;
+  }
+  if (optLevel == CodeGenOpt::None) {
+    return false;
+  }
+  // We want to do an optimization on a sequence of ops that in the end equals a
+  // single ISA instruction.
+  // The base pattern for this optimization is - ((A & B) << C) | ((D & E) << F)
+  // Some simplified versions of this pattern are as follows:
+  // (A & B) | (D & E) when B & E == 0 && C == 0 && F == 0
+  // ((A & B) << C) | (D & E) when B ^ E == 0 && (1 << C) >= E
+  // (A & B) | ((D & E) << F) when B ^ E == 0 && (1 << F) >= B
+  // (A & B) | (D << F) when (1 << F) >= B
+  // (A << C) | (D & E) when (1 << C) >= E
+  if (mSTM->device()->getGeneration() == AMDILDeviceInfo::HD4XXX) {
+    // The HD4XXX hardware doesn't support the ubit_insert instruction.
+    return false;
+  }
+  Type *aType = inst->getType();
+  bool isVector = aType->isVectorTy();
+  int numEle = 1;
+  // This optimization only works on 32bit integers.
+  if (aType->getScalarType()
+      != Type::getInt32Ty(inst->getContext())) {
+    return false;
+  }
+  if (isVector) {
+    const VectorType *VT = dyn_cast<VectorType>(aType);
+    numEle = VT->getNumElements();
+    // We currently cannot support more than 4 elements in a intrinsic and we
+    // cannot support Vec3 types.
+    if (numEle > 4 || numEle == 3) {
+      return false;
+    }
+  }
+  // TODO: Handle vectors.
+  if (isVector) {
+    if (mDebug) {
+      dbgs() << "!!! Vectors are not supported yet!\n";
+    }
+    return false;
+  }
+  // Decompose each side of the OR into (src & mask) << shift.
+  Instruction *LHSSrc = NULL, *RHSSrc = NULL;
+  Constant *LHSMask = NULL, *RHSMask = NULL;
+  Constant *LHSShift = NULL, *RHSShift = NULL;
+  Instruction *LHS = dyn_cast<Instruction>(inst->getOperand(0));
+  Instruction *RHS = dyn_cast<Instruction>(inst->getOperand(1));
+  if (!setupBitInsert(LHS, LHSSrc, LHSMask, LHSShift)) {
+    if (mDebug) {
+      dbgs() << "Found an OR Operation that failed setup!\n";
+      inst->dump();
+      if (LHS) { LHS->dump(); }
+      if (LHSSrc) { LHSSrc->dump(); }
+      if (LHSMask) { LHSMask->dump(); }
+      if (LHSShift) { LHSShift->dump(); }
+    }
+    // There was an issue with the setup for BitInsert.
+    return false;
+  }
+  if (!setupBitInsert(RHS, RHSSrc, RHSMask, RHSShift)) {
+    if (mDebug) {
+      dbgs() << "Found an OR Operation that failed setup!\n";
+      inst->dump();
+      if (RHS) { RHS->dump(); }
+      if (RHSSrc) { RHSSrc->dump(); }
+      if (RHSMask) { RHSMask->dump(); }
+      if (RHSShift) { RHSShift->dump(); }
+    }
+    // There was an issue with the setup for BitInsert.
+    return false;
+  }
+  if (mDebug) {
+    dbgs() << "Found an OR operation that can possible be optimized to ubit insert!\n";
+    dbgs() << "Op:        "; inst->dump();
+    dbgs() << "LHS:       "; if (LHS) { LHS->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "LHS Src:   "; if (LHSSrc) { LHSSrc->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "LHS Mask:  "; if (LHSMask) { LHSMask->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "LHS Shift: "; if (LHSShift) { LHSShift->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "RHS:       "; if (RHS) { RHS->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "RHS Src:   "; if (RHSSrc) { RHSSrc->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "RHS Mask:  "; if (RHSMask) { RHSMask->dump(); } else { dbgs() << "(None)\n"; }
+    dbgs() << "RHS Shift: "; if (RHSShift) { RHSShift->dump(); } else { dbgs() << "(None)\n"; }
+  }
+  // Width/offset of each field: derived from popcount/trailing-zeros of
+  // the mask when present, otherwise from the shift amount alone.
+  Constant *offset = NULL;
+  Constant *width = NULL;
+  int32_t lhsMaskVal = 0, rhsMaskVal = 0;
+  int32_t lhsShiftVal = 0, rhsShiftVal = 0;
+  int32_t lhsMaskWidth = 0, rhsMaskWidth = 0;
+  int32_t lhsMaskOffset = 0, rhsMaskOffset = 0;
+  lhsMaskVal = (int32_t)(LHSMask 
+      ? dyn_cast<ConstantInt>(LHSMask)->getZExtValue() : 0);
+  rhsMaskVal = (int32_t)(RHSMask 
+      ? dyn_cast<ConstantInt>(RHSMask)->getZExtValue() : 0);
+  lhsShiftVal = (int32_t)(LHSShift 
+      ? dyn_cast<ConstantInt>(LHSShift)->getZExtValue() : 0);
+  rhsShiftVal = (int32_t)(RHSShift 
+      ? dyn_cast<ConstantInt>(RHSShift)->getZExtValue() : 0);
+  lhsMaskWidth = lhsMaskVal ? CountPopulation_32(lhsMaskVal) : 32 - lhsShiftVal;
+  rhsMaskWidth = rhsMaskVal ? CountPopulation_32(rhsMaskVal) : 32 - rhsShiftVal;
+  lhsMaskOffset = lhsMaskVal ? CountTrailingZeros_32(lhsMaskVal) : lhsShiftVal;
+  rhsMaskOffset = rhsMaskVal ? CountTrailingZeros_32(rhsMaskVal) : rhsShiftVal;
+  // TODO: Handle the case of A & B | D & ~B(i.e. inverted masks).
+  if (mDebug) {
+      dbgs() << "Found pattern: \'((A" << (LHSMask ? " & B)" : ")");
+      dbgs() << (LHSShift ? " << C)" : ")") << " | ((D" ;
+      dbgs() << (RHSMask ? " & E)" : ")");
+      dbgs() << (RHSShift ? " << F)\'\n" : ")\'\n");
+      dbgs() << "A = LHSSrc\t\tD = RHSSrc \n";
+      dbgs() << "B = " << lhsMaskVal << "\t\tE = " << rhsMaskVal << "\n";
+      dbgs() << "C = " << lhsShiftVal << "\t\tF = " << rhsShiftVal << "\n";
+      dbgs() << "width(B) = " << lhsMaskWidth;
+      dbgs() << "\twidth(E) = " << rhsMaskWidth << "\n";
+      dbgs() << "offset(B) = " << lhsMaskOffset;
+      dbgs() << "\toffset(E) = " << rhsMaskOffset << "\n";
+      dbgs() << "Constraints: \n";
+      dbgs() << "\t(1) B ^ E == 0\n";
+      dbgs() << "\t(2-LHS) B is a mask\n";
+      dbgs() << "\t(2-LHS) E is a mask\n";
+      dbgs() << "\t(3-LHS) (offset(B)) >= (width(E) + offset(E))\n";
+      dbgs() << "\t(3-RHS) (offset(E)) >= (width(B) + offset(B))\n";
+  }
+  // Constraint 1: the two masks must not overlap (XOR of equal masks is
+  // zero; when both are present and XOR to 0 they cover the same bits).
+  if ((lhsMaskVal || rhsMaskVal) && !(lhsMaskVal ^ rhsMaskVal)) {
+    if (mDebug) {
+      dbgs() << lhsMaskVal << " ^ " << rhsMaskVal;
+      dbgs() << " = " << (lhsMaskVal ^ rhsMaskVal) << "\n";
+      dbgs() << "Failed constraint 1!\n";
+    }
+    return false;
+  }
+  if (mDebug) {
+    dbgs() << "LHS = " << lhsMaskOffset << "";
+    dbgs() << " >= (" << rhsMaskWidth << " + " << rhsMaskOffset << ") = ";
+    dbgs() << (lhsMaskOffset >= (rhsMaskWidth + rhsMaskOffset));
+    dbgs() << "\nRHS = " << rhsMaskOffset << "";
+    dbgs() << " >= (" << lhsMaskWidth << " + " << lhsMaskOffset << ") = ";
+    dbgs() << (rhsMaskOffset >= (lhsMaskWidth + lhsMaskOffset));
+    dbgs() << "\n";
+  }
+  // Constraint 3: one side's field must sit entirely above the other's.
+  // The side holding the inserted field becomes LHSSrc; the other side
+  // provides the background bits (RHSSrc).
+  if (lhsMaskOffset >= (rhsMaskWidth + rhsMaskOffset)) {
+    offset = ConstantInt::get(aType, lhsMaskOffset, false);
+    width = ConstantInt::get(aType, lhsMaskWidth, false);
+    RHSSrc = RHS;
+    if (!isMask_32(lhsMaskVal) && !isShiftedMask_32(lhsMaskVal)) {
+      if (mDebug) {
+        dbgs() << "Value is not a Mask: " << lhsMaskVal << "\n";
+        dbgs() << "Failed constraint 2!\n";
+      }
+      return false;
+    }
+    if (!LHSShift) {
+      LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset,
+          "MaskShr", LHS);
+    } else if (lhsShiftVal != lhsMaskOffset) {
+      LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset,
+          "MaskShr", LHS);
+    }
+    if (mDebug) {
+      dbgs() << "Optimizing LHS!\n";
+    }
+  } else if (rhsMaskOffset >= (lhsMaskWidth + lhsMaskOffset)) {
+    offset = ConstantInt::get(aType, rhsMaskOffset, false);
+    width = ConstantInt::get(aType, rhsMaskWidth, false);
+    LHSSrc = RHSSrc;
+    RHSSrc = LHS;
+    if (!isMask_32(rhsMaskVal) && !isShiftedMask_32(rhsMaskVal)) {
+      if (mDebug) {
+        dbgs() << "Non-Mask: " << rhsMaskVal << "\n";
+        dbgs() << "Failed constraint 2!\n";
+      }
+      return false;
+    }
+    if (!RHSShift) {
+      LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset,
+          "MaskShr", RHS);
+    } else if (rhsShiftVal != rhsMaskOffset) {
+      LHSSrc = BinaryOperator::Create(Instruction::LShr, LHSSrc, offset,
+          "MaskShr", RHS);
+    }
+    if (mDebug) {
+      dbgs() << "Optimizing RHS!\n";
+    }
+  } else {
+    if (mDebug) {
+      dbgs() << "Failed constraint 3!\n";
+    }
+    return false;
+  }
+  if (mDebug) {
+    dbgs() << "Width:  "; if (width) { width->dump(); } else { dbgs() << "(0)\n"; }
+    dbgs() << "Offset: "; if (offset) { offset->dump(); } else { dbgs() << "(0)\n"; }
+    dbgs() << "LHSSrc: "; if (LHSSrc) { LHSSrc->dump(); } else { dbgs() << "(0)\n"; }
+    dbgs() << "RHSSrc: "; if (RHSSrc) { RHSSrc->dump(); } else { dbgs() << "(0)\n"; }
+  }
+  if (!offset || !width) {
+    if (mDebug) {
+      dbgs() << "Either width or offset are NULL, failed detection!\n";
+    }
+    return false;
+  }
+  // Lets create the function signature.
+  std::vector<Type *> callTypes;
+  callTypes.push_back(aType);
+  callTypes.push_back(aType);
+  callTypes.push_back(aType);
+  callTypes.push_back(aType);
+  FunctionType *funcType = FunctionType::get(aType, callTypes, false);
+  std::string name = "__amdil_ubit_insert";
+  if (isVector) { name += "_v" + itostr(numEle) + "u32"; } else { name += "_u32"; }
+  Function *Func = 
+    dyn_cast<Function>(inst->getParent()->getParent()->getParent()->
+        getOrInsertFunction(llvm::StringRef(name), funcType));
+  Value *Operands[4] = {
+    width,
+    offset,
+    LHSSrc,
+    RHSSrc
+  };
+  CallInst *CI = CallInst::Create(Func, Operands, "BitInsertOpt");
+  if (mDebug) {
+    dbgs() << "Old Inst: ";
+    inst->dump();
+    dbgs() << "New Inst: ";
+    CI->dump();
+    dbgs() << "\n\n";
+  }
+  CI->insertBefore(inst);
+  inst->replaceAllUsesWith(CI);
+  return true;
+}
+
+bool 
+AMDILPeepholeOpt::optimizeBitExtract(Instruction *inst) 
+{
+  // Rewrites (A >> B) & C, where C is a constant low-bit mask, into a
+  // call to __amdil_ubit_extract(width, shift, A).  Works for scalar i32
+  // and 2/4-element i32 vectors; returns true when 'inst' was replaced
+  // (its uses are forwarded; the dead And itself is left in place).
+  if (!inst) {
+    return false;
+  }
+  if (!inst->isBinaryOp()) {
+    return false;
+  }
+  if (inst->getOpcode() != Instruction::And) {
+    return false;
+  }
+  if (optLevel == CodeGenOpt::None) {
+    return false;
+  }
+  // We want to do some simple optimizations on Shift right/And patterns. The
+  // basic optimization is to turn (A >> B) & C where A is a 32bit type, B is a
+  // value smaller than 32 and C is a mask. If C is a constant value, then the
+  // following transformation can occur. For signed integers, it turns into the
+  // function call dst = __amdil_ibit_extract(log2(C), B, A) For unsigned
+  // integers, it turns into the function call dst =
+  // __amdil_ubit_extract(log2(C), B, A) The function __amdil_[u|i]bit_extract
+  // can be found in Section 7.9 of the ATI IL spec of the stream SDK for
+  // Evergreen hardware.
+  if (mSTM->device()->getGeneration() == AMDILDeviceInfo::HD4XXX) {
+    // This does not work on HD4XXX hardware.
+    return false;
+  }
+  Type *aType = inst->getType();
+  bool isVector = aType->isVectorTy();
+  int numEle = 1;
+  // This only works on 32bit integers
+  if (aType->getScalarType()
+      != Type::getInt32Ty(inst->getContext())) {
+    return false;
+  }
+  if (isVector) {
+    const VectorType *VT = dyn_cast<VectorType>(aType);
+    numEle = VT->getNumElements();
+    // We currently cannot support more than 4 elements in a intrinsic and we
+    // cannot support Vec3 types.
+    if (numEle > 4 || numEle == 3) {
+      return false;
+    }
+  }
+  BinaryOperator *ShiftInst = dyn_cast<BinaryOperator>(inst->getOperand(0));
+  // If the first operand is not a shift instruction, then we can return as it
+  // doesn't match this pattern.
+  if (!ShiftInst || !ShiftInst->isShift()) {
+    return false;
+  }
+  // If we are a shift left, then we need don't match this pattern.
+  if (ShiftInst->getOpcode() == Instruction::Shl) {
+    return false;
+  }
+  bool isSigned = ShiftInst->isArithmeticShift();
+  Constant *AndMask = dyn_cast<Constant>(inst->getOperand(1));
+  Constant *ShrVal = dyn_cast<Constant>(ShiftInst->getOperand(1));
+  // Lets make sure that the shift value and the and mask are constant integers.
+  if (!AndMask || !ShrVal) {
+    return false;
+  }
+  Constant *newMaskConst;
+  Constant *shiftValConst;
+  if (isVector) {
+    // Handle the vector case: validate every lane independently and build
+    // per-lane width/shift constant vectors.
+    std::vector<Constant *> maskVals;
+    std::vector<Constant *> shiftVals;
+    ConstantVector *AndMaskVec = dyn_cast<ConstantVector>(AndMask);
+    ConstantVector *ShrValVec = dyn_cast<ConstantVector>(ShrVal);
+    Type *scalarType = AndMaskVec->getType()->getScalarType();
+    assert(AndMaskVec->getNumOperands() ==
+           ShrValVec->getNumOperands() && "cannot have a "
+           "combination where the number of elements to a "
+           "shift and an and are different!");
+    for (size_t x = 0, y = AndMaskVec->getNumOperands(); x < y; ++x) {
+      ConstantInt *AndCI = dyn_cast<ConstantInt>(AndMaskVec->getOperand(x));
+      ConstantInt *ShiftIC = dyn_cast<ConstantInt>(ShrValVec->getOperand(x));
+      if (!AndCI || !ShiftIC) {
+        return false;
+      }
+      uint32_t maskVal = (uint32_t)AndCI->getZExtValue();
+      if (!isMask_32(maskVal)) {
+        return false;
+      }
+      // Width of the extracted field = number of contiguous low 1-bits.
+      maskVal = (uint32_t)CountTrailingOnes_32(maskVal);
+      uint32_t shiftVal = (uint32_t)ShiftIC->getZExtValue();
+      // If the mask or shiftval is greater than the bitcount, then break out.
+      if (maskVal >= 32 || shiftVal >= 32) {
+        return false;
+      }
+      // If the mask val is greater than the the number of original bits left
+      // then this optimization is invalid.
+      if (maskVal > (32 - shiftVal)) {
+        return false;
+      }
+      maskVals.push_back(ConstantInt::get(scalarType, maskVal, isSigned));
+      shiftVals.push_back(ConstantInt::get(scalarType, shiftVal, isSigned));
+    }
+    newMaskConst = ConstantVector::get(maskVals);
+    shiftValConst = ConstantVector::get(shiftVals);
+  } else {
+    // Handle the scalar case
+    uint32_t maskVal = (uint32_t)dyn_cast<ConstantInt>(AndMask)->getZExtValue();
+    // This must be a mask value where all lower bits are set to 1 and then any
+    // bit higher is set to 0.
+    if (!isMask_32(maskVal)) {
+      return false;
+    }
+    maskVal = (uint32_t)CountTrailingOnes_32(maskVal);
+    // Count the number of bits set in the mask, this is the width of the
+    // resulting bit set that is extracted from the source value.
+    uint32_t shiftVal = (uint32_t)dyn_cast<ConstantInt>(ShrVal)->getZExtValue();
+    // If the mask or shift val is greater than the bitcount, then break out.
+    if (maskVal >= 32 || shiftVal >= 32) {
+      return false;
+    }
+    // If the mask val is greater than the the number of original bits left then
+    // this optimization is invalid.
+    if (maskVal > (32 - shiftVal)) {
+      return false;
+    }
+    newMaskConst = ConstantInt::get(aType, maskVal, isSigned);
+    shiftValConst = ConstantInt::get(aType, shiftVal, isSigned);
+  }
+  // Lets create the function signature.
+  std::vector<Type *> callTypes;
+  callTypes.push_back(aType);
+  callTypes.push_back(aType);
+  callTypes.push_back(aType);
+  FunctionType *funcType = FunctionType::get(aType, callTypes, false);
+  std::string name = "__amdil_ubit_extract";
+  if (isVector) {
+    name += "_v" + itostr(numEle) + "i32";
+  } else {
+    name += "_i32";
+  }
+  // Lets create the function.
+  Function *Func = 
+    dyn_cast<Function>(inst->getParent()->getParent()->getParent()->
+                       getOrInsertFunction(llvm::StringRef(name), funcType));
+  Value *Operands[3] = {
+    newMaskConst,
+    shiftValConst,
+    ShiftInst->getOperand(0)
+  };
+  // Lets create the Call with the operands
+  CallInst *CI = CallInst::Create(Func, Operands, "ByteExtractOpt");
+  CI->insertBefore(inst);
+  inst->replaceAllUsesWith(CI);
+  return true;
+}
+
+bool
+AMDILPeepholeOpt::expandBFI(CallInst *CI)
+{
+  // Expands __amdil_bfi(A, B, C) into (A & B) | (~A & C) when the CAL
+  // runtime is too old to provide the native instruction.  Returns true
+  // when CI was rewritten; the caller erases CI.
+  if (!CI || mSTM->calVersion() <= CAL_VERSION_SC_150) {
+    return false;
+  }
+  // The callee is the last operand of the call.
+  Value *LHS = CI->getOperand(CI->getNumOperands() - 1);
+  if (!LHS->getName().startswith("__amdil_bfi")) {
+    return false;
+  }
+  // Build an all-ones (-1) constant, splatted per element for vectors.
+  Type* type = CI->getOperand(0)->getType();
+  Constant *negOneConst = NULL;
+  if (type->isVectorTy()) {
+    std::vector<Constant *> negOneVals;
+    negOneConst = ConstantInt::get(CI->getContext(), 
+        APInt(32, StringRef("-1"), 10));
+    for (size_t x = 0,
+        y = dyn_cast<VectorType>(type)->getNumElements(); x < y; ++x) {
+      negOneVals.push_back(negOneConst);
+    }
+    negOneConst = ConstantVector::get(negOneVals);
+  } else {
+    negOneConst = ConstantInt::get(CI->getContext(), 
+        APInt(32, StringRef("-1"), 10));
+  }
+  // __amdil_bfi => (A & B) | (~A & C)
+  BinaryOperator *lhs = 
+    BinaryOperator::Create(Instruction::And, CI->getOperand(0),
+        CI->getOperand(1), "bfi_and", CI);
+  BinaryOperator *rhs =
+    BinaryOperator::Create(Instruction::Xor, CI->getOperand(0), negOneConst,
+        "bfi_not", CI);
+  rhs = BinaryOperator::Create(Instruction::And, rhs, CI->getOperand(2),
+      "bfi_and", CI);
+  lhs = BinaryOperator::Create(Instruction::Or, lhs, rhs, "bfi_or", CI);
+  CI->replaceAllUsesWith(lhs);
+  return true;
+}
+
+bool
+AMDILPeepholeOpt::expandBFM(CallInst *CI)
+{
+  // Expands __amdil_bfm(src0, src1) into its bit-arithmetic equivalent
+  // when the CAL runtime lacks the native instruction.  Returns true when
+  // CI was rewritten; the caller erases CI.
+  if (!CI || mSTM->calVersion() <= CAL_VERSION_SC_150) {
+    return false;
+  }
+  // The callee is the last operand of the call.
+  Value *LHS = CI->getOperand(CI->getNumOperands() - 1);
+  if (!LHS->getName().startswith("__amdil_bfm")) {
+    return false;
+  }
+  // __amdil_bfm => ((1 << (src0 & 0x1F)) - 1) << (src1 & 0x1f)
+  // newMaskConst = 0x1F (shift-amount mask), newShiftConst = 1; both are
+  // splatted per element for vector types.
+  Constant *newMaskConst = NULL;
+  Constant *newShiftConst = NULL;
+  Type* type = CI->getOperand(0)->getType();
+  if (type->isVectorTy()) {
+    std::vector<Constant*> newMaskVals, newShiftVals;
+    newMaskConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 0x1F);
+    newShiftConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 1);
+    for (size_t x = 0,
+        y = dyn_cast<VectorType>(type)->getNumElements(); x < y; ++x) {
+      newMaskVals.push_back(newMaskConst);
+      newShiftVals.push_back(newShiftConst);
+    }
+    newMaskConst = ConstantVector::get(newMaskVals);
+    newShiftConst = ConstantVector::get(newShiftVals);
+  } else {
+    newMaskConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 0x1F);
+    newShiftConst = ConstantInt::get(Type::getInt32Ty(*mCTX), 1);
+  }
+  // lhs = (1 << (src0 & 0x1F)) - 1
+  BinaryOperator *lhs =
+    BinaryOperator::Create(Instruction::And, CI->getOperand(0),
+        newMaskConst, "bfm_mask", CI);
+  lhs = BinaryOperator::Create(Instruction::Shl, newShiftConst,
+      lhs, "bfm_shl", CI);
+  lhs = BinaryOperator::Create(Instruction::Sub, lhs,
+      newShiftConst, "bfm_sub", CI);
+  // rhs = src1 & 0x1F; result = lhs << rhs
+  BinaryOperator *rhs =
+    BinaryOperator::Create(Instruction::And, CI->getOperand(1),
+        newMaskConst, "bfm_mask", CI);
+  lhs = BinaryOperator::Create(Instruction::Shl, lhs, rhs, "bfm_shl", CI);
+  CI->replaceAllUsesWith(lhs);
+  return true;
+}
+
+bool
+AMDILPeepholeOpt::instLevelOptimizations(BasicBlock::iterator *bbb) 
+{
+  // Dispatcher applied to every instruction by runOnFunction().  The
+  // return value tells safeNestedForEach whether the iterator was already
+  // advanced by the callee: only optimizeCallInst may erase the current
+  // instruction and advance *bbb.  The remaining rewrites leave *bbb
+  // untouched, so false is returned even when they modify the IR.
+  Instruction *inst = (*bbb);
+  if (optimizeCallInst(bbb)) {
+    return true;
+  }
+  if (optimizeBitExtract(inst)) {
+    return false;
+  }
+  if (optimizeBitInsert(inst)) {
+    return false;
+  }
+  if (correctMisalignedMemOp(inst)) {
+    return false;
+  }
+  return false;
+}
+bool
+AMDILPeepholeOpt::correctMisalignedMemOp(Instruction *inst)
+{
+  // Loads/stores of struct types whose stated alignment is smaller than
+  // the type size (and below 4) get their alignment reset to 0, letting
+  // the backend fall back to the type's natural (ABI) alignment.
+  // Returns true when the instruction was modified.
+  LoadInst *linst = dyn_cast<LoadInst>(inst);
+  StoreInst *sinst = dyn_cast<StoreInst>(inst);
+  unsigned alignment;
+  Type *Ty;  // was dead-initialized to inst->getType(); both branches assign
+  if (linst) {
+    alignment = linst->getAlignment();
+    Ty = inst->getType();
+  } else if (sinst) {
+    alignment = sinst->getAlignment();
+    Ty = sinst->getValueOperand()->getType();
+  } else {
+    return false;
+  }
+  // Nothing to do when the access is already sufficiently aligned.
+  // (Simplified from the redundant "size == alignment || size < alignment".)
+  unsigned size = getTypeSize(Ty);
+  if (size <= alignment) {
+    return false;
+  }
+  // Only struct-typed accesses need this correction.
+  if (!Ty->isStructTy()) {
+    return false;
+  }
+  if (alignment < 4) {
+    if (linst) {
+      linst->setAlignment(0);
+      return true;
+    } else if (sinst) {
+      sinst->setAlignment(0);
+      return true;
+    }
+  }
+  return false;
+}
+bool 
+AMDILPeepholeOpt::isSigned24BitOps(CallInst *CI) 
+{
+  // Returns true when CI calls one of the signed 24-bit intrinsics
+  // (__amdil_imad24, __amdil_imul24, __amdil_imul24_high) AND the target
+  // device lacks hardware support for them, i.e. the call must be
+  // expanded by expandSigned24BitOps().
+  if (!CI) {
+    return false;
+  }
+  // The callee is the last operand of the call.
+  Value *LHS = CI->getOperand(CI->getNumOperands() - 1);
+  // All three intrinsic names share one of the 14-character prefixes
+  // "__amdil_imad24" / "__amdil_imul24" ("__amdil_imul24_high" is covered
+  // by the latter).  The previous code additionally compared the
+  // 14-character substring against the longer -- and misspelled
+  // ("__amdil__imul24_high") -- literal, which could never match and was
+  // dead code; that comparison is removed.
+  std::string namePrefix = LHS->getName().substr(0, 14);
+  if (namePrefix != "__amdil_imad24" && namePrefix != "__amdil_imul24") {
+    return false;
+  }
+  // Native hardware support means there is nothing to expand.
+  if (mSTM->device()->usesHardware(AMDILDeviceInfo::Signed24BitOps)) {
+    return false;
+  }
+  return true;
+}
+
+void 
+AMDILPeepholeOpt::expandSigned24BitOps(CallInst *CI) 
+{
+  // Expands signed 24-bit intrinsic calls on devices without native
+  // support (see isSigned24BitOps):
+  //   imad24      -> __amdil_imad (32-bit multiply-add intrinsic)
+  //   imul24      -> plain 32-bit Mul instruction
+  //   imul24_high -> __amdil_imul_high (32-bit high-multiply intrinsic)
+  // The caller (optimizeCallInst) erases CI after this returns.
+  assert(isSigned24BitOps(CI) && "Must be a "
+      "signed 24 bit operation to call this function!");
+  Value *LHS = CI->getOperand(CI->getNumOperands()-1);
+  // BUGFIX: "__amdil_imul24_high" must be tested BEFORE "__amdil_imul24".
+  // Its first 14 characters are exactly "__amdil_imul24", so the previous
+  // ordering sent imul24_high down the plain-multiply path and left the
+  // high-multiply branch unreachable, producing the low 32 bits of the
+  // product instead of the high bits.
+  if (LHS->getName().substr(0, 14) == "__amdil_imad24") {
+    Type *aType = CI->getOperand(0)->getType();
+    bool isVector = aType->isVectorTy();
+    int numEle = isVector ? dyn_cast<VectorType>(aType)->getNumElements() : 1;
+    std::vector<Type*> callTypes;
+    callTypes.push_back(CI->getOperand(0)->getType());
+    callTypes.push_back(CI->getOperand(1)->getType());
+    callTypes.push_back(CI->getOperand(2)->getType());
+    FunctionType *funcType =
+      FunctionType::get(CI->getOperand(0)->getType(), callTypes, false);
+    std::string name = "__amdil_imad";
+    if (isVector) {
+      name += "_v" + itostr(numEle) + "i32";
+    } else {
+      name += "_i32";
+    }
+    Function *Func = dyn_cast<Function>(
+                       CI->getParent()->getParent()->getParent()->
+                       getOrInsertFunction(llvm::StringRef(name), funcType));
+    Value *Operands[3] = {
+      CI->getOperand(0),
+      CI->getOperand(1),
+      CI->getOperand(2)
+    };
+    CallInst *nCI = CallInst::Create(Func, Operands, "imad24");
+    nCI->insertBefore(CI);
+    CI->replaceAllUsesWith(nCI);
+  } else if (LHS->getName().substr(0, 19) == "__amdil_imul24_high") {
+    Type *aType = CI->getOperand(0)->getType();
+
+    bool isVector = aType->isVectorTy();
+    int numEle = isVector ? dyn_cast<VectorType>(aType)->getNumElements() : 1;
+    std::vector<Type*> callTypes;
+    callTypes.push_back(CI->getOperand(0)->getType());
+    callTypes.push_back(CI->getOperand(1)->getType());
+    FunctionType *funcType =
+      FunctionType::get(CI->getOperand(0)->getType(), callTypes, false);
+    std::string name = "__amdil_imul_high";
+    if (isVector) {
+      name += "_v" + itostr(numEle) + "i32";
+    } else {
+      name += "_i32";
+    }
+    Function *Func = dyn_cast<Function>(
+                       CI->getParent()->getParent()->getParent()->
+                       getOrInsertFunction(llvm::StringRef(name), funcType));
+    Value *Operands[2] = {
+      CI->getOperand(0),
+      CI->getOperand(1)
+    };
+    CallInst *nCI = CallInst::Create(Func, Operands, "imul24_high");
+    nCI->insertBefore(CI);
+    CI->replaceAllUsesWith(nCI);
+  } else if (LHS->getName().substr(0, 14) == "__amdil_imul24") {
+    // A plain 32-bit multiply yields the correct low 32 bits for imul24.
+    BinaryOperator *mulOp =
+      BinaryOperator::Create(Instruction::Mul, CI->getOperand(0),
+          CI->getOperand(1), "imul24", CI);
+    CI->replaceAllUsesWith(mulOp);
+  }
+}
+
+bool 
+AMDILPeepholeOpt::isRWGLocalOpt(CallInst *CI) 
+{
+  // True when CI is a call to __amdil_get_local_size_int (the callee is
+  // the last operand of a CallInst).
+  if (CI == NULL) {
+    return false;
+  }
+  StringRef callee = CI->getOperand(CI->getNumOperands() - 1)->getName();
+  return callee == "__amdil_get_local_size_int";
+}
+
+bool 
+AMDILPeepholeOpt::convertAccurateDivide(CallInst *CI) 
+{
+  // True when CI calls __amdil_improved_div and should be lowered to a
+  // plain fdiv; the Cayman device (HD6XXX generation) keeps the call.
+  if (CI == NULL) {
+    return false;
+  }
+  bool isCayman =
+      mSTM->device()->getGeneration() == AMDILDeviceInfo::HD6XXX
+      && mSTM->getDeviceName() == "cayman";
+  if (isCayman) {
+    return false;
+  }
+  StringRef callee = CI->getOperand(CI->getNumOperands() - 1)->getName();
+  return callee.substr(0, 20) == "__amdil_improved_div";
+}
+
+void 
+AMDILPeepholeOpt::expandAccurateDivide(CallInst *CI) 
+{
+  // Lower an __amdil_improved_div call into a plain fdiv inserted just
+  // before the call, and forward all of the call's uses to it.  The
+  // caller (optimizeCallInst) erases CI afterwards.
+  assert(convertAccurateDivide(CI)
+         && "expanding accurate divide can only happen if it is expandable!");
+  Value *numerator = CI->getOperand(0);
+  Value *denominator = CI->getOperand(1);
+  BinaryOperator *fdiv =
+    BinaryOperator::Create(Instruction::FDiv, numerator, denominator,
+                           "fdiv32", CI);
+  CI->replaceAllUsesWith(fdiv);
+}
+
+bool
+AMDILPeepholeOpt::propagateSamplerInst(CallInst *CI)
+{
+  // At -O0 the regular optimizers cannot fold a sampler argument that is
+  // loaded from an initialized global, so do it here: replace the load
+  // feeding an image-read intrinsic's sampler operand with the global's
+  // constant initializer.  Returns true when the propagation happened
+  // (the call itself is left in place).
+  if (optLevel != CodeGenOpt::None) {
+    return false;
+  }
+
+  if (!CI) {
+    return false;
+  }
+
+  // The callee is the last operand of the call.  (Removed the dead
+  // "funcNameIdx = 0" initializer that was immediately overwritten.)
+  unsigned funcNameIdx = CI->getNumOperands() - 1;
+  StringRef calleeName = CI->getOperand(funcNameIdx)->getName();
+  if (calleeName != "__amdil_image2d_read_norm"
+   && calleeName != "__amdil_image2d_read_unnorm"
+   && calleeName != "__amdil_image3d_read_norm"
+   && calleeName != "__amdil_image3d_read_unnorm") {
+    return false;
+  }
+
+  // The sampler is operand 1 of the image read intrinsics.  (Removed the
+  // dead "samplerIdx = 2" store that was immediately overwritten with 1.)
+  const unsigned samplerIdx = 1;
+  Value *sampler = CI->getOperand(samplerIdx);
+  LoadInst *lInst = dyn_cast<LoadInst>(sampler);
+  if (!lInst) {
+    return false;
+  }
+
+  // Only samplers loaded from private (stack) memory are handled.
+  if (lInst->getPointerAddressSpace() != AMDILAS::PRIVATE_ADDRESS) {
+    return false;
+  }
+
+  GlobalVariable *gv = dyn_cast<GlobalVariable>(lInst->getPointerOperand());
+  // If we are loading from what is not a global value, then we
+  // fail and return.
+  if (!gv) {
+    return false;
+  }
+
+  // If we don't have an initializer or we have an initializer and
+  // the initializer is not a 32bit integer, we fail.
+  if (!gv->hasInitializer() 
+      || !gv->getInitializer()->getType()->isIntegerTy(32)) {
+      return false;
+  }
+
+  // Now that we have the global variable initializer, lets replace
+  // all uses of the load instruction with the samplerVal and
+  // reparse the __amdil_is_constant() function.
+  Constant *samplerVal = gv->getInitializer();
+  lInst->replaceAllUsesWith(samplerVal);
+  return true;
+}
+
+bool 
+AMDILPeepholeOpt::doInitialization(Module &M) 
+{
+  // No per-module setup is required; the module is never modified here.
+  return false;
+}
+
+bool 
+AMDILPeepholeOpt::doFinalization(Module &M) 
+{
+  // No per-module teardown is required; the module is never modified here.
+  return false;
+}
+
+void 
+AMDILPeepholeOpt::getAnalysisUsage(AnalysisUsage &AU) const 
+{
+  // This pass only rewrites instructions in place, so all analyses are
+  // declared preserved.  NOTE(review): requiring MachineFunctionAnalysis
+  // from an IR-level FunctionPass is unusual -- confirm it is needed.
+  AU.addRequired<MachineFunctionAnalysis>();
+  FunctionPass::getAnalysisUsage(AU);
+  AU.setPreservesAll();
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(Type * const T, bool dereferencePtr) {
+  // Size of a type in bytes, dispatched on the type ID to the specific
+  // overloads below.  When dereferencePtr is true, pointer types report
+  // the size of their pointee instead of the pointer itself.
+  size_t size = 0;
+  if (!T) {
+    return size;
+  }
+  switch (T->getTypeID()) {
+  case Type::X86_FP80TyID:
+  case Type::FP128TyID:
+  case Type::PPC_FP128TyID:
+  case Type::LabelTyID:
+    assert(0 && "These types are not supported by this backend");
+  // Falls through to the primitive-size computation when assertions are
+  // compiled out.
+  default:
+  case Type::FloatTyID:
+  case Type::DoubleTyID:
+    size = T->getPrimitiveSizeInBits() >> 3;
+    break;
+  case Type::PointerTyID:
+    size = getTypeSize(dyn_cast<PointerType>(T), dereferencePtr);
+    break;
+  case Type::IntegerTyID:
+    size = getTypeSize(dyn_cast<IntegerType>(T), dereferencePtr);
+    break;
+  case Type::StructTyID:
+    size = getTypeSize(dyn_cast<StructType>(T), dereferencePtr);
+    break;
+  case Type::ArrayTyID:
+    size = getTypeSize(dyn_cast<ArrayType>(T), dereferencePtr);
+    break;
+  case Type::FunctionTyID:
+    size = getTypeSize(dyn_cast<FunctionType>(T), dereferencePtr);
+    break;
+  case Type::VectorTyID:
+    size = getTypeSize(dyn_cast<VectorType>(T), dereferencePtr);
+    break;
+  };
+  return size;
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(StructType * const ST,
+    bool dereferencePtr) {
+  // Size of a struct is the sum of its members' sizes; padding and
+  // alignment are not taken into account.  NULL yields 0.
+  if (!ST) {
+    return 0;
+  }
+  size_t total = 0;
+  StructType::element_iterator it = ST->element_begin();
+  StructType::element_iterator end = ST->element_end();
+  while (it != end) {
+    total += getTypeSize(*it, dereferencePtr);
+    ++it;
+  }
+  return total;
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(IntegerType * const IT,
+    bool dereferencePtr) {
+  // Integer size in bytes; NULL yields 0.  dereferencePtr is unused.
+  if (!IT) {
+    return 0;
+  }
+  return IT->getBitWidth() >> 3;
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(FunctionType * const FT,
+    bool dereferencePtr) {
+    // Function types have no storage size; reaching this is a logic
+    // error (returns 0 in release builds where the assert compiles out).
+    assert(0 && "Should not be able to calculate the size of an function type");
+    return 0;
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(ArrayType * const AT,
+    bool dereferencePtr) {
+  // Array size = element size times element count; NULL yields 0.
+  if (!AT) {
+    return 0;
+  }
+  size_t elementSize = getTypeSize(AT->getElementType(), dereferencePtr);
+  return (size_t)(elementSize * AT->getNumElements());
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(VectorType * const VT,
+    bool dereferencePtr) {
+  // Vector size in bytes; NULL yields 0.  dereferencePtr is unused.
+  if (!VT) {
+    return 0;
+  }
+  return VT->getBitWidth() >> 3;
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(PointerType * const PT,
+    bool dereferencePtr) {
+  // Pointer size rules for this target: private-address-space struct
+  // pointers report the struct's full size (the struct lives on the
+  // stack); otherwise the pointee size when dereferencePtr is set, else a
+  // fixed 4-byte pointer.
+  if (!PT) {
+    return 0;
+  }
+  Type *CT = PT->getElementType();
+  if (CT->getTypeID() == Type::StructTyID &&
+      PT->getAddressSpace() == AMDILAS::PRIVATE_ADDRESS) {
+    return getTypeSize(dyn_cast<StructType>(CT));
+  } else if (dereferencePtr) {
+    size_t size = 0;
+    // A pointer type contains exactly one type (its pointee), so this
+    // loop sums just the pointee's size.
+    for (size_t x = 0, y = PT->getNumContainedTypes(); x < y; ++x) {
+      size += getTypeSize(PT->getContainedType(x), dereferencePtr);
+    }
+    return size;
+  } else {
+    // Pointers are modeled as 32 bits (4 bytes) on this target.
+    return 4;
+  }
+}
+
+size_t AMDILPeepholeOpt::getTypeSize(OpaqueType * const OT,
+    bool dereferencePtr) {
+  // Opaque types have no known layout; they are deliberately treated as
+  // a 4-byte handle instead of asserting (see the disabled assert).
+  //assert(0 && "Should not be able to calculate the size of an opaque type");
+  return 4;
+}
diff --git a/lib/Target/AMDGPU/AMDILProfiles.td b/lib/Target/AMDGPU/AMDILProfiles.td
new file mode 100644
index 0000000..60435a8
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILProfiles.td
@@ -0,0 +1,174 @@
+//===- AMDILProfiles.td - AMD IL Profiles ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+// These are used for custom selection dag type profiles
+
+//===----------------------------------------------------------------------===//
+// Custom Selection DAG Type Profiles
+//===----------------------------------------------------------------------===//
+// SDTCisDP - The specified operand has double type
+// Tablegen needs to be hacked to get this constraint to work
+//class SDTCisDP<int OpNum> : SDTypeConstraint<OpNum>;
+
+//===----------------------------------------------------------------------===//
+// Generic Profile Types
+//===----------------------------------------------------------------------===//
+
+// One result, one operand, result and operand share a type.
+def SDTIL_GenUnaryOp : SDTypeProfile<1, 1, [
+    SDTCisSameAs<0, 1>
+    ]>;
+// One result, two operands, all the same type.
+def SDTIL_GenBinaryOp : SDTypeProfile<1, 2, [
+    SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
+    ]>;
+// One result, three operands, all the same type.
+def SDTIL_GenTernaryOp : SDTypeProfile<1, 3, [
+    SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisSameAs<2, 3>
+    ]>;
+// Conditional move: integer condition (operand 1) selects between
+// operands 2 and 3, which match the result type.
+def SDTIL_GenCMovLog : SDTypeProfile<1, 3, [
+    SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisInt<1>
+    ]>;
+// Build a vector whose element type is that of the scalar operand.
+def SDTIL_GenVecBuild : SDTypeProfile<1, 1, [
+    SDTCisEltOfVec<1, 0>
+    ]>;
+
+def SDTIL_GenVecExtract : SDTypeProfile<1, 2, [
+    SDTCisEltOfVec<0, 1>, SDTCisVT<2, i32>
+    ]>;
+
+def SDTIL_GenVecInsert : SDTypeProfile<1, 4, [
+    SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>,
+    SDTCisVT<3, i32>, SDTCisVT<4, i32>
+    ]>;
+
+def SDTIL_GenVecShuffle : SDTypeProfile <1, 2, [
+    SDTCisSameAs<0, 1>, SDTCisVT<2, i32>
+    ]>;
+
+def SDTIL_GenVecConcat : SDTypeProfile <1, 2, [
+    SDTCisSameAs<1, 2>
+    ]>;
+//===----------------------------------------------------------------------===//
+// Conversion Profile Types
+//===----------------------------------------------------------------------===//
+def SDTIL_DPToFPOp : SDTypeProfile<1, 1, [
+    SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>
+    ]>; // d2f
+
+def SDTIL_AnyToInt : SDTypeProfile<1, 1, [
+    SDTCisInt<0>
+    ]>;
+def SDTIL_IntToAny : SDTypeProfile<1, 1, [
+    SDTCisInt<1>
+    ]>;
+def SDTIL_GenBitConv : SDTypeProfile<1, 1, []>;
+//===----------------------------------------------------------------------===//
+// Scalar Profile Types
+//===----------------------------------------------------------------------===//
+
+// Add instruction pattern to handle offsets of memory operations
+def SDTIL_AddAddrri: SDTypeProfile<1, 2, [
+    SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisSameAs<0, 2>
+    ]>;
+def SDTIL_AddAddrir : SDTypeProfile<1, 2, [
+    SDTCisInt<0>, SDTCisPtrTy<2>, SDTCisSameAs<0, 1>
+    ]>;
+
+def SDTIL_LCreate : SDTypeProfile<1, 2, [
+    SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>
+    ]>;
+def SDTIL_LCreate2 : SDTypeProfile<1, 2, [
+    SDTCisVT<0, v2i64>, SDTCisVT<1, v2i32>, SDTCisSameAs<1, 2>
+    ]>;
+def SDTIL_LComp : SDTypeProfile<1, 1, [
+    SDTCisVT<0, i32>, SDTCisVT<1, i64>
+    ]>;
+def SDTIL_LComp2 : SDTypeProfile<1, 1, [
+    SDTCisVT<0, v2i32>, SDTCisVT<1, v2i64>
+    ]>;
+def SDTIL_DCreate : SDTypeProfile<1, 2, [
+    SDTCisVT<0, f64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>
+    ]>;
+def SDTIL_DComp : SDTypeProfile<1, 1, [
+    SDTCisVT<0, i32>, SDTCisVT<1, f64>
+    ]>;
+def SDTIL_DCreate2 : SDTypeProfile<1, 2, [
+    SDTCisVT<0, v2f64>, SDTCisVT<1, v2i32>, SDTCisSameAs<1, 2>
+    ]>;
+def SDTIL_DComp2 : SDTypeProfile<1, 1, [
+    SDTCisVT<0, v2i32>, SDTCisVT<1, v2f64>
+    ]>;
+//===----------------------------------------------------------------------===//
+// Flow Control Profile Types
+//===----------------------------------------------------------------------===//
+// Profile for Normal Call
+def SDTIL_Call : SDTypeProfile<0, 1, [
+    SDTCisVT<0, i32>
+    ]>;
+// Branch instruction where second and third are basic blocks
+def SDTIL_BRCond : SDTypeProfile<0, 2, [
+    SDTCisVT<0, OtherVT>
+    ]>;
+// Comparison instruction
+def SDTIL_Cmp  : SDTypeProfile<1, 3, [
+    SDTCisSameAs<0, 2>, SDTCisSameAs<2,3>, SDTCisVT<1, i32>
+    ]>;
+
+
+//===----------------------------------------------------------------------===//
+// Call Sequence Profiles
+//===----------------------------------------------------------------------===//
+def SDTIL_CallSeqStart  : SDCallSeqStart< [
+    SDTCisVT<0, i32>
+    ]>;
+def SDTIL_CallSeqEnd    : SDCallSeqEnd< [
+    SDTCisVT<0, i32>, SDTCisVT<1, i32>
+    ]>;
+
+//===----------------------------------------------------------------------===//
+// Image Operation Profiles
+//===----------------------------------------------------------------------===//
+def SDTIL_ImageRead  : SDTypeProfile<1, 3, 
+    [SDTCisVT<0, v4i32>, SDTCisPtrTy<1>, SDTCisVT<2, i32>, SDTCisVT<3, v4f32>]>;
+def SDTIL_ImageWrite : SDTypeProfile<0, 3,
+    [SDTCisPtrTy<0>, SDTCisVT<1, v2i32>, SDTCisVT<2, v4i32>]>;
+def SDTIL_ImageWrite3D : SDTypeProfile<0, 3,
+    [SDTCisPtrTy<0>, SDTCisVT<1, v4i32>, SDTCisVT<2, v4i32>]>;
+def SDTIL_ImageInfo  : SDTypeProfile<1, 1,
+    [SDTCisVT<0, v4i32>, SDTCisPtrTy<1>]>;
+//===----------------------------------------------------------------------===//
+// Atomic Operation Profiles
+//===----------------------------------------------------------------------===//
+def SDTIL_UniAtomNoRet : SDTypeProfile<0, 2, [
+    SDTCisPtrTy<0>, SDTCisVT<1, i32>
+    ]>;
+def SDTIL_BinAtomNoRet : SDTypeProfile<0, 3, [
+    SDTCisPtrTy<0>, SDTCisVT<1, i32>, SDTCisVT<2, i32>
+    ]>;
+def SDTIL_TriAtomNoRet : SDTypeProfile<0, 4, [
+    SDTCisPtrTy<0>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>
+    ]>;
+def SDTIL_UniAtom : SDTypeProfile<1, 2, [
+    SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, i32>
+    ]>;
+def SDTIL_BinAtom : SDTypeProfile<1, 3, [
+    SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, i32>, SDTCisVT<3, i32>
+    ]>;
+def SDTIL_TriAtom : SDTypeProfile<1, 4, [
+    SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, i32>,
+    SDTCisVT<3, i32>, SDTCisVT<4, i32>
+    ]>;
+
+def SDTIL_BinAtomFloat : SDTypeProfile<1, 3, [
+    SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, f32>, SDTCisVT<3, f32>
+    ]>;
+def SDTIL_BinAtomNoRetFloat : SDTypeProfile<0, 3, [
+    SDTCisPtrTy<0>, SDTCisVT<1, f32>, SDTCisVT<2, f32>
+    ]>;
+
+def SDTIL_Append : SDTypeProfile<1, 1, [
+    SDTCisVT<0, i32>, SDTCisPtrTy<1>
+    ]>;
diff --git a/lib/Target/AMDGPU/AMDILRegisterInfo.cpp b/lib/Target/AMDGPU/AMDILRegisterInfo.cpp
new file mode 100644
index 0000000..51f6135
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILRegisterInfo.cpp
@@ -0,0 +1,162 @@
+//===- AMDILRegisterInfo.cpp - AMDIL Register Information -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDILRegisterInfo.h"
+#include "AMDIL.h"
+#include "AMDILInstrInfo.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+// Construct the register info object. The generated base class is
+// passed the return-address register number (0 here).
+AMDILRegisterInfo::AMDILRegisterInfo(TargetMachine &tm,
+    const TargetInstrInfo &tii)
+  : AMDILGenRegisterInfo(0), // RA???
+    TM(tm),
+    TII(tii),
+    baseOffset(0),
+    nextFuncOffset(0)
+{
+}
+
+// Return the callee-saved register list: a single zero-terminated entry,
+// i.e. no callee-saved registers at all.
+const uint16_t*
+AMDILRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const
+{
+  static const uint16_t CalleeSavedRegs[] = { 0 };
+  // TODO: Does IL need to actually have any callee saved regs?
+  // I don't think we do since we can just use sequential registers
+  // Maybe this would be easier if every function call was inlined first
+  // and then there would be no callee issues to deal with
+  //TODO(getCalleeSavedRegs);
+  return CalleeSavedRegs;
+}
+
+// Mark registers as reserved so the scheduler cannot kill them.
+BitVector
+AMDILRegisterInfo::getReservedRegs(const MachineFunction &MF) const
+{
+  BitVector Reserved(getNumRegs());
+  // Reserve the low registers because they are the ones passed in as
+  // live-in/live-out and therefore cannot be killed by the scheduler.
+  // This works around a bug where the linear-scan register allocator
+  // would kill registers inside a function that were also passed in as
+  // LiveIn registers.
+  // NOTE(review): the loop reserves a hard-coded 256 registers, while
+  // the comment above says getNumRegs() -- confirm these agree.
+  for (unsigned int x = 0, y = 256; x < y; ++x) {
+    Reserved.set(x);
+  }
+  return Reserved;
+}
+
+// Return the set of allocatable registers: none. The result must be a
+// BitVector sized to getNumRegs() with every bit unset.
+//
+// Fix: the original called BitVector::clear(), which in LLVM shrinks
+// the vector to zero *size* rather than clearing its bits, so callers
+// indexing by register number would read out of range. reset() keeps
+// the size and clears all bits. The '= NULL' default argument is also
+// dropped from this out-of-line definition; the in-class declaration
+// does not have one, and repeating defaults at the definition is
+// error-prone.
+BitVector
+AMDILRegisterInfo::getAllocatableSet(const MachineFunction &MF,
+    const TargetRegisterClass *RC) const
+{
+  BitVector Allocatable(getNumRegs());
+  Allocatable.reset();
+  return Allocatable;
+}
+
+// Return the callee-saved register-class list (empty, zero-terminated),
+// mirroring getCalleeSavedRegs() above.
+const TargetRegisterClass* const*
+AMDILRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const
+{
+  static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 };
+  // TODO: Keep in sync with getCalleeSavedRegs
+  //TODO(getCalleeSavedRegClasses);
+  return CalleeSavedRegClasses;
+}
+// Call-frame pseudo instructions are not needed on this target; just
+// delete them from the block.
+void
+AMDILRegisterInfo::eliminateCallFramePseudoInstr(
+    MachineFunction &MF,
+    MachineBasicBlock &MBB,
+    MachineBasicBlock::iterator I) const
+{
+  MBB.erase(I);
+}
+
+// For each frame index we find, we store the offset in the stack which is
+// being pushed back into the global buffer. The offset into the stack where
+// the value is stored is copied into a new register and the frame index is
+// then replaced with that register.
+void 
+AMDILRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+    int SPAdj,
+    RegScavenger *RS) const
+{
+  // Not implemented yet; aborts if ever reached.
+  assert(!"Implement");
+}
+
+// Accumulate the stack space used by the current function so that the
+// next function's stack starts past it and a function call will not
+// overwrite any stack variables.
+void
+AMDILRegisterInfo::processFunctionBeforeFrameFinalized(
+    MachineFunction &MF) const
+{
+  //TODO(processFunctionBeforeFrameFinalized);
+  // baseOffset = nextFuncOffset;
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+
+  for (uint32_t x = 0, y = MFI->getNumObjects(); x < y; ++x) {
+    int64_t size = MFI->getObjectSize(x);
+    // Objects whose size is a positive multiple of 4 are counted exactly;
+    // everything else (including zero-sized objects, which the
+    // 'size > 1' guard sends here) is rounded up to one 16-byte slot.
+    if (!(size % 4) && size > 1) {
+      nextFuncOffset += size;
+    } else {
+      nextFuncOffset += 16;
+    }
+  }
+}
+// Return-address register, used for debug information.
+unsigned int
+AMDILRegisterInfo::getRARegister() const
+{
+  return AMDIL::RA;
+}
+
+// Frame-pointer register, used for debug information.
+unsigned int
+AMDILRegisterInfo::getFrameRegister(const MachineFunction &MF) const
+{
+  return AMDIL::FP;
+}
+
+// Exception-handling registers are not defined for this target; both
+// queries assert in debug builds and return 0.
+unsigned int
+AMDILRegisterInfo::getEHExceptionRegister() const
+{
+  assert(0 && "What is the exception register");
+  return 0;
+}
+
+unsigned int
+AMDILRegisterInfo::getEHHandlerRegister() const
+{
+  assert(0 && "What is the exception handler register");
+  return 0;
+}
+
+// Stack bytes accumulated since baseOffset was last captured
+// (see processFunctionBeforeFrameFinalized above).
+int64_t
+AMDILRegisterInfo::getStackSize() const
+{
+  return nextFuncOffset - baseOffset;
+}
+
+#define GET_REGINFO_TARGET_DESC
+#include "AMDGPUGenRegisterInfo.inc"
+
diff --git a/lib/Target/AMDGPU/AMDILRegisterInfo.h b/lib/Target/AMDGPU/AMDILRegisterInfo.h
new file mode 100644
index 0000000..33d0bea
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILRegisterInfo.h
@@ -0,0 +1,95 @@
+//===- AMDILRegisterInfo.h - AMDIL Register Information Impl ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file contains the AMDIL implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDILREGISTERINFO_H_
+#define AMDILREGISTERINFO_H_
+
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "AMDGPUGenRegisterInfo.inc"
+// See header file for explanation
+
+namespace llvm
+{
+
+  class TargetInstrInfo;
+  class Type;
+
+  /// DWARFFlavour - Flavour of dwarf regnumbers
+  ///
+  namespace DWARFFlavour {
+    enum {
+      AMDIL_Generic = 0
+    };
+  }
+
+  struct AMDILRegisterInfo : public AMDILGenRegisterInfo
+  {
+    TargetMachine &TM;
+    const TargetInstrInfo &TII;
+
+    AMDILRegisterInfo(TargetMachine &tm, const TargetInstrInfo &tii);
+    /// Code Generation virtual methods...
+    // Zero-terminated list of callee-saved registers (currently empty).
+    const uint16_t * getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+
+    // Register classes matching getCalleeSavedRegs (currently empty).
+    const TargetRegisterClass* const*
+      getCalleeSavedRegClasses(
+          const MachineFunction *MF = 0) const;
+
+    // Registers the allocator/scheduler must not touch.
+    BitVector
+      getReservedRegs(const MachineFunction &MF) const;
+    // Set of allocatable registers (currently none).
+    BitVector
+      getAllocatableSet(const MachineFunction &MF,
+          const TargetRegisterClass *RC) const;
+
+    // Deletes call-frame pseudo instructions.
+    void
+      eliminateCallFramePseudoInstr(
+          MachineFunction &MF,
+          MachineBasicBlock &MBB,
+          MachineBasicBlock::iterator I) const;
+    // Rewrites frame indices into registers (not yet implemented).
+    void
+      eliminateFrameIndex(MachineBasicBlock::iterator II,
+          int SPAdj, RegScavenger *RS = NULL) const;
+
+    // Accumulates per-function stack usage (see getStackSize()).
+    void
+      processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
+    // Debug information queries.
+    unsigned int
+      getRARegister() const;
+
+    unsigned int
+      getFrameRegister(const MachineFunction &MF) const;
+
+    // Exception handling queries.
+    unsigned int
+      getEHExceptionRegister() const;
+    unsigned int
+      getEHHandlerRegister() const;
+
+    // Stack bytes accumulated since baseOffset was captured.
+    int64_t
+      getStackSize() const;
+
+    // Register class the CFG structurizer uses for branch conditions.
+    virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT)
+                                                                      const {
+      return &AMDIL::GPRI32RegClass;
+    }
+    private:
+    // Running stack-offset bookkeeping; mutable because the accessors
+    // above are const.
+    mutable int64_t baseOffset;
+    mutable int64_t nextFuncOffset;
+  };
+
+} // end namespace llvm
+
+#endif // AMDILREGISTERINFO_H_
diff --git a/lib/Target/AMDGPU/AMDILRegisterInfo.td b/lib/Target/AMDGPU/AMDILRegisterInfo.td
new file mode 100644
index 0000000..94f2fc5
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILRegisterInfo.td
@@ -0,0 +1,110 @@
+//===- AMDILRegisterInfo.td - AMDIL Register defs ----------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+//  Declarations that describe the AMDIL register file
+//
+//===----------------------------------------------------------------------===//
+
+// Base class for AMDIL registers: 'num' is the 16-bit register number
+// and 'n' the assembly name; all live in the "AMDIL" namespace.
+class AMDILReg<bits<16> num, string n> : Register<n> {
+  field bits<16> Value;
+  let Value = num;
+  let Namespace = "AMDIL";
+}
+
+// We will start with 8 registers for each class before expanding to more
+// Since the swizzle is added based on the register class, we can leave it
+// off here and just specify different registers for different register classes
+def R1 : AMDILReg<1, "r1">, DwarfRegNum<[1]>;
+def R2 : AMDILReg<2, "r2">, DwarfRegNum<[2]>;
+def R3 : AMDILReg<3, "r3">, DwarfRegNum<[3]>;
+def R4 : AMDILReg<4, "r4">, DwarfRegNum<[4]>;
+def R5 : AMDILReg<5, "r5">, DwarfRegNum<[5]>;
+def R6 : AMDILReg<6, "r6">, DwarfRegNum<[6]>;
+def R7 : AMDILReg<7, "r7">, DwarfRegNum<[7]>;
+def R8 : AMDILReg<8, "r8">, DwarfRegNum<[8]>;
+def R9 : AMDILReg<9, "r9">, DwarfRegNum<[9]>;
+def R10 : AMDILReg<10, "r10">, DwarfRegNum<[10]>;
+def R11 : AMDILReg<11, "r11">, DwarfRegNum<[11]>;
+def R12 : AMDILReg<12, "r12">, DwarfRegNum<[12]>;
+def R13 : AMDILReg<13, "r13">, DwarfRegNum<[13]>;
+def R14 : AMDILReg<14, "r14">, DwarfRegNum<[14]>;
+def R15 : AMDILReg<15, "r15">, DwarfRegNum<[15]>;
+def R16 : AMDILReg<16, "r16">, DwarfRegNum<[16]>;
+def R17 : AMDILReg<17, "r17">, DwarfRegNum<[17]>;
+def R18 : AMDILReg<18, "r18">, DwarfRegNum<[18]>;
+def R19 : AMDILReg<19, "r19">, DwarfRegNum<[19]>;
+def R20 : AMDILReg<20, "r20">, DwarfRegNum<[20]>;
+
+// All registers between 1000 and 1024 are reserved and cannot be used
+// unless commented in this section
+// r1021-r1025 are used to dynamically calculate the local/group/thread/region/region_local ID's
+// r1020 is used to hold the frame index for local arrays
+// r1019 is used to hold the dynamic stack allocation pointer
+// r1018 is used as a temporary register for handwritten code
+// r1017 is used as a temporary register for handwritten code
+// r1016 is used as a temporary register for load/store code
+// r1015 is used as a temporary register for data segment offset
+// r1014 is used as a temporary register for store code
+// r1013 is used as the section data pointer register
+// r1012-r1010 and r1001-r1008 are used for temporary I/O registers
+// r1009 is used as the frame pointer register
+// r999 is used as the mem register.
+// r998 is used as the return address register.
+//def R1025 : AMDILReg<1025, "r1025">, DwarfRegNum<[1025]>;
+//def R1024 : AMDILReg<1024, "r1024">, DwarfRegNum<[1024]>;
+//def R1023 : AMDILReg<1023, "r1023">, DwarfRegNum<[1023]>;
+//def R1022 : AMDILReg<1022, "r1022">, DwarfRegNum<[1022]>;
+//def R1021 : AMDILReg<1021, "r1021">, DwarfRegNum<[1021]>;
+//def R1020 : AMDILReg<1020, "r1020">, DwarfRegNum<[1020]>;
+def SP : AMDILReg<1019, "r1019">, DwarfRegNum<[1019]>;
+def T1 : AMDILReg<1018, "r1018">, DwarfRegNum<[1018]>;
+def T2 : AMDILReg<1017, "r1017">, DwarfRegNum<[1017]>;
+def T3 : AMDILReg<1016, "r1016">, DwarfRegNum<[1016]>;
+def T4 : AMDILReg<1015, "r1015">, DwarfRegNum<[1015]>;
+def T5 : AMDILReg<1014, "r1014">, DwarfRegNum<[1014]>;
+def SDP : AMDILReg<1013, "r1013">, DwarfRegNum<[1013]>;
+def R1012: AMDILReg<1012, "r1012">, DwarfRegNum<[1012]>;
+def R1011: AMDILReg<1011, "r1011">, DwarfRegNum<[1011]>;
+def R1010: AMDILReg<1010, "r1010">, DwarfRegNum<[1010]>;
+def DFP : AMDILReg<1009, "r1009">, DwarfRegNum<[1009]>;
+def R1008: AMDILReg<1008, "r1008">, DwarfRegNum<[1008]>;
+def R1007: AMDILReg<1007, "r1007">, DwarfRegNum<[1007]>;
+def R1006: AMDILReg<1006, "r1006">, DwarfRegNum<[1006]>;
+def R1005: AMDILReg<1005, "r1005">, DwarfRegNum<[1005]>;
+def R1004: AMDILReg<1004, "r1004">, DwarfRegNum<[1004]>;
+def R1003: AMDILReg<1003, "r1003">, DwarfRegNum<[1003]>;
+def R1002: AMDILReg<1002, "r1002">, DwarfRegNum<[1002]>;
+def R1001: AMDILReg<1001, "r1001">, DwarfRegNum<[1001]>;
+def MEM : AMDILReg<999, "mem">, DwarfRegNum<[999]>;
+def RA : AMDILReg<998, "r998">, DwarfRegNum<[998]>;
+def FP : AMDILReg<997, "r997">, DwarfRegNum<[997]>;
+def GPRI16 : RegisterClass<"AMDIL", [i16], 16,
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
+{
+        let AltOrders = [(add (sequence "R%u", 1, 20))];
+        let AltOrderSelect = [{
+          return 1;
+        }];
+    }
+def GPRI32 : RegisterClass<"AMDIL", [i32], 32,
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
+{
+        let AltOrders = [(add (sequence "R%u", 1, 20))];
+        let AltOrderSelect = [{
+          return 1;
+        }];
+    }
+def GPRF32 : RegisterClass<"AMDIL", [f32], 32,
+  (add (sequence "R%u", 1, 20), RA, SP, T1, T2, T3, T4, T5, SDP, R1010, R1011, R1001, R1002, R1003, R1004, R1005, R1006, R1007, R1008, MEM, R1012)>
+{
+        let AltOrders = [(add (sequence "R%u", 1, 20))];
+        let AltOrderSelect = [{
+          return 1;
+        }];
+    }
diff --git a/lib/Target/AMDGPU/AMDILSIDevice.cpp b/lib/Target/AMDGPU/AMDILSIDevice.cpp
new file mode 100644
index 0000000..ae402a5
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILSIDevice.cpp
@@ -0,0 +1,49 @@
+//===-- AMDILSIDevice.cpp - Device Info for Southern Islands GPUs ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+#include "AMDILSIDevice.h"
+#include "AMDILEvergreenDevice.h"
+#include "AMDILNIDevice.h"
+#include "AMDILSubtarget.h"
+
+using namespace llvm;
+
+// SI devices reuse the Evergreen capability setup unchanged; only the
+// virtual overrides below differ.
+AMDILSIDevice::AMDILSIDevice(AMDILSubtarget *ST)
+  : AMDILEvergreenDevice(ST)
+{
+}
+AMDILSIDevice::~AMDILSIDevice()
+{
+}
+
+// Maximum local data share (LDS) size in bytes, or zero when hardware
+// local memory is not in use.
+size_t
+AMDILSIDevice::getMaxLDSSize() const
+{
+  return usesHardware(AMDILDeviceInfo::LocalMem) ? MAX_LDS_SIZE_900 : 0;
+}
+
+// Hardware generation reported for this device family.
+// NOTE(review): returns HD7XXX for Southern Islands parts -- confirm
+// this enum value is the intended generation marker.
+uint32_t
+AMDILSIDevice::getGeneration() const
+{
+  return AMDILDeviceInfo::HD7XXX;
+}
+
+// LLVM data layout string for SI devices: 64-bit pointers, natural
+// alignments for scalars and vectors, native widths 8/16/32/64.
+std::string
+AMDILSIDevice::getDataLayout() const
+{
+    return std::string("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16"
+      "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
+      "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
+      "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
+      "-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+      "-n8:16:32:64");
+}
diff --git a/lib/Target/AMDGPU/AMDILSIDevice.h b/lib/Target/AMDGPU/AMDILSIDevice.h
new file mode 100644
index 0000000..b272af7
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILSIDevice.h
@@ -0,0 +1,45 @@
+//===------- AMDILSIDevice.h - Define SI Device for AMDIL -*- C++ -*------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// Interface for the subtarget data classes.
+//
+//===---------------------------------------------------------------------===//
+// This file will define the interface that each generation needs to
+// implement in order to correctly answer queries on the capabilities of the
+// specific hardware.
+//===---------------------------------------------------------------------===//
+#ifndef _AMDILSIDEVICE_H_
+#define _AMDILSIDEVICE_H_
+#include "AMDILEvergreenDevice.h"
+#include "AMDILSubtarget.h"
+
+namespace llvm {
+  class AMDILSubtarget;
+//===---------------------------------------------------------------------===//
+// SI generation of devices and their respective sub classes
+//===---------------------------------------------------------------------===//
+
+// The AMDILSIDevice is the base class for all Southern Islands series of
+// cards. It is very similar to the AMDILEvergreenDevice, with the major
+// exception being differences in wavefront size and hardware capabilities. The
+// SI devices all have 64-wide wavefronts and also add support for signed 24-bit
+// integer operations.
+
+  // Southern Islands device description: derives the Evergreen
+  // capabilities and overrides LDS size, generation and data layout.
+  class AMDILSIDevice : public AMDILEvergreenDevice {
+    public:
+      AMDILSIDevice(AMDILSubtarget*);
+      virtual ~AMDILSIDevice();
+      virtual size_t getMaxLDSSize() const;
+      virtual uint32_t getGeneration() const;
+      virtual std::string getDataLayout() const;
+    protected:
+  }; // AMDILSIDevice
+
+} // namespace llvm
+#endif // _AMDILSIDEVICE_H_
diff --git a/lib/Target/AMDGPU/AMDILSubtarget.cpp b/lib/Target/AMDGPU/AMDILSubtarget.cpp
new file mode 100644
index 0000000..723037e
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILSubtarget.cpp
@@ -0,0 +1,178 @@
+//===- AMDILSubtarget.cpp - AMDIL Subtarget Information -------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file implements the AMD IL specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDILSubtarget.h"
+#include "AMDIL.h"
+#include "AMDILDevices.h"
+#include "AMDILUtilityFunctions.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/SubtargetFeature.h"
+
+using namespace llvm;
+
+#define GET_SUBTARGETINFO_ENUM
+#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_TARGET_DESC
+#include "AMDGPUGenSubtargetInfo.inc"
+
+// Build the subtarget from the target triple, CPU name and feature
+// string. The custom "+mwgs" (workgroup sizes) and "+cal" (CAL version)
+// features are parsed by hand; everything else is forwarded to the
+// tblgen-generated ParseSubtargetFeatures().
+AMDILSubtarget::AMDILSubtarget(llvm::StringRef TT, llvm::StringRef CPU, llvm::StringRef FS) : AMDILGenSubtargetInfo( TT, CPU, FS ),
+  mDumpCode(false)
+{
+  memset(CapsOverride, 0, sizeof(*CapsOverride)
+      * AMDILDeviceInfo::MaxNumberCapabilities);
+  // Default card
+  // NOTE(review): GPU is unconditionally overwritten by CPU on the next
+  // line, so the "rv770" default is dead -- confirm intent.
+  std::string GPU = "rv770";
+  GPU = CPU;
+  mIs64bit = false;
+  mVersion = 0;
+  SmallVector<StringRef, DEFAULT_VEC_SLOTS> Features;
+  SplitString(FS, Features, ",");
+  // Default workgroup size: 64 x 1 x 1.
+  mDefaultSize[0] = 64;
+  mDefaultSize[1] = 1;
+  mDefaultSize[2] = 1;
+  std::string newFeatures = "";
+#if defined(_DEBUG) || defined(DEBUG)
+  bool useTest = false;
+#endif
+  for (size_t x = 0; x < Features.size(); ++x) {
+    if (Features[x].startswith("+mwgs")) {
+      // "+mwgs-<dims>-<x>-<y>-<z>": up to three workgroup dimensions.
+      SmallVector<StringRef, DEFAULT_VEC_SLOTS> sizes;
+      SplitString(Features[x], sizes, "-");
+      size_t mDim = ::atoi(sizes[1].data());
+      if (mDim > 3) {
+        mDim = 3;
+      }
+      for (size_t y = 0; y < mDim; ++y) {
+        mDefaultSize[y] = ::atoi(sizes[y+2].data());
+      }
+#if defined(_DEBUG) || defined(DEBUG)
+    } else if (!Features[x].compare("test")) {
+      useTest = true;
+#endif
+    } else if (Features[x].startswith("+cal")) {
+      // "+cal=<version>": CAL version the backend should target.
+      SmallVector<StringRef, DEFAULT_VEC_SLOTS> version;
+      SplitString(Features[x], version, "=");
+      mVersion = ::atoi(version[1].data());
+    } else {
+      // Anything else is passed through to the generated feature parser.
+      GPU = CPU;
+      if (x > 0) newFeatures += ',';
+      newFeatures += Features[x];
+    }
+  }
+  // If we don't have a version then set it to
+  // -1 which enables everything. This is for
+  // offline devices.
+  if (!mVersion) {
+    mVersion = (uint32_t)-1;
+  }
+  // Guard against zero workgroup dimensions.
+  for (int x = 0; x < 3; ++x) {
+    if (!mDefaultSize[x]) {
+      mDefaultSize[x] = 1;
+    }
+  }
+#if defined(_DEBUG) || defined(DEBUG)
+  // In debug builds the "test" feature parses features as "kauai" but
+  // reports the device name as "test".
+  if (useTest) {
+    GPU = "kauai";
+  }
+#endif
+  ParseSubtargetFeatures(GPU, newFeatures);
+#if defined(_DEBUG) || defined(DEBUG)
+  if (useTest) {
+    GPU = "test";
+  }
+#endif
+  mDevName = GPU;
+  mDevice = AMDILDeviceInfo::getDeviceFromName(mDevName, this, mIs64bit);
+}
+// The subtarget owns the device object created in the constructor.
+AMDILSubtarget::~AMDILSubtarget()
+{
+  delete mDevice;
+}
+// Whether the given capability has been explicitly overridden for this
+// subtarget (recorded in CapsOverride).
+bool
+AMDILSubtarget::isOverride(AMDILDeviceInfo::Caps caps) const
+{
+  assert(caps < AMDILDeviceInfo::MaxNumberCapabilities &&
+      "Caps index is out of bounds!");
+  return CapsOverride[caps];
+}
+// Whether the subtarget was configured for 64-bit pointers.
+bool
+AMDILSubtarget::is64bit() const 
+{
+  return mIs64bit;
+}
+// This target does not emit ELF objects.
+bool
+AMDILSubtarget::isTargetELF() const
+{
+  return false;
+}
+// Return the default workgroup size for dimension 'dim' (0, 1 or 2),
+// or 1 for an out-of-range dimension.
+//
+// Fix: mDefaultSize has exactly three entries, so the bounds test must
+// be 'dim >= 3'. The original 'dim > 3' let dim == 3 read one element
+// past the end of the array (undefined behavior).
+size_t
+AMDILSubtarget::getDefaultSize(uint32_t dim) const
+{
+  if (dim >= 3) {
+    return 1;
+  } else {
+    return mDefaultSize[dim];
+  }
+}
+// CAL version the backend targets; (uint32_t)-1 means everything is
+// enabled (offline devices -- see the constructor).
+uint32_t
+AMDILSubtarget::calVersion() const
+{
+  return mVersion;
+}
+
+// Accessors for the global and kernel managers. The backing pointers
+// are mutable so they can be installed on a const subtarget.
+AMDILGlobalManager*
+AMDILSubtarget::getGlobalManager() const
+{
+  return mGM;
+}
+void
+AMDILSubtarget::setGlobalManager(AMDILGlobalManager *gm) const
+{
+  mGM = gm;
+}
+
+AMDILKernelManager*
+AMDILSubtarget::getKernelManager() const
+{
+  return mKM;
+}
+void
+AMDILSubtarget::setKernelManager(AMDILKernelManager *km) const
+{
+  mKM = km;
+}
+// Data layout string for the selected device; falls back to a generic
+// 32-bit-pointer layout when no device has been created yet.
+std::string
+AMDILSubtarget::getDataLayout() const
+{
+    if (!mDevice) {
+        return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
+                "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
+                "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
+                "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
+                "-v512:512:512-v1024:1024:1024-v2048:2048:2048-a0:0:64");
+    }
+    return mDevice->getDataLayout();
+}
+
+// Name of the device this subtarget was configured for.
+std::string
+AMDILSubtarget::getDeviceName() const
+{
+  return mDevName;
+}
+// Device capability object owned by this subtarget (deleted in the
+// destructor).
+const AMDILDevice *
+AMDILSubtarget::device() const
+{
+  return mDevice;
+}
diff --git a/lib/Target/AMDGPU/AMDILSubtarget.h b/lib/Target/AMDGPU/AMDILSubtarget.h
new file mode 100644
index 0000000..e3d8c81
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILSubtarget.h
@@ -0,0 +1,77 @@
+//=====-- AMDILSubtarget.h - Define Subtarget for the AMDIL ----*- C++ -*-====//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file declares the AMDIL specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _AMDILSUBTARGET_H_
+#define _AMDILSUBTARGET_H_
+
+#include "AMDILDevice.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+#include <cstdlib>
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "AMDGPUGenSubtargetInfo.inc"
+
+#define MAX_CB_SIZE (1 << 16)
+namespace llvm {
+  class Module;
+  class AMDILKernelManager;
+  class AMDILGlobalManager;
+  class AMDILDevice;
+  class AMDILSubtarget : public AMDILGenSubtargetInfo {
+    private:
+      // Per-capability override flags; see isOverride().
+      bool CapsOverride[AMDILDeviceInfo::MaxNumberCapabilities];
+      // Installed late through the const setters below, hence mutable.
+      mutable AMDILGlobalManager *mGM;
+      mutable AMDILKernelManager *mKM;
+      // Device capability object, owned by the subtarget.
+      const AMDILDevice *mDevice;
+      // Default workgroup sizes, one entry per dimension.
+      size_t mDefaultSize[3];
+      size_t mMinimumSize[3];
+      std::string mDevName;
+      uint32_t mVersion;
+      bool mIs64bit;
+      bool mIs32on64bit;
+      bool mDumpCode;
+    public:
+      AMDILSubtarget(llvm::StringRef TT, llvm::StringRef CPU, llvm::StringRef FS);
+      virtual ~AMDILSubtarget();
+      bool isOverride(AMDILDeviceInfo::Caps) const;
+      bool is64bit() const;
+
+      // Helper functions to simplify if statements
+      bool isTargetELF() const;
+      AMDILGlobalManager* getGlobalManager() const;
+      void setGlobalManager(AMDILGlobalManager *gm) const;
+      AMDILKernelManager* getKernelManager() const;
+      // NOTE(review): parameter is a kernel manager despite the 'gm' name.
+      void setKernelManager(AMDILKernelManager *gm) const;
+      const AMDILDevice* device() const;
+      std::string getDataLayout() const;
+      std::string getDeviceName() const;
+      virtual size_t getDefaultSize(uint32_t dim) const;
+      // Return the version of CAL that the backend should target.
+      uint32_t calVersion() const;
+      // ParseSubtargetFeatures - Parses features string setting specified
+      // subtarget options. Definition of function is auto generated
+      // by tblgen.
+      void
+        ParseSubtargetFeatures(
+            llvm::StringRef CPU,
+            llvm::StringRef FS);
+      bool dumpCode() const { return mDumpCode; }
+
+  };
+
+} // end namespace llvm
+
+#endif // _AMDILSUBTARGET_H_
diff --git a/lib/Target/AMDGPU/AMDILTokenDesc.td b/lib/Target/AMDGPU/AMDILTokenDesc.td
new file mode 100644
index 0000000..2dafb2c
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILTokenDesc.td
@@ -0,0 +1,120 @@
+//===-- AMDILTokenDesc.td - AMDIL Token Definitions --*- tablegen -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===--------------------------------------------------------------------===//
+
+include "AMDILEnumeratedTypes.td"
+
+// Each token is 32 bits as specified in section 2.1 of the IL spec
+class ILToken <bits<32> n> {
+    field bits<32> _bits = n;
+}
+
+// Section 2.2.1 - IL Language Token
+class ILLang<bits<8> client_type> : ILToken<0> {
+    let _bits{0-7} = client_type;
+}
+
+// Section 2.2.2 - IL Version Token
+class ILVersion<bits<8> minor_version, bits<8> major_version, ILShader shader_type>  : ILToken<0> {
+    let _bits{0-7} = minor_version;
+    let _bits{8-15} = major_version;
+    let _bits{16-23} = shader_type.Value;
+}
+
+// Section 2.2.3 - IL Opcode Token
+class ILOpcode<ILOpCode opcode, bits<14> control, bit sec_mod_pre, bit pri_mod_pre> : ILToken<0> {
+    let _bits{0-15} = opcode.Value;
+    let _bits{16-29} = control;
+    let _bits{30} = sec_mod_pre;
+    let _bits{31} = pri_mod_pre;
+}
+
+// Section 2.2.4 - IL Destination Token
+class ILDst<AMDILReg register_num, ILRegType register_type, bit mod_pre, bits<2> relative_address, bit dimension, bit immediate_pre, bit extended> : ILToken<0> {
+    let _bits{0-15} = register_num.Value;
+    let _bits{16-21} = register_type.Value;
+    let _bits{22} = mod_pre;
+    let _bits{23-24} = relative_address;
+    let _bits{25} = dimension;
+    let _bits{26} = immediate_pre;
+    let _bits{31} = extended;
+}
+
+// Section 2.2.5 - IL Destination Modifier Token
+class ILDstMod<ILModDstComp x, ILModDstComp y, ILModDstComp z, ILModDstComp w, bit clamp, ILShiftScale shift_scale> : ILToken<0> {
+    let _bits{0-1} = x.Value;
+    let _bits{2-3} = y.Value;
+    let _bits{4-5} = z.Value;
+    let _bits{6-7} = w.Value;
+    let _bits{8} = clamp;
+    //let _bits{9-12} = shift_scale;
+}
+
+// Section 2.2.6 - IL Source Token
+class ILSrc<AMDILReg register_num, ILRegType register_type, bit mod_pre, bits<2> relative_address, bit dimension, bit immediate_pre, bit extended> : ILToken<0> {
+    let _bits{0-15} = register_num.Value;
+    let _bits{16-21} = register_type.Value;
+    let _bits{22} = mod_pre;
+    let _bits{23-24} = relative_address;
+    let _bits{25} = dimension;
+    let _bits{26} = immediate_pre;
+    let _bits{31} = extended;
+}
+
+// Section 2.2.7 - IL Source Modifier Token
+class ILSrcMod<ILComponentSelect swizzle_x, bit negate_x, ILComponentSelect swizzle_y, bit negate_y,
+               ILComponentSelect swizzle_z, bit negate_z, ILComponentSelect swizzle_w, bit negate_w,
+               bit invert, bit bias, bit x2, bit sign, bit abs, ILDivComp divComp,
+               bits<8> clamp> : ILToken<0> {
+    let _bits{0-2} = swizzle_x.Value;
+    let _bits{3} = negate_x;
+    let _bits{4-6} = swizzle_y.Value;
+    let _bits{7} = negate_y;
+    let _bits{8-10} = swizzle_z.Value;
+    let _bits{11} = negate_z;
+    let _bits{12-14} = swizzle_w.Value;
+    let _bits{15} = negate_w;
+    let _bits{16} = invert;
+    let _bits{17} = bias;
+    let _bits{18} = x2;
+    let _bits{19} = sign;
+    let _bits{20} = abs;
+    let _bits{21-23} = divComp.Value;
+    let _bits{24-31} = clamp;
+}
+
+// Section 2.2.8 - IL Relative Address Token
+class ILRelAddr<AMDILReg address_register, bit loop_relative, ILAddressing component> : ILToken<0> {
+    let _bits{0-15} = address_register.Value;
+    let _bits{16} = loop_relative;
+    let _bits{17-19} = component.Value;
+}
+
+// IL Literal Token
+class ILLiteral<bits<32> val> : ILToken<0> {
+    let _bits = val;
+}
+
+// All tokens required for a destination register
+class ILDstReg<ILDst Reg, ILDstMod Mod, ILRelAddr Rel, ILSrc Reg_Rel, ILSrcMod Reg_Rel_Mod> {
+    ILDst       reg = Reg;
+    ILDstMod    mod = Mod;
+    ILRelAddr   rel = Rel;
+    ILSrc       reg_rel = Reg_Rel;
+    ILSrcMod    reg_rel_mod = Reg_Rel_Mod;
+}
+
+// All tokens required for a source register
+class ILSrcReg<ILSrc Reg, ILSrcMod Mod, ILRelAddr Rel, ILSrc Reg_Rel, ILSrcMod Reg_Rel_Mod> {
+    ILSrc       reg = Reg;
+    ILSrcMod    mod = Mod;
+    ILRelAddr   rel = Rel;
+    ILSrc       reg_rel = Reg_Rel;
+    ILSrcMod    reg_rel_mod = Reg_Rel_Mod;
+}
+
diff --git a/lib/Target/AMDGPU/AMDILUtilityFunctions.h b/lib/Target/AMDGPU/AMDILUtilityFunctions.h
new file mode 100644
index 0000000..419da39
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILUtilityFunctions.h
@@ -0,0 +1,75 @@
+//===-- AMDILUtilityFunctions.h - AMDIL Utility Functions Header --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+// This file provides helper macros for expanding case statements.
+//
+//===----------------------------------------------------------------------===//
+#ifndef AMDILUTILITYFUNCTIONS_H_
+#define AMDILUTILITYFUNCTIONS_H_
+
+// Macros that are used to help with switch statements for various data types
+// However, these macros do not return anything, unlike the second set below.
+#define ExpandCaseTo32bitIntTypes(Instr)  \
+case Instr##_i32:
+
+#define ExpandCaseTo32bitIntTruncTypes(Instr)  \
+case Instr##_i32i8: \
+case Instr##_i32i16: 
+
+#define ExpandCaseToIntTypes(Instr) \
+    ExpandCaseTo32bitIntTypes(Instr)
+
+#define ExpandCaseToIntTruncTypes(Instr) \
+    ExpandCaseTo32bitIntTruncTypes(Instr)
+
+#define ExpandCaseToFloatTypes(Instr) \
+    case Instr##_f32:
+
+#define ExpandCaseTo32bitScalarTypes(Instr) \
+    ExpandCaseTo32bitIntTypes(Instr) \
+case Instr##_f32:
+
+#define ExpandCaseToAllScalarTypes(Instr) \
+    ExpandCaseToFloatTypes(Instr) \
+ExpandCaseToIntTypes(Instr)
+
+#define ExpandCaseToAllScalarTruncTypes(Instr) \
+    ExpandCaseToFloatTruncTypes(Instr) \
+ExpandCaseToIntTruncTypes(Instr)
+
+#define ExpandCaseToAllTypes(Instr) \
+ExpandCaseToAllScalarTypes(Instr)
+
+#define ExpandCaseToAllTruncTypes(Instr) \
+ExpandCaseToAllScalarTruncTypes(Instr)
+
+// Macros that expand into statements with return values
+#define ExpandCaseTo32bitIntReturn(Instr, Return)  \
+case Instr##_i32: return Return##_i32;
+
+#define ExpandCaseToIntReturn(Instr, Return) \
+    ExpandCaseTo32bitIntReturn(Instr, Return)
+
+#define ExpandCaseToFloatReturn(Instr, Return) \
+    case Instr##_f32: return Return##_f32;\
+
+#define ExpandCaseToAllScalarReturn(Instr, Return) \
+    ExpandCaseToFloatReturn(Instr, Return) \
+ExpandCaseToIntReturn(Instr, Return)
+
+// These macros expand to common groupings of RegClass ID's
+#define ExpandCaseTo1CompRegID \
+case AMDIL::GPRI32RegClassID: \
+case AMDIL::GPRF32RegClassID:
+
+#define ExpandCaseTo32BitType(Instr) \
+case Instr##_i32: \
+case Instr##_f32:
+
+#endif // AMDILUTILITYFUNCTIONS_H_
diff --git a/lib/Target/AMDGPU/AMDILVersion.td b/lib/Target/AMDGPU/AMDILVersion.td
new file mode 100644
index 0000000..158ae9e
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDILVersion.td
@@ -0,0 +1,58 @@
+//===-- AMDILVersion.td - Barrier Instruction/Intrinsic definitions------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===--------------------------------------------------------------------===//
+// Intrinsic operation support
+//===--------------------------------------------------------------------===//
+let TargetPrefix = "AMDIL", isTarget = 1 in {
+def int_AMDIL_barrier   : GCCBuiltin<"barrier">,
+        BinaryIntNoRetInt;
+def int_AMDIL_barrier_global   : GCCBuiltin<"barrierGlobal">,
+      BinaryIntNoRetInt;
+def int_AMDIL_barrier_local   : GCCBuiltin<"barrierLocal">,
+      BinaryIntNoRetInt;
+def int_AMDIL_barrier_region   : GCCBuiltin<"barrierRegion">,
+      BinaryIntNoRetInt;
+def int_AMDIL_get_region_id : GCCBuiltin<"__amdil_get_region_id_int">,
+    Intrinsic<[llvm_v4i32_ty], [], []>;
+def int_AMDIL_get_region_local_id : GCCBuiltin<"__amdil_get_region_local_id_int">,
+    Intrinsic<[llvm_v4i32_ty], [], []>;
+def int_AMDIL_get_num_regions : GCCBuiltin<"__amdil_get_num_regions_int">,
+    Intrinsic<[llvm_v4i32_ty], [], []>;
+def int_AMDIL_get_region_size : GCCBuiltin<"__amdil_get_region_size_int">,
+    Intrinsic<[llvm_v4i32_ty], [], []>;
+}
+
+let isCall=1, isNotDuplicable=1 in {
+  let Predicates=[hasRegionAS] in {
+def BARRIER_EGNI : BinaryOpNoRet<IL_OP_BARRIER, (outs),
+      (ins GPRI32:$flag, GPRI32:$id),
+      "fence_threads_memory_lds_gds_gws",
+      [(int_AMDIL_barrier GPRI32:$flag, GPRI32:$id)]>;
+}
+let Predicates=[noRegionAS] in {
+def BARRIER_7XX : BinaryOpNoRet<IL_OP_BARRIER, (outs),
+      (ins GPRI32:$flag, GPRI32:$id),
+      "fence_threads_memory_lds",
+      [(int_AMDIL_barrier GPRI32:$flag, GPRI32:$id)]>;
+}
+
+def BARRIER_LOCAL : BinaryOpNoRet<IL_OP_BARRIER_LOCAL, (outs),
+      (ins GPRI32:$flag, GPRI32:$id),
+      "fence_threads_lds",
+      [(int_AMDIL_barrier_local GPRI32:$flag, GPRI32:$id)]>;
+
+def BARRIER_GLOBAL : BinaryOpNoRet<IL_OP_BARRIER_GLOBAL, (outs),
+      (ins GPRI32:$flag, GPRI32:$id),
+      "fence_threads_memory",
+      [(int_AMDIL_barrier_global GPRI32:$flag, GPRI32:$id)]>;
+
+def BARRIER_REGION : BinaryOpNoRet<IL_OP_BARRIER_REGION, (outs),
+    (ins GPRI32:$flag, GPRI32:$id),
+    "fence_threads_gds",
+    [(int_AMDIL_barrier_region GPRI32:$flag, GPRI32:$id)]>;
+}
diff --git a/lib/Target/AMDGPU/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
new file mode 100644
index 0000000..0615dd9
--- /dev/null
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -0,0 +1,51 @@
+set(LLVM_TARGET_DEFINITIONS AMDGPU.td)
+
+tablegen(LLVM AMDGPUGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM AMDGPUGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM AMDGPUGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM AMDGPUGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM AMDGPUGenSubtargetInfo.inc -gen-subtarget)
+tablegen(LLVM AMDGPUGenEDInfo.inc -gen-enhanced-disassembly-info)
+tablegen(LLVM AMDGPUGenIntrinsics.inc -gen-tgt-intrinsic)
+tablegen(LLVM AMDGPUGenCodeEmitter.inc -gen-emitter)
+add_public_tablegen_target(AMDGPUCommonTableGen)
+
+add_llvm_target(AMDGPUCodeGen
+  AMDIL7XXDevice.cpp
+  AMDILCFGStructurizer.cpp
+  AMDILDevice.cpp
+  AMDILDeviceInfo.cpp
+  AMDILEvergreenDevice.cpp
+  AMDILFrameLowering.cpp
+  AMDILInstrInfo.cpp
+  AMDILIntrinsicInfo.cpp
+  AMDILISelDAGToDAG.cpp
+  AMDILISelLowering.cpp
+  AMDILNIDevice.cpp
+  AMDILPeepholeOptimizer.cpp
+  AMDILRegisterInfo.cpp
+  AMDILSIDevice.cpp
+  AMDILSubtarget.cpp
+  AMDGPUTargetMachine.cpp
+  AMDGPUISelLowering.cpp
+  AMDGPUConvertToISA.cpp
+  AMDGPUInstrInfo.cpp
+  AMDGPURegisterInfo.cpp
+  AMDGPUUtil.cpp
+  R600CodeEmitter.cpp
+  R600InstrInfo.cpp
+  R600ISelLowering.cpp
+  R600KernelParameters.cpp
+  R600MachineFunctionInfo.cpp
+  R600RegisterInfo.cpp
+  SIAssignInterpRegs.cpp
+  SICodeEmitter.cpp
+  SIInstrInfo.cpp
+  SIISelLowering.cpp
+  SIMachineFunctionInfo.cpp
+  SIRegisterInfo.cpp
+  )
+
+add_subdirectory(TargetInfo)
+add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/AMDGPU/LLVMBuild.txt b/lib/Target/AMDGPU/LLVMBuild.txt
new file mode 100644
index 0000000..55afac5
--- /dev/null
+++ b/lib/Target/AMDGPU/LLVMBuild.txt
@@ -0,0 +1,32 @@
+;===- ./lib/Target/AMDGPU/LLVMBuild.txt ------------------------*- Conf -*--===;
+;
+;                     The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+;   http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[common]
+subdirectories = MCTargetDesc TargetInfo
+
+[component_0]
+type = TargetGroup
+name = AMDGPU
+parent = Target
+has_asmprinter = 0
+
+[component_1]
+type = Library
+name = AMDGPUCodeGen
+parent = AMDGPU
+required_libraries = AsmPrinter CodeGen Core SelectionDAG Support Target MC AMDGPUInfo AMDGPUDesc
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
new file mode 100644
index 0000000..f9d583c
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -0,0 +1,105 @@
+//===-- MCTargetDesc/AMDGPUMCAsmInfo.cpp - TODO: Add brief description -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: Add full description
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUMCAsmInfo.h"
+#ifndef NULL
+#define NULL 0
+#endif
+
+using namespace llvm;
+AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo()
+{
+  //===------------------------------------------------------------------===//
+  HasSubsectionsViaSymbols = true;
+  HasMachoZeroFillDirective = false;
+  HasMachoTBSSDirective = false;
+  HasStaticCtorDtorReferenceInStaticMode = false;
+  LinkerRequiresNonEmptyDwarfLines = true;
+  MaxInstLength = 16;
+  PCSymbol = "$";
+  SeparatorString = "\n";
+  CommentColumn = 40;
+  CommentString = ";";
+  LabelSuffix = ":";
+  GlobalPrefix = "@";
+  PrivateGlobalPrefix = ";.";
+  LinkerPrivateGlobalPrefix = "!";
+  InlineAsmStart = ";#ASMSTART";
+  InlineAsmEnd = ";#ASMEND";
+  AssemblerDialect = 0;
+  AllowQuotesInName = false;
+  AllowNameToStartWithDigit = false;
+  AllowPeriodsInName = false;
+
+  //===--- Data Emission Directives -------------------------------------===//
+  ZeroDirective = ".zero";
+  AsciiDirective = ".ascii\t";
+  AscizDirective = ".asciz\t";
+  Data8bitsDirective = ".byte\t";
+  Data16bitsDirective = ".short\t";
+  Data32bitsDirective = ".long\t";
+  Data64bitsDirective = ".quad\t";
+  GPRel32Directive = NULL;
+  SunStyleELFSectionSwitchSyntax = true;
+  UsesELFSectionDirectiveForBSS = true;
+  HasMicrosoftFastStdCallMangling = false;
+
+  //===--- Alignment Information ----------------------------------------===//
+  AlignDirective = ".align\t";
+  AlignmentIsInBytes = true;
+  TextAlignFillValue = 0;
+
+  //===--- Global Variable Emission Directives --------------------------===//
+  GlobalDirective = ".global";
+  ExternDirective = ".extern";
+  HasSetDirective = false;
+  HasAggressiveSymbolFolding = true;
+  LCOMMDirectiveType = LCOMM::None;
+  COMMDirectiveAlignmentIsInBytes = false;
+  HasDotTypeDotSizeDirective = false;
+  HasSingleParameterDotFile = true;
+  HasNoDeadStrip = true;
+  HasSymbolResolver = false;
+  WeakRefDirective = ".weakref\t";
+  WeakDefDirective = ".weakdef\t";
+  LinkOnceDirective = NULL;
+  HiddenVisibilityAttr = MCSA_Hidden;
+  HiddenDeclarationVisibilityAttr = MCSA_Hidden;
+  ProtectedVisibilityAttr = MCSA_Protected;
+
+  //===--- Dwarf Emission Directives -----------------------------------===//
+  HasLEB128 = true;
+  SupportsDebugInformation = true;
+  ExceptionsType = ExceptionHandling::None;
+  DwarfUsesInlineInfoSection = false;
+  DwarfSectionOffsetDirective = ".offset";
+  DwarfUsesLabelOffsetForRanges = true;
+
+}
+const char*
+AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const
+{
+  switch (AS) {
+    default:
+      return NULL;
+    case 0:
+      return NULL;
+  };
+  return NULL;
+}
+
+const MCSection*
+AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const
+{
+  return NULL;
+}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
new file mode 100644
index 0000000..0ca264b
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -0,0 +1,30 @@
+//===-- MCTargetDesc/AMDGPUMCAsmInfo.h - TODO: Add brief description -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: Add full description
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef AMDGPUMCASMINFO_H_
+#define AMDGPUMCASMINFO_H_
+
+#include "llvm/MC/MCAsmInfo.h"
+namespace llvm {
+  class Target;
+  class StringRef;
+
+  class AMDGPUMCAsmInfo : public MCAsmInfo {
+    public:
+      explicit AMDGPUMCAsmInfo(const Target &T, StringRef &TT);
+      const char*
+        getDataASDirective(unsigned int Size, unsigned int AS) const;
+      const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
+  };
+} // namespace llvm
+#endif // AMDGPUMCASMINFO_H_
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
new file mode 100644
index 0000000..5c6d13c
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -0,0 +1,61 @@
+#include "AMDGPUMCTargetDesc.h"
+#include "AMDGPUMCAsmInfo.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "AMDGPUGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "AMDGPUGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "AMDGPUGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static MCInstrInfo *createAMDGPUMCInstrInfo() {
+  MCInstrInfo *X = new MCInstrInfo();
+  InitAMDILMCInstrInfo(X);
+  return X;
+}
+
+static MCRegisterInfo *createAMDGPUMCRegisterInfo(StringRef TT) {
+  MCRegisterInfo *X = new MCRegisterInfo();
+  InitAMDILMCRegisterInfo(X, 0);
+  return X;
+}
+
+static MCSubtargetInfo *createAMDGPUMCSubtargetInfo(StringRef TT, StringRef CPU,
+                                                   StringRef FS) {
+  MCSubtargetInfo * X = new MCSubtargetInfo();
+  InitAMDILMCSubtargetInfo(X, TT, CPU, FS);
+  return X;
+}
+
+static MCCodeGenInfo *createAMDGPUMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+                                               CodeModel::Model CM,
+                                               CodeGenOpt::Level OL) {
+  MCCodeGenInfo *X = new MCCodeGenInfo();
+  X->InitMCCodeGenInfo(RM, CM, OL);
+  return X;
+}
+
+extern "C" void LLVMInitializeAMDGPUTargetMC() {
+
+  RegisterMCAsmInfo<AMDGPUMCAsmInfo> Y(TheAMDGPUTarget);
+
+  TargetRegistry::RegisterMCCodeGenInfo(TheAMDGPUTarget, createAMDGPUMCCodeGenInfo);
+
+  TargetRegistry::RegisterMCInstrInfo(TheAMDGPUTarget, createAMDGPUMCInstrInfo);
+
+  TargetRegistry::RegisterMCRegInfo(TheAMDGPUTarget, createAMDGPUMCRegisterInfo);
+
+  TargetRegistry::RegisterMCSubtargetInfo(TheAMDGPUTarget, createAMDGPUMCSubtargetInfo);
+
+}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
new file mode 100644
index 0000000..ed858b3
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -0,0 +1,35 @@
+//===-- AMDGPUMCTargetDesc.h - AMDGPU Target Descriptions -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AMDGPU specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef AMDGPUMCTARGETDESC_H
+#define AMDGPUMCTARGETDESC_H
+
+namespace llvm {
+class MCSubtargetInfo;
+class Target;
+
+extern Target TheAMDGPUTarget;
+
+} // End llvm namespace
+
+#define GET_REGINFO_ENUM
+#include "AMDGPUGenRegisterInfo.inc"
+
+#define GET_INSTRINFO_ENUM
+#include "AMDGPUGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "AMDGPUGenSubtargetInfo.inc"
+
+#endif // AMDGPUMCTARGETDESC_H
diff --git a/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt b/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
new file mode 100644
index 0000000..2c0d5af
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
@@ -0,0 +1,7 @@
+
+add_llvm_library(LLVMAMDGPUDesc
+  AMDGPUMCTargetDesc.cpp
+  AMDGPUMCAsmInfo.cpp
+  )
+
+add_dependencies(LLVMAMDGPUDesc AMDGPUCommonTableGen)
diff --git a/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt b/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
new file mode 100644
index 0000000..c7745d6
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt ------------*- Conf -*--===;
+;
+;                     The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+;   http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = AMDGPUDesc
+parent = AMDGPU
+required_libraries = AMDGPUInfo MC
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/MCTargetDesc/Makefile b/lib/Target/AMDGPU/MCTargetDesc/Makefile
new file mode 100644
index 0000000..5ad6866
--- /dev/null
+++ b/lib/Target/AMDGPU/MCTargetDesc/Makefile
@@ -0,0 +1,16 @@
+##===- lib/Target/AMDGPU/MCTargetDesc/Makefile --------------*- Makefile -*-===##
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME = LLVMAMDGPUDesc
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/Makefile b/lib/Target/AMDGPU/Makefile
new file mode 100644
index 0000000..75d2cc4
--- /dev/null
+++ b/lib/Target/AMDGPU/Makefile
@@ -0,0 +1,23 @@
+##===- lib/Target/AMDGPU/Makefile ---------------------------*- Makefile -*-===##
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMAMDGPUCodeGen
+TARGET = AMDGPU
+
+# Make sure that tblgen is run, first thing.
+BUILT_SOURCES = AMDGPUGenRegisterInfo.inc AMDGPUGenInstrInfo.inc \
+		AMDGPUGenAsmWriter.inc AMDGPUGenDAGISel.inc \
+		AMDGPUGenSubtargetInfo.inc AMDGPUGenCodeEmitter.inc \
+		AMDGPUGenCallingConv.inc AMDGPUGenEDInfo.inc \
+		AMDGPUGenIntrinsics.inc
+
+DIRS = TargetInfo MCTargetDesc
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/Processors.td b/lib/Target/AMDGPU/Processors.td
new file mode 100644
index 0000000..6d1b411
--- /dev/null
+++ b/lib/Target/AMDGPU/Processors.td
@@ -0,0 +1,28 @@
+//===-- Processors.td - R600/SI processor definitions ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AMDIL processors supported.
+//
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features> 
+: Processor<Name, itin, Features>;
+def : Proc<"rv710",      R600_EG_Itin, []>;
+def : Proc<"rv730",      R600_EG_Itin, []>;
+def : Proc<"rv770",      R600_EG_Itin, [FeatureFP64]>;
+def : Proc<"cedar",      R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"redwood",    R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"juniper",    R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"cypress",    R600_EG_Itin, [FeatureByteAddress, FeatureImages, FeatureFP64]>;
+def : Proc<"barts",      R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"turks",      R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"caicos",     R600_EG_Itin, [FeatureByteAddress, FeatureImages]>;
+def : Proc<"cayman",     R600_EG_Itin, [FeatureByteAddress, FeatureImages, FeatureFP64]>;
+def : Proc<"SI", SI_Itin, []>;
+
diff --git a/lib/Target/AMDGPU/R600CodeEmitter.cpp b/lib/Target/AMDGPU/R600CodeEmitter.cpp
new file mode 100644
index 0000000..176549f
--- /dev/null
+++ b/lib/Target/AMDGPU/R600CodeEmitter.cpp
@@ -0,0 +1,660 @@
+//===-- R600CodeEmitter.cpp - Code Emitter for R600->Cayman GPU families --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code emitter outputs bytecode that is understood by the r600g driver
+// in the Mesa [1] project.  The bytecode is very similar to the hardware's ISA,
+// except that the size of the instruction fields are rounded up to the
+// nearest byte.
+//
+// [1] http://www.mesa3d.org/
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUUtil.h"
+#include "AMDILCodeEmitter.h"
+#include "AMDILInstrInfo.h"
+#include "AMDILUtilityFunctions.h"
+#include "R600InstrInfo.h"
+#include "R600RegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <stdio.h>
+
+#define SRC_BYTE_COUNT 11
+#define DST_BYTE_COUNT 5
+
+using namespace llvm;
+
+namespace {
+
+class R600CodeEmitter : public MachineFunctionPass, public AMDILCodeEmitter {
+
+private:
+
+  static char ID;
+  formatted_raw_ostream &_OS;
+  const TargetMachine * TM;
+  const MachineRegisterInfo * MRI;
+  const R600RegisterInfo * TRI;
+
+  bool isCube;
+  bool isReduction;
+  bool isVector;
+  unsigned currentElement;
+  bool isLast;
+
+  unsigned section_start;
+
+public:
+
+  R600CodeEmitter(formatted_raw_ostream &OS) : MachineFunctionPass(ID),
+      _OS(OS), TM(NULL), isCube(false), isReduction(false), isVector(false),
+      isLast(true) { }
+
+  const char *getPassName() const { return "AMDGPU Machine Code Emitter"; }
+
+  bool runOnMachineFunction(MachineFunction &MF);
+  virtual uint64_t getMachineOpValue(const MachineInstr &MI,
+                                     const MachineOperand &MO) const;
+
+private:
+
+  void emitALUInstr(MachineInstr  &MI);
+  void emitSrc(const MachineOperand & MO, int chan_override  = -1);
+  void emitDst(const MachineOperand & MO);
+  void emitALU(MachineInstr &MI, unsigned numSrc);
+  void emitTexInstr(MachineInstr &MI);
+  void emitFCInstr(MachineInstr &MI);
+
+  void emitNullBytes(unsigned int byteCount);
+
+  void emitByte(unsigned int byte);
+
+  void emitTwoBytes(uint32_t bytes);
+
+  void emit(uint32_t value);
+  void emit(uint64_t value);
+
+  unsigned getHWReg(unsigned regNo) const;
+
+};
+
+} // End anonymous namespace
+
+enum RegElement {
+  ELEMENT_X = 0,
+  ELEMENT_Y,
+  ELEMENT_Z,
+  ELEMENT_W
+};
+
+enum InstrTypes {
+  INSTR_ALU = 0,
+  INSTR_TEX,
+  INSTR_FC,
+  INSTR_NATIVE,
+  INSTR_VTX
+};
+
+enum FCInstr {
+  FC_IF = 0,
+  FC_ELSE,
+  FC_ENDIF,
+  FC_BGNLOOP,
+  FC_ENDLOOP,
+  FC_BREAK,
+  FC_BREAK_NZ_INT,
+  FC_CONTINUE,
+  FC_BREAK_Z_INT
+};
+
+enum TextureTypes {
+  TEXTURE_1D = 1,
+  TEXTURE_2D,
+  TEXTURE_3D,
+  TEXTURE_CUBE,
+  TEXTURE_RECT,
+  TEXTURE_SHADOW1D,
+  TEXTURE_SHADOW2D,
+  TEXTURE_SHADOWRECT,
+  TEXTURE_1D_ARRAY,
+  TEXTURE_2D_ARRAY,
+  TEXTURE_SHADOW1D_ARRAY,
+  TEXTURE_SHADOW2D_ARRAY
+};
+
+char R600CodeEmitter::ID = 0;
+
+FunctionPass *llvm::createR600CodeEmitterPass(formatted_raw_ostream &OS) {
+  return new R600CodeEmitter(OS);
+}
+
+bool R600CodeEmitter::runOnMachineFunction(MachineFunction &MF) {
+
+  TM = &MF.getTarget();
+  MRI = &MF.getRegInfo();
+  TRI = static_cast<const R600RegisterInfo *>(TM->getRegisterInfo());
+  const R600InstrInfo * TII = static_cast<const R600InstrInfo *>(TM->getInstrInfo());
+  const AMDILSubtarget &STM = TM->getSubtarget<AMDILSubtarget>();
+  std::string gpu = STM.getDeviceName();
+
+  if (STM.dumpCode()) {
+    MF.dump();
+  }
+
+  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+                                                  BB != BB_E; ++BB) {
+     MachineBasicBlock &MBB = *BB;
+     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+                                                       I != E; ++I) {
+          MachineInstr &MI = *I;
+	  isReduction = AMDGPU::isReductionOp(MI.getOpcode());
+	  isVector = TII->isVector(MI);
+	  isCube = AMDGPU::isCubeOp(MI.getOpcode());
+          if (MI.getNumOperands() > 1 && MI.getOperand(0).isReg() && MI.getOperand(0).isDead()) {
+            continue;
+          }
+          if (AMDGPU::isTexOp(MI.getOpcode())) {
+            emitTexInstr(MI);
+          } else if (AMDGPU::isFCOp(MI.getOpcode())){
+            emitFCInstr(MI);
+          } else if (isReduction || isVector || isCube) {
+            isLast = false;
+            // XXX: On Cayman, some (all?) of the vector instructions only need
+            // to fill the first three slots.
+            for (currentElement = 0; currentElement < 4; currentElement++) {
+              isLast = (currentElement == 3);
+              emitALUInstr(MI);
+            }
+            isReduction = false;
+	    isVector = false;
+	    isCube = false;
+          } else if (MI.getOpcode() == AMDIL::RETURN ||
+                     MI.getOpcode() == AMDIL::BUNDLE ||
+                     MI.getOpcode() == AMDIL::KILL) {
+            continue;
+          } else {
+            switch(MI.getOpcode()) {
+            case AMDIL::RAT_WRITE_CACHELESS_eg:
+              {
+                  uint64_t inst = getBinaryCodeForInstr(MI);
+                // Set End Of Program bit
+                // XXX: Need better check of end of program.  EOP should be
+                // encoded in one of the operands of the MI, and it should be
+                // set in a prior pass.
+                MachineBasicBlock::iterator NextI = llvm::next(I);
+                MachineInstr &NextMI = *NextI;
+                if (NextMI.getOpcode() == AMDIL::RETURN) {
+                  inst |= (((uint64_t)1) << 53);
+                }
+                emitByte(INSTR_NATIVE);
+                emit(inst);
+                break;
+              }
+            case AMDIL::VTX_READ_PARAM_eg:
+            case AMDIL::VTX_READ_GLOBAL_eg:
+              {
+                emitByte(INSTR_VTX);
+                // inst
+                emitByte(0);
+
+                // fetch_type
+                emitByte(2);
+
+                // buffer_id
+                emitByte(MI.getOpcode() == AMDIL::VTX_READ_PARAM_eg ? 0 : 1);
+
+                // src_gpr
+                emitByte(getHWReg(MI.getOperand(1).getReg()));
+
+                // src_sel_x
+                emitByte(TRI->getHWRegChan(MI.getOperand(1).getReg()));
+
+                // mega_fetch_count
+                emitByte(3);
+
+                // dst_gpr
+                emitByte(getHWReg(MI.getOperand(0).getReg()));
+
+                // dst_sel_x
+                emitByte(0);
+
+                // dst_sel_y
+                emitByte(7);
+
+                // dst_sel_z
+                emitByte(7);
+
+                // dst_sel_w
+                emitByte(7);
+
+                // use_const_fields
+                emitByte(1);
+
+                // data_format
+                emitByte(0);
+
+                // num_format_all
+                emitByte(0);
+
+                // format_comp_all
+                emitByte(0);
+
+                // srf_mode_all
+                emitByte(0);
+
+                // offset
+                emitTwoBytes(MI.getOperand(2).getImm());
+
+                // endian
+                emitByte(0);
+                break;
+              }
+
+            default:
+              emitALUInstr(MI);
+              break;
+          }
+        }
+    }
+  }
+  return false;
+}
+
+// Emit one ALU instruction to the bytecode stream: a type byte (0 = ALU),
+// up to three source-operand slots (zero-padded when unused), the
+// destination operand, then the common ALU trailer (see emitALU).
+// Reads class state: isCube selects the special cube source pattern and
+// currentElement identifies which element of the current instruction
+// group is being emitted.
+void R600CodeEmitter::emitALUInstr(MachineInstr &MI)
+{
+
+  unsigned numOperands = MI.getNumExplicitOperands();
+
+   // Some instructions are just place holder instructions that represent
+   // operations that the GPU does automatically.  They should be ignored.
+  if (AMDGPU::isPlaceHolderOpcode(MI.getOpcode())) {
+    return;
+  }
+
+  // XXX Check if instruction writes a result
+  if (numOperands < 1) {
+    return;
+  }
+  // NOTE(review): this copies the operand by value; a const reference
+  // would avoid the copy — confirm the copy is intentional.
+  const MachineOperand dstOp = MI.getOperand(0);
+
+  // Emit instruction type
+  emitByte(0);
+
+  if (isCube) {
+    // The cube pattern reads the same source register twice, with channel
+    // swizzles mirrored around the current element; the remaining source
+    // slot is zero-padded.
+    static const int cube_src_swz[] = {2, 2, 0, 1};
+    emitSrc(MI.getOperand(1), cube_src_swz[currentElement]);
+    emitSrc(MI.getOperand(1), cube_src_swz[3-currentElement]);
+    emitNullBytes(SRC_BYTE_COUNT);
+  } else {
+    unsigned int opIndex;
+    for (opIndex = 1; opIndex < numOperands; opIndex++) {
+      // Literal constants are always stored as the last operand.
+      if (MI.getOperand(opIndex).isImm() || MI.getOperand(opIndex).isFPImm()) {
+        break;
+      }
+      emitSrc(MI.getOperand(opIndex));
+    }
+
+    // Emit zeros for unused sources
+    for ( ; opIndex < 4; opIndex++) {
+      emitNullBytes(SRC_BYTE_COUNT);
+    }
+  }
+
+  emitDst(dstOp);
+
+  emitALU(MI, numOperands - 1);
+}
+
+// Serialize one ALU source operand.  Byte layout:
+//   source select (2 bytes), channel (1), negate flag (1), absolute
+//   flag (1), relative addressing (1), kc_bank (1), literal value (4).
+// The literal payload is only non-zero when the source register is
+// ALU_LITERAL_X.  chan_override, when not -1, replaces the register's own
+// channel (used by the cube path in emitALUInstr); during a reduction the
+// channel is forced to currentElement instead.
+void R600CodeEmitter::emitSrc(const MachineOperand & MO, int chan_override)
+{
+  uint32_t value = 0;
+  // Emit the source select (2 bytes).  For GPRs, this is the register index.
+  // For other potential instruction operands, (e.g. constant registers) the
+  // value of the source select is defined in the r600isa docs.
+  if (MO.isReg()) {
+    unsigned reg = MO.getReg();
+    emitTwoBytes(getHWReg(reg));
+    if (reg == AMDIL::ALU_LITERAL_X) {
+      // The literal's value lives in the last explicit operand of the
+      // parent instruction (see the literal handling in emitALUInstr).
+      const MachineInstr * parent = MO.getParent();
+      unsigned immOpIndex = parent->getNumExplicitOperands() - 1;
+      MachineOperand immOp = parent->getOperand(immOpIndex);
+      if (immOp.isFPImm()) {
+        value = immOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue();
+      } else {
+        assert(immOp.isImm());
+        value = immOp.getImm();
+      }
+    }
+  } else {
+    // XXX: Handle other operand types.
+    emitTwoBytes(0);
+  }
+
+  // Emit the source channel (1 byte)
+  if (chan_override != -1) {
+    emitByte(chan_override);
+  } else if (isReduction) {
+    emitByte(currentElement);
+  } else if (MO.isReg()) {
+    emitByte(TRI->getHWRegChan(MO.getReg()));
+  } else {
+    emitByte(0);
+  }
+
+  // XXX: Emit isNegated (1 byte)
+  // A source is negated when the NEG flag is set (and ABS is not), or when
+  // it reads one of the pre-negated constant registers NEG_ONE / NEG_HALF.
+  if ((!(MO.getTargetFlags() & MO_FLAG_ABS))
+      && (MO.getTargetFlags() & MO_FLAG_NEG ||
+     (MO.isReg() &&
+      (MO.getReg() == AMDIL::NEG_ONE || MO.getReg() == AMDIL::NEG_HALF)))){
+    emitByte(1);
+  } else {
+    emitByte(0);
+  }
+
+  // Emit isAbsolute (1 byte)
+  if (MO.getTargetFlags() & MO_FLAG_ABS) {
+    emitByte(1);
+  } else {
+    emitByte(0);
+  }
+
+  // XXX: Emit relative addressing mode (1 byte)
+  emitByte(0);
+
+  // Emit kc_bank, This will be adjusted later by r600_asm
+  emitByte(0);
+
+  // Emit the literal value, if applicable (4 bytes).
+  emit(value);
+
+}
+
+// Serialize the ALU destination operand (DST_BYTE_COUNT bytes): register
+// index, channel, clamp flag, writemask, and relative addressing, one byte
+// each.  For reduction/vector groups the writemask is cleared for every
+// emitted element except the one matching the destination register's own
+// channel, so only that slot actually writes the result.
+void R600CodeEmitter::emitDst(const MachineOperand & MO)
+{
+  if (MO.isReg()) {
+    // Emit the destination register index (1 byte)
+    emitByte(getHWReg(MO.getReg()));
+
+    // Emit the element of the destination register (1 byte)
+    if (isReduction || isCube || isVector) {
+      emitByte(currentElement);
+    } else {
+      emitByte(TRI->getHWRegChan(MO.getReg()));
+    }
+
+    // Emit isClamped (1 byte)
+    if (MO.getTargetFlags() & MO_FLAG_CLAMP) {
+      emitByte(1);
+    } else {
+      emitByte(0);
+    }
+
+    // Emit writemask (1 byte).
+    if (((isReduction || isVector) &&
+          currentElement != TRI->getHWRegChan(MO.getReg()))
+       || MO.getTargetFlags() & MO_FLAG_MASK) {
+      emitByte(0);
+    } else {
+      emitByte(1);
+    }
+
+    // XXX: Emit relative addressing mode
+    emitByte(0);
+  } else {
+    // XXX: Handle other operand types.  Are there any for destination regs?
+    emitNullBytes(DST_BYTE_COUNT);
+  }
+}
+
+// Emit the trailing fields shared by every ALU instruction: the 2-byte
+// opcode encoding, the isLast flag for the current instruction group, the
+// isOp3 flag for three-source opcodes, and zero placeholders for the
+// not-yet-implemented predicate, bank swizzle(s), OMOD and index_mode
+// fields.  numSrc is the number of source operands of MI.
+void R600CodeEmitter::emitALU(MachineInstr &MI, unsigned numSrc)
+{
+  // Emit the instruction (2 bytes)
+  emitTwoBytes(getBinaryCodeForInstr(MI));
+
+  // Emit isLast (for this instruction group) (1 byte)
+  if (isLast) {
+    emitByte(1);
+  } else {
+    emitByte(0);
+  }
+  // Emit isOp3 (1 byte)
+  if (numSrc == 3) {
+    emitByte(1);
+  } else {
+    emitByte(0);
+  }
+
+  // XXX: Emit predicate (1 byte)
+  emitByte(0);
+
+  // XXX: Emit bank swizzle. (1 byte)  Do we need this?  It looks like
+  // r600_asm.c sets it.
+  emitByte(0);
+
+  // XXX: Emit bank_swizzle_force (1 byte) Not sure what this is for.
+  emitByte(0);
+
+  // XXX: Emit OMOD (1 byte) Not implemented.
+  emitByte(0);
+
+  // XXX: Emit index_mode.  I think this is for indirect addressing, so we
+  // don't need to worry about it.
+  emitByte(0);
+}
+
+// Emit a texture-fetch instruction.  TEX_LD carries three leading offset
+// operands, which shifts the positions of the sampler and texture-type
+// immediates by op_offset.  Coordinate types and the source select are
+// then adjusted for RECT, array, and shadow texture types before the
+// swizzle bytes are written.
+void R600CodeEmitter::emitTexInstr(MachineInstr &MI)
+{
+
+  unsigned opcode = MI.getOpcode();
+  bool hasOffsets = (opcode == AMDIL::TEX_LD);
+  unsigned op_offset = hasOffsets ? 3 : 0;
+  int64_t sampler = MI.getOperand(op_offset+2).getImm();
+  int64_t textureType = MI.getOperand(op_offset+3).getImm();
+  // Default identity swizzle; entries are remapped below for array and
+  // shadow texture types.
+  unsigned srcSelect[4] = {0, 1, 2, 3};
+
+  // Emit instruction type
+  emitByte(1);
+
+  // Emit instruction
+  emitByte(getBinaryCodeForInstr(MI));
+
+  // XXX: Emit resource id r600_shader.c uses sampler + 1.  Why?
+  emitByte(sampler + 1 + 1);
+
+  // Emit source register
+  emitByte(getHWReg(MI.getOperand(1).getReg()));
+
+  // XXX: Emit src isRelativeAddress
+  emitByte(0);
+
+  // Emit destination register
+  emitByte(getHWReg(MI.getOperand(0).getReg()));
+
+  // XXX: Emit dst isRelativeAddress
+  emitByte(0);
+
+  // XXX: Emit dst select
+  emitByte(0); // X
+  emitByte(1); // Y
+  emitByte(2); // Z
+  emitByte(3); // W
+
+  // XXX: Emit lod bias
+  emitByte(0);
+
+  // XXX: Emit coord types
+  unsigned coordType[4] = {1, 1, 1, 1};
+
+  if (textureType == TEXTURE_RECT
+      || textureType == TEXTURE_SHADOWRECT) {
+    coordType[ELEMENT_X] = 0;
+    coordType[ELEMENT_Y] = 0;
+  }
+
+  if (textureType == TEXTURE_1D_ARRAY
+      || textureType == TEXTURE_SHADOW1D_ARRAY) {
+    if (opcode == AMDIL::TEX_SAMPLE_C_L || opcode == AMDIL::TEX_SAMPLE_C_LB) {
+      coordType[ELEMENT_Y] = 0;
+    } else {
+      coordType[ELEMENT_Z] = 0;
+      srcSelect[ELEMENT_Z] = ELEMENT_Y;
+    }
+  } else if (textureType == TEXTURE_2D_ARRAY
+             || textureType == TEXTURE_SHADOW2D_ARRAY) {
+    coordType[ELEMENT_Z] = 0;
+  }
+
+  for (unsigned i = 0; i < 4; i++) {
+    emitByte(coordType[i]);
+  }
+
+  // XXX: Emit offsets
+  if (hasOffsets)
+	  for (unsigned i = 2; i < 5; i++)
+		  emitByte(MI.getOperand(i).getImm()<<1);
+  else
+	  emitNullBytes(3);
+
+  // Emit sampler id
+  emitByte(sampler);
+
+  // XXX:Emit source select
+  // Shadow lookups read the compare value from W instead of Z, except for
+  // the explicit-LOD compare opcodes which already encode it elsewhere.
+  if ((textureType == TEXTURE_SHADOW1D
+      || textureType == TEXTURE_SHADOW2D
+      || textureType == TEXTURE_SHADOWRECT
+      || textureType == TEXTURE_SHADOW1D_ARRAY)
+      && opcode != AMDIL::TEX_SAMPLE_C_L
+      && opcode != AMDIL::TEX_SAMPLE_C_LB) {
+    srcSelect[ELEMENT_W] = ELEMENT_Z;
+  }
+
+  for (unsigned i = 0; i < 4; i++) {
+    emitByte(srcSelect[i]);
+  }
+}
+
+// Emit a flow-control instruction: the INSTR_FC type byte, an optional
+// single source operand (the branch/loop condition), then the FCInstr enum
+// value identifying the control-flow operation.  Opcodes without a mapping
+// (including IF_LOGICALZ_f32) abort(), so `instr` is always assigned
+// before it is used.
+void R600CodeEmitter::emitFCInstr(MachineInstr &MI)
+{
+  // Emit instruction type
+  emitByte(INSTR_FC);
+
+  // Emit SRC
+  unsigned numOperands = MI.getNumOperands();
+  if (numOperands > 0) {
+    assert(numOperands == 1);
+    emitSrc(MI.getOperand(0));
+  } else {
+    emitNullBytes(SRC_BYTE_COUNT);
+  }
+
+  // Emit FC Instruction
+  enum FCInstr instr;
+  switch (MI.getOpcode()) {
+  case AMDIL::BREAK_LOGICALZ_f32:
+    instr = FC_BREAK;
+    break;
+  case AMDIL::BREAK_LOGICALNZ_f32:
+  case AMDIL::BREAK_LOGICALNZ_i32:
+    instr = FC_BREAK_NZ_INT;
+    break;
+  case AMDIL::BREAK_LOGICALZ_i32:
+    instr = FC_BREAK_Z_INT;
+    break;
+  case AMDIL::CONTINUE_LOGICALNZ_f32:
+  case AMDIL::CONTINUE_LOGICALNZ_i32:
+    instr = FC_CONTINUE;
+    break;
+  case AMDIL::IF_LOGICALNZ_f32:
+  case AMDIL::IF_LOGICALNZ_i32:
+    instr = FC_IF;
+    break;
+  case AMDIL::IF_LOGICALZ_f32:
+    // Deliberately unsupported; earlier passes are expected to have
+    // rewritten this form.
+    abort();
+    break;
+  case AMDIL::ELSE:
+    instr = FC_ELSE;
+    break;
+  case AMDIL::ENDIF:
+    instr = FC_ENDIF;
+    break;
+  case AMDIL::ENDLOOP:
+    instr = FC_ENDLOOP;
+    break;
+  case AMDIL::WHILELOOP:
+    instr = FC_BGNLOOP;
+    break;
+  default:
+    abort();
+    break;
+  }
+  emitByte(instr);
+}
+
+// Emit byteCount zero bytes (used to pad unused operand slots).
+void R600CodeEmitter::emitNullBytes(unsigned int byteCount)
+{
+  for (unsigned int i = 0; i < byteCount; i++) {
+    emitByte(0);
+  }
+}
+
+// Write a single byte to the output stream.  (The & 0xff is redundant:
+// the uint8_t cast already truncates, and it binds tighter than &.)
+void R600CodeEmitter::emitByte(unsigned int byte)
+{
+  _OS.write((uint8_t) byte & 0xff);
+}
+// Write a 16-bit value to the output stream, least-significant byte first.
+void R600CodeEmitter::emitTwoBytes(unsigned int bytes)
+{
+  _OS.write((uint8_t) (bytes & 0xff));
+  _OS.write((uint8_t) ((bytes >> 8) & 0xff));
+}
+
+// Write a 32-bit value to the output stream, least-significant byte first.
+void R600CodeEmitter::emit(uint32_t value)
+{
+  for (unsigned i = 0; i < 4; i++) {
+    _OS.write((uint8_t) ((value >> (8 * i)) & 0xff));
+  }
+}
+
+// Write a 64-bit value to the output stream, least-significant byte first.
+void R600CodeEmitter::emit(uint64_t value)
+{
+  for (unsigned i = 0; i < 8; i++) {
+    emitByte((value >> (8 * i)) & 0xff);
+  }
+}
+
+// Map a register number to its hardware encoding.  Constant-file registers
+// (R600_CReg32) are offset by 512 to place them above the GPR range.
+unsigned R600CodeEmitter::getHWReg(unsigned regNo) const
+{
+  unsigned hwReg;
+
+  hwReg = TRI->getEncodingValue(regNo);
+  if (AMDIL::R600_CReg32RegClass.contains(regNo)) {
+    hwReg += 512;
+  }
+  return hwReg;
+}
+
+// TableGen'd encoder callback: registers encode as their hardware register
+// number (via getHWReg); anything else is treated as an immediate.
+// NOTE(review): non-register, non-immediate operands would assert inside
+// getImm() — presumably none reach this path; confirm against the .td
+// operand definitions.
+uint64_t R600CodeEmitter::getMachineOpValue(const MachineInstr &MI,
+                                            const MachineOperand &MO) const
+{
+  if (MO.isReg()) {
+    return getHWReg(MO.getReg());
+  } else {
+    return MO.getImm();
+  }
+}
+
+#include "AMDGPUGenCodeEmitter.inc"
+
diff --git a/lib/Target/AMDGPU/R600GenRegisterInfo.pl b/lib/Target/AMDGPU/R600GenRegisterInfo.pl
new file mode 100644
index 0000000..9d94e65
--- /dev/null
+++ b/lib/Target/AMDGPU/R600GenRegisterInfo.pl
@@ -0,0 +1,189 @@
+#===-- R600GenRegisterInfo.pl - Script for generating register info files --===#
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+#
+# This perl script prints to stdout .td code to be used as R600RegisterInfo.td
+# it also generates a file called R600HwRegInfo.include, which contains helper
+# functions for determining the hw encoding of registers.
+#
+#===------------------------------------------------------------------------===#
+
+use strict;
+use warnings;
+
+# Sizes of the constant (C) and temporary (T) register files.  Each
+# register contributes four single-channel registers (X/Y/Z/W), hence the
+# "* 4" multipliers below.
+use constant CONST_REG_COUNT => 1024;
+use constant TEMP_REG_COUNT => 128;
+
+my $CREG_MAX = CONST_REG_COUNT - 1;
+my $TREG_MAX = TEMP_REG_COUNT - 1;
+
+# Emit the TableGen register classes that the generated defs below use.
+print <<STRING;
+
+class R600Reg <string name, bits<16> encoding> : Register<name> {
+  let Namespace = "AMDIL";
+  let HWEncoding = encoding;
+}
+
+class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
+    RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
+  let HWEncoding = encoding;
+}
+
+STRING
+
+# File-scope loop index, shared with print_reg_defs() and the map-building
+# loops below.
+my $i;
+
+### REG DEFS ###
+
+my @creg_list = print_reg_defs(CONST_REG_COUNT * 4, "C");
+my @treg_list = print_reg_defs(TEMP_REG_COUNT * 4, "T");
+
+# Define the 128-bit T registers as aggregates of their four 32-bit
+# channel sub-registers, collecting the names (and the X-channel names)
+# for the RegisterClass lists emitted afterwards.
+my @t128reg;
+my @treg_x;
+for (my $i = 0; $i < TEMP_REG_COUNT; $i++) {
+  my $name = "T$i\_XYZW";
+  print qq{def $name : R600Reg_128 <"T$i.XYZW", [T$i\_X, T$i\_Y, T$i\_Z, T$i\_W], $i >;\n};
+  $t128reg[$i] = $name;
+  $treg_x[$i] = "T$i\_X";
+  if ($i % 10 == 0) {
+    # Break the generated lists into shorter lines for readability.
+    $t128reg[$i] .= "\n";
+    $treg_x[$i] .= "\n";
+  }
+}
+
+# Comma-joined name lists, interpolated into the RegisterClass defs below.
+my $treg_string = join(",", @treg_list);
+my $creg_list = join(",", @creg_list);
+my $t128_string = join(",", @t128reg);
+my $treg_x_string = join(",", @treg_x);
+print <<STRING;
+
+class RegSet <dag s> {
+  dag set = s;
+}
+
+def ZERO : R600Reg<"0.0", 248>;
+def ONE : R600Reg<"1.0", 249>;
+def NEG_ONE : R600Reg<"-1.0", 249>;
+def ONE_INT : R600Reg<"1", 250>;
+def HALF : R600Reg<"0.5", 252>;
+def NEG_HALF : R600Reg<"-0.5", 252>;
+def PV_X : R600Reg<"pv.x", 254>;
+def ALU_LITERAL_X : R600Reg<"literal.x", 253>;
+
+def R600_CReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+    $creg_list)>;
+
+def R600_TReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+    $treg_string)>;
+
+def R600_TReg32_X : RegisterClass <"AMDIL", [f32, i32], 32, (add
+    $treg_x_string)>;
+    
+def R600_Reg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+    R600_TReg32,
+    R600_CReg32,
+    ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF)>;
+
+def R600_Reg128 : RegisterClass<"AMDIL", [v4f32, v4i32], 128, (add
+    $t128_string)>
+{
+  let CopyCost = -1;
+}
+
+STRING
+
+# Group every generated register name by its hardware index and by its
+# channel letter; the channel map drives the getHWRegChanGen() switch.
+my %index_map;
+my %chan_map;
+
+for ($i = 0; $i <= $#creg_list; $i++) {
+  push(@{$index_map{get_hw_index($i)}}, $creg_list[$i]);
+  push(@{$chan_map{get_chan_str($i)}}, $creg_list[$i]);
+}
+
+for ($i = 0; $i <= $#treg_list; $i++) {
+  push(@{$index_map{get_hw_index($i)}}, $treg_list[$i]);
+  push(@{$chan_map{get_chan_str($i)}}, $treg_list[$i]);
+}
+
+for ($i = 0; $i <= $#t128reg; $i++) {
+  push(@{$index_map{$i}}, $t128reg[$i]);
+  push(@{$chan_map{'X'}}, $t128reg[$i]);
+}
+
+# NOTE(review): the header comment says this script generates
+# "R600HwRegInfo.include", but the file opened here is "R600HwRegInfo.inc"
+# — confirm which name the build expects.  The open() result is unchecked
+# and the handle is only closed implicitly at interpreter exit.
+open(OUTFILE, ">", "R600HwRegInfo.inc");
+
+print OUTFILE <<STRING;
+
+unsigned R600RegisterInfo::getHWRegChanGen(unsigned reg) const
+{
+  switch(reg) {
+  default: assert(!"Unknown register"); return 0;
+STRING
+
+# One case label per register, grouped by channel, followed by the numeric
+# channel value that group returns.
+foreach my $key (keys(%chan_map)) {
+  foreach my $reg (@{$chan_map{$key}}) {
+    print OUTFILE " case AMDIL::$reg:\n";
+  }
+  my $val;
+  if ($key eq 'X') {
+    $val = 0;
+  } elsif ($key eq 'Y') {
+    $val = 1;
+  } elsif ($key eq 'Z') {
+    $val = 2;
+  } elsif ($key eq 'W') {
+    $val = 3;
+  } else {
+    die("Unknown chan value; $key");
+  }
+  print OUTFILE "    return $val;\n\n";
+}
+
+print OUTFILE "  }\n}\n\n";
+
+# print_reg_defs(count, prefix): print `count` R600Reg definitions named
+# "<prefix><hw_index>_<chan>" to stdout and return the list of generated
+# names.  Every tenth name gets a trailing newline appended so the
+# generated .td register lists wrap.  Note: the loop reuses the file-scope
+# $i rather than a lexical.
+sub print_reg_defs {
+  my ($count, $prefix) = @_;
+
+  my @reg_list;
+
+  for ($i = 0; $i < $count; $i++) {
+    my $hw_index = get_hw_index($i);
+    my $chan= get_chan_str($i);
+    my $name = "$prefix$hw_index\_$chan";
+    print qq{def $name : R600Reg <"$prefix$hw_index.$chan", $hw_index>;\n};
+    $reg_list[$i] = $name;
+    if ($i % 10 == 0) {
+        $reg_list[$i] .= "\n";
+    }
+  }
+  return @reg_list;
+}
+
+# Helper functions
+
+# get_hw_index(index): map a flat channel-register index to its hardware
+# register index (four channels per register).
+sub get_hw_index {
+  my ($index) = @_;
+  return int($index / 4);
+}
+
+# get_chan_str(index): map a flat channel-register index to its channel
+# letter (X/Y/Z/W) via index mod 4.  The die() is defensive; it should be
+# unreachable for the non-negative indices this script generates.
+sub get_chan_str {
+  my ($index) = @_;
+  my $chan = $index % 4;
+  if ($chan == 0 )  {
+    return 'X';
+  } elsif ($chan == 1) {
+    return 'Y';
+  } elsif ($chan == 2) {
+    return 'Z';
+  } elsif ($chan == 3) {
+    return 'W';
+  } else {
+    die("Unknown chan value: $chan");
+  }
+}
diff --git a/lib/Target/AMDGPU/R600HwRegInfo.include b/lib/Target/AMDGPU/R600HwRegInfo.include
new file mode 100644
index 0000000..f51067a
--- /dev/null
+++ b/lib/Target/AMDGPU/R600HwRegInfo.include
@@ -0,0 +1,1848 @@
+
+unsigned R600RegisterInfo::getHWRegChanGen(unsigned reg) const
+{
+  switch(reg) {
+  default: assert(!"Unknown register"); return 0;
+ case AMDIL::C0_Z:
+ case AMDIL::C1_Z:
+ case AMDIL::C2_Z
+:
+ case AMDIL::C3_Z:
+ case AMDIL::C4_Z:
+ case AMDIL::C5_Z:
+ case AMDIL::C6_Z:
+ case AMDIL::C7_Z
+:
+ case AMDIL::C8_Z:
+ case AMDIL::C9_Z:
+ case AMDIL::C10_Z:
+ case AMDIL::C11_Z:
+ case AMDIL::C12_Z
+:
+ case AMDIL::C13_Z:
+ case AMDIL::C14_Z:
+ case AMDIL::C15_Z:
+ case AMDIL::C16_Z:
+ case AMDIL::C17_Z
+:
+ case AMDIL::C18_Z:
+ case AMDIL::C19_Z:
+ case AMDIL::C20_Z:
+ case AMDIL::C21_Z:
+ case AMDIL::C22_Z
+:
+ case AMDIL::C23_Z:
+ case AMDIL::C24_Z:
+ case AMDIL::C25_Z:
+ case AMDIL::C26_Z:
+ case AMDIL::C27_Z
+:
+ case AMDIL::C28_Z:
+ case AMDIL::C29_Z:
+ case AMDIL::C30_Z:
+ case AMDIL::C31_Z:
+ case AMDIL::C32_Z
+:
+ case AMDIL::C33_Z:
+ case AMDIL::C34_Z:
+ case AMDIL::C35_Z:
+ case AMDIL::C36_Z:
+ case AMDIL::C37_Z
+:
+ case AMDIL::C38_Z:
+ case AMDIL::C39_Z:
+ case AMDIL::C40_Z:
+ case AMDIL::C41_Z:
+ case AMDIL::C42_Z
+:
+ case AMDIL::C43_Z:
+ case AMDIL::C44_Z:
+ case AMDIL::C45_Z:
+ case AMDIL::C46_Z:
+ case AMDIL::C47_Z
+:
+ case AMDIL::C48_Z:
+ case AMDIL::C49_Z:
+ case AMDIL::C50_Z:
+ case AMDIL::C51_Z:
+ case AMDIL::C52_Z
+:
+ case AMDIL::C53_Z:
+ case AMDIL::C54_Z:
+ case AMDIL::C55_Z:
+ case AMDIL::C56_Z:
+ case AMDIL::C57_Z
+:
+ case AMDIL::C58_Z:
+ case AMDIL::C59_Z:
+ case AMDIL::C60_Z:
+ case AMDIL::C61_Z:
+ case AMDIL::C62_Z
+:
+ case AMDIL::C63_Z:
+ case AMDIL::C64_Z:
+ case AMDIL::C65_Z:
+ case AMDIL::C66_Z:
+ case AMDIL::C67_Z
+:
+ case AMDIL::C68_Z:
+ case AMDIL::C69_Z:
+ case AMDIL::C70_Z:
+ case AMDIL::C71_Z:
+ case AMDIL::C72_Z
+:
+ case AMDIL::C73_Z:
+ case AMDIL::C74_Z:
+ case AMDIL::C75_Z:
+ case AMDIL::C76_Z:
+ case AMDIL::C77_Z
+:
+ case AMDIL::C78_Z:
+ case AMDIL::C79_Z:
+ case AMDIL::C80_Z:
+ case AMDIL::C81_Z:
+ case AMDIL::C82_Z
+:
+ case AMDIL::C83_Z:
+ case AMDIL::C84_Z:
+ case AMDIL::C85_Z:
+ case AMDIL::C86_Z:
+ case AMDIL::C87_Z
+:
+ case AMDIL::C88_Z:
+ case AMDIL::C89_Z:
+ case AMDIL::C90_Z:
+ case AMDIL::C91_Z:
+ case AMDIL::C92_Z
+:
+ case AMDIL::C93_Z:
+ case AMDIL::C94_Z:
+ case AMDIL::C95_Z:
+ case AMDIL::C96_Z:
+ case AMDIL::C97_Z
+:
+ case AMDIL::C98_Z:
+ case AMDIL::C99_Z:
+ case AMDIL::C100_Z:
+ case AMDIL::C101_Z:
+ case AMDIL::C102_Z
+:
+ case AMDIL::C103_Z:
+ case AMDIL::C104_Z:
+ case AMDIL::C105_Z:
+ case AMDIL::C106_Z:
+ case AMDIL::C107_Z
+:
+ case AMDIL::C108_Z:
+ case AMDIL::C109_Z:
+ case AMDIL::C110_Z:
+ case AMDIL::C111_Z:
+ case AMDIL::C112_Z
+:
+ case AMDIL::C113_Z:
+ case AMDIL::C114_Z:
+ case AMDIL::C115_Z:
+ case AMDIL::C116_Z:
+ case AMDIL::C117_Z
+:
+ case AMDIL::C118_Z:
+ case AMDIL::C119_Z:
+ case AMDIL::C120_Z:
+ case AMDIL::C121_Z:
+ case AMDIL::C122_Z
+:
+ case AMDIL::C123_Z:
+ case AMDIL::C124_Z:
+ case AMDIL::C125_Z:
+ case AMDIL::C126_Z:
+ case AMDIL::C127_Z
+:
+ case AMDIL::C128_Z:
+ case AMDIL::C129_Z:
+ case AMDIL::C130_Z:
+ case AMDIL::C131_Z:
+ case AMDIL::C132_Z
+:
+ case AMDIL::C133_Z:
+ case AMDIL::C134_Z:
+ case AMDIL::C135_Z:
+ case AMDIL::C136_Z:
+ case AMDIL::C137_Z
+:
+ case AMDIL::C138_Z:
+ case AMDIL::C139_Z:
+ case AMDIL::C140_Z:
+ case AMDIL::C141_Z:
+ case AMDIL::C142_Z
+:
+ case AMDIL::C143_Z:
+ case AMDIL::C144_Z:
+ case AMDIL::C145_Z:
+ case AMDIL::C146_Z:
+ case AMDIL::C147_Z
+:
+ case AMDIL::C148_Z:
+ case AMDIL::C149_Z:
+ case AMDIL::C150_Z:
+ case AMDIL::C151_Z:
+ case AMDIL::C152_Z
+:
+ case AMDIL::C153_Z:
+ case AMDIL::C154_Z:
+ case AMDIL::C155_Z:
+ case AMDIL::C156_Z:
+ case AMDIL::C157_Z
+:
+ case AMDIL::C158_Z:
+ case AMDIL::C159_Z:
+ case AMDIL::C160_Z:
+ case AMDIL::C161_Z:
+ case AMDIL::C162_Z
+:
+ case AMDIL::C163_Z:
+ case AMDIL::C164_Z:
+ case AMDIL::C165_Z:
+ case AMDIL::C166_Z:
+ case AMDIL::C167_Z
+:
+ case AMDIL::C168_Z:
+ case AMDIL::C169_Z:
+ case AMDIL::C170_Z:
+ case AMDIL::C171_Z:
+ case AMDIL::C172_Z
+:
+ case AMDIL::C173_Z:
+ case AMDIL::C174_Z:
+ case AMDIL::C175_Z:
+ case AMDIL::C176_Z:
+ case AMDIL::C177_Z
+:
+ case AMDIL::C178_Z:
+ case AMDIL::C179_Z:
+ case AMDIL::C180_Z:
+ case AMDIL::C181_Z:
+ case AMDIL::C182_Z
+:
+ case AMDIL::C183_Z:
+ case AMDIL::C184_Z:
+ case AMDIL::C185_Z:
+ case AMDIL::C186_Z:
+ case AMDIL::C187_Z
+:
+ case AMDIL::C188_Z:
+ case AMDIL::C189_Z:
+ case AMDIL::C190_Z:
+ case AMDIL::C191_Z:
+ case AMDIL::C192_Z
+:
+ case AMDIL::C193_Z:
+ case AMDIL::C194_Z:
+ case AMDIL::C195_Z:
+ case AMDIL::C196_Z:
+ case AMDIL::C197_Z
+:
+ case AMDIL::C198_Z:
+ case AMDIL::C199_Z:
+ case AMDIL::C200_Z:
+ case AMDIL::C201_Z:
+ case AMDIL::C202_Z
+:
+ case AMDIL::C203_Z:
+ case AMDIL::C204_Z:
+ case AMDIL::C205_Z:
+ case AMDIL::C206_Z:
+ case AMDIL::C207_Z
+:
+ case AMDIL::C208_Z:
+ case AMDIL::C209_Z:
+ case AMDIL::C210_Z:
+ case AMDIL::C211_Z:
+ case AMDIL::C212_Z
+:
+ case AMDIL::C213_Z:
+ case AMDIL::C214_Z:
+ case AMDIL::C215_Z:
+ case AMDIL::C216_Z:
+ case AMDIL::C217_Z
+:
+ case AMDIL::C218_Z:
+ case AMDIL::C219_Z:
+ case AMDIL::C220_Z:
+ case AMDIL::C221_Z:
+ case AMDIL::C222_Z
+:
+ case AMDIL::C223_Z:
+ case AMDIL::C224_Z:
+ case AMDIL::C225_Z:
+ case AMDIL::C226_Z:
+ case AMDIL::C227_Z
+:
+ case AMDIL::C228_Z:
+ case AMDIL::C229_Z:
+ case AMDIL::C230_Z:
+ case AMDIL::C231_Z:
+ case AMDIL::C232_Z
+:
+ case AMDIL::C233_Z:
+ case AMDIL::C234_Z:
+ case AMDIL::C235_Z:
+ case AMDIL::C236_Z:
+ case AMDIL::C237_Z
+:
+ case AMDIL::C238_Z:
+ case AMDIL::C239_Z:
+ case AMDIL::C240_Z:
+ case AMDIL::C241_Z:
+ case AMDIL::C242_Z
+:
+ case AMDIL::C243_Z:
+ case AMDIL::C244_Z:
+ case AMDIL::C245_Z:
+ case AMDIL::C246_Z:
+ case AMDIL::C247_Z
+:
+ case AMDIL::C248_Z:
+ case AMDIL::C249_Z:
+ case AMDIL::C250_Z:
+ case AMDIL::C251_Z:
+ case AMDIL::C252_Z
+:
+ case AMDIL::C253_Z:
+ case AMDIL::C254_Z:
+ case AMDIL::C255_Z:
+ case AMDIL::T0_Z:
+ case AMDIL::T1_Z:
+ case AMDIL::T2_Z
+:
+ case AMDIL::T3_Z:
+ case AMDIL::T4_Z:
+ case AMDIL::T5_Z:
+ case AMDIL::T6_Z:
+ case AMDIL::T7_Z
+:
+ case AMDIL::T8_Z:
+ case AMDIL::T9_Z:
+ case AMDIL::T10_Z:
+ case AMDIL::T11_Z:
+ case AMDIL::T12_Z
+:
+ case AMDIL::T13_Z:
+ case AMDIL::T14_Z:
+ case AMDIL::T15_Z:
+ case AMDIL::T16_Z:
+ case AMDIL::T17_Z
+:
+ case AMDIL::T18_Z:
+ case AMDIL::T19_Z:
+ case AMDIL::T20_Z:
+ case AMDIL::T21_Z:
+ case AMDIL::T22_Z
+:
+ case AMDIL::T23_Z:
+ case AMDIL::T24_Z:
+ case AMDIL::T25_Z:
+ case AMDIL::T26_Z:
+ case AMDIL::T27_Z
+:
+ case AMDIL::T28_Z:
+ case AMDIL::T29_Z:
+ case AMDIL::T30_Z:
+ case AMDIL::T31_Z:
+ case AMDIL::T32_Z
+:
+ case AMDIL::T33_Z:
+ case AMDIL::T34_Z:
+ case AMDIL::T35_Z:
+ case AMDIL::T36_Z:
+ case AMDIL::T37_Z
+:
+ case AMDIL::T38_Z:
+ case AMDIL::T39_Z:
+ case AMDIL::T40_Z:
+ case AMDIL::T41_Z:
+ case AMDIL::T42_Z
+:
+ case AMDIL::T43_Z:
+ case AMDIL::T44_Z:
+ case AMDIL::T45_Z:
+ case AMDIL::T46_Z:
+ case AMDIL::T47_Z
+:
+ case AMDIL::T48_Z:
+ case AMDIL::T49_Z:
+ case AMDIL::T50_Z:
+ case AMDIL::T51_Z:
+ case AMDIL::T52_Z
+:
+ case AMDIL::T53_Z:
+ case AMDIL::T54_Z:
+ case AMDIL::T55_Z:
+ case AMDIL::T56_Z:
+ case AMDIL::T57_Z
+:
+ case AMDIL::T58_Z:
+ case AMDIL::T59_Z:
+ case AMDIL::T60_Z:
+ case AMDIL::T61_Z:
+ case AMDIL::T62_Z
+:
+ case AMDIL::T63_Z:
+ case AMDIL::T64_Z:
+ case AMDIL::T65_Z:
+ case AMDIL::T66_Z:
+ case AMDIL::T67_Z
+:
+ case AMDIL::T68_Z:
+ case AMDIL::T69_Z:
+ case AMDIL::T70_Z:
+ case AMDIL::T71_Z:
+ case AMDIL::T72_Z
+:
+ case AMDIL::T73_Z:
+ case AMDIL::T74_Z:
+ case AMDIL::T75_Z:
+ case AMDIL::T76_Z:
+ case AMDIL::T77_Z
+:
+ case AMDIL::T78_Z:
+ case AMDIL::T79_Z:
+ case AMDIL::T80_Z:
+ case AMDIL::T81_Z:
+ case AMDIL::T82_Z
+:
+ case AMDIL::T83_Z:
+ case AMDIL::T84_Z:
+ case AMDIL::T85_Z:
+ case AMDIL::T86_Z:
+ case AMDIL::T87_Z
+:
+ case AMDIL::T88_Z:
+ case AMDIL::T89_Z:
+ case AMDIL::T90_Z:
+ case AMDIL::T91_Z:
+ case AMDIL::T92_Z
+:
+ case AMDIL::T93_Z:
+ case AMDIL::T94_Z:
+ case AMDIL::T95_Z:
+ case AMDIL::T96_Z:
+ case AMDIL::T97_Z
+:
+ case AMDIL::T98_Z:
+ case AMDIL::T99_Z:
+ case AMDIL::T100_Z:
+ case AMDIL::T101_Z:
+ case AMDIL::T102_Z
+:
+ case AMDIL::T103_Z:
+ case AMDIL::T104_Z:
+ case AMDIL::T105_Z:
+ case AMDIL::T106_Z:
+ case AMDIL::T107_Z
+:
+ case AMDIL::T108_Z:
+ case AMDIL::T109_Z:
+ case AMDIL::T110_Z:
+ case AMDIL::T111_Z:
+ case AMDIL::T112_Z
+:
+ case AMDIL::T113_Z:
+ case AMDIL::T114_Z:
+ case AMDIL::T115_Z:
+ case AMDIL::T116_Z:
+ case AMDIL::T117_Z
+:
+ case AMDIL::T118_Z:
+ case AMDIL::T119_Z:
+ case AMDIL::T120_Z:
+ case AMDIL::T121_Z:
+ case AMDIL::T122_Z
+:
+ case AMDIL::T123_Z:
+ case AMDIL::T124_Z:
+ case AMDIL::T125_Z:
+ case AMDIL::T126_Z:
+ case AMDIL::T127_Z
+:
+    return 2;
+
+ case AMDIL::C0_W:
+ case AMDIL::C1_W:
+ case AMDIL::C2_W:
+ case AMDIL::C3_W:
+ case AMDIL::C4_W:
+ case AMDIL::C5_W:
+ case AMDIL::C6_W:
+ case AMDIL::C7_W:
+ case AMDIL::C8_W:
+ case AMDIL::C9_W:
+ case AMDIL::C10_W:
+ case AMDIL::C11_W:
+ case AMDIL::C12_W:
+ case AMDIL::C13_W:
+ case AMDIL::C14_W:
+ case AMDIL::C15_W:
+ case AMDIL::C16_W:
+ case AMDIL::C17_W:
+ case AMDIL::C18_W:
+ case AMDIL::C19_W:
+ case AMDIL::C20_W:
+ case AMDIL::C21_W:
+ case AMDIL::C22_W:
+ case AMDIL::C23_W:
+ case AMDIL::C24_W:
+ case AMDIL::C25_W:
+ case AMDIL::C26_W:
+ case AMDIL::C27_W:
+ case AMDIL::C28_W:
+ case AMDIL::C29_W:
+ case AMDIL::C30_W:
+ case AMDIL::C31_W:
+ case AMDIL::C32_W:
+ case AMDIL::C33_W:
+ case AMDIL::C34_W:
+ case AMDIL::C35_W:
+ case AMDIL::C36_W:
+ case AMDIL::C37_W:
+ case AMDIL::C38_W:
+ case AMDIL::C39_W:
+ case AMDIL::C40_W:
+ case AMDIL::C41_W:
+ case AMDIL::C42_W:
+ case AMDIL::C43_W:
+ case AMDIL::C44_W:
+ case AMDIL::C45_W:
+ case AMDIL::C46_W:
+ case AMDIL::C47_W:
+ case AMDIL::C48_W:
+ case AMDIL::C49_W:
+ case AMDIL::C50_W:
+ case AMDIL::C51_W:
+ case AMDIL::C52_W:
+ case AMDIL::C53_W:
+ case AMDIL::C54_W:
+ case AMDIL::C55_W:
+ case AMDIL::C56_W:
+ case AMDIL::C57_W:
+ case AMDIL::C58_W:
+ case AMDIL::C59_W:
+ case AMDIL::C60_W:
+ case AMDIL::C61_W:
+ case AMDIL::C62_W:
+ case AMDIL::C63_W:
+ case AMDIL::C64_W:
+ case AMDIL::C65_W:
+ case AMDIL::C66_W:
+ case AMDIL::C67_W:
+ case AMDIL::C68_W:
+ case AMDIL::C69_W:
+ case AMDIL::C70_W:
+ case AMDIL::C71_W:
+ case AMDIL::C72_W:
+ case AMDIL::C73_W:
+ case AMDIL::C74_W:
+ case AMDIL::C75_W:
+ case AMDIL::C76_W:
+ case AMDIL::C77_W:
+ case AMDIL::C78_W:
+ case AMDIL::C79_W:
+ case AMDIL::C80_W:
+ case AMDIL::C81_W:
+ case AMDIL::C82_W:
+ case AMDIL::C83_W:
+ case AMDIL::C84_W:
+ case AMDIL::C85_W:
+ case AMDIL::C86_W:
+ case AMDIL::C87_W:
+ case AMDIL::C88_W:
+ case AMDIL::C89_W:
+ case AMDIL::C90_W:
+ case AMDIL::C91_W:
+ case AMDIL::C92_W:
+ case AMDIL::C93_W:
+ case AMDIL::C94_W:
+ case AMDIL::C95_W:
+ case AMDIL::C96_W:
+ case AMDIL::C97_W:
+ case AMDIL::C98_W:
+ case AMDIL::C99_W:
+ case AMDIL::C100_W:
+ case AMDIL::C101_W:
+ case AMDIL::C102_W:
+ case AMDIL::C103_W:
+ case AMDIL::C104_W:
+ case AMDIL::C105_W:
+ case AMDIL::C106_W:
+ case AMDIL::C107_W:
+ case AMDIL::C108_W:
+ case AMDIL::C109_W:
+ case AMDIL::C110_W:
+ case AMDIL::C111_W:
+ case AMDIL::C112_W:
+ case AMDIL::C113_W:
+ case AMDIL::C114_W:
+ case AMDIL::C115_W:
+ case AMDIL::C116_W:
+ case AMDIL::C117_W:
+ case AMDIL::C118_W:
+ case AMDIL::C119_W:
+ case AMDIL::C120_W:
+ case AMDIL::C121_W:
+ case AMDIL::C122_W:
+ case AMDIL::C123_W:
+ case AMDIL::C124_W:
+ case AMDIL::C125_W:
+ case AMDIL::C126_W:
+ case AMDIL::C127_W:
+ case AMDIL::C128_W:
+ case AMDIL::C129_W:
+ case AMDIL::C130_W:
+ case AMDIL::C131_W:
+ case AMDIL::C132_W:
+ case AMDIL::C133_W:
+ case AMDIL::C134_W:
+ case AMDIL::C135_W:
+ case AMDIL::C136_W:
+ case AMDIL::C137_W:
+ case AMDIL::C138_W:
+ case AMDIL::C139_W:
+ case AMDIL::C140_W:
+ case AMDIL::C141_W:
+ case AMDIL::C142_W:
+ case AMDIL::C143_W:
+ case AMDIL::C144_W:
+ case AMDIL::C145_W:
+ case AMDIL::C146_W:
+ case AMDIL::C147_W:
+ case AMDIL::C148_W:
+ case AMDIL::C149_W:
+ case AMDIL::C150_W:
+ case AMDIL::C151_W:
+ case AMDIL::C152_W:
+ case AMDIL::C153_W:
+ case AMDIL::C154_W:
+ case AMDIL::C155_W:
+ case AMDIL::C156_W:
+ case AMDIL::C157_W:
+ case AMDIL::C158_W:
+ case AMDIL::C159_W:
+ case AMDIL::C160_W:
+ case AMDIL::C161_W:
+ case AMDIL::C162_W:
+ case AMDIL::C163_W:
+ case AMDIL::C164_W:
+ case AMDIL::C165_W:
+ case AMDIL::C166_W:
+ case AMDIL::C167_W:
+ case AMDIL::C168_W:
+ case AMDIL::C169_W:
+ case AMDIL::C170_W:
+ case AMDIL::C171_W:
+ case AMDIL::C172_W:
+ case AMDIL::C173_W:
+ case AMDIL::C174_W:
+ case AMDIL::C175_W:
+ case AMDIL::C176_W:
+ case AMDIL::C177_W:
+ case AMDIL::C178_W:
+ case AMDIL::C179_W:
+ case AMDIL::C180_W:
+ case AMDIL::C181_W:
+ case AMDIL::C182_W:
+ case AMDIL::C183_W:
+ case AMDIL::C184_W:
+ case AMDIL::C185_W:
+ case AMDIL::C186_W:
+ case AMDIL::C187_W:
+ case AMDIL::C188_W:
+ case AMDIL::C189_W:
+ case AMDIL::C190_W:
+ case AMDIL::C191_W:
+ case AMDIL::C192_W:
+ case AMDIL::C193_W:
+ case AMDIL::C194_W:
+ case AMDIL::C195_W:
+ case AMDIL::C196_W:
+ case AMDIL::C197_W:
+ case AMDIL::C198_W:
+ case AMDIL::C199_W:
+ case AMDIL::C200_W:
+ case AMDIL::C201_W:
+ case AMDIL::C202_W:
+ case AMDIL::C203_W:
+ case AMDIL::C204_W:
+ case AMDIL::C205_W:
+ case AMDIL::C206_W:
+ case AMDIL::C207_W:
+ case AMDIL::C208_W:
+ case AMDIL::C209_W:
+ case AMDIL::C210_W:
+ case AMDIL::C211_W:
+ case AMDIL::C212_W:
+ case AMDIL::C213_W:
+ case AMDIL::C214_W:
+ case AMDIL::C215_W:
+ case AMDIL::C216_W:
+ case AMDIL::C217_W:
+ case AMDIL::C218_W:
+ case AMDIL::C219_W:
+ case AMDIL::C220_W:
+ case AMDIL::C221_W:
+ case AMDIL::C222_W:
+ case AMDIL::C223_W:
+ case AMDIL::C224_W:
+ case AMDIL::C225_W:
+ case AMDIL::C226_W:
+ case AMDIL::C227_W:
+ case AMDIL::C228_W:
+ case AMDIL::C229_W:
+ case AMDIL::C230_W:
+ case AMDIL::C231_W:
+ case AMDIL::C232_W:
+ case AMDIL::C233_W:
+ case AMDIL::C234_W:
+ case AMDIL::C235_W:
+ case AMDIL::C236_W:
+ case AMDIL::C237_W:
+ case AMDIL::C238_W:
+ case AMDIL::C239_W:
+ case AMDIL::C240_W:
+ case AMDIL::C241_W:
+ case AMDIL::C242_W:
+ case AMDIL::C243_W:
+ case AMDIL::C244_W:
+ case AMDIL::C245_W:
+ case AMDIL::C246_W:
+ case AMDIL::C247_W:
+ case AMDIL::C248_W:
+ case AMDIL::C249_W:
+ case AMDIL::C250_W:
+ case AMDIL::C251_W:
+ case AMDIL::C252_W:
+ case AMDIL::C253_W:
+ case AMDIL::C254_W:
+ case AMDIL::C255_W:
+ case AMDIL::T0_W:
+ case AMDIL::T1_W:
+ case AMDIL::T2_W:
+ case AMDIL::T3_W:
+ case AMDIL::T4_W:
+ case AMDIL::T5_W:
+ case AMDIL::T6_W:
+ case AMDIL::T7_W:
+ case AMDIL::T8_W:
+ case AMDIL::T9_W:
+ case AMDIL::T10_W:
+ case AMDIL::T11_W:
+ case AMDIL::T12_W:
+ case AMDIL::T13_W:
+ case AMDIL::T14_W:
+ case AMDIL::T15_W:
+ case AMDIL::T16_W:
+ case AMDIL::T17_W:
+ case AMDIL::T18_W:
+ case AMDIL::T19_W:
+ case AMDIL::T20_W:
+ case AMDIL::T21_W:
+ case AMDIL::T22_W:
+ case AMDIL::T23_W:
+ case AMDIL::T24_W:
+ case AMDIL::T25_W:
+ case AMDIL::T26_W:
+ case AMDIL::T27_W:
+ case AMDIL::T28_W:
+ case AMDIL::T29_W:
+ case AMDIL::T30_W:
+ case AMDIL::T31_W:
+ case AMDIL::T32_W:
+ case AMDIL::T33_W:
+ case AMDIL::T34_W:
+ case AMDIL::T35_W:
+ case AMDIL::T36_W:
+ case AMDIL::T37_W:
+ case AMDIL::T38_W:
+ case AMDIL::T39_W:
+ case AMDIL::T40_W:
+ case AMDIL::T41_W:
+ case AMDIL::T42_W:
+ case AMDIL::T43_W:
+ case AMDIL::T44_W:
+ case AMDIL::T45_W:
+ case AMDIL::T46_W:
+ case AMDIL::T47_W:
+ case AMDIL::T48_W:
+ case AMDIL::T49_W:
+ case AMDIL::T50_W:
+ case AMDIL::T51_W:
+ case AMDIL::T52_W:
+ case AMDIL::T53_W:
+ case AMDIL::T54_W:
+ case AMDIL::T55_W:
+ case AMDIL::T56_W:
+ case AMDIL::T57_W:
+ case AMDIL::T58_W:
+ case AMDIL::T59_W:
+ case AMDIL::T60_W:
+ case AMDIL::T61_W:
+ case AMDIL::T62_W:
+ case AMDIL::T63_W:
+ case AMDIL::T64_W:
+ case AMDIL::T65_W:
+ case AMDIL::T66_W:
+ case AMDIL::T67_W:
+ case AMDIL::T68_W:
+ case AMDIL::T69_W:
+ case AMDIL::T70_W:
+ case AMDIL::T71_W:
+ case AMDIL::T72_W:
+ case AMDIL::T73_W:
+ case AMDIL::T74_W:
+ case AMDIL::T75_W:
+ case AMDIL::T76_W:
+ case AMDIL::T77_W:
+ case AMDIL::T78_W:
+ case AMDIL::T79_W:
+ case AMDIL::T80_W:
+ case AMDIL::T81_W:
+ case AMDIL::T82_W:
+ case AMDIL::T83_W:
+ case AMDIL::T84_W:
+ case AMDIL::T85_W:
+ case AMDIL::T86_W:
+ case AMDIL::T87_W:
+ case AMDIL::T88_W:
+ case AMDIL::T89_W:
+ case AMDIL::T90_W:
+ case AMDIL::T91_W:
+ case AMDIL::T92_W:
+ case AMDIL::T93_W:
+ case AMDIL::T94_W:
+ case AMDIL::T95_W:
+ case AMDIL::T96_W:
+ case AMDIL::T97_W:
+ case AMDIL::T98_W:
+ case AMDIL::T99_W:
+ case AMDIL::T100_W:
+ case AMDIL::T101_W:
+ case AMDIL::T102_W:
+ case AMDIL::T103_W:
+ case AMDIL::T104_W:
+ case AMDIL::T105_W:
+ case AMDIL::T106_W:
+ case AMDIL::T107_W:
+ case AMDIL::T108_W:
+ case AMDIL::T109_W:
+ case AMDIL::T110_W:
+ case AMDIL::T111_W:
+ case AMDIL::T112_W:
+ case AMDIL::T113_W:
+ case AMDIL::T114_W:
+ case AMDIL::T115_W:
+ case AMDIL::T116_W:
+ case AMDIL::T117_W:
+ case AMDIL::T118_W:
+ case AMDIL::T119_W:
+ case AMDIL::T120_W:
+ case AMDIL::T121_W:
+ case AMDIL::T122_W:
+ case AMDIL::T123_W:
+ case AMDIL::T124_W:
+ case AMDIL::T125_W:
+ case AMDIL::T126_W:
+ case AMDIL::T127_W:
+    return 3;
+
+ case AMDIL::C0_X
+:
+ case AMDIL::C1_X:
+ case AMDIL::C2_X:
+ case AMDIL::C3_X:
+ case AMDIL::C4_X:
+ case AMDIL::C5_X
+:
+ case AMDIL::C6_X:
+ case AMDIL::C7_X:
+ case AMDIL::C8_X:
+ case AMDIL::C9_X:
+ case AMDIL::C10_X
+:
+ case AMDIL::C11_X:
+ case AMDIL::C12_X:
+ case AMDIL::C13_X:
+ case AMDIL::C14_X:
+ case AMDIL::C15_X
+:
+ case AMDIL::C16_X:
+ case AMDIL::C17_X:
+ case AMDIL::C18_X:
+ case AMDIL::C19_X:
+ case AMDIL::C20_X
+:
+ case AMDIL::C21_X:
+ case AMDIL::C22_X:
+ case AMDIL::C23_X:
+ case AMDIL::C24_X:
+ case AMDIL::C25_X
+:
+ case AMDIL::C26_X:
+ case AMDIL::C27_X:
+ case AMDIL::C28_X:
+ case AMDIL::C29_X:
+ case AMDIL::C30_X
+:
+ case AMDIL::C31_X:
+ case AMDIL::C32_X:
+ case AMDIL::C33_X:
+ case AMDIL::C34_X:
+ case AMDIL::C35_X
+:
+ case AMDIL::C36_X:
+ case AMDIL::C37_X:
+ case AMDIL::C38_X:
+ case AMDIL::C39_X:
+ case AMDIL::C40_X
+:
+ case AMDIL::C41_X:
+ case AMDIL::C42_X:
+ case AMDIL::C43_X:
+ case AMDIL::C44_X:
+ case AMDIL::C45_X
+:
+ case AMDIL::C46_X:
+ case AMDIL::C47_X:
+ case AMDIL::C48_X:
+ case AMDIL::C49_X:
+ case AMDIL::C50_X
+:
+ case AMDIL::C51_X:
+ case AMDIL::C52_X:
+ case AMDIL::C53_X:
+ case AMDIL::C54_X:
+ case AMDIL::C55_X
+:
+ case AMDIL::C56_X:
+ case AMDIL::C57_X:
+ case AMDIL::C58_X:
+ case AMDIL::C59_X:
+ case AMDIL::C60_X
+:
+ case AMDIL::C61_X:
+ case AMDIL::C62_X:
+ case AMDIL::C63_X:
+ case AMDIL::C64_X:
+ case AMDIL::C65_X
+:
+ case AMDIL::C66_X:
+ case AMDIL::C67_X:
+ case AMDIL::C68_X:
+ case AMDIL::C69_X:
+ case AMDIL::C70_X
+:
+ case AMDIL::C71_X:
+ case AMDIL::C72_X:
+ case AMDIL::C73_X:
+ case AMDIL::C74_X:
+ case AMDIL::C75_X
+:
+ case AMDIL::C76_X:
+ case AMDIL::C77_X:
+ case AMDIL::C78_X:
+ case AMDIL::C79_X:
+ case AMDIL::C80_X
+:
+ case AMDIL::C81_X:
+ case AMDIL::C82_X:
+ case AMDIL::C83_X:
+ case AMDIL::C84_X:
+ case AMDIL::C85_X
+:
+ case AMDIL::C86_X:
+ case AMDIL::C87_X:
+ case AMDIL::C88_X:
+ case AMDIL::C89_X:
+ case AMDIL::C90_X
+:
+ case AMDIL::C91_X:
+ case AMDIL::C92_X:
+ case AMDIL::C93_X:
+ case AMDIL::C94_X:
+ case AMDIL::C95_X
+:
+ case AMDIL::C96_X:
+ case AMDIL::C97_X:
+ case AMDIL::C98_X:
+ case AMDIL::C99_X:
+ case AMDIL::C100_X
+:
+ case AMDIL::C101_X:
+ case AMDIL::C102_X:
+ case AMDIL::C103_X:
+ case AMDIL::C104_X:
+ case AMDIL::C105_X
+:
+ case AMDIL::C106_X:
+ case AMDIL::C107_X:
+ case AMDIL::C108_X:
+ case AMDIL::C109_X:
+ case AMDIL::C110_X
+:
+ case AMDIL::C111_X:
+ case AMDIL::C112_X:
+ case AMDIL::C113_X:
+ case AMDIL::C114_X:
+ case AMDIL::C115_X
+:
+ case AMDIL::C116_X:
+ case AMDIL::C117_X:
+ case AMDIL::C118_X:
+ case AMDIL::C119_X:
+ case AMDIL::C120_X
+:
+ case AMDIL::C121_X:
+ case AMDIL::C122_X:
+ case AMDIL::C123_X:
+ case AMDIL::C124_X:
+ case AMDIL::C125_X
+:
+ case AMDIL::C126_X:
+ case AMDIL::C127_X:
+ case AMDIL::C128_X:
+ case AMDIL::C129_X:
+ case AMDIL::C130_X
+:
+ case AMDIL::C131_X:
+ case AMDIL::C132_X:
+ case AMDIL::C133_X:
+ case AMDIL::C134_X:
+ case AMDIL::C135_X
+:
+ case AMDIL::C136_X:
+ case AMDIL::C137_X:
+ case AMDIL::C138_X:
+ case AMDIL::C139_X:
+ case AMDIL::C140_X
+:
+ case AMDIL::C141_X:
+ case AMDIL::C142_X:
+ case AMDIL::C143_X:
+ case AMDIL::C144_X:
+ case AMDIL::C145_X
+:
+ case AMDIL::C146_X:
+ case AMDIL::C147_X:
+ case AMDIL::C148_X:
+ case AMDIL::C149_X:
+ case AMDIL::C150_X
+:
+ case AMDIL::C151_X:
+ case AMDIL::C152_X:
+ case AMDIL::C153_X:
+ case AMDIL::C154_X:
+ case AMDIL::C155_X
+:
+ case AMDIL::C156_X:
+ case AMDIL::C157_X:
+ case AMDIL::C158_X:
+ case AMDIL::C159_X:
+ case AMDIL::C160_X
+:
+ case AMDIL::C161_X:
+ case AMDIL::C162_X:
+ case AMDIL::C163_X:
+ case AMDIL::C164_X:
+ case AMDIL::C165_X
+:
+ case AMDIL::C166_X:
+ case AMDIL::C167_X:
+ case AMDIL::C168_X:
+ case AMDIL::C169_X:
+ case AMDIL::C170_X
+:
+ case AMDIL::C171_X:
+ case AMDIL::C172_X:
+ case AMDIL::C173_X:
+ case AMDIL::C174_X:
+ case AMDIL::C175_X
+:
+ case AMDIL::C176_X:
+ case AMDIL::C177_X:
+ case AMDIL::C178_X:
+ case AMDIL::C179_X:
+ case AMDIL::C180_X
+:
+ case AMDIL::C181_X:
+ case AMDIL::C182_X:
+ case AMDIL::C183_X:
+ case AMDIL::C184_X:
+ case AMDIL::C185_X
+:
+ case AMDIL::C186_X:
+ case AMDIL::C187_X:
+ case AMDIL::C188_X:
+ case AMDIL::C189_X:
+ case AMDIL::C190_X
+:
+ case AMDIL::C191_X:
+ case AMDIL::C192_X:
+ case AMDIL::C193_X:
+ case AMDIL::C194_X:
+ case AMDIL::C195_X
+:
+ case AMDIL::C196_X:
+ case AMDIL::C197_X:
+ case AMDIL::C198_X:
+ case AMDIL::C199_X:
+ case AMDIL::C200_X
+:
+ case AMDIL::C201_X:
+ case AMDIL::C202_X:
+ case AMDIL::C203_X:
+ case AMDIL::C204_X:
+ case AMDIL::C205_X
+:
+ case AMDIL::C206_X:
+ case AMDIL::C207_X:
+ case AMDIL::C208_X:
+ case AMDIL::C209_X:
+ case AMDIL::C210_X
+:
+ case AMDIL::C211_X:
+ case AMDIL::C212_X:
+ case AMDIL::C213_X:
+ case AMDIL::C214_X:
+ case AMDIL::C215_X
+:
+ case AMDIL::C216_X:
+ case AMDIL::C217_X:
+ case AMDIL::C218_X:
+ case AMDIL::C219_X:
+ case AMDIL::C220_X
+:
+ case AMDIL::C221_X:
+ case AMDIL::C222_X:
+ case AMDIL::C223_X:
+ case AMDIL::C224_X:
+ case AMDIL::C225_X
+:
+ case AMDIL::C226_X:
+ case AMDIL::C227_X:
+ case AMDIL::C228_X:
+ case AMDIL::C229_X:
+ case AMDIL::C230_X
+:
+ case AMDIL::C231_X:
+ case AMDIL::C232_X:
+ case AMDIL::C233_X:
+ case AMDIL::C234_X:
+ case AMDIL::C235_X
+:
+ case AMDIL::C236_X:
+ case AMDIL::C237_X:
+ case AMDIL::C238_X:
+ case AMDIL::C239_X:
+ case AMDIL::C240_X
+:
+ case AMDIL::C241_X:
+ case AMDIL::C242_X:
+ case AMDIL::C243_X:
+ case AMDIL::C244_X:
+ case AMDIL::C245_X
+:
+ case AMDIL::C246_X:
+ case AMDIL::C247_X:
+ case AMDIL::C248_X:
+ case AMDIL::C249_X:
+ case AMDIL::C250_X
+:
+ case AMDIL::C251_X:
+ case AMDIL::C252_X:
+ case AMDIL::C253_X:
+ case AMDIL::C254_X:
+ case AMDIL::C255_X
+:
+ case AMDIL::T0_X
+:
+ case AMDIL::T1_X:
+ case AMDIL::T2_X:
+ case AMDIL::T3_X:
+ case AMDIL::T4_X:
+ case AMDIL::T5_X
+:
+ case AMDIL::T6_X:
+ case AMDIL::T7_X:
+ case AMDIL::T8_X:
+ case AMDIL::T9_X:
+ case AMDIL::T10_X
+:
+ case AMDIL::T11_X:
+ case AMDIL::T12_X:
+ case AMDIL::T13_X:
+ case AMDIL::T14_X:
+ case AMDIL::T15_X
+:
+ case AMDIL::T16_X:
+ case AMDIL::T17_X:
+ case AMDIL::T18_X:
+ case AMDIL::T19_X:
+ case AMDIL::T20_X
+:
+ case AMDIL::T21_X:
+ case AMDIL::T22_X:
+ case AMDIL::T23_X:
+ case AMDIL::T24_X:
+ case AMDIL::T25_X
+:
+ case AMDIL::T26_X:
+ case AMDIL::T27_X:
+ case AMDIL::T28_X:
+ case AMDIL::T29_X:
+ case AMDIL::T30_X
+:
+ case AMDIL::T31_X:
+ case AMDIL::T32_X:
+ case AMDIL::T33_X:
+ case AMDIL::T34_X:
+ case AMDIL::T35_X
+:
+ case AMDIL::T36_X:
+ case AMDIL::T37_X:
+ case AMDIL::T38_X:
+ case AMDIL::T39_X:
+ case AMDIL::T40_X
+:
+ case AMDIL::T41_X:
+ case AMDIL::T42_X:
+ case AMDIL::T43_X:
+ case AMDIL::T44_X:
+ case AMDIL::T45_X
+:
+ case AMDIL::T46_X:
+ case AMDIL::T47_X:
+ case AMDIL::T48_X:
+ case AMDIL::T49_X:
+ case AMDIL::T50_X
+:
+ case AMDIL::T51_X:
+ case AMDIL::T52_X:
+ case AMDIL::T53_X:
+ case AMDIL::T54_X:
+ case AMDIL::T55_X
+:
+ case AMDIL::T56_X:
+ case AMDIL::T57_X:
+ case AMDIL::T58_X:
+ case AMDIL::T59_X:
+ case AMDIL::T60_X
+:
+ case AMDIL::T61_X:
+ case AMDIL::T62_X:
+ case AMDIL::T63_X:
+ case AMDIL::T64_X:
+ case AMDIL::T65_X
+:
+ case AMDIL::T66_X:
+ case AMDIL::T67_X:
+ case AMDIL::T68_X:
+ case AMDIL::T69_X:
+ case AMDIL::T70_X
+:
+ case AMDIL::T71_X:
+ case AMDIL::T72_X:
+ case AMDIL::T73_X:
+ case AMDIL::T74_X:
+ case AMDIL::T75_X
+:
+ case AMDIL::T76_X:
+ case AMDIL::T77_X:
+ case AMDIL::T78_X:
+ case AMDIL::T79_X:
+ case AMDIL::T80_X
+:
+ case AMDIL::T81_X:
+ case AMDIL::T82_X:
+ case AMDIL::T83_X:
+ case AMDIL::T84_X:
+ case AMDIL::T85_X
+:
+ case AMDIL::T86_X:
+ case AMDIL::T87_X:
+ case AMDIL::T88_X:
+ case AMDIL::T89_X:
+ case AMDIL::T90_X
+:
+ case AMDIL::T91_X:
+ case AMDIL::T92_X:
+ case AMDIL::T93_X:
+ case AMDIL::T94_X:
+ case AMDIL::T95_X
+:
+ case AMDIL::T96_X:
+ case AMDIL::T97_X:
+ case AMDIL::T98_X:
+ case AMDIL::T99_X:
+ case AMDIL::T100_X
+:
+ case AMDIL::T101_X:
+ case AMDIL::T102_X:
+ case AMDIL::T103_X:
+ case AMDIL::T104_X:
+ case AMDIL::T105_X
+:
+ case AMDIL::T106_X:
+ case AMDIL::T107_X:
+ case AMDIL::T108_X:
+ case AMDIL::T109_X:
+ case AMDIL::T110_X
+:
+ case AMDIL::T111_X:
+ case AMDIL::T112_X:
+ case AMDIL::T113_X:
+ case AMDIL::T114_X:
+ case AMDIL::T115_X
+:
+ case AMDIL::T116_X:
+ case AMDIL::T117_X:
+ case AMDIL::T118_X:
+ case AMDIL::T119_X:
+ case AMDIL::T120_X
+:
+ case AMDIL::T121_X:
+ case AMDIL::T122_X:
+ case AMDIL::T123_X:
+ case AMDIL::T124_X:
+ case AMDIL::T125_X
+:
+ case AMDIL::T126_X:
+ case AMDIL::T127_X:
+ case AMDIL::T0_XYZW
+:
+ case AMDIL::T1_XYZW:
+ case AMDIL::T2_XYZW:
+ case AMDIL::T3_XYZW:
+ case AMDIL::T4_XYZW:
+ case AMDIL::T5_XYZW:
+ case AMDIL::T6_XYZW:
+ case AMDIL::T7_XYZW:
+ case AMDIL::T8_XYZW:
+ case AMDIL::T9_XYZW:
+ case AMDIL::T10_XYZW
+:
+ case AMDIL::T11_XYZW:
+ case AMDIL::T12_XYZW:
+ case AMDIL::T13_XYZW:
+ case AMDIL::T14_XYZW:
+ case AMDIL::T15_XYZW:
+ case AMDIL::T16_XYZW:
+ case AMDIL::T17_XYZW:
+ case AMDIL::T18_XYZW:
+ case AMDIL::T19_XYZW:
+ case AMDIL::T20_XYZW
+:
+ case AMDIL::T21_XYZW:
+ case AMDIL::T22_XYZW:
+ case AMDIL::T23_XYZW:
+ case AMDIL::T24_XYZW:
+ case AMDIL::T25_XYZW:
+ case AMDIL::T26_XYZW:
+ case AMDIL::T27_XYZW:
+ case AMDIL::T28_XYZW:
+ case AMDIL::T29_XYZW:
+ case AMDIL::T30_XYZW
+:
+ case AMDIL::T31_XYZW:
+ case AMDIL::T32_XYZW:
+ case AMDIL::T33_XYZW:
+ case AMDIL::T34_XYZW:
+ case AMDIL::T35_XYZW:
+ case AMDIL::T36_XYZW:
+ case AMDIL::T37_XYZW:
+ case AMDIL::T38_XYZW:
+ case AMDIL::T39_XYZW:
+ case AMDIL::T40_XYZW
+:
+ case AMDIL::T41_XYZW:
+ case AMDIL::T42_XYZW:
+ case AMDIL::T43_XYZW:
+ case AMDIL::T44_XYZW:
+ case AMDIL::T45_XYZW:
+ case AMDIL::T46_XYZW:
+ case AMDIL::T47_XYZW:
+ case AMDIL::T48_XYZW:
+ case AMDIL::T49_XYZW:
+ case AMDIL::T50_XYZW
+:
+ case AMDIL::T51_XYZW:
+ case AMDIL::T52_XYZW:
+ case AMDIL::T53_XYZW:
+ case AMDIL::T54_XYZW:
+ case AMDIL::T55_XYZW:
+ case AMDIL::T56_XYZW:
+ case AMDIL::T57_XYZW:
+ case AMDIL::T58_XYZW:
+ case AMDIL::T59_XYZW:
+ case AMDIL::T60_XYZW
+:
+ case AMDIL::T61_XYZW:
+ case AMDIL::T62_XYZW:
+ case AMDIL::T63_XYZW:
+ case AMDIL::T64_XYZW:
+ case AMDIL::T65_XYZW:
+ case AMDIL::T66_XYZW:
+ case AMDIL::T67_XYZW:
+ case AMDIL::T68_XYZW:
+ case AMDIL::T69_XYZW:
+ case AMDIL::T70_XYZW
+:
+ case AMDIL::T71_XYZW:
+ case AMDIL::T72_XYZW:
+ case AMDIL::T73_XYZW:
+ case AMDIL::T74_XYZW:
+ case AMDIL::T75_XYZW:
+ case AMDIL::T76_XYZW:
+ case AMDIL::T77_XYZW:
+ case AMDIL::T78_XYZW:
+ case AMDIL::T79_XYZW:
+ case AMDIL::T80_XYZW
+:
+ case AMDIL::T81_XYZW:
+ case AMDIL::T82_XYZW:
+ case AMDIL::T83_XYZW:
+ case AMDIL::T84_XYZW:
+ case AMDIL::T85_XYZW:
+ case AMDIL::T86_XYZW:
+ case AMDIL::T87_XYZW:
+ case AMDIL::T88_XYZW:
+ case AMDIL::T89_XYZW:
+ case AMDIL::T90_XYZW
+:
+ case AMDIL::T91_XYZW:
+ case AMDIL::T92_XYZW:
+ case AMDIL::T93_XYZW:
+ case AMDIL::T94_XYZW:
+ case AMDIL::T95_XYZW:
+ case AMDIL::T96_XYZW:
+ case AMDIL::T97_XYZW:
+ case AMDIL::T98_XYZW:
+ case AMDIL::T99_XYZW:
+ case AMDIL::T100_XYZW
+:
+ case AMDIL::T101_XYZW:
+ case AMDIL::T102_XYZW:
+ case AMDIL::T103_XYZW:
+ case AMDIL::T104_XYZW:
+ case AMDIL::T105_XYZW:
+ case AMDIL::T106_XYZW:
+ case AMDIL::T107_XYZW:
+ case AMDIL::T108_XYZW:
+ case AMDIL::T109_XYZW:
+ case AMDIL::T110_XYZW
+:
+ case AMDIL::T111_XYZW:
+ case AMDIL::T112_XYZW:
+ case AMDIL::T113_XYZW:
+ case AMDIL::T114_XYZW:
+ case AMDIL::T115_XYZW:
+ case AMDIL::T116_XYZW:
+ case AMDIL::T117_XYZW:
+ case AMDIL::T118_XYZW:
+ case AMDIL::T119_XYZW:
+ case AMDIL::T120_XYZW
+:
+ case AMDIL::T121_XYZW:
+ case AMDIL::T122_XYZW:
+ case AMDIL::T123_XYZW:
+ case AMDIL::T124_XYZW:
+ case AMDIL::T125_XYZW:
+ case AMDIL::T126_XYZW:
+ case AMDIL::T127_XYZW:
+    return 0;
+
+ case AMDIL::C0_Y:
+ case AMDIL::C1_Y:
+ case AMDIL::C2_Y:
+ case AMDIL::C3_Y:
+ case AMDIL::C4_Y:
+ case AMDIL::C5_Y:
+ case AMDIL::C6_Y:
+ case AMDIL::C7_Y:
+ case AMDIL::C8_Y:
+ case AMDIL::C9_Y:
+ case AMDIL::C10_Y:
+ case AMDIL::C11_Y:
+ case AMDIL::C12_Y:
+ case AMDIL::C13_Y:
+ case AMDIL::C14_Y:
+ case AMDIL::C15_Y:
+ case AMDIL::C16_Y:
+ case AMDIL::C17_Y:
+ case AMDIL::C18_Y:
+ case AMDIL::C19_Y:
+ case AMDIL::C20_Y:
+ case AMDIL::C21_Y:
+ case AMDIL::C22_Y:
+ case AMDIL::C23_Y:
+ case AMDIL::C24_Y:
+ case AMDIL::C25_Y:
+ case AMDIL::C26_Y:
+ case AMDIL::C27_Y:
+ case AMDIL::C28_Y:
+ case AMDIL::C29_Y:
+ case AMDIL::C30_Y:
+ case AMDIL::C31_Y:
+ case AMDIL::C32_Y:
+ case AMDIL::C33_Y:
+ case AMDIL::C34_Y:
+ case AMDIL::C35_Y:
+ case AMDIL::C36_Y:
+ case AMDIL::C37_Y:
+ case AMDIL::C38_Y:
+ case AMDIL::C39_Y:
+ case AMDIL::C40_Y:
+ case AMDIL::C41_Y:
+ case AMDIL::C42_Y:
+ case AMDIL::C43_Y:
+ case AMDIL::C44_Y:
+ case AMDIL::C45_Y:
+ case AMDIL::C46_Y:
+ case AMDIL::C47_Y:
+ case AMDIL::C48_Y:
+ case AMDIL::C49_Y:
+ case AMDIL::C50_Y:
+ case AMDIL::C51_Y:
+ case AMDIL::C52_Y:
+ case AMDIL::C53_Y:
+ case AMDIL::C54_Y:
+ case AMDIL::C55_Y:
+ case AMDIL::C56_Y:
+ case AMDIL::C57_Y:
+ case AMDIL::C58_Y:
+ case AMDIL::C59_Y:
+ case AMDIL::C60_Y:
+ case AMDIL::C61_Y:
+ case AMDIL::C62_Y:
+ case AMDIL::C63_Y:
+ case AMDIL::C64_Y:
+ case AMDIL::C65_Y:
+ case AMDIL::C66_Y:
+ case AMDIL::C67_Y:
+ case AMDIL::C68_Y:
+ case AMDIL::C69_Y:
+ case AMDIL::C70_Y:
+ case AMDIL::C71_Y:
+ case AMDIL::C72_Y:
+ case AMDIL::C73_Y:
+ case AMDIL::C74_Y:
+ case AMDIL::C75_Y:
+ case AMDIL::C76_Y:
+ case AMDIL::C77_Y:
+ case AMDIL::C78_Y:
+ case AMDIL::C79_Y:
+ case AMDIL::C80_Y:
+ case AMDIL::C81_Y:
+ case AMDIL::C82_Y:
+ case AMDIL::C83_Y:
+ case AMDIL::C84_Y:
+ case AMDIL::C85_Y:
+ case AMDIL::C86_Y:
+ case AMDIL::C87_Y:
+ case AMDIL::C88_Y:
+ case AMDIL::C89_Y:
+ case AMDIL::C90_Y:
+ case AMDIL::C91_Y:
+ case AMDIL::C92_Y:
+ case AMDIL::C93_Y:
+ case AMDIL::C94_Y:
+ case AMDIL::C95_Y:
+ case AMDIL::C96_Y:
+ case AMDIL::C97_Y:
+ case AMDIL::C98_Y:
+ case AMDIL::C99_Y:
+ case AMDIL::C100_Y:
+ case AMDIL::C101_Y:
+ case AMDIL::C102_Y:
+ case AMDIL::C103_Y:
+ case AMDIL::C104_Y:
+ case AMDIL::C105_Y:
+ case AMDIL::C106_Y:
+ case AMDIL::C107_Y:
+ case AMDIL::C108_Y:
+ case AMDIL::C109_Y:
+ case AMDIL::C110_Y:
+ case AMDIL::C111_Y:
+ case AMDIL::C112_Y:
+ case AMDIL::C113_Y:
+ case AMDIL::C114_Y:
+ case AMDIL::C115_Y:
+ case AMDIL::C116_Y:
+ case AMDIL::C117_Y:
+ case AMDIL::C118_Y:
+ case AMDIL::C119_Y:
+ case AMDIL::C120_Y:
+ case AMDIL::C121_Y:
+ case AMDIL::C122_Y:
+ case AMDIL::C123_Y:
+ case AMDIL::C124_Y:
+ case AMDIL::C125_Y:
+ case AMDIL::C126_Y:
+ case AMDIL::C127_Y:
+ case AMDIL::C128_Y:
+ case AMDIL::C129_Y:
+ case AMDIL::C130_Y:
+ case AMDIL::C131_Y:
+ case AMDIL::C132_Y:
+ case AMDIL::C133_Y:
+ case AMDIL::C134_Y:
+ case AMDIL::C135_Y:
+ case AMDIL::C136_Y:
+ case AMDIL::C137_Y:
+ case AMDIL::C138_Y:
+ case AMDIL::C139_Y:
+ case AMDIL::C140_Y:
+ case AMDIL::C141_Y:
+ case AMDIL::C142_Y:
+ case AMDIL::C143_Y:
+ case AMDIL::C144_Y:
+ case AMDIL::C145_Y:
+ case AMDIL::C146_Y:
+ case AMDIL::C147_Y:
+ case AMDIL::C148_Y:
+ case AMDIL::C149_Y:
+ case AMDIL::C150_Y:
+ case AMDIL::C151_Y:
+ case AMDIL::C152_Y:
+ case AMDIL::C153_Y:
+ case AMDIL::C154_Y:
+ case AMDIL::C155_Y:
+ case AMDIL::C156_Y:
+ case AMDIL::C157_Y:
+ case AMDIL::C158_Y:
+ case AMDIL::C159_Y:
+ case AMDIL::C160_Y:
+ case AMDIL::C161_Y:
+ case AMDIL::C162_Y:
+ case AMDIL::C163_Y:
+ case AMDIL::C164_Y:
+ case AMDIL::C165_Y:
+ case AMDIL::C166_Y:
+ case AMDIL::C167_Y:
+ case AMDIL::C168_Y:
+ case AMDIL::C169_Y:
+ case AMDIL::C170_Y:
+ case AMDIL::C171_Y:
+ case AMDIL::C172_Y:
+ case AMDIL::C173_Y:
+ case AMDIL::C174_Y:
+ case AMDIL::C175_Y:
+ case AMDIL::C176_Y:
+ case AMDIL::C177_Y:
+ case AMDIL::C178_Y:
+ case AMDIL::C179_Y:
+ case AMDIL::C180_Y:
+ case AMDIL::C181_Y:
+ case AMDIL::C182_Y:
+ case AMDIL::C183_Y:
+ case AMDIL::C184_Y:
+ case AMDIL::C185_Y:
+ case AMDIL::C186_Y:
+ case AMDIL::C187_Y:
+ case AMDIL::C188_Y:
+ case AMDIL::C189_Y:
+ case AMDIL::C190_Y:
+ case AMDIL::C191_Y:
+ case AMDIL::C192_Y:
+ case AMDIL::C193_Y:
+ case AMDIL::C194_Y:
+ case AMDIL::C195_Y:
+ case AMDIL::C196_Y:
+ case AMDIL::C197_Y:
+ case AMDIL::C198_Y:
+ case AMDIL::C199_Y:
+ case AMDIL::C200_Y:
+ case AMDIL::C201_Y:
+ case AMDIL::C202_Y:
+ case AMDIL::C203_Y:
+ case AMDIL::C204_Y:
+ case AMDIL::C205_Y:
+ case AMDIL::C206_Y:
+ case AMDIL::C207_Y:
+ case AMDIL::C208_Y:
+ case AMDIL::C209_Y:
+ case AMDIL::C210_Y:
+ case AMDIL::C211_Y:
+ case AMDIL::C212_Y:
+ case AMDIL::C213_Y:
+ case AMDIL::C214_Y:
+ case AMDIL::C215_Y:
+ case AMDIL::C216_Y:
+ case AMDIL::C217_Y:
+ case AMDIL::C218_Y:
+ case AMDIL::C219_Y:
+ case AMDIL::C220_Y:
+ case AMDIL::C221_Y:
+ case AMDIL::C222_Y:
+ case AMDIL::C223_Y:
+ case AMDIL::C224_Y:
+ case AMDIL::C225_Y:
+ case AMDIL::C226_Y:
+ case AMDIL::C227_Y:
+ case AMDIL::C228_Y:
+ case AMDIL::C229_Y:
+ case AMDIL::C230_Y:
+ case AMDIL::C231_Y:
+ case AMDIL::C232_Y:
+ case AMDIL::C233_Y:
+ case AMDIL::C234_Y:
+ case AMDIL::C235_Y:
+ case AMDIL::C236_Y:
+ case AMDIL::C237_Y:
+ case AMDIL::C238_Y:
+ case AMDIL::C239_Y:
+ case AMDIL::C240_Y:
+ case AMDIL::C241_Y:
+ case AMDIL::C242_Y:
+ case AMDIL::C243_Y:
+ case AMDIL::C244_Y:
+ case AMDIL::C245_Y:
+ case AMDIL::C246_Y:
+ case AMDIL::C247_Y:
+ case AMDIL::C248_Y:
+ case AMDIL::C249_Y:
+ case AMDIL::C250_Y:
+ case AMDIL::C251_Y:
+ case AMDIL::C252_Y:
+ case AMDIL::C253_Y:
+ case AMDIL::C254_Y:
+ case AMDIL::C255_Y:
+ case AMDIL::T0_Y:
+ case AMDIL::T1_Y:
+ case AMDIL::T2_Y:
+ case AMDIL::T3_Y:
+ case AMDIL::T4_Y:
+ case AMDIL::T5_Y:
+ case AMDIL::T6_Y:
+ case AMDIL::T7_Y:
+ case AMDIL::T8_Y:
+ case AMDIL::T9_Y:
+ case AMDIL::T10_Y:
+ case AMDIL::T11_Y:
+ case AMDIL::T12_Y:
+ case AMDIL::T13_Y:
+ case AMDIL::T14_Y:
+ case AMDIL::T15_Y:
+ case AMDIL::T16_Y:
+ case AMDIL::T17_Y:
+ case AMDIL::T18_Y:
+ case AMDIL::T19_Y:
+ case AMDIL::T20_Y:
+ case AMDIL::T21_Y:
+ case AMDIL::T22_Y:
+ case AMDIL::T23_Y:
+ case AMDIL::T24_Y:
+ case AMDIL::T25_Y:
+ case AMDIL::T26_Y:
+ case AMDIL::T27_Y:
+ case AMDIL::T28_Y:
+ case AMDIL::T29_Y:
+ case AMDIL::T30_Y:
+ case AMDIL::T31_Y:
+ case AMDIL::T32_Y:
+ case AMDIL::T33_Y:
+ case AMDIL::T34_Y:
+ case AMDIL::T35_Y:
+ case AMDIL::T36_Y:
+ case AMDIL::T37_Y:
+ case AMDIL::T38_Y:
+ case AMDIL::T39_Y:
+ case AMDIL::T40_Y:
+ case AMDIL::T41_Y:
+ case AMDIL::T42_Y:
+ case AMDIL::T43_Y:
+ case AMDIL::T44_Y:
+ case AMDIL::T45_Y:
+ case AMDIL::T46_Y:
+ case AMDIL::T47_Y:
+ case AMDIL::T48_Y:
+ case AMDIL::T49_Y:
+ case AMDIL::T50_Y:
+ case AMDIL::T51_Y:
+ case AMDIL::T52_Y:
+ case AMDIL::T53_Y:
+ case AMDIL::T54_Y:
+ case AMDIL::T55_Y:
+ case AMDIL::T56_Y:
+ case AMDIL::T57_Y:
+ case AMDIL::T58_Y:
+ case AMDIL::T59_Y:
+ case AMDIL::T60_Y:
+ case AMDIL::T61_Y:
+ case AMDIL::T62_Y:
+ case AMDIL::T63_Y:
+ case AMDIL::T64_Y:
+ case AMDIL::T65_Y:
+ case AMDIL::T66_Y:
+ case AMDIL::T67_Y:
+ case AMDIL::T68_Y:
+ case AMDIL::T69_Y:
+ case AMDIL::T70_Y:
+ case AMDIL::T71_Y:
+ case AMDIL::T72_Y:
+ case AMDIL::T73_Y:
+ case AMDIL::T74_Y:
+ case AMDIL::T75_Y:
+ case AMDIL::T76_Y:
+ case AMDIL::T77_Y:
+ case AMDIL::T78_Y:
+ case AMDIL::T79_Y:
+ case AMDIL::T80_Y:
+ case AMDIL::T81_Y:
+ case AMDIL::T82_Y:
+ case AMDIL::T83_Y:
+ case AMDIL::T84_Y:
+ case AMDIL::T85_Y:
+ case AMDIL::T86_Y:
+ case AMDIL::T87_Y:
+ case AMDIL::T88_Y:
+ case AMDIL::T89_Y:
+ case AMDIL::T90_Y:
+ case AMDIL::T91_Y:
+ case AMDIL::T92_Y:
+ case AMDIL::T93_Y:
+ case AMDIL::T94_Y:
+ case AMDIL::T95_Y:
+ case AMDIL::T96_Y:
+ case AMDIL::T97_Y:
+ case AMDIL::T98_Y:
+ case AMDIL::T99_Y:
+ case AMDIL::T100_Y:
+ case AMDIL::T101_Y:
+ case AMDIL::T102_Y:
+ case AMDIL::T103_Y:
+ case AMDIL::T104_Y:
+ case AMDIL::T105_Y:
+ case AMDIL::T106_Y:
+ case AMDIL::T107_Y:
+ case AMDIL::T108_Y:
+ case AMDIL::T109_Y:
+ case AMDIL::T110_Y:
+ case AMDIL::T111_Y:
+ case AMDIL::T112_Y:
+ case AMDIL::T113_Y:
+ case AMDIL::T114_Y:
+ case AMDIL::T115_Y:
+ case AMDIL::T116_Y:
+ case AMDIL::T117_Y:
+ case AMDIL::T118_Y:
+ case AMDIL::T119_Y:
+ case AMDIL::T120_Y:
+ case AMDIL::T121_Y:
+ case AMDIL::T122_Y:
+ case AMDIL::T123_Y:
+ case AMDIL::T124_Y:
+ case AMDIL::T125_Y:
+ case AMDIL::T126_Y:
+ case AMDIL::T127_Y:
+    return 1;
+
+  }
+}
+
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
new file mode 100644
index 0000000..a6ca2eb
--- /dev/null
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -0,0 +1,258 @@
+//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Most of the DAG lowering is handled in AMDGPUISelLowering.cpp.  This file
+// is mostly EmitInstrWithCustomInserter().
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600ISelLowering.h"
+#include "AMDGPUUtil.h"
+#include "R600InstrInfo.h"
+#include "R600MachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+// R600 target-lowering setup: registers which register classes hold each
+// value type and marks the operations this target cannot select directly
+// so the legalizer expands them.
+R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
+    AMDGPUTargetLowering(TM),
+    TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo()))
+{
+  // 64-bit integer multiply is not selected directly; expand it.
+  setOperationAction(ISD::MUL, MVT::i64, Expand);
+//  setSchedulingPreference(Sched::VLIW);
+  // Four-element vector types live in the 128-bit register class,
+  // scalar f32/i32 in the 32-bit class.
+  addRegisterClass(MVT::v4f32, &AMDIL::R600_Reg128RegClass);
+  addRegisterClass(MVT::f32, &AMDIL::R600_Reg32RegClass);
+  addRegisterClass(MVT::v4i32, &AMDIL::R600_Reg128RegClass);
+  addRegisterClass(MVT::i32, &AMDIL::R600_Reg32RegClass);
+  computeRegisterProperties();
+
+  // f32 subtraction is expanded as well (presumably into ADD of a
+  // negated operand -- confirm against the R600 instruction patterns).
+  setOperationAction(ISD::FSUB, MVT::f32, Expand);
+
+}
+
+// Expands this target's custom-inserter pseudo instructions into real
+// machine instructions.  Every case that does not return early falls
+// through to the bottom, where the original pseudo is erased.
+MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
+    MachineInstr * MI, MachineBasicBlock * BB) const
+{
+  MachineFunction * MF = BB->getParent();
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  MachineBasicBlock::iterator I = *MI;
+
+  switch (MI->getOpcode()) {
+  // Anything not listed here is handled by the common AMDGPU inserter.
+  default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+  // Work-group / work-item id pseudos become reads of pre-loaded
+  // live-in registers: T1.{X,Y,Z} for group ids, T0.{X,Y,Z} for the
+  // per-group item ids.
+  case AMDIL::TGID_X:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T1_X);
+    break;
+  case AMDIL::TGID_Y:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Y);
+    break;
+  case AMDIL::TGID_Z:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T1_Z);
+    break;
+  case AMDIL::TIDIG_X:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T0_X);
+    break;
+  case AMDIL::TIDIG_Y:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Y);
+    break;
+  case AMDIL::TIDIG_Z:
+    addLiveIn(MI, MF, MRI, TII, AMDIL::T0_Z);
+    break;
+  // The nine implicit kernel parameters are fetched from a vertex
+  // buffer at dword offsets 0-8; see lowerImplicitParameter().
+  case AMDIL::NGROUPS_X:
+    lowerImplicitParameter(MI, *BB, MRI, 0);
+    break;
+  case AMDIL::NGROUPS_Y:
+    lowerImplicitParameter(MI, *BB, MRI, 1);
+    break;
+  case AMDIL::NGROUPS_Z:
+    lowerImplicitParameter(MI, *BB, MRI, 2);
+    break;
+  case AMDIL::GLOBAL_SIZE_X:
+    lowerImplicitParameter(MI, *BB, MRI, 3);
+    break;
+  case AMDIL::GLOBAL_SIZE_Y:
+    lowerImplicitParameter(MI, *BB, MRI, 4);
+    break;
+  case AMDIL::GLOBAL_SIZE_Z:
+    lowerImplicitParameter(MI, *BB, MRI, 5);
+    break;
+  case AMDIL::LOCAL_SIZE_X:
+    lowerImplicitParameter(MI, *BB, MRI, 6);
+    break;
+  case AMDIL::LOCAL_SIZE_Y:
+    lowerImplicitParameter(MI, *BB, MRI, 7);
+    break;
+  case AMDIL::LOCAL_SIZE_Z:
+    lowerImplicitParameter(MI, *BB, MRI, 8);
+    break;
+
+  // Clamp, absolute value and negation are encoded as operand target
+  // flags on a plain MOV rather than as separate instructions.
+  case AMDIL::CLAMP_R600:
+    MI->getOperand(0).addTargetFlag(MO_FLAG_CLAMP);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+           .addOperand(MI->getOperand(0))
+           .addOperand(MI->getOperand(1));
+    break;
+
+  case AMDIL::FABS_R600:
+    MI->getOperand(1).addTargetFlag(MO_FLAG_ABS);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+           .addOperand(MI->getOperand(0))
+           .addOperand(MI->getOperand(1));
+    break;
+
+  case AMDIL::FNEG_R600:
+    MI->getOperand(1).addTargetFlag(MO_FLAG_NEG);
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV))
+            .addOperand(MI->getOperand(0))
+            .addOperand(MI->getOperand(1));
+    break;
+
+  // Operand 1 is an index into the 32-bit constant register file;
+  // replace the pseudo with a COPY from that physical register.
+  case AMDIL::R600_LOAD_CONST:
+    {
+      int64_t RegIndex = MI->getOperand(1).getImm();
+      unsigned ConstantReg = AMDIL::R600_CReg32RegClass.getRegister(RegIndex);
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY))
+                  .addOperand(MI->getOperand(0))
+                  .addReg(ConstantReg);
+      break;
+    }
+
+  // Operand 1 selects which 32-bit T-register holds the input; mark it
+  // as a function live-in.
+  case AMDIL::LOAD_INPUT:
+    {
+      int64_t RegIndex = MI->getOperand(1).getImm();
+      addLiveIn(MI, MF, MRI, TII,
+                AMDIL::R600_TReg32RegClass.getRegister(RegIndex));
+      break;
+    }
+
+  // Tag the instruction that defines the masked virtual register with
+  // the MASK flag instead of emitting anything new.
+  case AMDIL::MASK_WRITE:
+    {
+      unsigned maskedRegister = MI->getOperand(0).getReg();
+      assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
+      MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
+      MachineOperand * def = defInstr->findRegisterDefOperand(maskedRegister);
+      def->addTargetFlag(MO_FLAG_MASK);
+      // Return early so the instruction is not erased
+      return BB;
+    }
+
+  case AMDIL::RAT_WRITE_CACHELESS_eg:
+    {
+      // Convert to DWORD address
+      // (the address in operand 1 is shifted right by 2).
+      unsigned NewAddr = MRI.createVirtualRegister(
+                                             &AMDIL::R600_TReg32_XRegClass);
+      unsigned ShiftValue = MRI.createVirtualRegister(
+                                              &AMDIL::R600_TReg32RegClass);
+
+      // XXX In theory, we should be able to pass ShiftValue directly to
+      // the LSHR_eg instruction as an inline literal, but I tried doing it
+      // this way and it didn't produce the correct results.
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::MOV), ShiftValue)
+              .addReg(AMDIL::ALU_LITERAL_X)
+              .addImm(2);
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::LSHR_eg), NewAddr)
+              .addOperand(MI->getOperand(1))
+              .addReg(ShiftValue);
+      // Re-emit the same opcode with the shifted address as operand 1.
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
+              .addOperand(MI->getOperand(0))
+              .addReg(NewAddr);
+      break;
+    }
+
+  // Copy the value into the output T-register selected by operand 1 and
+  // make sure that register is recorded as live-out.
+  case AMDIL::STORE_OUTPUT:
+    {
+      int64_t OutputIndex = MI->getOperand(1).getImm();
+      unsigned OutputReg = AMDIL::R600_TReg32RegClass.getRegister(OutputIndex);
+
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::COPY), OutputReg)
+                  .addOperand(MI->getOperand(0));
+
+      if (!MRI.isLiveOut(OutputReg)) {
+        MRI.addLiveOut(OutputReg);
+      }
+      break;
+    }
+
+  // Record the T-register named by the immediate in the machine-function
+  // info's reserved list (presumably so it is kept out of register
+  // allocation -- confirm in R600RegisterInfo).
+  case AMDIL::RESERVE_REG:
+    {
+      R600MachineFunctionInfo * MFI = MF->getInfo<R600MachineFunctionInfo>();
+      int64_t ReservedIndex = MI->getOperand(0).getImm();
+      unsigned ReservedReg =
+                          AMDIL::R600_TReg32RegClass.getRegister(ReservedIndex);
+      MFI->ReservedRegs.push_back(ReservedReg);
+      break;
+    }
+
+  // Texture sample with explicit derivatives: load the horizontal and
+  // vertical gradients into temporaries, then issue the gradient sample,
+  // which consumes them as implicit uses.
+  case AMDIL::TXD:
+    {
+      unsigned t0 = MRI.createVirtualRegister(&AMDIL::R600_Reg128RegClass);
+      unsigned t1 = MRI.createVirtualRegister(&AMDIL::R600_Reg128RegClass);
+
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0)
+              .addOperand(MI->getOperand(3))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5))
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1)
+              .addOperand(MI->getOperand(2))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5));
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_G))
+              .addOperand(MI->getOperand(0))
+              .addOperand(MI->getOperand(1))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5))
+              .addReg(t0, RegState::Implicit)
+              .addReg(t1, RegState::Implicit);
+      break;
+    }
+  // Same as TXD but uses the shadow-comparison sample instruction.
+  case AMDIL::TXD_SHADOW:
+    {
+      unsigned t0 = MRI.createVirtualRegister(&AMDIL::R600_Reg128RegClass);
+      unsigned t1 = MRI.createVirtualRegister(&AMDIL::R600_Reg128RegClass);
+
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_H), t0)
+              .addOperand(MI->getOperand(3))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5));
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SET_GRADIENTS_V), t1)
+              .addOperand(MI->getOperand(2))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5));
+      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::TEX_SAMPLE_C_G))
+              .addOperand(MI->getOperand(0))
+              .addOperand(MI->getOperand(1))
+              .addOperand(MI->getOperand(4))
+              .addOperand(MI->getOperand(5))
+              .addReg(t0, RegState::Implicit)
+              .addReg(t1, RegState::Implicit);
+      break;
+    }
+
+
+  }
+
+  // The pseudo has been expanded by one of the cases above; remove it.
+  MI->eraseFromParent();
+  return BB;
+}
+
+// Lowers one implicit-kernel-parameter pseudo into a vertex fetch:
+// a MOV loads the parameter's byte offset (dword_offset * 4) into PtrReg,
+// then VTX_READ_PARAM_eg reads the value at that address into the
+// pseudo's destination register.  The destination is re-classed to the
+// X-channel 32-bit T-register class required by the fetch.
+void R600TargetLowering::lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
+    MachineRegisterInfo & MRI, unsigned dword_offset) const
+{
+  MachineBasicBlock::iterator I = *MI;
+  unsigned PtrReg = MRI.createVirtualRegister(&AMDIL::R600_TReg32_XRegClass);
+  MRI.setRegClass(MI->getOperand(0).getReg(), &AMDIL::R600_TReg32_XRegClass);
+
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::MOV), PtrReg)
+          .addReg(AMDIL::ALU_LITERAL_X)
+          .addImm(dword_offset * 4);
+
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::VTX_READ_PARAM_eg))
+          .addOperand(MI->getOperand(0))
+          .addReg(PtrReg)
+          .addImm(0);
+}
diff --git a/lib/Target/AMDGPU/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
new file mode 100644
index 0000000..6296145
--- /dev/null
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -0,0 +1,44 @@
+//===-- R600ISelLowering.h - R600 DAG Lowering Interface -*- C++ -*--------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 DAG Lowering interface definition
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef R600ISELLOWERING_H
+#define R600ISELLOWERING_H
+
+#include "AMDGPUISelLowering.h"
+
+namespace llvm {
+
+class R600InstrInfo;
+
+// DAG-lowering subclass for the R600 family; most lowering lives in the
+// AMDGPUTargetLowering base class, this class mainly provides the
+// custom-inserter that expands R600 pseudo instructions.
+class R600TargetLowering : public AMDGPUTargetLowering
+{
+public:
+  R600TargetLowering(TargetMachine &TM);
+  virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
+      MachineBasicBlock * BB) const;
+
+private:
+  // Instruction info of the owning target machine; used to look up
+  // instruction descriptions when building machine instructions.
+  const R600InstrInfo * TII;
+
+  /// lowerImplicitParameter - Each OpenCL kernel has nine implicit parameters
+  /// that are stored in the first nine dwords of a Vertex Buffer.  These
+  /// implicit parameters are represented by pseudo instructions, which are
+  /// lowered to VTX_READ instructions by this function.
+  void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
+      MachineRegisterInfo & MRI, unsigned dword_offset) const;
+
+};
+
+} // End namespace llvm;
+
+#endif // R600ISELLOWERING_H
diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
new file mode 100644
index 0000000..d1246d3
--- /dev/null
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -0,0 +1,94 @@
+//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 Implementation of TargetInstrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600InstrInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "R600RegisterInfo.h"
+
+using namespace llvm;
+
+// Construct the R600 instruction info; the register info (RI) is owned by
+// value and initialized with a back-reference to this TII.
+R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
+  : AMDGPUInstrInfo(tm),
+    RI(tm, *this),
+    TM(tm)
+  { }
+
+// Accessor for the target register info object owned by this class.
+const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const
+{
+  return RI;
+}
+
+// Returns true when MI's opcode carries the TRIG TSFlags bit (set for
+// SIN/COS via the Trig field in R600Instructions.td).
+bool R600InstrInfo::isTrig(const MachineInstr &MI) const
+{
+  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
+}
+
+// Returns true when MI's opcode carries the VECTOR TSFlags bit; vector
+// instructions must fill all slots within an instruction group.
+bool R600InstrInfo::isVector(const MachineInstr &MI) const
+{
+  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
+}
+
+// Emit a physical register copy.  A 128-bit (vec4) to 128-bit copy is
+// expanded into four per-channel MOVs; everything else becomes a single MOV.
+void
+R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const
+{
+
+  // Sub-register indices for the x/y/z/w channels of a vec4 register.
+  unsigned subRegMap[4] = {AMDIL::sel_x, AMDIL::sel_y, AMDIL::sel_z, AMDIL::sel_w};
+
+  if (AMDIL::R600_Reg128RegClass.contains(DestReg)
+      && AMDIL::R600_Reg128RegClass.contains(SrcReg)) {
+    // Copy channel by channel; the implicit def of DestReg ties the four
+    // channel writes to the full vec4 register.
+    // NOTE(review): KillSrc is not honored on this path -- confirm intended.
+    for (unsigned i = 0; i < 4; i++) {
+      BuildMI(MBB, MI, DL, get(AMDIL::MOV))
+              .addReg(RI.getSubReg(DestReg, subRegMap[i]), RegState::Define)
+              .addReg(RI.getSubReg(SrcReg, subRegMap[i]))
+              .addReg(DestReg, RegState::Define | RegState::Implicit);
+    }
+  } else {
+
+    /* Mixed vec4/scalar copies are not supported. */
+    assert(!AMDIL::R600_Reg128RegClass.contains(DestReg)
+           && !AMDIL::R600_Reg128RegClass.contains(SrcReg));
+
+    BuildMI(MBB, MI, DL, get(AMDIL::MOV), DestReg)
+      .addReg(SrcReg, getKillRegState(KillSrc));
+  }
+}
+
+// Create (but do not insert) "MOV DstReg, ALU_LITERAL_X" carrying Imm as a
+// trailing immediate operand; the literal channel register tells the emitter
+// to read the value from the instruction's literal slot.
+MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
+                                             unsigned DstReg, int64_t Imm) const
+{
+  MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::MOV), DebugLoc());
+  // Chain one builder instead of constructing a temporary per operand.
+  MachineInstrBuilder(MI).addReg(DstReg, RegState::Define)
+                         .addReg(AMDIL::ALU_LITERAL_X)
+                         .addImm(Imm);
+
+  return MI;
+}
+
+// Opcode used for integer equality comparison on R600.
+unsigned R600InstrInfo::getIEQOpcode() const
+{
+  return AMDIL::SETE_INT;
+}
+
+// An opcode is a move if it is the plain MOV or one of the immediate-move
+// pseudo instructions.
+bool R600InstrInfo::isMov(unsigned Opcode) const
+{
+  return Opcode == AMDIL::MOV
+      || Opcode == AMDIL::MOV_IMM_F32
+      || Opcode == AMDIL::MOV_IMM_I32;
+}
diff --git a/lib/Target/AMDGPU/R600InstrInfo.h b/lib/Target/AMDGPU/R600InstrInfo.h
new file mode 100644
index 0000000..f2a1098
--- /dev/null
+++ b/lib/Target/AMDGPU/R600InstrInfo.h
@@ -0,0 +1,71 @@
+//===-- R600InstrInfo.h - R600 Instruction Info Interface -------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface definition for R600InstrInfo
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef R600INSTRUCTIONINFO_H_
+#define R600INSTRUCTIONINFO_H_
+
+#include "AMDIL.h"
+#include "AMDILInstrInfo.h"
+#include "R600RegisterInfo.h"
+
+#include <map>
+
+namespace llvm {
+
+  class AMDGPUTargetMachine;
+  class MachineFunction;
+  class MachineInstr;
+  class MachineInstrBuilder;
+
+  // R600 implementation of TargetInstrInfo: register copies, immediate
+  // moves, and queries on the R600-specific TSFlags bits.
+  class R600InstrInfo : public AMDGPUInstrInfo {
+  private:
+  const R600RegisterInfo RI;
+  AMDGPUTargetMachine &TM;
+
+  public:
+  explicit R600InstrInfo(AMDGPUTargetMachine &tm);
+
+  const R600RegisterInfo &getRegisterInfo() const;
+  // Copies DestReg <- SrcReg; vec4 copies are expanded per channel.
+  virtual void copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const;
+
+  // True when the opcode has the TRIG TSFlags bit set (SIN/COS).
+  bool isTrig(const MachineInstr &MI) const;
+
+  /// isVector - Vector instructions are instructions that must fill all
+  /// instruction slots within an instruction group.
+  bool isVector(const MachineInstr &MI) const;
+
+  // Creates a MOV of Imm into DstReg via the ALU_LITERAL_X channel.
+  virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
+                                        int64_t Imm) const;
+
+  virtual unsigned getIEQOpcode() const;
+  virtual bool isMov(unsigned Opcode) const;
+};
+
+} // End llvm namespace
+
+// Bit positions stored in TSFlags for R600 instructions.  Must stay in sync
+// with InstR600 in R600Instructions.td, which assigns TSFlags{4} = Trig,
+// TSFlags{5} = Op3 and TSFlags{6} = isVector.
+// (Reindented with spaces to match the rest of the file.)
+namespace R600_InstFlag {
+  enum TIF {
+    TRANS_ONLY = (1 << 0),
+    TEX = (1 << 1),
+    REDUCTION = (1 << 2),
+    FC = (1 << 3),
+    TRIG = (1 << 4),
+    OP3 = (1 << 5),
+    VECTOR = (1 << 6)
+  };
+}
+
+#endif // R600INSTRINFO_H_
diff --git a/lib/Target/AMDGPU/R600Instructions.td b/lib/Target/AMDGPU/R600Instructions.td
new file mode 100644
index 0000000..794eaef
--- /dev/null
+++ b/lib/Target/AMDGPU/R600Instructions.td
@@ -0,0 +1,1159 @@
+//===-- R600Instructions.td - R600 Instruction defs  -------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 Tablegen instruction definitions
+//
+//===----------------------------------------------------------------------===//
+
+include "R600Intrinsics.td"
+
+// Common base class for R600 instructions.  The TSFlags bit positions set
+// here must stay in sync with the R600_InstFlag enum in R600InstrInfo.h.
+class InstR600 <bits<32> inst, dag outs, dag ins, string asm, list<dag> pattern,
+                InstrItinClass itin>
+    : AMDGPUInst <outs, ins, asm, pattern> {
+
+  field bits<32> Inst;
+  bit Trig = 0;
+  bit Op3 = 0;
+  bit isVector = 0; 
+
+  let Inst = inst;
+  let Namespace = "AMDIL";
+  let OutOperandList = outs;
+  let InOperandList = ins;
+  let AsmString = asm;
+  let Pattern = pattern;
+  let Itinerary = itin;
+
+  let TSFlags{4} = Trig;
+  let TSFlags{5} = Op3;
+
+  // Vector instructions are instructions that must fill all slots in an
+  // instruction group
+  let TSFlags{6} = isVector;
+}
+
+// Base class for instructions encoded directly as 64-bit R600 ISA words
+// (e.g. the EG_CF_RAT exports below).
+class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
+    AMDGPUInst <outs, ins, asm, pattern>
+{
+  field bits<64> Inst;
+
+  let Namespace = "AMDIL";
+}
+
+// Memory operand: X-channel pointer register plus an immediate index.
+def MEMxi : Operand<iPTR> {
+  let MIOperandInfo = (ops R600_TReg32_X:$ptr, i32imm:$index);
+}
+
+// Memory operand: pointer register plus an index register.
+def MEMrr : Operand<iPTR> {
+  let MIOperandInfo = (ops R600_Reg32:$ptr, R600_Reg32:$index);
+}
+
+// Address-selection hooks implemented in the C++ ISel code.
+def ADDRParam : ComplexPattern<i32, 2, "SelectADDRParam", [], []>;
+def ADDRDWord : ComplexPattern<i32, 1, "SelectADDRDWord", [], []>;
+def ADDRVTX_READ : ComplexPattern<i32, 2, "SelectADDRVTX_READ", [], []>;
+
+// Encoding fields common to R600 ALU instructions; all default to zero.
+class R600_ALU {
+
+  bits<7> DST_GPR = 0;
+  bits<9> SRC0_SEL = 0;
+  bits<1> SRC0_NEG = 0;
+  bits<9> SRC1_SEL = 0;
+  bits<1> SRC1_NEG = 0;
+  bits<1> CLAMP = 0;
+  
+}
+
+
+// One-source ALU instruction: dst <- op(src).
+class R600_1OP <bits<32> inst, string opName, list<dag> pattern,
+                InstrItinClass itin = AnyALU> :
+  InstR600 <inst,
+          (outs R600_Reg32:$dst),
+          (ins R600_Reg32:$src, variable_ops),
+          !strconcat(opName, " $dst, $src"),
+          pattern,
+          itin
+  >;
+
+// Two-source ALU instruction: dst <- op(src0, src1).
+class R600_2OP <bits<32> inst, string opName, list<dag> pattern,
+                InstrItinClass itin = AnyALU> :
+  InstR600 <inst,
+          (outs R600_Reg32:$dst),
+          (ins R600_Reg32:$src0, R600_Reg32:$src1, variable_ops),
+          !strconcat(opName, " $dst, $src0, $src1"),
+          pattern,
+          itin
+  >;
+
+// Three-source ALU instruction; sets the Op3 TSFlags bit.
+class R600_3OP <bits<32> inst, string opName, list<dag> pattern,
+                InstrItinClass itin = AnyALU> :
+  InstR600 <inst,
+          (outs R600_Reg32:$dst),
+          (ins R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2, variable_ops),
+          !strconcat(opName, " $dst, $src0, $src1, $src2"),
+          pattern,
+          itin>{
+
+    let Op3 = 1;
+  }
+
+// Reduction instruction: vec4 input(s) reduced to a scalar result, scheduled
+// on the VecALU itinerary by default.
+class R600_REDUCTION <bits<32> inst, dag ins, string asm, list<dag> pattern,
+                      InstrItinClass itin = VecALU> :
+  InstR600 <inst,
+          (outs R600_Reg32:$dst),
+          ins,
+          asm,
+          pattern,
+          itin
+
+  >;
+
+// Texture instruction: vec4 coordinate in, vec4 result out, with resource
+// and sampler ids as immediates.
+class R600_TEX <bits<32> inst, string opName, list<dag> pattern,
+                InstrItinClass itin = AnyALU> :
+  InstR600 <inst,
+          (outs R600_Reg128:$dst),
+          (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2),
+          !strconcat(opName, "$dst, $src0, $src1, $src2"),
+          pattern,
+          itin
+  >;
+
+// Matches texture-type immediates that denote shadow samplers, used to pick
+// the *_C (compare) texture opcodes.
+def TEX_SHADOW : PatLeaf<
+  (imm),
+  [{uint32_t TType = (uint32_t)N->getZExtValue();
+    return (TType >= 6 && TType <= 8) || TType == 11 || TType == 12;
+  }]
+>;
+
+// Condition-code matchers: each accepts the ordered, unordered and
+// don't-care variants of one comparison.
+def COND_EQ : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETOEQ: case ISD::SETUEQ:
+                     case ISD::SETEQ: return true;}}}]
+>;
+
+def COND_NE : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETONE: case ISD::SETUNE:
+                     case ISD::SETNE: return true;}}}]
+>;
+def COND_GT : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETOGT: case ISD::SETUGT:
+                     case ISD::SETGT: return true;}}}]
+>;
+
+def COND_GE : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETOGE: case ISD::SETUGE:
+                     case ISD::SETGE: return true;}}}]
+>;
+
+def COND_LT : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETOLT: case ISD::SETULT:
+                     case ISD::SETLT: return true;}}}]
+>;
+
+def COND_LE : PatLeaf <
+  (cond),
+  [{switch(N->get()){{default: return false;
+                     case ISD::SETOLE: case ISD::SETULE:
+                     case ISD::SETLE: return true;}}}]
+>;
+
+// Evergreen CF RAT (Random Access Target) export instruction.  The fields
+// are packed into the two 32-bit CF_ALLOC_EXPORT words as annotated below.
+class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs,
+                 dag ins, string asm, list<dag> pattern> :
+    InstR600ISA <outs, ins, asm, pattern>
+{
+  bits<7>  RW_GPR;
+  bits<7>  INDEX_GPR;
+
+  bits<2>  RIM;
+  bits<2>  TYPE;
+  bits<1>  RW_REL;
+  bits<2>  ELEM_SIZE;
+
+  bits<12> ARRAY_SIZE;
+  bits<4>  COMP_MASK;
+  bits<4>  BURST_COUNT;
+  bits<1>  VPM;
+  bits<1>  EOP;
+  bits<1>  MARK;
+  bits<1>  BARRIER;
+
+  /* CF_ALLOC_EXPORT_WORD0_RAT */
+  let Inst{3-0}   = rat_id;
+  let Inst{9-4}   = rat_inst;
+  let Inst{10}    = 0; /* Reserved */
+  let Inst{12-11} = RIM;
+  let Inst{14-13} = TYPE;
+  let Inst{21-15} = RW_GPR;
+  let Inst{22}    = RW_REL;
+  let Inst{29-23} = INDEX_GPR;
+  let Inst{31-30} = ELEM_SIZE;
+
+  /* CF_ALLOC_EXPORT_WORD1_BUF */
+  let Inst{43-32} = ARRAY_SIZE;
+  let Inst{47-44} = COMP_MASK;
+  let Inst{51-48} = BURST_COUNT;
+  let Inst{52}    = VPM;
+  let Inst{53}    = EOP;
+  let Inst{61-54} = cf_inst;
+  let Inst{62}    = MARK;
+  let Inst{63}    = BARRIER;
+}
+
+/*
+def store_global : PatFrag<(ops node:$value, node:$ptr),
+                           (store node:$value, node:$ptr),
+                           [{
+                            const Value *Src;
+                            const PointerType *PT;
+                            if ((Src = cast<StoreSDNode>(N)->getSrcValue()) &&
+                                (PT = dyn_cast<PointerType>(Src->getType()))) {
+                              return PT->getAddressSpace() == 1;
+                            }
+                            return false;
+                           }]>;
+
+*/
+
+// Matches loads from the PARAM_I address space (kernel parameter buffer).
+def load_param : PatFrag<(ops node:$ptr),
+                         (load node:$ptr),
+                          [{
+                           const Value *Src = cast<LoadSDNode>(N)->getSrcValue();
+                           if (Src) {
+                                PointerType * PT = dyn_cast<PointerType>(Src->getType());
+                                return PT && PT->getAddressSpace() == AMDILAS::PARAM_I_ADDRESS;
+                           }
+                           return false;
+                          }]>;
+
+//class EG_CF <bits<32> inst, string asm> :
+//    InstR600 <inst, (outs), (ins), asm, []>;
+
+/* XXX: We will use this when we emit the real ISA.
+  bits<24> ADDR = 0;
+  bits<3> JTS = 0;
+
+  bits<3> PC = 0;
+  bits<5> CF_CONST = 0;
+  bits<2> COND = 0;
+  bits<6> COUNT = 0;
+  bits<1> VPM = 0;
+  bits<1> EOP = 0;
+  bits<8> CF_INST = 0;
+  bits<1> WQM = 0;
+  bits<1> B = 0;
+
+  let Inst{23-0} = ADDR;
+  let Inst{26-24} = JTS;
+  let Inst{34-32} = PC;
+  let Inst{39-35} = CF_CONST;
+  let Inst{41-40} = COND;
+  let Inst{47-42} = COUNT;
+  let Inst{52} = VPM;
+  let Inst{53} = EOP;
+  let Inst{61-54} = CF_INST;
+  let Inst{62} = WQM;
+  let Inst{63} = B;
+//}
+*/
+// Subtarget predicates gating the instruction sets below:
+// HD4XXX = R600/R700, HD5XXX = Evergreen, HD6XXX = Northern Islands/Cayman.
+def isR600 : Predicate<"Subtarget.device()"
+                            "->getGeneration() == AMDILDeviceInfo::HD4XXX">;
+def isEG : Predicate<"Subtarget.device()"
+                            "->getGeneration() >= AMDILDeviceInfo::HD5XXX && "
+                            "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">;
+def isCayman : Predicate<"Subtarget.device()"
+                            "->getDeviceFlag() == OCL_DEVICE_CAYMAN">;
+def isEGorCayman : Predicate<"Subtarget.device()"
+                            "->getGeneration() == AMDILDeviceInfo::HD5XXX"
+			    "|| Subtarget.device()->getGeneration() =="
+			    "AMDILDeviceInfo::HD6XXX">;
+
+def isR600toCayman : Predicate<
+                     "Subtarget.device()->getGeneration() <= AMDILDeviceInfo::HD6XXX">;
+
+
+let Predicates = [isR600toCayman] in { 
+
+/* ------------------------------------------- */
+/* Common Instructions R600, R700, Evergreen, Cayman */
+/* ------------------------------------------- */
+def ADD : R600_2OP <
+  0x0, "ADD",
+  [(set R600_Reg32:$dst, (fadd R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+// Non-IEEE MUL: 0 * anything = 0
+def MUL : R600_2OP <
+  0x1, "MUL NON-IEEE",
+  [(set R600_Reg32:$dst, (int_AMDGPU_mul R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def MUL_IEEE : R600_2OP <
+  0x2, "MUL_IEEE",
+  [(set R600_Reg32:$dst, (fmul R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def MAX : R600_2OP <
+  0x3, "MAX",
+  [(set R600_Reg32:$dst, (AMDGPUfmax R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def MIN : R600_2OP <
+  0x4, "MIN",
+  [(set R600_Reg32:$dst, (AMDGPUfmin R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+/* For the SET* instructions there is a naming conflict in TargetSelectionDAG.td,
+ * so some of the instruction names don't match the asm string.
+ * XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics.
+ */
+
+// Floating-point compares: select FP_ONE when the condition holds,
+// FP_ZERO otherwise.
+def SETE : R600_2OP <
+  0x08, "SETE",
+  [(set R600_Reg32:$dst,
+   (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
+             COND_EQ))]
+>;
+
+def SGT : R600_2OP <
+  0x09, "SETGT",
+  [(set R600_Reg32:$dst,
+   (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
+              COND_GT))]
+>;
+
+def SGE : R600_2OP <
+  0xA, "SETGE",
+  [(set R600_Reg32:$dst,
+   (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
+              COND_GE))]
+>;
+
+def SNE : R600_2OP <
+  0xB, "SETNE",
+  [(set R600_Reg32:$dst,
+   (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO,
+    COND_NE))]
+>;
+
+def FRACT : R600_1OP <
+  0x10, "FRACT",
+  [(set R600_Reg32:$dst, (AMDGPUfract R600_Reg32:$src))]
+>;
+
+def TRUNC : R600_1OP <
+  0x11, "TRUNC",
+  [(set R600_Reg32:$dst, (int_AMDGPU_trunc R600_Reg32:$src))]
+>;
+
+def CEIL : R600_1OP <
+  0x12, "CEIL",
+  [(set R600_Reg32:$dst, (fceil R600_Reg32:$src))]
+>;
+
+// Round to nearest even.
+def RNDNE : R600_1OP <
+  0x13, "RNDNE",
+  [(set R600_Reg32:$dst, (frint R600_Reg32:$src))]
+>;
+
+def FLOOR : R600_1OP <
+  0x14, "FLOOR",
+  [(set R600_Reg32:$dst, (int_AMDGPU_floor R600_Reg32:$src))]
+>;
+
+def MOV : R600_1OP <0x19, "MOV", []>;
+
+// Immediate move: same opcode as MOV (0x19); the ALU_LITERAL_X source tells
+// the emitter the value comes from the instruction's literal slot.
+class MOV_IMM <ValueType vt, Operand immType> : InstR600 <0x19,
+  (outs R600_Reg32:$dst),
+  (ins R600_Reg32:$alu_literal, immType:$imm),
+  "MOV_IMM $dst, $imm",
+  [], AnyALU
+>;
+
+def MOV_IMM_I32 : MOV_IMM<i32, i32imm>;
+def : Pat <
+  (imm:$val),
+  (MOV_IMM_I32 (i32 ALU_LITERAL_X), imm:$val)
+>;
+
+def MOV_IMM_F32 : MOV_IMM<f32, f32imm>;
+def : Pat <
+  (fpimm:$val),
+  (MOV_IMM_F32 (i32 ALU_LITERAL_X), fpimm:$val)
+>;
+
+// Pixel kill on greater-than; no DAG pattern, emitted directly by lowering.
+def KILLGT : R600_2OP <
+  0x2D, "KILLGT",
+  []
+>;
+
+def AND_INT : R600_2OP <
+  0x30, "AND_INT",
+  [(set R600_Reg32:$dst, (and R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def OR_INT : R600_2OP <
+  0x31, "OR_INT",
+  [(set R600_Reg32:$dst, (or R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def XOR_INT : R600_2OP <
+  0x32, "XOR_INT",
+  [(set R600_Reg32:$dst, (xor R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def NOT_INT : R600_1OP <
+  0x33, "NOT_INT",
+  [(set R600_Reg32:$dst, (not R600_Reg32:$src))]
+>;
+
+def ADD_INT : R600_2OP <
+  0x34, "ADD_INT",
+  [(set R600_Reg32:$dst, (add R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+// Reindented with spaces to match the file's 2-space convention.
+def SUB_INT : R600_2OP <
+  0x35, "SUB_INT",
+  [(set R600_Reg32:$dst, (sub R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+// Signed integer min/max.
+def MAX_INT : R600_2OP <
+  0x36, "MAX_INT",
+  [(set R600_Reg32:$dst, (AMDGPUsmax R600_Reg32:$src0, R600_Reg32:$src1))]>;
+
+def MIN_INT : R600_2OP <
+  0x37, "MIN_INT",
+  [(set R600_Reg32:$dst, (AMDGPUsmin R600_Reg32:$src0, R600_Reg32:$src1))]>;
+
+// Unsigned integer max.  Must select on the unsigned max node; the original
+// pattern used AMDGPUsmax (signed), which mis-selects operands whose sign
+// bit is set (cf. MIN_UINT below, which correctly uses AMDGPUumin).
+def MAX_UINT : R600_2OP <
+  0x38, "MAX_UINT",
+  [(set R600_Reg32:$dst, (AMDGPUumax R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+def MIN_UINT : R600_2OP <
+  0x39, "MIN_UINT",
+  [(set R600_Reg32:$dst, (AMDGPUumin R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+// Integer compares: select -1 (all ones) when the condition holds, 0
+// otherwise.
+def SETE_INT : R600_2OP <
+  0x3A, "SETE_INT",
+  [(set (i32 R600_Reg32:$dst),
+   (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETEQ))]
+>;
+
+// NOTE(review): asm string "SGT_INT" does not match the def name SETGT_INT,
+// unlike the sibling SET*_INT defs -- confirm intended mnemonic.
+def SETGT_INT : R600_2OP <
+  0x3B, "SGT_INT",
+  [(set (i32 R600_Reg32:$dst),
+   (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGT))]
+>;
+
+// Reindented with spaces to match the file's 2-space convention.
+def SETGE_INT : R600_2OP <
+  0x3C, "SETGE_INT",
+  [(set (i32 R600_Reg32:$dst),
+   (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETGE))]
+>;
+
+def SETNE_INT : R600_2OP <
+  0x3D, "SETNE_INT",
+  [(set (i32 R600_Reg32:$dst),
+   (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETNE))]
+>;
+
+// Unsigned integer compares.
+def SETGT_UINT : R600_2OP <
+  0x3E, "SETGT_UINT",
+  [(set (i32 R600_Reg32:$dst),
+   (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGT))]
+>;
+
+def SETGE_UINT : R600_2OP <
+  0x3F, "SETGE_UINT",
+  [(set (i32 R600_Reg32:$dst),
+    (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETUGE))]
+>;
+
+// Integer conditional move; note src2/src1 are swapped in the pattern
+// relative to the instruction operand order (same as CNDE_Common below).
+// Reindented with spaces to match the file's 2-space convention.
+def CNDE_INT : R600_3OP <
+  0x1C, "CNDE_INT",
+  [(set (i32 R600_Reg32:$dst),
+   (IL_cmov_logical R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))]
+>;
+
+/* Texture instructions */
+
+
+// Texel fetch; takes three extra immediates beyond the R600_TEX defaults,
+// so the ins list and asm string are overridden.
+def TEX_LD : R600_TEX <
+  0x03, "TEX_LD",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txf R600_Reg128:$src0, imm:$src1, imm:$src2, imm:$src3, imm:$src4, imm:$src5))]
+> {
+let AsmString = "TEX_LD $dst, $src0, $src1, $src2, $src3, $src4, $src5";
+let InOperandList = (ins R600_Reg128:$src0, i32imm:$src1, i32imm:$src2, i32imm:$src3, i32imm:$src4, i32imm:$src5);
+}
+
+def TEX_GET_TEXTURE_RESINFO : R600_TEX <
+  0x04, "TEX_GET_TEXTURE_RESINFO",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txq R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_GET_GRADIENTS_H : R600_TEX <
+  0x07, "TEX_GET_GRADIENTS_H",
+  [(set R600_Reg128:$dst, (int_AMDGPU_ddx R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_GET_GRADIENTS_V : R600_TEX <
+  0x08, "TEX_GET_GRADIENTS_V",
+  [(set R600_Reg128:$dst, (int_AMDGPU_ddy R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_SET_GRADIENTS_H : R600_TEX <
+  0x0B, "TEX_SET_GRADIENTS_H",
+  []
+>;
+
+def TEX_SET_GRADIENTS_V : R600_TEX <
+  0x0C, "TEX_SET_GRADIENTS_V",
+  []
+>;
+
+// The *_C forms share an intrinsic with their non-compare counterparts; the
+// TEX_SHADOW PatLeaf on the texture-type immediate selects the compare form.
+def TEX_SAMPLE : R600_TEX <
+  0x10, "TEX_SAMPLE",
+  [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_SAMPLE_C : R600_TEX <
+  0x18, "TEX_SAMPLE_C",
+  [(set R600_Reg128:$dst, (int_AMDGPU_tex R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
+>;
+
+def TEX_SAMPLE_L : R600_TEX <
+  0x11, "TEX_SAMPLE_L",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_SAMPLE_C_L : R600_TEX <
+  0x19, "TEX_SAMPLE_C_L",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txl R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
+>;
+
+def TEX_SAMPLE_LB : R600_TEX <
+  0x12, "TEX_SAMPLE_LB",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0, imm:$src1, imm:$src2))]
+>;
+
+def TEX_SAMPLE_C_LB : R600_TEX <
+  0x1A, "TEX_SAMPLE_C_LB",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txb R600_Reg128:$src0, imm:$src1, TEX_SHADOW:$src2))]
+>;
+
+def TEX_SAMPLE_G : R600_TEX <
+  0x14, "TEX_SAMPLE_G",
+  []
+>;
+
+def TEX_SAMPLE_C_G : R600_TEX <
+  0x1C, "TEX_SAMPLE_C_G",
+  []
+>;
+
+/* Helper classes for common instructions */
+
+// These classes parameterize only the opcode; each generation (r600/eg/cm)
+// instantiates them with its own encoding below.
+class MUL_LIT_Common <bits<32> inst> : R600_3OP <
+  inst, "MUL_LIT",
+  []
+>;
+
+class MULADD_Common <bits<32> inst> : R600_3OP <
+  inst, "MULADD",
+  [(set (f32 R600_Reg32:$dst),
+   (IL_mad R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2))]
+>;
+
+// Conditional move: note src2/src1 are swapped in the pattern relative to
+// the instruction operand order.
+class CNDE_Common <bits<32> inst> : R600_3OP <
+  inst, "CNDE",
+  [(set (f32 R600_Reg32:$dst),
+   (IL_cmov_logical R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))]
+>;
+
+class CNDGT_Common <bits<32> inst> : R600_3OP <
+  inst, "CNDGT",
+  []
+>;
+  
+// NOTE(review): CNDGE is matched from int_AMDGPU_cndlt with src2/src1
+// swapped -- presumably (x >= 0 ? a : b) == (x < 0 ? b : a); confirm.
+class CNDGE_Common <bits<32> inst> : R600_3OP <
+  inst, "CNDGE",
+  [(set R600_Reg32:$dst, (int_AMDGPU_cndlt R600_Reg32:$src0, R600_Reg32:$src2, R600_Reg32:$src1))]
+>;
+
+class DOT4_Common <bits<32> inst> : R600_REDUCTION <
+  inst,
+  (ins R600_Reg128:$src0, R600_Reg128:$src1),
+  "DOT4 $dst $src0, $src1",
+  [(set R600_Reg32:$dst, (int_AMDGPU_dp4 R600_Reg128:$src0, R600_Reg128:$src1))]
+>;
+
+class CUBE_Common <bits<32> inst> : InstR600 <
+  inst,
+  (outs R600_Reg128:$dst),
+  (ins R600_Reg128:$src),
+  "CUBE $dst $src",
+  [(set R600_Reg128:$dst, (int_AMDGPU_cube R600_Reg128:$src))],
+  VecALU
+>;
+
+// Transcendental / conversion helper classes, opcode-parameterized per
+// generation like the classes above.
+class EXP_IEEE_Common <bits<32> inst> : R600_1OP <
+  inst, "EXP_IEEE",
+  [(set R600_Reg32:$dst, (fexp2 R600_Reg32:$src))]
+>;
+
+class FLT_TO_INT_Common <bits<32> inst> : R600_1OP <
+  inst, "FLT_TO_INT",
+  [(set R600_Reg32:$dst, (fp_to_sint R600_Reg32:$src))]
+>;
+
+class INT_TO_FLT_Common <bits<32> inst> : R600_1OP <
+  inst, "INT_TO_FLT",
+  [(set R600_Reg32:$dst, (sint_to_fp R600_Reg32:$src))]
+>;
+
+class FLT_TO_UINT_Common <bits<32> inst> : R600_1OP <
+  inst, "FLT_TO_UINT",
+  [(set R600_Reg32:$dst, (fp_to_uint R600_Reg32:$src))]
+>;
+
+class UINT_TO_FLT_Common <bits<32> inst> : R600_1OP <
+  inst, "UINT_TO_FLT",
+  [(set R600_Reg32:$dst, (uint_to_fp R600_Reg32:$src))]
+>;
+
+class LOG_CLAMPED_Common <bits<32> inst> : R600_1OP <
+  inst, "LOG_CLAMPED",
+  []
+>;
+
+class LOG_IEEE_Common <bits<32> inst> : R600_1OP <
+  inst, "LOG_IEEE",
+  [(set R600_Reg32:$dst, (int_AMDIL_log R600_Reg32:$src))]
+>;
+
+class LSHL_Common <bits<32> inst> : R600_2OP <
+  inst, "LSHL $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (shl R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class LSHR_Common <bits<32> inst> : R600_2OP <
+  inst, "LSHR $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (srl R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class ASHR_Common <bits<32> inst> : R600_2OP <
+  inst, "ASHR $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (sra R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class MULHI_INT_Common <bits<32> inst> : R600_2OP <
+  inst, "MULHI_INT $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (mulhs R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class MULHI_UINT_Common <bits<32> inst> : R600_2OP <
+  inst, "MULHI $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (mulhu R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class MULLO_INT_Common <bits<32> inst> : R600_2OP <
+  inst, "MULLO_INT $dst, $src0, $src1",
+  [(set R600_Reg32:$dst, (mul R600_Reg32:$src0, R600_Reg32:$src1))]
+>;
+
+class MULLO_UINT_Common <bits<32> inst> : R600_2OP <
+  inst, "MULLO_UINT $dst, $src0, $src1",
+  []
+>;
+
+class RECIP_CLAMPED_Common <bits<32> inst> : R600_1OP <
+  inst, "RECIP_CLAMPED",
+  []
+>;
+
+class RECIP_IEEE_Common <bits<32> inst> : R600_1OP <
+  inst, "RECIP_IEEE",
+  [(set R600_Reg32:$dst, (int_AMDGPU_rcp R600_Reg32:$src))]
+>;
+
+// NOTE(review): asm says "RECIP_INT" but the class is RECIP_UINT and the
+// pattern matches the unsigned reciprocal node -- confirm mnemonic.
+class RECIP_UINT_Common <bits<32> inst> : R600_1OP <
+  inst, "RECIP_INT $dst, $src",
+  [(set R600_Reg32:$dst, (AMDGPUurecip R600_Reg32:$src))]
+>;
+
+class RECIPSQRT_CLAMPED_Common <bits<32> inst> : R600_1OP <
+  inst, "RECIPSQRT_CLAMPED",
+  [(set R600_Reg32:$dst, (int_AMDGPU_rsq R600_Reg32:$src))]
+>;
+
+class RECIPSQRT_IEEE_Common <bits<32> inst> : R600_1OP <
+  inst, "RECIPSQRT_IEEE",
+  []
+>;
+
+// SIN/COS set the Trig TSFlags bit so lowering can apply the period
+// reduction (see TRIG_eg below).
+class SIN_Common <bits<32> inst> : R600_1OP <
+  inst, "SIN",
+  [(set R600_Reg32:$dst, (int_AMDIL_sin R600_Reg32:$src))]>{
+  let Trig = 1;
+}
+
+class COS_Common <bits<32> inst> : R600_1OP <
+  inst, "COS",
+  [(set R600_Reg32:$dst, (int_AMDIL_cos R600_Reg32:$src))]> {
+  let Trig = 1;
+}
+
+/* Helper patterns for complex intrinsics */
+/* -------------------------------------- */
+
+// div(a, b) = a * recip(b)
+class DIV_Common <InstR600 recip_ieee> : Pat<
+  (int_AMDGPU_div R600_Reg32:$src0, R600_Reg32:$src1),
+  (MUL R600_Reg32:$src0, (recip_ieee R600_Reg32:$src1))
+>;
+
+// ssg(x): 1 if x > 0, else (0 if x >= 0, else -1).
+class SSG_Common <InstR600 cndgt, InstR600 cndge> : Pat <
+  (int_AMDGPU_ssg R600_Reg32:$src),
+  (cndgt R600_Reg32:$src, (f32 ONE), (cndge R600_Reg32:$src, (f32 ZERO), (f32 NEG_ONE)))
+>;
+
+// lit.z = exp(log(max(src_y, 0)) * src_w), gated by src_x via MUL_LIT.
+class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee> : Pat <
+  (int_TGSI_lit_z R600_Reg32:$src_x, R600_Reg32:$src_y, R600_Reg32:$src_w),
+  (exp_ieee (mul_lit (log_clamped (MAX R600_Reg32:$src_y, (f32 ZERO))), R600_Reg32:$src_w, R600_Reg32:$src_x))
+>;
+
+/* ---------------------- */
+/* R600 / R700 Only Instructions */
+/* ---------------------- */
+
+// R600/R700 opcode assignments for the Common classes above.
+let Predicates = [isR600] in {
+
+  def MUL_LIT_r600 : MUL_LIT_Common<0x0C>;
+  def MULADD_r600 : MULADD_Common<0x10>;
+  def CNDE_r600 : CNDE_Common<0x18>;
+  def CNDGT_r600 : CNDGT_Common<0x19>;
+  def CNDGE_r600 : CNDGE_Common<0x1A>;
+  def DOT4_r600 : DOT4_Common<0x50>;
+  def CUBE_r600 : CUBE_Common<0x52>;
+  def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>;
+  def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>;
+  def LOG_IEEE_r600 : LOG_IEEE_Common<0x63>;
+  def RECIP_CLAMPED_r600 : RECIP_CLAMPED_Common<0x64>;
+  def RECIP_IEEE_r600 : RECIP_IEEE_Common<0x66>;
+  def RECIPSQRT_CLAMPED_r600 : RECIPSQRT_CLAMPED_Common<0x67>;
+  def RECIPSQRT_IEEE_r600 : RECIPSQRT_IEEE_Common<0x69>;
+  def FLT_TO_INT_r600 : FLT_TO_INT_Common<0x6b>;
+  def INT_TO_FLT_r600 : INT_TO_FLT_Common<0x6c>;
+  def FLT_TO_UINT_r600 : FLT_TO_UINT_Common<0x79>;
+  def UINT_TO_FLT_r600 : UINT_TO_FLT_Common<0x6d>;
+  def SIN_r600 : SIN_Common<0x6E>;
+  def COS_r600 : COS_Common<0x6F>;
+  def ASHR_r600 : ASHR_Common<0x70>;
+  def LSHR_r600 : LSHR_Common<0x71>;
+  def LSHL_r600 : LSHL_Common<0x72>;
+  def MULLO_INT_r600 : MULLO_INT_Common<0x73>;
+  def MULHI_INT_r600 : MULHI_INT_Common<0x74>;
+  def MULLO_UINT_r600 : MULLO_UINT_Common<0x75>;
+  def MULHI_UINT_r600 : MULHI_UINT_Common<0x76>;
+  def RECIP_UINT_r600 : RECIP_UINT_Common <0x78>;
+
+  // Composite patterns built from the defs above.
+  def DIV_r600 : DIV_Common<RECIP_IEEE_r600>;
+  def POW_r600 : POW_Common<LOG_IEEE_r600, EXP_IEEE_r600, MUL, GPRF32>;
+  def SSG_r600 : SSG_Common<CNDGT_r600, CNDGE_r600>;
+  def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
+
+}
+
+/* ----------------- */
+/* R700+ Trig helper */
+/* ----------------- */
+
+/*
+class TRIG_HELPER_r700 <InstR600 trig_inst>: Pat <
+  (trig_inst R600_Reg32:$src),
+  (trig_inst (fmul R600_Reg32:$src, (PI)))
+>;
+*/
+
+//===----------------------------------------------------------------------===//
+// Evergreen Only instructions
+//===----------------------------------------------------------------------===//
+
+// Evergreen-only opcode assignments (Cayman gets its own vector variants
+// below).
+let Predicates = [isEG] in {
+  
+def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>;
+
+def MULLO_INT_eg : MULLO_INT_Common<0x8F>;
+def MULHI_INT_eg : MULHI_INT_Common<0x90>;
+def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
+def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
+
+} // End Predicates = [isEG]
+
+/* ------------------------------- */
+/* Evergreen / Cayman Instructions */
+/* ------------------------------- */
+
+let Predicates = [isEGorCayman] in {
+  
+// Evergreen SIN/COS take input scaled by 1/(2*pi); pre-multiply the operand
+// by the TWO_PI_INV literal before the trig instruction.
+class TRIG_eg <InstR600 trig, Intrinsic intr> : Pat<
+  (intr R600_Reg32:$src),
+  (trig (MUL (MOV_IMM_I32 (i32 ALU_LITERAL_X), CONST.TWO_PI_INV), R600_Reg32:$src))
+>;
+
+  // Evergreen/Cayman opcode assignments for the Common classes.
+  def MULADD_eg : MULADD_Common<0x14>;
+  def ASHR_eg : ASHR_Common<0x15>;
+  def LSHR_eg : LSHR_Common<0x16>;
+  def LSHL_eg : LSHL_Common<0x17>;
+  def CNDE_eg : CNDE_Common<0x19>;
+  def CNDGT_eg : CNDGT_Common<0x1A>;
+  def CNDGE_eg : CNDGE_Common<0x1B>;
+  def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
+  def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
+  def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
+  def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
+  def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
+  def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
+  def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
+  def SIN_eg : SIN_Common<0x8D>;
+  def COS_eg : COS_Common<0x8E>;
+  def DOT4_eg : DOT4_Common<0xBE>;
+  def CUBE_eg : CUBE_Common<0xC0>;
+
+  def DIV_eg : DIV_Common<RECIP_IEEE_eg>;
+  def POW_eg : POW_Common<LOG_IEEE_eg, EXP_IEEE_eg, MUL, GPRF32>;
+  def SSG_eg : SSG_Common<CNDGT_eg, CNDGE_eg>;
+  def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
+
+  def : TRIG_eg <SIN_eg, int_AMDGPU_sin>;
+  def : TRIG_eg <COS_eg, int_AMDGPU_cos>;
+
+  // Patterns cleared: fp-to-int conversions are instead matched by the
+  // TRUNC-composed Pats below.
+  def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
+    let Pattern = [];
+  }
+
+  def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
+
+  def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
+    let Pattern = [];
+  }
+
+  def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
+
+  def : Pat<(fp_to_sint R600_Reg32:$src),
+    (FLT_TO_INT_eg (TRUNC R600_Reg32:$src))>;
+
+  def : Pat<(fp_to_uint R600_Reg32:$src),
+    (FLT_TO_UINT_eg (TRUNC R600_Reg32:$src))>;
+
+//===----------------------------------------------------------------------===//
+// Memory read/write instructions
+//===----------------------------------------------------------------------===//
+
+let usesCustomInserter = 1 in {
+
+// Cacheless RAT write used for i32 global stores: writes $rw_gpr at the
+// index in $index_gpr.  Expanded by EmitInstrWithCustomInserter.
+def RAT_WRITE_CACHELESS_eg : EG_CF_RAT <0x57, 0x2, 0, (outs),
+  (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr),
+  "RAT_WRITE_CACHELESS_eg $rw_gpr, $index_gpr",
+  [(global_store (i32 R600_TReg32_X:$rw_gpr), R600_TReg32_X:$index_gpr)]>
+{
+  let RIM         = 0;
+  /* XXX: Have a separate instruction for non-indexed writes. */
+  let TYPE        = 1;
+  let RW_REL      = 0;
+  let ELEM_SIZE   = 0;
+
+  let ARRAY_SIZE  = 0;
+  let COMP_MASK   = 1; // x channel only
+  let BURST_COUNT = 0;
+  let VPM         = 0;
+  let EOP         = 0;
+  let MARK        = 0;
+  let BARRIER     = 1;
+}
+
+} // End usesCustomInserter = 1
+
+// Vertex-buffer read; buffer_id selects the buffer (0 = kernel parameters,
+// 1 = global memory, per the defs below).
+class VTX_READ_eg <int buffer_id, list<dag> pattern> : InstR600ISA <
+  (outs R600_TReg32_X:$dst),
+  (ins MEMxi:$ptr),
+  "VTX_READ_eg $dst, $ptr",
+  pattern
+>;
+
+def VTX_READ_PARAM_eg : VTX_READ_eg <0,
+  [(set (i32 R600_TReg32_X:$dst), (load_param ADDRVTX_READ:$ptr))]
+>;
+
+def VTX_READ_GLOBAL_eg : VTX_READ_eg <1,
+  [(set (i32 R600_TReg32_X:$dst), (global_load ADDRVTX_READ:$ptr))]
+>;
+
+}
+
+let Predicates = [isCayman] in {
+
+// Cayman variants must fill a whole instruction group (isVector).
+let isVector = 1 in { 
+
+def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
+
+def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
+def MULHI_INT_cm : MULHI_INT_Common<0x90>;
+def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
+
+} // End isVector = 1
+
+// RECIP_UINT emulation for Cayman
+// urecip(x) ~= uint(recip(float(x)) * 2^32); 0x4f800000 is 2^32 as f32.
+def : Pat <
+  (AMDGPUurecip R600_Reg32:$src0),
+  (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg R600_Reg32:$src0)),
+                            (MOV_IMM_I32 (i32 ALU_LITERAL_X), 0x4f800000)))
+>;
+
+} // End isCayman
+
+/* Other Instructions */
+
+let isCodeGenOnly = 1 in {
+/*
+  def SWIZZLE : AMDGPUShaderInst <
+    (outs GPRV4F32:$dst),
+    (ins GPRV4F32:$src0, i32imm:$src1),
+    "SWIZZLE $dst, $src0, $src1",
+    [(set GPRV4F32:$dst, (int_AMDGPU_swizzle GPRV4F32:$src0, imm:$src1))]
+  >;
+*/
+
+  // Marker pseudo with no operands and no selection pattern; presumably
+  // denotes the end of the instruction stream -- confirm in the emitter.
+  def LAST : AMDGPUShaderInst <
+    (outs),
+    (ins),
+    "LAST",
+    []
+  >;
+
+  // Extract one 32-bit channel (selected by the $src1 immediate) from a
+  // 128-bit vector register.  No selection pattern; generated directly.
+  def GET_CHAN : AMDGPUShaderInst <
+    (outs R600_Reg32:$dst),
+    (ins R600_Reg128:$src0, i32imm:$src1),
+    "GET_CHAN $dst, $src0, $src1",
+    []
+  >;
+
+  // Lit-multiply helper for the AMDGPU.mullit intrinsic: three scalar
+  // sources produce a 128-bit vector result.
+  // Fix: the asm string previously omitted $src2 even though the
+  // instruction takes three source operands.
+  def MULLIT : AMDGPUShaderInst <
+    (outs R600_Reg128:$dst),
+    (ins R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
+    "MULLIT $dst, $src0, $src1, $src2",
+    [(set R600_Reg128:$dst, (int_AMDGPU_mullit R600_Reg32:$src0, R600_Reg32:$src1, R600_Reg32:$src2))]
+  >;
+
+let usesCustomInserter = 1, isPseudo = 1 in {
+
+// Pseudo that materializes a preloaded launch value (group id, thread
+// id, grid sizes, ...) by matching the corresponding read intrinsic.
+class R600PreloadInst <string asm, Intrinsic intr> : AMDGPUInst <
+  (outs R600_TReg32:$dst),
+  (ins),
+  asm,
+  [(set R600_TReg32:$dst, (intr))]
+>;
+
+// Work-group id, work-item id, group counts, and global/local sizes,
+// one preload pseudo per r600 read intrinsic.
+def TGID_X : R600PreloadInst <"TGID_X", int_r600_read_tgid_x>;
+def TGID_Y : R600PreloadInst <"TGID_Y", int_r600_read_tgid_y>;
+def TGID_Z : R600PreloadInst <"TGID_Z", int_r600_read_tgid_z>;
+
+def TIDIG_X : R600PreloadInst <"TIDIG_X", int_r600_read_tidig_x>;
+def TIDIG_Y : R600PreloadInst <"TIDIG_Y", int_r600_read_tidig_y>;
+def TIDIG_Z : R600PreloadInst <"TIDIG_Z", int_r600_read_tidig_z>;
+
+def NGROUPS_X : R600PreloadInst <"NGROUPS_X", int_r600_read_ngroups_x>;
+def NGROUPS_Y : R600PreloadInst <"NGROUPS_Y", int_r600_read_ngroups_y>;
+def NGROUPS_Z : R600PreloadInst <"NGROUPS_Z", int_r600_read_ngroups_z>;
+
+def GLOBAL_SIZE_X : R600PreloadInst <"GLOBAL_SIZE_X",
+                                     int_r600_read_global_size_x>;
+def GLOBAL_SIZE_Y : R600PreloadInst <"GLOBAL_SIZE_Y",
+                                     int_r600_read_global_size_y>;
+def GLOBAL_SIZE_Z : R600PreloadInst <"GLOBAL_SIZE_Z",
+                                     int_r600_read_global_size_z>;
+
+def LOCAL_SIZE_X : R600PreloadInst <"LOCAL_SIZE_X",
+                                    int_r600_read_local_size_x>;
+def LOCAL_SIZE_Y : R600PreloadInst <"LOCAL_SIZE_Y",
+                                    int_r600_read_local_size_y>;
+def LOCAL_SIZE_Z : R600PreloadInst <"LOCAL_SIZE_Z",
+                                    int_r600_read_local_size_z>;
+
+// Load a value from the constant buffer; $src0 selects the constant.
+def R600_LOAD_CONST : AMDGPUShaderInst <
+  (outs R600_Reg32:$dst),
+  (ins i32imm:$src0),
+  "R600_LOAD_CONST $dst, $src0",
+  [(set R600_Reg32:$dst, (int_AMDGPU_load_const imm:$src0))]
+>;
+
+// Read shader input number $src into a register.
+def LOAD_INPUT : AMDGPUShaderInst <
+  (outs R600_Reg32:$dst),
+  (ins i32imm:$src),
+  "LOAD_INPUT $dst, $src",
+  [(set R600_Reg32:$dst, (int_R600_load_input imm:$src))]
+>;
+
+// Mark register $src as reserved (see R600MachineFunctionInfo).
+def RESERVE_REG : AMDGPUShaderInst <
+  (outs),
+  (ins i32imm:$src),
+  "RESERVE_REG $src",
+  [(int_AMDGPU_reserve_reg imm:$src)]
+>;
+
+// Write $src0 to shader output number $src1.
+def STORE_OUTPUT: AMDGPUShaderInst <
+  (outs),
+  (ins R600_Reg32:$src0, i32imm:$src1),
+  "STORE_OUTPUT $src0, $src1",
+  [(int_AMDGPU_store_output R600_Reg32:$src0, imm:$src1)]
+>;
+
+// Texture sample with user-supplied derivatives (AMDGPU.txd intrinsic).
+def TXD: AMDGPUShaderInst <
+  (outs R600_Reg128:$dst),
+  (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4),
+  "TXD $dst, $src0, $src1, $src2, $src3, $src4",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, imm:$src4))]
+>;
+
+// Shadow-sampler variant of TXD; selected when $src4 is a TEX_SHADOW
+// texture type.
+def TXD_SHADOW: AMDGPUShaderInst <
+  (outs R600_Reg128:$dst),
+  (ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, i32imm:$src3, i32imm:$src4),
+  "TXD_SHADOW $dst, $src0, $src1, $src2, $src3, $src4",
+  [(set R600_Reg128:$dst, (int_AMDGPU_txd R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2, imm:$src3, TEX_SHADOW:$src4))]
+>;
+
+} // End usesCustomInserter = 1, isPseudo = 1
+
+} // End isCodeGenOnly = 1
+
+// Scalar f32 instantiations of the generic clamp/abs/neg helpers.
+def CLAMP_R600 :  CLAMP <R600_Reg32>;
+def FABS_R600 : FABS<R600_Reg32>;
+def FNEG_R600 : FNEG<R600_Reg32>;
+
+let usesCustomInserter = 1 in {
+
+// Consumes a value whose register write should be masked out; expanded
+// by a custom inserter (used by the KIL/KILP patterns).
+def MASK_WRITE : AMDGPUShaderInst <
+    (outs),
+    (ins R600_Reg32:$src),
+    "MASK_WRITE $src",
+    []
+>;
+
+} // End usesCustomInserter = 1
+
+//===----------------------------------------------------------------------===//
+// ISel Patterns
+//===----------------------------------------------------------------------===//
+
+// KIL Patterns
+// KILP: unconditional pixel kill -- KILLGT compares 1.0 > 0.0, which is
+// always true; MASK_WRITE discards the comparison's register result.
+def KILP : Pat <
+  (int_AMDGPU_kilp),
+  (MASK_WRITE (KILLGT (f32 ONE), (f32 ZERO)))
+>;
+
+// KIL: kill the pixel when 0.0 > src0.
+def KIL : Pat <
+  (int_AMDGPU_kill R600_Reg32:$src0),
+  (MASK_WRITE (KILLGT (f32 ZERO), (f32 R600_Reg32:$src0)))
+>;
+
+// LT/LE style selectcc conditions are matched by the corresponding
+// GT/GE instruction with its operands swapped (a < b  <=>  b > a).
+
+// SGT Reverse args
+def : Pat <
+  (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LT),
+  (SGT R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// SGE Reverse args
+def : Pat <
+  (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, COND_LE),
+  (SGE R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// SETGT_INT reverse args
+def : Pat <
+  (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLT),
+  (SETGT_INT R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// SETGE_INT reverse args
+def : Pat <
+  (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETLE),
+  (SETGE_INT R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// SETGT_UINT reverse args
+def : Pat <
+  (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULT),
+  (SETGT_UINT R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// SETGE_UINT reverse args
+// Fix: the operands must be swapped (src0 <=u src1  <=>  src1 >=u src0),
+// matching every other reverse-args pattern above; previously this
+// emitted SETGE_UINT $src0, $src1, which computes the wrong comparison.
+def : Pat <
+  (selectcc (i32 R600_Reg32:$src0), R600_Reg32:$src1, -1, 0, SETULE),
+  (SETGE_UINT R600_Reg32:$src1, R600_Reg32:$src0)
+>;
+
+// The next two patterns are special cases for handling 'true if ordered' and
+// 'true if unordered' conditionals.  The assumption here is that the behavior of
+// SETE and SNE conforms to the Direct3D 10 rules for floating point values
+// described here:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/cc308050.aspx#alpha_32_bit
+// We assume that SETE returns false when one of the operands is NaN and
+// SNE returns true when one of the operands is NaN.
+
+// SETE - 'true if ordered': both operands non-NaN implies equality test
+// of a value with itself pattern holds; relies on the D3D10 rules above.
+def : Pat <
+  (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETO),
+  (SETE R600_Reg32:$src0, R600_Reg32:$src1)
+>;
+
+// SNE - 'true if unordered'
+def : Pat <
+  (selectcc (f32 R600_Reg32:$src0), R600_Reg32:$src1, FP_ONE, FP_ZERO, SETUO),
+  (SNE R600_Reg32:$src0, R600_Reg32:$src1)
+>;
+
+// Channel extract/insert for 128-bit vector registers, f32 elements.
+def : Extract_Element <f32, v4f32, R600_Reg128, 0, sel_x>;
+def : Extract_Element <f32, v4f32, R600_Reg128, 1, sel_y>;
+def : Extract_Element <f32, v4f32, R600_Reg128, 2, sel_z>;
+def : Extract_Element <f32, v4f32, R600_Reg128, 3, sel_w>;
+
+def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 4, sel_x>;
+def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 5, sel_y>;
+def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 6, sel_z>;
+def : Insert_Element <f32, v4f32, R600_Reg32, R600_Reg128, 7, sel_w>;
+
+// Same for i32 elements.
+def : Extract_Element <i32, v4i32, R600_Reg128, 0, sel_x>;
+def : Extract_Element <i32, v4i32, R600_Reg128, 1, sel_y>;
+def : Extract_Element <i32, v4i32, R600_Reg128, 2, sel_z>;
+def : Extract_Element <i32, v4i32, R600_Reg128, 3, sel_w>;
+
+def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 4, sel_x>;
+def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 5, sel_y>;
+def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 6, sel_z>;
+def : Insert_Element <i32, v4i32, R600_Reg32, R600_Reg128, 7, sel_w>;
+
+// build_vector of four scalars into a 128-bit register.
+def : Vector_Build <v4f32, R600_Reg32>;
+def : Vector_Build <v4i32, R600_Reg32>;
+
+// bitconvert patterns
+
+def : BitConvert <i32, f32, R600_Reg32>;
+def : BitConvert <f32, i32, R600_Reg32>;
+
+} // End isR600toCayman Predicate
diff --git a/lib/Target/AMDGPU/R600Intrinsics.td b/lib/Target/AMDGPU/R600Intrinsics.td
new file mode 100644
index 0000000..0265388
--- /dev/null
+++ b/lib/Target/AMDGPU/R600Intrinsics.td
@@ -0,0 +1,16 @@
+//===-- R600Intrinsics.td - R600 Intrinsic defs --------*- tablegen -*-----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 Intrinsic Definitions
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "R600", isTarget = 1 in {
+  // Reads one float shader input; the i32 operand selects which input
+  // (matched by the LOAD_INPUT pseudo in R600Instructions.td).
+  def int_R600_load_input : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+}
diff --git a/lib/Target/AMDGPU/R600KernelParameters.cpp b/lib/Target/AMDGPU/R600KernelParameters.cpp
new file mode 100644
index 0000000..e810023
--- /dev/null
+++ b/lib/Target/AMDGPU/R600KernelParameters.cpp
@@ -0,0 +1,546 @@
+//===-- R600KernelParameters.cpp - Lower kernel function arguments --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers kernel function arguments to loads from the vertex buffer.
+//
+// Kernel arguments are stored in the vertex buffer at an offset of 9 dwords,
+// so arg0 needs to be loaded from VTX_BUFFER[9] and arg1 is loaded from
+// VTX_BUFFER[10], etc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDIL.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Metadata.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/TypeBuilder.h"
+
+#include <map>
+#include <set>
+
+using namespace llvm;
+
+namespace {
+
+#define CONSTANT_CACHE_SIZE_DW 127
+
+// Function pass that rewrites OpenCL kernel arguments into loads from
+// the parameter (vertex buffer) address spaces.
+class R600KernelParameters : public FunctionPass
+{
+  const TargetData * TD;   // Data layout; used to size arguments.
+  LLVMContext* Context;    // Cached in doInitialization().
+  Module *mod;             // Module being processed.
+
+  // Book-keeping for one kernel argument.
+  struct param
+  {
+    param() : val(NULL), ptr_val(NULL), offset_in_dw(0), size_in_dw(0),
+              indirect(true), specialID(0) {}
+
+    Value* val;        // The original Argument value.
+    Value* ptr_val;    // Replacement pointer into the param address space.
+    int offset_in_dw;  // Offset of this argument in the parameter buffer.
+    int size_in_dw;    // Store size of this argument, in dwords.
+
+    bool indirect;     // Whether indirect parameter addressing is needed.
+
+    std::string specialType;  // OpenCL builtin type name (image2d_t, ...).
+    int specialID;            // Resource ID assigned for special types.
+
+    int end() { return offset_in_dw + size_in_dw; }
+    // The first 9 dwords are reserved for the grid sizes.
+    int get_rat_offset() { return 9 + offset_in_dw; }
+  };
+
+  std::vector<param> params;
+
+  bool isOpenCLKernel(const Function* fun);
+  int getLastSpecialID(const std::string& TypeName);
+
+  int getListSize();
+  void AddParam(Argument* arg);
+  int calculateArgumentSize(Argument* arg);
+  void RunAna(Function* fun);
+  void Replace(Function* fun);
+  bool isIndirect(Value* val, std::set<Value*>& visited);
+  void Propagate(Function* fun);
+  void Propagate(Value* v, const Twine& name, bool indirect = true);
+  Value* ConstantRead(Function* fun, param& p);
+  Value* handleSpecial(Function* fun, param& p);
+  bool isSpecialType(Type*);
+  std::string getSpecialTypeName(Type*);
+public:
+  static char ID;
+  // Fix: the default constructor (used by the RegisterPass machinery)
+  // previously left TD uninitialized; initialize it to NULL so a read
+  // before initialization cannot dereference garbage.
+  R600KernelParameters() : FunctionPass(ID), TD(NULL) {}
+  R600KernelParameters(const TargetData* TD) : FunctionPass(ID), TD(TD) {}
+  bool runOnFunction (Function &F);
+  void getAnalysisUsage(AnalysisUsage &AU) const;
+  const char *getPassName() const;
+  bool doInitialization(Module &M);
+  bool doFinalization(Module &M);
+};
+
+char R600KernelParameters::ID = 0;
+
+static RegisterPass<R600KernelParameters> X("kerparam",
+                            "OpenCL Kernel Parameter conversion", false, false);
+
+// Returns true if fun is named by the module's "opencl.kernels" named
+// metadata, i.e. it is an OpenCL kernel entry point.
+bool R600KernelParameters::isOpenCLKernel(const Function* fun)
+{
+  Module *mod = const_cast<Function*>(fun)->getParent();
+  NamedMDNode * md = mod->getOrInsertNamedMetadata("opencl.kernels");
+
+  if (!md or !md->getNumOperands())
+  {
+    return false;
+  }
+
+  for (int i = 0; i < int(md->getNumOperands()); i++)
+  {
+    // Each operand is expected to be a one-element MDNode whose sole
+    // operand names a kernel function.
+    if (!md->getOperand(i) or !md->getOperand(i)->getOperand(0))
+    {
+      continue;
+    }
+    
+    assert(md->getOperand(i)->getNumOperands() == 1);
+
+    if (md->getOperand(i)->getOperand(0)->getName() == fun->getName())
+    {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Return the resource ID of the last recorded parameter whose special
+// type matches TypeName, or -1 if no such parameter exists.
+int R600KernelParameters::getLastSpecialID(const std::string& TypeName)
+{
+  int result = -1;
+
+  for (std::vector<param>::iterator it = params.begin(), e = params.end();
+       it != e; ++it)
+  {
+    if (it->specialType == TypeName)
+    {
+      result = it->specialID;
+    }
+  }
+
+  return result;
+}
+
+// Total size in dwords of all parameters recorded so far.  The list is
+// packed, so the end offset of the last entry is the total size.
+int R600KernelParameters::getListSize()
+{
+  return params.empty() ? 0 : params.back().end();
+}
+
+// Decide whether a byval argument must be accessed through the indirect
+// parameter address space.  The analysis below is disabled (#if 0) and
+// every argument is treated as indirect for now.
+bool R600KernelParameters::isIndirect(Value* val, std::set<Value*>& visited)
+{
+  //XXX Direct parameters are not supported yet, so return true here.
+  return true;
+#if 0
+  if (isa<LoadInst>(val))
+  {
+    return false;
+  }
+
+  if (isa<IntegerType>(val->getType()))
+  {
+    assert(0 and "Internal error");
+    return false;
+  }
+
+  if (visited.count(val))
+  {
+    return false;
+  }
+
+  visited.insert(val);
+
+  if (isa<GetElementPtrInst>(val))
+  {
+    GetElementPtrInst* GEP = dyn_cast<GetElementPtrInst>(val);
+    GetElementPtrInst::op_iterator i = GEP->op_begin();
+
+    for (i++; i != GEP->op_end(); i++)
+    {
+      if (!isa<Constant>(*i))
+      {
+        return true;
+      }
+    }
+  }
+
+  for (Value::use_iterator i = val->use_begin(); i != val->use_end(); i++)
+  {
+    Value* v2 = dyn_cast<Value>(*i);
+
+    if (v2)
+    {
+      if (isIndirect(v2, visited))
+      {
+        return true;
+      }
+    }
+  }
+
+  return false;
+#endif
+}
+
+// Record a new kernel argument: its dword offset (packed immediately
+// after the previous argument), its size, and -- for byval pointers --
+// whether it requires indirect addressing.
+void R600KernelParameters::AddParam(Argument* arg)
+{
+  param p;
+
+  p.val = dyn_cast<Value>(arg);
+  p.offset_in_dw = getListSize();
+  p.size_in_dw = calculateArgumentSize(arg);
+
+  if (isa<PointerType>(arg->getType()) and arg->hasByValAttr())
+  {
+    std::set<Value*> visited;
+    p.indirect = isIndirect(p.val, visited);
+  }
+
+  params.push_back(p);
+}
+
+// Store size of an argument in dwords, rounded up.  For byval pointer
+// arguments the pointee type is measured, since the aggregate itself is
+// what lives in the parameter buffer.
+int R600KernelParameters::calculateArgumentSize(Argument* arg)
+{
+  Type* t = arg->getType();
+
+  if (arg->hasByValAttr() and dyn_cast<PointerType>(t))
+  {
+    t = dyn_cast<PointerType>(t)->getElementType();
+  }
+
+  int store_size_in_dw = (TD->getTypeStoreSize(t) + 3)/4;
+
+  assert(store_size_in_dw);
+
+  return store_size_in_dw;
+}
+
+
+// Analysis phase: populate the params list with one entry per kernel
+// argument, in declaration order.
+void R600KernelParameters::RunAna(Function* fun)
+{
+  assert(isOpenCLKernel(fun));
+
+  for (Function::arg_iterator i = fun->arg_begin(); i != fun->arg_end(); i++)
+  {
+    AddParam(i);
+  }
+
+}
+
+// Rewrite phase: replace each argument's uses with either a resource ID
+// (for image/sampler "special" types) or a pointer/load into the
+// parameter address space.
+void R600KernelParameters::Replace(Function* fun)
+{
+  for (std::vector<param>::iterator i = params.begin(); i != params.end(); i++)
+  {
+    Value *new_val;
+
+    if (isSpecialType(i->val->getType()))
+    {
+      new_val = handleSpecial(fun, *i);
+    }
+    else
+    {
+      new_val = ConstantRead(fun, *i);
+    }
+    // new_val may be NULL (unused argument, or unrecognized special
+    // type); in that case the argument is left alone.
+    if (new_val)
+    {
+      i->val->replaceAllUsesWith(new_val);
+    }
+  }
+}
+
+// Propagation phase: push the parameter address space through all users
+// of each byval argument's replacement pointer.
+void R600KernelParameters::Propagate(Function* fun)
+{
+  for (std::vector<param>::iterator i = params.begin(); i != params.end(); i++)
+  {
+    if (i->ptr_val)
+    {
+      Propagate(i->ptr_val, i->val->getName(), i->indirect);
+   }
+  }
+}
+
+// Recursively rewrite the users of v so that GEPs and loads reached from
+// a parameter pointer operate in the correct parameter address space
+// (indirect or direct).  Instructions in the wrong address space are
+// recreated and the originals erased.
+void R600KernelParameters::Propagate(Value* v, const Twine& name, bool indirect)
+{
+  LoadInst* load = dyn_cast<LoadInst>(v);
+  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(v);
+
+  unsigned addrspace;
+
+  if (indirect)
+  {
+    addrspace = AMDILAS::PARAM_I_ADDRESS;
+  }
+  else
+  {
+    addrspace = AMDILAS::PARAM_D_ADDRESS;
+  }
+
+  if (GEP and GEP->getType()->getAddressSpace() != addrspace)
+  {
+    Value* op = GEP->getPointerOperand();
+
+    // Cast the base pointer into the parameter address space if needed.
+    if (dyn_cast<PointerType>(op->getType())->getAddressSpace() != addrspace)
+    {
+      op = new BitCastInst(op, PointerType::get(dyn_cast<PointerType>(
+                           op->getType())->getElementType(), addrspace),
+                           name, dyn_cast<Instruction>(v));
+    }
+
+    // Rebuild the GEP on the casted base with the same indices.
+    std::vector<Value*> params(GEP->idx_begin(), GEP->idx_end());
+
+    GetElementPtrInst* GEP2 = GetElementPtrInst::Create(op, params, name,
+                                                      dyn_cast<Instruction>(v));
+    GEP2->setIsInBounds(GEP->isInBounds());
+    v = dyn_cast<Value>(GEP2);
+    GEP->replaceAllUsesWith(GEP2);
+    GEP->eraseFromParent();
+    load = NULL;
+  }
+
+  if (load)
+  {
+    ///normally at this point we have the right address space
+    if (load->getPointerAddressSpace() != addrspace)
+    {
+      // Recreate the load through a pointer in the parameter address
+      // space; the recursion stops at loads.
+      Value *orig_ptr = load->getPointerOperand();
+      PointerType *orig_ptr_type = dyn_cast<PointerType>(orig_ptr->getType());
+
+      Type* new_ptr_type = PointerType::get(orig_ptr_type->getElementType(),
+                                            addrspace);
+
+      Value* new_ptr = orig_ptr;
+
+      if (orig_ptr->getType() != new_ptr_type)
+      {
+        new_ptr = new BitCastInst(orig_ptr, new_ptr_type, "prop_cast", load);
+      }
+
+      Value* new_load = new LoadInst(new_ptr, name, load);
+      load->replaceAllUsesWith(new_load);
+      load->eraseFromParent();
+    }
+
+    return;
+  }
+
+  // Snapshot the use list first: the recursion mutates it.
+  std::vector<User*> users(v->use_begin(), v->use_end());
+
+  for (int i = 0; i < int(users.size()); i++)
+  {
+    Value* v2 = dyn_cast<Value>(users[i]);
+
+    if (v2)
+    {
+      Propagate(v2, name, indirect);
+    }
+  }
+}
+
+// Build the replacement for an ordinary (non-special) argument: a GEP
+// into the parameter address space at the argument's reserved offset.
+// Byval aggregates get a pointer (later fixed up by Propagate); scalar
+// arguments get an immediate load.  Returns NULL for unused arguments.
+Value* R600KernelParameters::ConstantRead(Function* fun, param& p)
+{
+  assert(fun->front().begin() != fun->front().end());
+
+  Instruction *first_inst = fun->front().begin();
+  IRBuilder <> builder (first_inst);
+  // NOTE(review): param::get_rat_offset() reserves the first 9 dwords
+  // for grid sizes; an earlier comment here said 3 -- confirm which is
+  // current.
+
+  if (!p.val->hasNUsesOrMore(1))
+  {
+    return NULL;
+  }
+  unsigned addrspace;
+
+  if (p.indirect)
+  {
+    addrspace = AMDILAS::PARAM_I_ADDRESS;
+  }
+  else
+  {
+    addrspace = AMDILAS::PARAM_D_ADDRESS;
+  }
+
+  // NOTE(review): p.val is assumed to always be an Argument; arg is
+  // dereferenced below without a null check -- confirm.
+  Argument *arg = dyn_cast<Argument>(p.val);
+  Type * argType = p.val->getType();
+  PointerType * argPtrType = dyn_cast<PointerType>(p.val->getType());
+
+  if (argPtrType and arg->hasByValAttr())
+  {
+    // Byval aggregate: produce a pointer at the argument's offset, cast
+    // to the pointee type in the parameter address space.
+    Value* param_addr_space_ptr = ConstantPointerNull::get(
+                                    PointerType::get(Type::getInt32Ty(*Context),
+                                    addrspace));
+    Value* param_ptr = GetElementPtrInst::Create(param_addr_space_ptr,
+                                    ConstantInt::get(Type::getInt32Ty(*Context),
+                                    p.get_rat_offset()), arg->getName(),
+                                    first_inst);
+    param_ptr = new BitCastInst(param_ptr,
+                                PointerType::get(argPtrType->getElementType(),
+                                                 addrspace),
+                                arg->getName(), first_inst);
+    p.ptr_val = param_ptr;
+    return param_ptr;
+  }
+  else
+  {
+    // Scalar argument: load it immediately at function entry.
+    Value* param_addr_space_ptr = ConstantPointerNull::get(PointerType::get(
+                                                        argType, addrspace));
+
+    Value* param_ptr = builder.CreateGEP(param_addr_space_ptr,
+             ConstantInt::get(Type::getInt32Ty(*Context), p.get_rat_offset()),
+                              arg->getName());
+
+    Value* param_value = builder.CreateLoad(param_ptr, arg->getName());
+
+    return param_value;
+  }
+}
+
+// Replace an image/sampler argument with a constant resource ID encoded
+// as a pointer.  Image IDs share one counter across image2d_t/image3d_t
+// (starting at 2, since 0 and 1 are driver-internal); sampler IDs use
+// their own counter starting at 0.  Returns NULL for unknown types.
+Value* R600KernelParameters::handleSpecial(Function* fun, param& p)
+{
+  std::string name = getSpecialTypeName(p.val->getType());
+  int ID;
+
+  assert(!name.empty());
+
+  if (name == "image2d_t" or name == "image3d_t")
+  {
+    int lastID = std::max(getLastSpecialID("image2d_t"),
+                     getLastSpecialID("image3d_t"));
+
+    if (lastID == -1)
+    {
+      ID = 2; ///ID0 and ID1 are used internally by the driver
+    }
+    else
+    {
+      ID = lastID + 1;
+    }
+  }
+  else if (name == "sampler_t")
+  {
+    int lastID = getLastSpecialID("sampler_t");
+
+    if (lastID == -1)
+    {
+      ID = 0;
+    }
+    else
+    {
+      ID = lastID + 1;
+    }
+  }
+  else
+  {
+    ///TODO: give some error message
+    return NULL;
+  }
+
+  p.specialType = name;
+  p.specialID = ID;
+
+  Instruction *first_inst = fun->front().begin();
+
+  return new IntToPtrInst(ConstantInt::get(Type::getInt32Ty(*Context),
+                                           p.specialID), p.val->getType(),
+                                           "resourceID", first_inst);
+}
+
+
+// A type is "special" iff it maps to a non-empty OpenCL builtin name
+// (see getSpecialTypeName).
+bool R600KernelParameters::isSpecialType(Type* t)
+{
+  return !getSpecialTypeName(t).empty();
+}
+
+// If t is a pointer to a struct named "struct.opencl_builtin_type_X",
+// return "X" (e.g. "image2d_t"); otherwise return "".
+std::string R600KernelParameters::getSpecialTypeName(Type* t)
+{
+  PointerType *pt = dyn_cast<PointerType>(t);
+  StructType *st = NULL;
+
+  if (pt)
+  {
+    st = dyn_cast<StructType>(pt->getElementType());
+  }
+
+  if (st)
+  {
+    std::string prefix = "struct.opencl_builtin_type_";
+
+    std::string name = st->getName().str();
+
+    if (name.substr(0, prefix.length()) == prefix)
+    {
+      return name.substr(prefix.length(), name.length());
+    }
+  }
+
+  return "";
+}
+
+
+// Pass entry point: analyze, replace, and propagate for OpenCL kernels
+// only.  Always returns false, although the pass does modify the IR
+// (NOTE(review): should probably return true when changes are made).
+bool R600KernelParameters::runOnFunction (Function &F)
+{
+  if (!isOpenCLKernel(&F))
+  {
+    return false;
+  }
+
+  RunAna(&F);
+  Replace(&F);
+  Propagate(&F);
+
+  return false;
+}
+
+void R600KernelParameters::getAnalysisUsage(AnalysisUsage &AU) const
+{
+  FunctionPass::getAnalysisUsage(AU);
+  AU.setPreservesAll();
+}
+
+const char *R600KernelParameters::getPassName() const
+{
+  return "OpenCL Kernel parameter conversion to memory";
+}
+
+// Cache the module and its context before any function is processed.
+bool R600KernelParameters::doInitialization(Module &M)
+{
+  Context = &M.getContext();
+  mod = &M;
+
+  return false;
+}
+
+bool R600KernelParameters::doFinalization(Module &M)
+{
+  return false;
+}
+
+} // End anonymous namespace
+
+// Factory for the kernel-parameter lowering pass.
+FunctionPass* llvm::createR600KernelParametersPass(const TargetData* TD)
+{
+  return new R600KernelParameters(TD);
+}
+
+
diff --git a/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
new file mode 100644
index 0000000..48443fb
--- /dev/null
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
@@ -0,0 +1,16 @@
+//===-- R600MachineFunctionInfo.cpp - R600 Machine Function Info-*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600MachineFunctionInfo.h"
+
+using namespace llvm;
+
+// Trivial constructor; ReservedRegs starts out empty and is filled in
+// when llvm.AMDGPU.reserve.reg intrinsics are lowered.
+R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
+  : MachineFunctionInfo()
+  { }
diff --git a/lib/Target/AMDGPU/R600MachineFunctionInfo.h b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
new file mode 100644
index 0000000..948e192
--- /dev/null
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
@@ -0,0 +1,33 @@
+//===-- R600MachineFunctionInfo.h - R600 Machine Function Info ----*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600MachineFunctionInfo is used for keeping track of which registers have
+// been reserved by the llvm.AMDGPU.reserve.reg intrinsic.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef R600MACHINEFUNCTIONINFO_H
+#define R600MACHINEFUNCTIONINFO_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include <vector>
+
+namespace llvm {
+
+class R600MachineFunctionInfo : public MachineFunctionInfo {
+
+public:
+  R600MachineFunctionInfo(const MachineFunction &MF);
+  // Registers reserved via the llvm.AMDGPU.reserve.reg intrinsic; they
+  // are excluded from allocation in R600RegisterInfo::getReservedRegs().
+  std::vector<unsigned> ReservedRegs;
+
+};
+
+} // End llvm namespace
+
+#endif //R600MACHINEFUNCTIONINFO_H
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.cpp b/lib/Target/AMDGPU/R600RegisterInfo.cpp
new file mode 100644
index 0000000..2f787a5
--- /dev/null
+++ b/lib/Target/AMDGPU/R600RegisterInfo.cpp
@@ -0,0 +1,88 @@
+//===-- R600RegisterInfo.cpp - R600 Register Information ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The file contains the R600 implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600RegisterInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "R600MachineFunctionInfo.h"
+
+using namespace llvm;
+
+// Keep references to the target machine and instruction info for use by
+// the queries below.
+R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm,
+    const TargetInstrInfo &tii)
+: AMDGPURegisterInfo(tm, tii),
+  TM(tm),
+  TII(tii)
+  { }
+
+// Registers the allocator must never use: the constant/special-value
+// registers, the whole constant register class, and any registers the
+// function reserved via llvm.AMDGPU.reserve.reg.
+BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const
+{
+  BitVector Reserved(getNumRegs());
+  const R600MachineFunctionInfo * MFI = MF.getInfo<R600MachineFunctionInfo>();
+
+  Reserved.set(AMDIL::ZERO);
+  Reserved.set(AMDIL::HALF);
+  Reserved.set(AMDIL::ONE);
+  Reserved.set(AMDIL::ONE_INT);
+  Reserved.set(AMDIL::NEG_HALF);
+  Reserved.set(AMDIL::NEG_ONE);
+  Reserved.set(AMDIL::PV_X);
+  Reserved.set(AMDIL::ALU_LITERAL_X);
+
+  for (TargetRegisterClass::iterator I = AMDIL::R600_CReg32RegClass.begin(),
+                        E = AMDIL::R600_CReg32RegClass.end(); I != E; ++I) {
+    Reserved.set(*I);
+  }
+
+  for (std::vector<unsigned>::const_iterator I = MFI->ReservedRegs.begin(),
+                                    E = MFI->ReservedRegs.end(); I != E; ++I) {
+    Reserved.set(*I);
+  }
+
+  return Reserved;
+}
+
+// Map generic AMDIL 32-bit register classes to the R600 equivalent;
+// anything else is passed through unchanged.
+const TargetRegisterClass *
+R600RegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
+{
+  switch (rc->getID()) {
+  case AMDIL::GPRF32RegClassID:
+  case AMDIL::GPRI32RegClassID:
+    return &AMDIL::R600_Reg32RegClass;
+  default: return rc;
+  }
+}
+
+// Hardware channel (X/Y/Z/W) encoding for a register.  The special
+// constant registers always report channel 0; everything else goes
+// through the tablegen-generated lookup.
+unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const
+{
+  switch(reg) {
+  case AMDIL::ZERO:
+  case AMDIL::ONE:
+  case AMDIL::ONE_INT:
+  case AMDIL::NEG_ONE:
+  case AMDIL::HALF:
+  case AMDIL::NEG_HALF:
+  case AMDIL::ALU_LITERAL_X:
+    return 0;
+  default: return getHWRegChanGen(reg);
+  }
+}
+
+// Register class for values the CFG structurizer creates; only i32 is
+// supported, and it (and the default case) maps to R600_TReg32.
+const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
+                                                                   MVT VT) const
+{
+  switch(VT.SimpleTy) {
+  default:
+  case MVT::i32: return &AMDIL::R600_TReg32RegClass;
+  }
+}
+#include "R600HwRegInfo.include"
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.h b/lib/Target/AMDGPU/R600RegisterInfo.h
new file mode 100644
index 0000000..0df667b
--- /dev/null
+++ b/lib/Target/AMDGPU/R600RegisterInfo.h
@@ -0,0 +1,54 @@
+//===-- R600RegisterInfo.h - R600 Register Info Interface ------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface definition for R600RegisterInfo
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef R600REGISTERINFO_H_
+#define R600REGISTERINFO_H_
+
+#include "AMDGPUTargetMachine.h"
+#include "AMDILRegisterInfo.h"
+
+namespace llvm {
+
+class R600TargetMachine;
+class TargetInstrInfo;
+
+// R600 implementation of the target register info; see the .cpp file
+// for the method definitions.
+struct R600RegisterInfo : public AMDGPURegisterInfo
+{
+  AMDGPUTargetMachine &TM;       // Owning target machine.
+  const TargetInstrInfo &TII;    // Instruction info for this target.
+
+  R600RegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+
+  virtual BitVector getReservedRegs(const MachineFunction &MF) const;
+
+  /// getISARegClass - rc is an AMDIL reg class.  This function returns the
+  /// R600 reg class that is equivalent to the given AMDIL reg class.
+  virtual const TargetRegisterClass * getISARegClass(
+    const TargetRegisterClass * rc) const;
+
+  /// getHWRegChan - get the HW encoding for a register's channel.
+  unsigned getHWRegChan(unsigned reg) const;
+
+  /// getCFGStructurizerRegClass - get the register class of the specified
+  /// type to use in the CFGStructurizer
+  virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+
+private:
+  /// getHWRegChanGen - Generated function returns a register's channel
+  /// encoding.
+  unsigned getHWRegChanGen(unsigned reg) const;
+};
+
+} // End namespace llvm
+
+#endif // R600REGISTERINFO_H_
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.td b/lib/Target/AMDGPU/R600RegisterInfo.td
new file mode 100644
index 0000000..79afe90
--- /dev/null
+++ b/lib/Target/AMDGPU/R600RegisterInfo.td
@@ -0,0 +1,5271 @@
+
+class R600Reg <string name, bits<16> encoding> : Register<name> {
+  let Namespace = "AMDIL";
+  let HWEncoding = encoding;
+}
+
+class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
+    RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
+  let HWEncoding = encoding;
+}
+
+def C0_X : R600Reg <"C0.X", 0>;
+def C0_Y : R600Reg <"C0.Y", 0>;
+def C0_Z : R600Reg <"C0.Z", 0>;
+def C0_W : R600Reg <"C0.W", 0>;
+def C1_X : R600Reg <"C1.X", 1>;
+def C1_Y : R600Reg <"C1.Y", 1>;
+def C1_Z : R600Reg <"C1.Z", 1>;
+def C1_W : R600Reg <"C1.W", 1>;
+def C2_X : R600Reg <"C2.X", 2>;
+def C2_Y : R600Reg <"C2.Y", 2>;
+def C2_Z : R600Reg <"C2.Z", 2>;
+def C2_W : R600Reg <"C2.W", 2>;
+def C3_X : R600Reg <"C3.X", 3>;
+def C3_Y : R600Reg <"C3.Y", 3>;
+def C3_Z : R600Reg <"C3.Z", 3>;
+def C3_W : R600Reg <"C3.W", 3>;
+def C4_X : R600Reg <"C4.X", 4>;
+def C4_Y : R600Reg <"C4.Y", 4>;
+def C4_Z : R600Reg <"C4.Z", 4>;
+def C4_W : R600Reg <"C4.W", 4>;
+def C5_X : R600Reg <"C5.X", 5>;
+def C5_Y : R600Reg <"C5.Y", 5>;
+def C5_Z : R600Reg <"C5.Z", 5>;
+def C5_W : R600Reg <"C5.W", 5>;
+def C6_X : R600Reg <"C6.X", 6>;
+def C6_Y : R600Reg <"C6.Y", 6>;
+def C6_Z : R600Reg <"C6.Z", 6>;
+def C6_W : R600Reg <"C6.W", 6>;
+def C7_X : R600Reg <"C7.X", 7>;
+def C7_Y : R600Reg <"C7.Y", 7>;
+def C7_Z : R600Reg <"C7.Z", 7>;
+def C7_W : R600Reg <"C7.W", 7>;
+def C8_X : R600Reg <"C8.X", 8>;
+def C8_Y : R600Reg <"C8.Y", 8>;
+def C8_Z : R600Reg <"C8.Z", 8>;
+def C8_W : R600Reg <"C8.W", 8>;
+def C9_X : R600Reg <"C9.X", 9>;
+def C9_Y : R600Reg <"C9.Y", 9>;
+def C9_Z : R600Reg <"C9.Z", 9>;
+def C9_W : R600Reg <"C9.W", 9>;
+def C10_X : R600Reg <"C10.X", 10>;
+def C10_Y : R600Reg <"C10.Y", 10>;
+def C10_Z : R600Reg <"C10.Z", 10>;
+def C10_W : R600Reg <"C10.W", 10>;
+def C11_X : R600Reg <"C11.X", 11>;
+def C11_Y : R600Reg <"C11.Y", 11>;
+def C11_Z : R600Reg <"C11.Z", 11>;
+def C11_W : R600Reg <"C11.W", 11>;
+def C12_X : R600Reg <"C12.X", 12>;
+def C12_Y : R600Reg <"C12.Y", 12>;
+def C12_Z : R600Reg <"C12.Z", 12>;
+def C12_W : R600Reg <"C12.W", 12>;
+def C13_X : R600Reg <"C13.X", 13>;
+def C13_Y : R600Reg <"C13.Y", 13>;
+def C13_Z : R600Reg <"C13.Z", 13>;
+def C13_W : R600Reg <"C13.W", 13>;
+def C14_X : R600Reg <"C14.X", 14>;
+def C14_Y : R600Reg <"C14.Y", 14>;
+def C14_Z : R600Reg <"C14.Z", 14>;
+def C14_W : R600Reg <"C14.W", 14>;
+def C15_X : R600Reg <"C15.X", 15>;
+def C15_Y : R600Reg <"C15.Y", 15>;
+def C15_Z : R600Reg <"C15.Z", 15>;
+def C15_W : R600Reg <"C15.W", 15>;
+def C16_X : R600Reg <"C16.X", 16>;
+def C16_Y : R600Reg <"C16.Y", 16>;
+def C16_Z : R600Reg <"C16.Z", 16>;
+def C16_W : R600Reg <"C16.W", 16>;
+def C17_X : R600Reg <"C17.X", 17>;
+def C17_Y : R600Reg <"C17.Y", 17>;
+def C17_Z : R600Reg <"C17.Z", 17>;
+def C17_W : R600Reg <"C17.W", 17>;
+def C18_X : R600Reg <"C18.X", 18>;
+def C18_Y : R600Reg <"C18.Y", 18>;
+def C18_Z : R600Reg <"C18.Z", 18>;
+def C18_W : R600Reg <"C18.W", 18>;
+def C19_X : R600Reg <"C19.X", 19>;
+def C19_Y : R600Reg <"C19.Y", 19>;
+def C19_Z : R600Reg <"C19.Z", 19>;
+def C19_W : R600Reg <"C19.W", 19>;
+def C20_X : R600Reg <"C20.X", 20>;
+def C20_Y : R600Reg <"C20.Y", 20>;
+def C20_Z : R600Reg <"C20.Z", 20>;
+def C20_W : R600Reg <"C20.W", 20>;
+def C21_X : R600Reg <"C21.X", 21>;
+def C21_Y : R600Reg <"C21.Y", 21>;
+def C21_Z : R600Reg <"C21.Z", 21>;
+def C21_W : R600Reg <"C21.W", 21>;
+def C22_X : R600Reg <"C22.X", 22>;
+def C22_Y : R600Reg <"C22.Y", 22>;
+def C22_Z : R600Reg <"C22.Z", 22>;
+def C22_W : R600Reg <"C22.W", 22>;
+def C23_X : R600Reg <"C23.X", 23>;
+def C23_Y : R600Reg <"C23.Y", 23>;
+def C23_Z : R600Reg <"C23.Z", 23>;
+def C23_W : R600Reg <"C23.W", 23>;
+def C24_X : R600Reg <"C24.X", 24>;
+def C24_Y : R600Reg <"C24.Y", 24>;
+def C24_Z : R600Reg <"C24.Z", 24>;
+def C24_W : R600Reg <"C24.W", 24>;
+def C25_X : R600Reg <"C25.X", 25>;
+def C25_Y : R600Reg <"C25.Y", 25>;
+def C25_Z : R600Reg <"C25.Z", 25>;
+def C25_W : R600Reg <"C25.W", 25>;
+def C26_X : R600Reg <"C26.X", 26>;
+def C26_Y : R600Reg <"C26.Y", 26>;
+def C26_Z : R600Reg <"C26.Z", 26>;
+def C26_W : R600Reg <"C26.W", 26>;
+def C27_X : R600Reg <"C27.X", 27>;
+def C27_Y : R600Reg <"C27.Y", 27>;
+def C27_Z : R600Reg <"C27.Z", 27>;
+def C27_W : R600Reg <"C27.W", 27>;
+def C28_X : R600Reg <"C28.X", 28>;
+def C28_Y : R600Reg <"C28.Y", 28>;
+def C28_Z : R600Reg <"C28.Z", 28>;
+def C28_W : R600Reg <"C28.W", 28>;
+def C29_X : R600Reg <"C29.X", 29>;
+def C29_Y : R600Reg <"C29.Y", 29>;
+def C29_Z : R600Reg <"C29.Z", 29>;
+def C29_W : R600Reg <"C29.W", 29>;
+def C30_X : R600Reg <"C30.X", 30>;
+def C30_Y : R600Reg <"C30.Y", 30>;
+def C30_Z : R600Reg <"C30.Z", 30>;
+def C30_W : R600Reg <"C30.W", 30>;
+def C31_X : R600Reg <"C31.X", 31>;
+def C31_Y : R600Reg <"C31.Y", 31>;
+def C31_Z : R600Reg <"C31.Z", 31>;
+def C31_W : R600Reg <"C31.W", 31>;
+def C32_X : R600Reg <"C32.X", 32>;
+def C32_Y : R600Reg <"C32.Y", 32>;
+def C32_Z : R600Reg <"C32.Z", 32>;
+def C32_W : R600Reg <"C32.W", 32>;
+def C33_X : R600Reg <"C33.X", 33>;
+def C33_Y : R600Reg <"C33.Y", 33>;
+def C33_Z : R600Reg <"C33.Z", 33>;
+def C33_W : R600Reg <"C33.W", 33>;
+def C34_X : R600Reg <"C34.X", 34>;
+def C34_Y : R600Reg <"C34.Y", 34>;
+def C34_Z : R600Reg <"C34.Z", 34>;
+def C34_W : R600Reg <"C34.W", 34>;
+def C35_X : R600Reg <"C35.X", 35>;
+def C35_Y : R600Reg <"C35.Y", 35>;
+def C35_Z : R600Reg <"C35.Z", 35>;
+def C35_W : R600Reg <"C35.W", 35>;
+def C36_X : R600Reg <"C36.X", 36>;
+def C36_Y : R600Reg <"C36.Y", 36>;
+def C36_Z : R600Reg <"C36.Z", 36>;
+def C36_W : R600Reg <"C36.W", 36>;
+def C37_X : R600Reg <"C37.X", 37>;
+def C37_Y : R600Reg <"C37.Y", 37>;
+def C37_Z : R600Reg <"C37.Z", 37>;
+def C37_W : R600Reg <"C37.W", 37>;
+def C38_X : R600Reg <"C38.X", 38>;
+def C38_Y : R600Reg <"C38.Y", 38>;
+def C38_Z : R600Reg <"C38.Z", 38>;
+def C38_W : R600Reg <"C38.W", 38>;
+def C39_X : R600Reg <"C39.X", 39>;
+def C39_Y : R600Reg <"C39.Y", 39>;
+def C39_Z : R600Reg <"C39.Z", 39>;
+def C39_W : R600Reg <"C39.W", 39>;
+def C40_X : R600Reg <"C40.X", 40>;
+def C40_Y : R600Reg <"C40.Y", 40>;
+def C40_Z : R600Reg <"C40.Z", 40>;
+def C40_W : R600Reg <"C40.W", 40>;
+def C41_X : R600Reg <"C41.X", 41>;
+def C41_Y : R600Reg <"C41.Y", 41>;
+def C41_Z : R600Reg <"C41.Z", 41>;
+def C41_W : R600Reg <"C41.W", 41>;
+def C42_X : R600Reg <"C42.X", 42>;
+def C42_Y : R600Reg <"C42.Y", 42>;
+def C42_Z : R600Reg <"C42.Z", 42>;
+def C42_W : R600Reg <"C42.W", 42>;
+def C43_X : R600Reg <"C43.X", 43>;
+def C43_Y : R600Reg <"C43.Y", 43>;
+def C43_Z : R600Reg <"C43.Z", 43>;
+def C43_W : R600Reg <"C43.W", 43>;
+def C44_X : R600Reg <"C44.X", 44>;
+def C44_Y : R600Reg <"C44.Y", 44>;
+def C44_Z : R600Reg <"C44.Z", 44>;
+def C44_W : R600Reg <"C44.W", 44>;
+def C45_X : R600Reg <"C45.X", 45>;
+def C45_Y : R600Reg <"C45.Y", 45>;
+def C45_Z : R600Reg <"C45.Z", 45>;
+def C45_W : R600Reg <"C45.W", 45>;
+def C46_X : R600Reg <"C46.X", 46>;
+def C46_Y : R600Reg <"C46.Y", 46>;
+def C46_Z : R600Reg <"C46.Z", 46>;
+def C46_W : R600Reg <"C46.W", 46>;
+def C47_X : R600Reg <"C47.X", 47>;
+def C47_Y : R600Reg <"C47.Y", 47>;
+def C47_Z : R600Reg <"C47.Z", 47>;
+def C47_W : R600Reg <"C47.W", 47>;
+def C48_X : R600Reg <"C48.X", 48>;
+def C48_Y : R600Reg <"C48.Y", 48>;
+def C48_Z : R600Reg <"C48.Z", 48>;
+def C48_W : R600Reg <"C48.W", 48>;
+def C49_X : R600Reg <"C49.X", 49>;
+def C49_Y : R600Reg <"C49.Y", 49>;
+def C49_Z : R600Reg <"C49.Z", 49>;
+def C49_W : R600Reg <"C49.W", 49>;
+def C50_X : R600Reg <"C50.X", 50>;
+def C50_Y : R600Reg <"C50.Y", 50>;
+def C50_Z : R600Reg <"C50.Z", 50>;
+def C50_W : R600Reg <"C50.W", 50>;
+def C51_X : R600Reg <"C51.X", 51>;
+def C51_Y : R600Reg <"C51.Y", 51>;
+def C51_Z : R600Reg <"C51.Z", 51>;
+def C51_W : R600Reg <"C51.W", 51>;
+def C52_X : R600Reg <"C52.X", 52>;
+def C52_Y : R600Reg <"C52.Y", 52>;
+def C52_Z : R600Reg <"C52.Z", 52>;
+def C52_W : R600Reg <"C52.W", 52>;
+def C53_X : R600Reg <"C53.X", 53>;
+def C53_Y : R600Reg <"C53.Y", 53>;
+def C53_Z : R600Reg <"C53.Z", 53>;
+def C53_W : R600Reg <"C53.W", 53>;
+def C54_X : R600Reg <"C54.X", 54>;
+def C54_Y : R600Reg <"C54.Y", 54>;
+def C54_Z : R600Reg <"C54.Z", 54>;
+def C54_W : R600Reg <"C54.W", 54>;
+def C55_X : R600Reg <"C55.X", 55>;
+def C55_Y : R600Reg <"C55.Y", 55>;
+def C55_Z : R600Reg <"C55.Z", 55>;
+def C55_W : R600Reg <"C55.W", 55>;
+def C56_X : R600Reg <"C56.X", 56>;
+def C56_Y : R600Reg <"C56.Y", 56>;
+def C56_Z : R600Reg <"C56.Z", 56>;
+def C56_W : R600Reg <"C56.W", 56>;
+def C57_X : R600Reg <"C57.X", 57>;
+def C57_Y : R600Reg <"C57.Y", 57>;
+def C57_Z : R600Reg <"C57.Z", 57>;
+def C57_W : R600Reg <"C57.W", 57>;
+def C58_X : R600Reg <"C58.X", 58>;
+def C58_Y : R600Reg <"C58.Y", 58>;
+def C58_Z : R600Reg <"C58.Z", 58>;
+def C58_W : R600Reg <"C58.W", 58>;
+def C59_X : R600Reg <"C59.X", 59>;
+def C59_Y : R600Reg <"C59.Y", 59>;
+def C59_Z : R600Reg <"C59.Z", 59>;
+def C59_W : R600Reg <"C59.W", 59>;
+def C60_X : R600Reg <"C60.X", 60>;
+def C60_Y : R600Reg <"C60.Y", 60>;
+def C60_Z : R600Reg <"C60.Z", 60>;
+def C60_W : R600Reg <"C60.W", 60>;
+def C61_X : R600Reg <"C61.X", 61>;
+def C61_Y : R600Reg <"C61.Y", 61>;
+def C61_Z : R600Reg <"C61.Z", 61>;
+def C61_W : R600Reg <"C61.W", 61>;
+def C62_X : R600Reg <"C62.X", 62>;
+def C62_Y : R600Reg <"C62.Y", 62>;
+def C62_Z : R600Reg <"C62.Z", 62>;
+def C62_W : R600Reg <"C62.W", 62>;
+def C63_X : R600Reg <"C63.X", 63>;
+def C63_Y : R600Reg <"C63.Y", 63>;
+def C63_Z : R600Reg <"C63.Z", 63>;
+def C63_W : R600Reg <"C63.W", 63>;
+def C64_X : R600Reg <"C64.X", 64>;
+def C64_Y : R600Reg <"C64.Y", 64>;
+def C64_Z : R600Reg <"C64.Z", 64>;
+def C64_W : R600Reg <"C64.W", 64>;
+def C65_X : R600Reg <"C65.X", 65>;
+def C65_Y : R600Reg <"C65.Y", 65>;
+def C65_Z : R600Reg <"C65.Z", 65>;
+def C65_W : R600Reg <"C65.W", 65>;
+def C66_X : R600Reg <"C66.X", 66>;
+def C66_Y : R600Reg <"C66.Y", 66>;
+def C66_Z : R600Reg <"C66.Z", 66>;
+def C66_W : R600Reg <"C66.W", 66>;
+def C67_X : R600Reg <"C67.X", 67>;
+def C67_Y : R600Reg <"C67.Y", 67>;
+def C67_Z : R600Reg <"C67.Z", 67>;
+def C67_W : R600Reg <"C67.W", 67>;
+def C68_X : R600Reg <"C68.X", 68>;
+def C68_Y : R600Reg <"C68.Y", 68>;
+def C68_Z : R600Reg <"C68.Z", 68>;
+def C68_W : R600Reg <"C68.W", 68>;
+def C69_X : R600Reg <"C69.X", 69>;
+def C69_Y : R600Reg <"C69.Y", 69>;
+def C69_Z : R600Reg <"C69.Z", 69>;
+def C69_W : R600Reg <"C69.W", 69>;
+def C70_X : R600Reg <"C70.X", 70>;
+def C70_Y : R600Reg <"C70.Y", 70>;
+def C70_Z : R600Reg <"C70.Z", 70>;
+def C70_W : R600Reg <"C70.W", 70>;
+def C71_X : R600Reg <"C71.X", 71>;
+def C71_Y : R600Reg <"C71.Y", 71>;
+def C71_Z : R600Reg <"C71.Z", 71>;
+def C71_W : R600Reg <"C71.W", 71>;
+def C72_X : R600Reg <"C72.X", 72>;
+def C72_Y : R600Reg <"C72.Y", 72>;
+def C72_Z : R600Reg <"C72.Z", 72>;
+def C72_W : R600Reg <"C72.W", 72>;
+def C73_X : R600Reg <"C73.X", 73>;
+def C73_Y : R600Reg <"C73.Y", 73>;
+def C73_Z : R600Reg <"C73.Z", 73>;
+def C73_W : R600Reg <"C73.W", 73>;
+def C74_X : R600Reg <"C74.X", 74>;
+def C74_Y : R600Reg <"C74.Y", 74>;
+def C74_Z : R600Reg <"C74.Z", 74>;
+def C74_W : R600Reg <"C74.W", 74>;
+def C75_X : R600Reg <"C75.X", 75>;
+def C75_Y : R600Reg <"C75.Y", 75>;
+def C75_Z : R600Reg <"C75.Z", 75>;
+def C75_W : R600Reg <"C75.W", 75>;
+def C76_X : R600Reg <"C76.X", 76>;
+def C76_Y : R600Reg <"C76.Y", 76>;
+def C76_Z : R600Reg <"C76.Z", 76>;
+def C76_W : R600Reg <"C76.W", 76>;
+def C77_X : R600Reg <"C77.X", 77>;
+def C77_Y : R600Reg <"C77.Y", 77>;
+def C77_Z : R600Reg <"C77.Z", 77>;
+def C77_W : R600Reg <"C77.W", 77>;
+def C78_X : R600Reg <"C78.X", 78>;
+def C78_Y : R600Reg <"C78.Y", 78>;
+def C78_Z : R600Reg <"C78.Z", 78>;
+def C78_W : R600Reg <"C78.W", 78>;
+def C79_X : R600Reg <"C79.X", 79>;
+def C79_Y : R600Reg <"C79.Y", 79>;
+def C79_Z : R600Reg <"C79.Z", 79>;
+def C79_W : R600Reg <"C79.W", 79>;
+def C80_X : R600Reg <"C80.X", 80>;
+def C80_Y : R600Reg <"C80.Y", 80>;
+def C80_Z : R600Reg <"C80.Z", 80>;
+def C80_W : R600Reg <"C80.W", 80>;
+def C81_X : R600Reg <"C81.X", 81>;
+def C81_Y : R600Reg <"C81.Y", 81>;
+def C81_Z : R600Reg <"C81.Z", 81>;
+def C81_W : R600Reg <"C81.W", 81>;
+def C82_X : R600Reg <"C82.X", 82>;
+def C82_Y : R600Reg <"C82.Y", 82>;
+def C82_Z : R600Reg <"C82.Z", 82>;
+def C82_W : R600Reg <"C82.W", 82>;
+def C83_X : R600Reg <"C83.X", 83>;
+def C83_Y : R600Reg <"C83.Y", 83>;
+def C83_Z : R600Reg <"C83.Z", 83>;
+def C83_W : R600Reg <"C83.W", 83>;
+def C84_X : R600Reg <"C84.X", 84>;
+def C84_Y : R600Reg <"C84.Y", 84>;
+def C84_Z : R600Reg <"C84.Z", 84>;
+def C84_W : R600Reg <"C84.W", 84>;
+def C85_X : R600Reg <"C85.X", 85>;
+def C85_Y : R600Reg <"C85.Y", 85>;
+def C85_Z : R600Reg <"C85.Z", 85>;
+def C85_W : R600Reg <"C85.W", 85>;
+def C86_X : R600Reg <"C86.X", 86>;
+def C86_Y : R600Reg <"C86.Y", 86>;
+def C86_Z : R600Reg <"C86.Z", 86>;
+def C86_W : R600Reg <"C86.W", 86>;
+def C87_X : R600Reg <"C87.X", 87>;
+def C87_Y : R600Reg <"C87.Y", 87>;
+def C87_Z : R600Reg <"C87.Z", 87>;
+def C87_W : R600Reg <"C87.W", 87>;
+def C88_X : R600Reg <"C88.X", 88>;
+def C88_Y : R600Reg <"C88.Y", 88>;
+def C88_Z : R600Reg <"C88.Z", 88>;
+def C88_W : R600Reg <"C88.W", 88>;
+def C89_X : R600Reg <"C89.X", 89>;
+def C89_Y : R600Reg <"C89.Y", 89>;
+def C89_Z : R600Reg <"C89.Z", 89>;
+def C89_W : R600Reg <"C89.W", 89>;
+def C90_X : R600Reg <"C90.X", 90>;
+def C90_Y : R600Reg <"C90.Y", 90>;
+def C90_Z : R600Reg <"C90.Z", 90>;
+def C90_W : R600Reg <"C90.W", 90>;
+def C91_X : R600Reg <"C91.X", 91>;
+def C91_Y : R600Reg <"C91.Y", 91>;
+def C91_Z : R600Reg <"C91.Z", 91>;
+def C91_W : R600Reg <"C91.W", 91>;
+def C92_X : R600Reg <"C92.X", 92>;
+def C92_Y : R600Reg <"C92.Y", 92>;
+def C92_Z : R600Reg <"C92.Z", 92>;
+def C92_W : R600Reg <"C92.W", 92>;
+def C93_X : R600Reg <"C93.X", 93>;
+def C93_Y : R600Reg <"C93.Y", 93>;
+def C93_Z : R600Reg <"C93.Z", 93>;
+def C93_W : R600Reg <"C93.W", 93>;
+def C94_X : R600Reg <"C94.X", 94>;
+def C94_Y : R600Reg <"C94.Y", 94>;
+def C94_Z : R600Reg <"C94.Z", 94>;
+def C94_W : R600Reg <"C94.W", 94>;
+def C95_X : R600Reg <"C95.X", 95>;
+def C95_Y : R600Reg <"C95.Y", 95>;
+def C95_Z : R600Reg <"C95.Z", 95>;
+def C95_W : R600Reg <"C95.W", 95>;
+def C96_X : R600Reg <"C96.X", 96>;
+def C96_Y : R600Reg <"C96.Y", 96>;
+def C96_Z : R600Reg <"C96.Z", 96>;
+def C96_W : R600Reg <"C96.W", 96>;
+def C97_X : R600Reg <"C97.X", 97>;
+def C97_Y : R600Reg <"C97.Y", 97>;
+def C97_Z : R600Reg <"C97.Z", 97>;
+def C97_W : R600Reg <"C97.W", 97>;
+def C98_X : R600Reg <"C98.X", 98>;
+def C98_Y : R600Reg <"C98.Y", 98>;
+def C98_Z : R600Reg <"C98.Z", 98>;
+def C98_W : R600Reg <"C98.W", 98>;
+def C99_X : R600Reg <"C99.X", 99>;
+def C99_Y : R600Reg <"C99.Y", 99>;
+def C99_Z : R600Reg <"C99.Z", 99>;
+def C99_W : R600Reg <"C99.W", 99>;
+def C100_X : R600Reg <"C100.X", 100>;
+def C100_Y : R600Reg <"C100.Y", 100>;
+def C100_Z : R600Reg <"C100.Z", 100>;
+def C100_W : R600Reg <"C100.W", 100>;
+def C101_X : R600Reg <"C101.X", 101>;
+def C101_Y : R600Reg <"C101.Y", 101>;
+def C101_Z : R600Reg <"C101.Z", 101>;
+def C101_W : R600Reg <"C101.W", 101>;
+def C102_X : R600Reg <"C102.X", 102>;
+def C102_Y : R600Reg <"C102.Y", 102>;
+def C102_Z : R600Reg <"C102.Z", 102>;
+def C102_W : R600Reg <"C102.W", 102>;
+def C103_X : R600Reg <"C103.X", 103>;
+def C103_Y : R600Reg <"C103.Y", 103>;
+def C103_Z : R600Reg <"C103.Z", 103>;
+def C103_W : R600Reg <"C103.W", 103>;
+def C104_X : R600Reg <"C104.X", 104>;
+def C104_Y : R600Reg <"C104.Y", 104>;
+def C104_Z : R600Reg <"C104.Z", 104>;
+def C104_W : R600Reg <"C104.W", 104>;
+def C105_X : R600Reg <"C105.X", 105>;
+def C105_Y : R600Reg <"C105.Y", 105>;
+def C105_Z : R600Reg <"C105.Z", 105>;
+def C105_W : R600Reg <"C105.W", 105>;
+def C106_X : R600Reg <"C106.X", 106>;
+def C106_Y : R600Reg <"C106.Y", 106>;
+def C106_Z : R600Reg <"C106.Z", 106>;
+def C106_W : R600Reg <"C106.W", 106>;
+def C107_X : R600Reg <"C107.X", 107>;
+def C107_Y : R600Reg <"C107.Y", 107>;
+def C107_Z : R600Reg <"C107.Z", 107>;
+def C107_W : R600Reg <"C107.W", 107>;
+def C108_X : R600Reg <"C108.X", 108>;
+def C108_Y : R600Reg <"C108.Y", 108>;
+def C108_Z : R600Reg <"C108.Z", 108>;
+def C108_W : R600Reg <"C108.W", 108>;
+def C109_X : R600Reg <"C109.X", 109>;
+def C109_Y : R600Reg <"C109.Y", 109>;
+def C109_Z : R600Reg <"C109.Z", 109>;
+def C109_W : R600Reg <"C109.W", 109>;
+def C110_X : R600Reg <"C110.X", 110>;
+def C110_Y : R600Reg <"C110.Y", 110>;
+def C110_Z : R600Reg <"C110.Z", 110>;
+def C110_W : R600Reg <"C110.W", 110>;
+def C111_X : R600Reg <"C111.X", 111>;
+def C111_Y : R600Reg <"C111.Y", 111>;
+def C111_Z : R600Reg <"C111.Z", 111>;
+def C111_W : R600Reg <"C111.W", 111>;
+def C112_X : R600Reg <"C112.X", 112>;
+def C112_Y : R600Reg <"C112.Y", 112>;
+def C112_Z : R600Reg <"C112.Z", 112>;
+def C112_W : R600Reg <"C112.W", 112>;
+def C113_X : R600Reg <"C113.X", 113>;
+def C113_Y : R600Reg <"C113.Y", 113>;
+def C113_Z : R600Reg <"C113.Z", 113>;
+def C113_W : R600Reg <"C113.W", 113>;
+def C114_X : R600Reg <"C114.X", 114>;
+def C114_Y : R600Reg <"C114.Y", 114>;
+def C114_Z : R600Reg <"C114.Z", 114>;
+def C114_W : R600Reg <"C114.W", 114>;
+def C115_X : R600Reg <"C115.X", 115>;
+def C115_Y : R600Reg <"C115.Y", 115>;
+def C115_Z : R600Reg <"C115.Z", 115>;
+def C115_W : R600Reg <"C115.W", 115>;
+def C116_X : R600Reg <"C116.X", 116>;
+def C116_Y : R600Reg <"C116.Y", 116>;
+def C116_Z : R600Reg <"C116.Z", 116>;
+def C116_W : R600Reg <"C116.W", 116>;
+def C117_X : R600Reg <"C117.X", 117>;
+def C117_Y : R600Reg <"C117.Y", 117>;
+def C117_Z : R600Reg <"C117.Z", 117>;
+def C117_W : R600Reg <"C117.W", 117>;
+def C118_X : R600Reg <"C118.X", 118>;
+def C118_Y : R600Reg <"C118.Y", 118>;
+def C118_Z : R600Reg <"C118.Z", 118>;
+def C118_W : R600Reg <"C118.W", 118>;
+def C119_X : R600Reg <"C119.X", 119>;
+def C119_Y : R600Reg <"C119.Y", 119>;
+def C119_Z : R600Reg <"C119.Z", 119>;
+def C119_W : R600Reg <"C119.W", 119>;
+def C120_X : R600Reg <"C120.X", 120>;
+def C120_Y : R600Reg <"C120.Y", 120>;
+def C120_Z : R600Reg <"C120.Z", 120>;
+def C120_W : R600Reg <"C120.W", 120>;
+def C121_X : R600Reg <"C121.X", 121>;
+def C121_Y : R600Reg <"C121.Y", 121>;
+def C121_Z : R600Reg <"C121.Z", 121>;
+def C121_W : R600Reg <"C121.W", 121>;
+def C122_X : R600Reg <"C122.X", 122>;
+def C122_Y : R600Reg <"C122.Y", 122>;
+def C122_Z : R600Reg <"C122.Z", 122>;
+def C122_W : R600Reg <"C122.W", 122>;
+def C123_X : R600Reg <"C123.X", 123>;
+def C123_Y : R600Reg <"C123.Y", 123>;
+def C123_Z : R600Reg <"C123.Z", 123>;
+def C123_W : R600Reg <"C123.W", 123>;
+def C124_X : R600Reg <"C124.X", 124>;
+def C124_Y : R600Reg <"C124.Y", 124>;
+def C124_Z : R600Reg <"C124.Z", 124>;
+def C124_W : R600Reg <"C124.W", 124>;
+def C125_X : R600Reg <"C125.X", 125>;
+def C125_Y : R600Reg <"C125.Y", 125>;
+def C125_Z : R600Reg <"C125.Z", 125>;
+def C125_W : R600Reg <"C125.W", 125>;
+def C126_X : R600Reg <"C126.X", 126>;
+def C126_Y : R600Reg <"C126.Y", 126>;
+def C126_Z : R600Reg <"C126.Z", 126>;
+def C126_W : R600Reg <"C126.W", 126>;
+def C127_X : R600Reg <"C127.X", 127>;
+def C127_Y : R600Reg <"C127.Y", 127>;
+def C127_Z : R600Reg <"C127.Z", 127>;
+def C127_W : R600Reg <"C127.W", 127>;
+def C128_X : R600Reg <"C128.X", 128>;
+def C128_Y : R600Reg <"C128.Y", 128>;
+def C128_Z : R600Reg <"C128.Z", 128>;
+def C128_W : R600Reg <"C128.W", 128>;
+def C129_X : R600Reg <"C129.X", 129>;
+def C129_Y : R600Reg <"C129.Y", 129>;
+def C129_Z : R600Reg <"C129.Z", 129>;
+def C129_W : R600Reg <"C129.W", 129>;
+def C130_X : R600Reg <"C130.X", 130>;
+def C130_Y : R600Reg <"C130.Y", 130>;
+def C130_Z : R600Reg <"C130.Z", 130>;
+def C130_W : R600Reg <"C130.W", 130>;
+def C131_X : R600Reg <"C131.X", 131>;
+def C131_Y : R600Reg <"C131.Y", 131>;
+def C131_Z : R600Reg <"C131.Z", 131>;
+def C131_W : R600Reg <"C131.W", 131>;
+def C132_X : R600Reg <"C132.X", 132>;
+def C132_Y : R600Reg <"C132.Y", 132>;
+def C132_Z : R600Reg <"C132.Z", 132>;
+def C132_W : R600Reg <"C132.W", 132>;
+def C133_X : R600Reg <"C133.X", 133>;
+def C133_Y : R600Reg <"C133.Y", 133>;
+def C133_Z : R600Reg <"C133.Z", 133>;
+def C133_W : R600Reg <"C133.W", 133>;
+def C134_X : R600Reg <"C134.X", 134>;
+def C134_Y : R600Reg <"C134.Y", 134>;
+def C134_Z : R600Reg <"C134.Z", 134>;
+def C134_W : R600Reg <"C134.W", 134>;
+def C135_X : R600Reg <"C135.X", 135>;
+def C135_Y : R600Reg <"C135.Y", 135>;
+def C135_Z : R600Reg <"C135.Z", 135>;
+def C135_W : R600Reg <"C135.W", 135>;
+def C136_X : R600Reg <"C136.X", 136>;
+def C136_Y : R600Reg <"C136.Y", 136>;
+def C136_Z : R600Reg <"C136.Z", 136>;
+def C136_W : R600Reg <"C136.W", 136>;
+def C137_X : R600Reg <"C137.X", 137>;
+def C137_Y : R600Reg <"C137.Y", 137>;
+def C137_Z : R600Reg <"C137.Z", 137>;
+def C137_W : R600Reg <"C137.W", 137>;
+def C138_X : R600Reg <"C138.X", 138>;
+def C138_Y : R600Reg <"C138.Y", 138>;
+def C138_Z : R600Reg <"C138.Z", 138>;
+def C138_W : R600Reg <"C138.W", 138>;
+def C139_X : R600Reg <"C139.X", 139>;
+def C139_Y : R600Reg <"C139.Y", 139>;
+def C139_Z : R600Reg <"C139.Z", 139>;
+def C139_W : R600Reg <"C139.W", 139>;
+def C140_X : R600Reg <"C140.X", 140>;
+def C140_Y : R600Reg <"C140.Y", 140>;
+def C140_Z : R600Reg <"C140.Z", 140>;
+def C140_W : R600Reg <"C140.W", 140>;
+def C141_X : R600Reg <"C141.X", 141>;
+def C141_Y : R600Reg <"C141.Y", 141>;
+def C141_Z : R600Reg <"C141.Z", 141>;
+def C141_W : R600Reg <"C141.W", 141>;
+def C142_X : R600Reg <"C142.X", 142>;
+def C142_Y : R600Reg <"C142.Y", 142>;
+def C142_Z : R600Reg <"C142.Z", 142>;
+def C142_W : R600Reg <"C142.W", 142>;
+def C143_X : R600Reg <"C143.X", 143>;
+def C143_Y : R600Reg <"C143.Y", 143>;
+def C143_Z : R600Reg <"C143.Z", 143>;
+def C143_W : R600Reg <"C143.W", 143>;
+def C144_X : R600Reg <"C144.X", 144>;
+def C144_Y : R600Reg <"C144.Y", 144>;
+def C144_Z : R600Reg <"C144.Z", 144>;
+def C144_W : R600Reg <"C144.W", 144>;
+def C145_X : R600Reg <"C145.X", 145>;
+def C145_Y : R600Reg <"C145.Y", 145>;
+def C145_Z : R600Reg <"C145.Z", 145>;
+def C145_W : R600Reg <"C145.W", 145>;
+def C146_X : R600Reg <"C146.X", 146>;
+def C146_Y : R600Reg <"C146.Y", 146>;
+def C146_Z : R600Reg <"C146.Z", 146>;
+def C146_W : R600Reg <"C146.W", 146>;
+def C147_X : R600Reg <"C147.X", 147>;
+def C147_Y : R600Reg <"C147.Y", 147>;
+def C147_Z : R600Reg <"C147.Z", 147>;
+def C147_W : R600Reg <"C147.W", 147>;
+def C148_X : R600Reg <"C148.X", 148>;
+def C148_Y : R600Reg <"C148.Y", 148>;
+def C148_Z : R600Reg <"C148.Z", 148>;
+def C148_W : R600Reg <"C148.W", 148>;
+def C149_X : R600Reg <"C149.X", 149>;
+def C149_Y : R600Reg <"C149.Y", 149>;
+def C149_Z : R600Reg <"C149.Z", 149>;
+def C149_W : R600Reg <"C149.W", 149>;
+def C150_X : R600Reg <"C150.X", 150>;
+def C150_Y : R600Reg <"C150.Y", 150>;
+def C150_Z : R600Reg <"C150.Z", 150>;
+def C150_W : R600Reg <"C150.W", 150>;
+def C151_X : R600Reg <"C151.X", 151>;
+def C151_Y : R600Reg <"C151.Y", 151>;
+def C151_Z : R600Reg <"C151.Z", 151>;
+def C151_W : R600Reg <"C151.W", 151>;
+def C152_X : R600Reg <"C152.X", 152>;
+def C152_Y : R600Reg <"C152.Y", 152>;
+def C152_Z : R600Reg <"C152.Z", 152>;
+def C152_W : R600Reg <"C152.W", 152>;
+def C153_X : R600Reg <"C153.X", 153>;
+def C153_Y : R600Reg <"C153.Y", 153>;
+def C153_Z : R600Reg <"C153.Z", 153>;
+def C153_W : R600Reg <"C153.W", 153>;
+def C154_X : R600Reg <"C154.X", 154>;
+def C154_Y : R600Reg <"C154.Y", 154>;
+def C154_Z : R600Reg <"C154.Z", 154>;
+def C154_W : R600Reg <"C154.W", 154>;
+def C155_X : R600Reg <"C155.X", 155>;
+def C155_Y : R600Reg <"C155.Y", 155>;
+def C155_Z : R600Reg <"C155.Z", 155>;
+def C155_W : R600Reg <"C155.W", 155>;
+def C156_X : R600Reg <"C156.X", 156>;
+def C156_Y : R600Reg <"C156.Y", 156>;
+def C156_Z : R600Reg <"C156.Z", 156>;
+def C156_W : R600Reg <"C156.W", 156>;
+def C157_X : R600Reg <"C157.X", 157>;
+def C157_Y : R600Reg <"C157.Y", 157>;
+def C157_Z : R600Reg <"C157.Z", 157>;
+def C157_W : R600Reg <"C157.W", 157>;
+def C158_X : R600Reg <"C158.X", 158>;
+def C158_Y : R600Reg <"C158.Y", 158>;
+def C158_Z : R600Reg <"C158.Z", 158>;
+def C158_W : R600Reg <"C158.W", 158>;
+def C159_X : R600Reg <"C159.X", 159>;
+def C159_Y : R600Reg <"C159.Y", 159>;
+def C159_Z : R600Reg <"C159.Z", 159>;
+def C159_W : R600Reg <"C159.W", 159>;
+def C160_X : R600Reg <"C160.X", 160>;
+def C160_Y : R600Reg <"C160.Y", 160>;
+def C160_Z : R600Reg <"C160.Z", 160>;
+def C160_W : R600Reg <"C160.W", 160>;
+def C161_X : R600Reg <"C161.X", 161>;
+def C161_Y : R600Reg <"C161.Y", 161>;
+def C161_Z : R600Reg <"C161.Z", 161>;
+def C161_W : R600Reg <"C161.W", 161>;
+def C162_X : R600Reg <"C162.X", 162>;
+def C162_Y : R600Reg <"C162.Y", 162>;
+def C162_Z : R600Reg <"C162.Z", 162>;
+def C162_W : R600Reg <"C162.W", 162>;
+def C163_X : R600Reg <"C163.X", 163>;
+def C163_Y : R600Reg <"C163.Y", 163>;
+def C163_Z : R600Reg <"C163.Z", 163>;
+def C163_W : R600Reg <"C163.W", 163>;
+def C164_X : R600Reg <"C164.X", 164>;
+def C164_Y : R600Reg <"C164.Y", 164>;
+def C164_Z : R600Reg <"C164.Z", 164>;
+def C164_W : R600Reg <"C164.W", 164>;
+def C165_X : R600Reg <"C165.X", 165>;
+def C165_Y : R600Reg <"C165.Y", 165>;
+def C165_Z : R600Reg <"C165.Z", 165>;
+def C165_W : R600Reg <"C165.W", 165>;
+def C166_X : R600Reg <"C166.X", 166>;
+def C166_Y : R600Reg <"C166.Y", 166>;
+def C166_Z : R600Reg <"C166.Z", 166>;
+def C166_W : R600Reg <"C166.W", 166>;
+def C167_X : R600Reg <"C167.X", 167>;
+def C167_Y : R600Reg <"C167.Y", 167>;
+def C167_Z : R600Reg <"C167.Z", 167>;
+def C167_W : R600Reg <"C167.W", 167>;
+def C168_X : R600Reg <"C168.X", 168>;
+def C168_Y : R600Reg <"C168.Y", 168>;
+def C168_Z : R600Reg <"C168.Z", 168>;
+def C168_W : R600Reg <"C168.W", 168>;
+def C169_X : R600Reg <"C169.X", 169>;
+def C169_Y : R600Reg <"C169.Y", 169>;
+def C169_Z : R600Reg <"C169.Z", 169>;
+def C169_W : R600Reg <"C169.W", 169>;
+def C170_X : R600Reg <"C170.X", 170>;
+def C170_Y : R600Reg <"C170.Y", 170>;
+def C170_Z : R600Reg <"C170.Z", 170>;
+def C170_W : R600Reg <"C170.W", 170>;
+def C171_X : R600Reg <"C171.X", 171>;
+def C171_Y : R600Reg <"C171.Y", 171>;
+def C171_Z : R600Reg <"C171.Z", 171>;
+def C171_W : R600Reg <"C171.W", 171>;
+def C172_X : R600Reg <"C172.X", 172>;
+def C172_Y : R600Reg <"C172.Y", 172>;
+def C172_Z : R600Reg <"C172.Z", 172>;
+def C172_W : R600Reg <"C172.W", 172>;
+def C173_X : R600Reg <"C173.X", 173>;
+def C173_Y : R600Reg <"C173.Y", 173>;
+def C173_Z : R600Reg <"C173.Z", 173>;
+def C173_W : R600Reg <"C173.W", 173>;
+def C174_X : R600Reg <"C174.X", 174>;
+def C174_Y : R600Reg <"C174.Y", 174>;
+def C174_Z : R600Reg <"C174.Z", 174>;
+def C174_W : R600Reg <"C174.W", 174>;
+def C175_X : R600Reg <"C175.X", 175>;
+def C175_Y : R600Reg <"C175.Y", 175>;
+def C175_Z : R600Reg <"C175.Z", 175>;
+def C175_W : R600Reg <"C175.W", 175>;
+def C176_X : R600Reg <"C176.X", 176>;
+def C176_Y : R600Reg <"C176.Y", 176>;
+def C176_Z : R600Reg <"C176.Z", 176>;
+def C176_W : R600Reg <"C176.W", 176>;
+def C177_X : R600Reg <"C177.X", 177>;
+def C177_Y : R600Reg <"C177.Y", 177>;
+def C177_Z : R600Reg <"C177.Z", 177>;
+def C177_W : R600Reg <"C177.W", 177>;
+def C178_X : R600Reg <"C178.X", 178>;
+def C178_Y : R600Reg <"C178.Y", 178>;
+def C178_Z : R600Reg <"C178.Z", 178>;
+def C178_W : R600Reg <"C178.W", 178>;
+def C179_X : R600Reg <"C179.X", 179>;
+def C179_Y : R600Reg <"C179.Y", 179>;
+def C179_Z : R600Reg <"C179.Z", 179>;
+def C179_W : R600Reg <"C179.W", 179>;
+def C180_X : R600Reg <"C180.X", 180>;
+def C180_Y : R600Reg <"C180.Y", 180>;
+def C180_Z : R600Reg <"C180.Z", 180>;
+def C180_W : R600Reg <"C180.W", 180>;
+def C181_X : R600Reg <"C181.X", 181>;
+def C181_Y : R600Reg <"C181.Y", 181>;
+def C181_Z : R600Reg <"C181.Z", 181>;
+def C181_W : R600Reg <"C181.W", 181>;
+def C182_X : R600Reg <"C182.X", 182>;
+def C182_Y : R600Reg <"C182.Y", 182>;
+def C182_Z : R600Reg <"C182.Z", 182>;
+def C182_W : R600Reg <"C182.W", 182>;
+def C183_X : R600Reg <"C183.X", 183>;
+def C183_Y : R600Reg <"C183.Y", 183>;
+def C183_Z : R600Reg <"C183.Z", 183>;
+def C183_W : R600Reg <"C183.W", 183>;
+def C184_X : R600Reg <"C184.X", 184>;
+def C184_Y : R600Reg <"C184.Y", 184>;
+def C184_Z : R600Reg <"C184.Z", 184>;
+def C184_W : R600Reg <"C184.W", 184>;
+def C185_X : R600Reg <"C185.X", 185>;
+def C185_Y : R600Reg <"C185.Y", 185>;
+def C185_Z : R600Reg <"C185.Z", 185>;
+def C185_W : R600Reg <"C185.W", 185>;
+def C186_X : R600Reg <"C186.X", 186>;
+def C186_Y : R600Reg <"C186.Y", 186>;
+def C186_Z : R600Reg <"C186.Z", 186>;
+def C186_W : R600Reg <"C186.W", 186>;
+def C187_X : R600Reg <"C187.X", 187>;
+def C187_Y : R600Reg <"C187.Y", 187>;
+def C187_Z : R600Reg <"C187.Z", 187>;
+def C187_W : R600Reg <"C187.W", 187>;
+def C188_X : R600Reg <"C188.X", 188>;
+def C188_Y : R600Reg <"C188.Y", 188>;
+def C188_Z : R600Reg <"C188.Z", 188>;
+def C188_W : R600Reg <"C188.W", 188>;
+def C189_X : R600Reg <"C189.X", 189>;
+def C189_Y : R600Reg <"C189.Y", 189>;
+def C189_Z : R600Reg <"C189.Z", 189>;
+def C189_W : R600Reg <"C189.W", 189>;
+def C190_X : R600Reg <"C190.X", 190>;
+def C190_Y : R600Reg <"C190.Y", 190>;
+def C190_Z : R600Reg <"C190.Z", 190>;
+def C190_W : R600Reg <"C190.W", 190>;
+def C191_X : R600Reg <"C191.X", 191>;
+def C191_Y : R600Reg <"C191.Y", 191>;
+def C191_Z : R600Reg <"C191.Z", 191>;
+def C191_W : R600Reg <"C191.W", 191>;
+def C192_X : R600Reg <"C192.X", 192>;
+def C192_Y : R600Reg <"C192.Y", 192>;
+def C192_Z : R600Reg <"C192.Z", 192>;
+def C192_W : R600Reg <"C192.W", 192>;
+def C193_X : R600Reg <"C193.X", 193>;
+def C193_Y : R600Reg <"C193.Y", 193>;
+def C193_Z : R600Reg <"C193.Z", 193>;
+def C193_W : R600Reg <"C193.W", 193>;
+def C194_X : R600Reg <"C194.X", 194>;
+def C194_Y : R600Reg <"C194.Y", 194>;
+def C194_Z : R600Reg <"C194.Z", 194>;
+def C194_W : R600Reg <"C194.W", 194>;
+def C195_X : R600Reg <"C195.X", 195>;
+def C195_Y : R600Reg <"C195.Y", 195>;
+def C195_Z : R600Reg <"C195.Z", 195>;
+def C195_W : R600Reg <"C195.W", 195>;
+def C196_X : R600Reg <"C196.X", 196>;
+def C196_Y : R600Reg <"C196.Y", 196>;
+def C196_Z : R600Reg <"C196.Z", 196>;
+def C196_W : R600Reg <"C196.W", 196>;
+def C197_X : R600Reg <"C197.X", 197>;
+def C197_Y : R600Reg <"C197.Y", 197>;
+def C197_Z : R600Reg <"C197.Z", 197>;
+def C197_W : R600Reg <"C197.W", 197>;
+def C198_X : R600Reg <"C198.X", 198>;
+def C198_Y : R600Reg <"C198.Y", 198>;
+def C198_Z : R600Reg <"C198.Z", 198>;
+def C198_W : R600Reg <"C198.W", 198>;
+// Constant-buffer registers C199..C428.
+//
+// Each constant slot <Index> exposes four channel registers (X, Y, Z, W)
+// that all share the same hardware encoding <Index>; only the asm name's
+// channel suffix differs.  Generate them with a foreach instead of ~900
+// hand-written defs: the produced record names (C199_X .. C428_W), asm
+// names ("C199.X" ..) and encodings are byte-identical to the expanded
+// form, so register-class lists referencing these defs are unaffected.
+foreach Index = 199-428 in {
+  def C#Index#_X : R600Reg <"C"#Index#".X", Index>;
+  def C#Index#_Y : R600Reg <"C"#Index#".Y", Index>;
+  def C#Index#_Z : R600Reg <"C"#Index#".Z", Index>;
+  def C#Index#_W : R600Reg <"C"#Index#".W", Index>;
+}
+def C429_X : R600Reg <"C429.X", 429>;
+def C429_Y : R600Reg <"C429.Y", 429>;
+def C429_Z : R600Reg <"C429.Z", 429>;
+def C429_W : R600Reg <"C429.W", 429>;
+def C430_X : R600Reg <"C430.X", 430>;
+def C430_Y : R600Reg <"C430.Y", 430>;
+def C430_Z : R600Reg <"C430.Z", 430>;
+def C430_W : R600Reg <"C430.W", 430>;
+def C431_X : R600Reg <"C431.X", 431>;
+def C431_Y : R600Reg <"C431.Y", 431>;
+def C431_Z : R600Reg <"C431.Z", 431>;
+def C431_W : R600Reg <"C431.W", 431>;
+def C432_X : R600Reg <"C432.X", 432>;
+def C432_Y : R600Reg <"C432.Y", 432>;
+def C432_Z : R600Reg <"C432.Z", 432>;
+def C432_W : R600Reg <"C432.W", 432>;
+def C433_X : R600Reg <"C433.X", 433>;
+def C433_Y : R600Reg <"C433.Y", 433>;
+def C433_Z : R600Reg <"C433.Z", 433>;
+def C433_W : R600Reg <"C433.W", 433>;
+def C434_X : R600Reg <"C434.X", 434>;
+def C434_Y : R600Reg <"C434.Y", 434>;
+def C434_Z : R600Reg <"C434.Z", 434>;
+def C434_W : R600Reg <"C434.W", 434>;
+def C435_X : R600Reg <"C435.X", 435>;
+def C435_Y : R600Reg <"C435.Y", 435>;
+def C435_Z : R600Reg <"C435.Z", 435>;
+def C435_W : R600Reg <"C435.W", 435>;
+def C436_X : R600Reg <"C436.X", 436>;
+def C436_Y : R600Reg <"C436.Y", 436>;
+def C436_Z : R600Reg <"C436.Z", 436>;
+def C436_W : R600Reg <"C436.W", 436>;
+def C437_X : R600Reg <"C437.X", 437>;
+def C437_Y : R600Reg <"C437.Y", 437>;
+def C437_Z : R600Reg <"C437.Z", 437>;
+def C437_W : R600Reg <"C437.W", 437>;
+def C438_X : R600Reg <"C438.X", 438>;
+def C438_Y : R600Reg <"C438.Y", 438>;
+def C438_Z : R600Reg <"C438.Z", 438>;
+def C438_W : R600Reg <"C438.W", 438>;
+def C439_X : R600Reg <"C439.X", 439>;
+def C439_Y : R600Reg <"C439.Y", 439>;
+def C439_Z : R600Reg <"C439.Z", 439>;
+def C439_W : R600Reg <"C439.W", 439>;
+def C440_X : R600Reg <"C440.X", 440>;
+def C440_Y : R600Reg <"C440.Y", 440>;
+def C440_Z : R600Reg <"C440.Z", 440>;
+def C440_W : R600Reg <"C440.W", 440>;
+def C441_X : R600Reg <"C441.X", 441>;
+def C441_Y : R600Reg <"C441.Y", 441>;
+def C441_Z : R600Reg <"C441.Z", 441>;
+def C441_W : R600Reg <"C441.W", 441>;
+def C442_X : R600Reg <"C442.X", 442>;
+def C442_Y : R600Reg <"C442.Y", 442>;
+def C442_Z : R600Reg <"C442.Z", 442>;
+def C442_W : R600Reg <"C442.W", 442>;
+def C443_X : R600Reg <"C443.X", 443>;
+def C443_Y : R600Reg <"C443.Y", 443>;
+def C443_Z : R600Reg <"C443.Z", 443>;
+def C443_W : R600Reg <"C443.W", 443>;
+def C444_X : R600Reg <"C444.X", 444>;
+def C444_Y : R600Reg <"C444.Y", 444>;
+def C444_Z : R600Reg <"C444.Z", 444>;
+def C444_W : R600Reg <"C444.W", 444>;
+def C445_X : R600Reg <"C445.X", 445>;
+def C445_Y : R600Reg <"C445.Y", 445>;
+def C445_Z : R600Reg <"C445.Z", 445>;
+def C445_W : R600Reg <"C445.W", 445>;
+def C446_X : R600Reg <"C446.X", 446>;
+def C446_Y : R600Reg <"C446.Y", 446>;
+def C446_Z : R600Reg <"C446.Z", 446>;
+def C446_W : R600Reg <"C446.W", 446>;
+def C447_X : R600Reg <"C447.X", 447>;
+def C447_Y : R600Reg <"C447.Y", 447>;
+def C447_Z : R600Reg <"C447.Z", 447>;
+def C447_W : R600Reg <"C447.W", 447>;
+def C448_X : R600Reg <"C448.X", 448>;
+def C448_Y : R600Reg <"C448.Y", 448>;
+def C448_Z : R600Reg <"C448.Z", 448>;
+def C448_W : R600Reg <"C448.W", 448>;
+def C449_X : R600Reg <"C449.X", 449>;
+def C449_Y : R600Reg <"C449.Y", 449>;
+def C449_Z : R600Reg <"C449.Z", 449>;
+def C449_W : R600Reg <"C449.W", 449>;
+def C450_X : R600Reg <"C450.X", 450>;
+def C450_Y : R600Reg <"C450.Y", 450>;
+def C450_Z : R600Reg <"C450.Z", 450>;
+def C450_W : R600Reg <"C450.W", 450>;
+def C451_X : R600Reg <"C451.X", 451>;
+def C451_Y : R600Reg <"C451.Y", 451>;
+def C451_Z : R600Reg <"C451.Z", 451>;
+def C451_W : R600Reg <"C451.W", 451>;
+def C452_X : R600Reg <"C452.X", 452>;
+def C452_Y : R600Reg <"C452.Y", 452>;
+def C452_Z : R600Reg <"C452.Z", 452>;
+def C452_W : R600Reg <"C452.W", 452>;
+def C453_X : R600Reg <"C453.X", 453>;
+def C453_Y : R600Reg <"C453.Y", 453>;
+def C453_Z : R600Reg <"C453.Z", 453>;
+def C453_W : R600Reg <"C453.W", 453>;
+def C454_X : R600Reg <"C454.X", 454>;
+def C454_Y : R600Reg <"C454.Y", 454>;
+def C454_Z : R600Reg <"C454.Z", 454>;
+def C454_W : R600Reg <"C454.W", 454>;
+def C455_X : R600Reg <"C455.X", 455>;
+def C455_Y : R600Reg <"C455.Y", 455>;
+def C455_Z : R600Reg <"C455.Z", 455>;
+def C455_W : R600Reg <"C455.W", 455>;
+def C456_X : R600Reg <"C456.X", 456>;
+def C456_Y : R600Reg <"C456.Y", 456>;
+def C456_Z : R600Reg <"C456.Z", 456>;
+def C456_W : R600Reg <"C456.W", 456>;
+def C457_X : R600Reg <"C457.X", 457>;
+def C457_Y : R600Reg <"C457.Y", 457>;
+def C457_Z : R600Reg <"C457.Z", 457>;
+def C457_W : R600Reg <"C457.W", 457>;
+def C458_X : R600Reg <"C458.X", 458>;
+def C458_Y : R600Reg <"C458.Y", 458>;
+def C458_Z : R600Reg <"C458.Z", 458>;
+def C458_W : R600Reg <"C458.W", 458>;
+def C459_X : R600Reg <"C459.X", 459>;
+def C459_Y : R600Reg <"C459.Y", 459>;
+def C459_Z : R600Reg <"C459.Z", 459>;
+def C459_W : R600Reg <"C459.W", 459>;
+def C460_X : R600Reg <"C460.X", 460>;
+def C460_Y : R600Reg <"C460.Y", 460>;
+def C460_Z : R600Reg <"C460.Z", 460>;
+def C460_W : R600Reg <"C460.W", 460>;
+def C461_X : R600Reg <"C461.X", 461>;
+def C461_Y : R600Reg <"C461.Y", 461>;
+def C461_Z : R600Reg <"C461.Z", 461>;
+def C461_W : R600Reg <"C461.W", 461>;
+def C462_X : R600Reg <"C462.X", 462>;
+def C462_Y : R600Reg <"C462.Y", 462>;
+def C462_Z : R600Reg <"C462.Z", 462>;
+def C462_W : R600Reg <"C462.W", 462>;
+def C463_X : R600Reg <"C463.X", 463>;
+def C463_Y : R600Reg <"C463.Y", 463>;
+def C463_Z : R600Reg <"C463.Z", 463>;
+def C463_W : R600Reg <"C463.W", 463>;
+def C464_X : R600Reg <"C464.X", 464>;
+def C464_Y : R600Reg <"C464.Y", 464>;
+def C464_Z : R600Reg <"C464.Z", 464>;
+def C464_W : R600Reg <"C464.W", 464>;
+def C465_X : R600Reg <"C465.X", 465>;
+def C465_Y : R600Reg <"C465.Y", 465>;
+def C465_Z : R600Reg <"C465.Z", 465>;
+def C465_W : R600Reg <"C465.W", 465>;
+def C466_X : R600Reg <"C466.X", 466>;
+def C466_Y : R600Reg <"C466.Y", 466>;
+def C466_Z : R600Reg <"C466.Z", 466>;
+def C466_W : R600Reg <"C466.W", 466>;
+def C467_X : R600Reg <"C467.X", 467>;
+def C467_Y : R600Reg <"C467.Y", 467>;
+def C467_Z : R600Reg <"C467.Z", 467>;
+def C467_W : R600Reg <"C467.W", 467>;
+def C468_X : R600Reg <"C468.X", 468>;
+def C468_Y : R600Reg <"C468.Y", 468>;
+def C468_Z : R600Reg <"C468.Z", 468>;
+def C468_W : R600Reg <"C468.W", 468>;
+def C469_X : R600Reg <"C469.X", 469>;
+def C469_Y : R600Reg <"C469.Y", 469>;
+def C469_Z : R600Reg <"C469.Z", 469>;
+def C469_W : R600Reg <"C469.W", 469>;
+def C470_X : R600Reg <"C470.X", 470>;
+def C470_Y : R600Reg <"C470.Y", 470>;
+def C470_Z : R600Reg <"C470.Z", 470>;
+def C470_W : R600Reg <"C470.W", 470>;
+def C471_X : R600Reg <"C471.X", 471>;
+def C471_Y : R600Reg <"C471.Y", 471>;
+def C471_Z : R600Reg <"C471.Z", 471>;
+def C471_W : R600Reg <"C471.W", 471>;
+def C472_X : R600Reg <"C472.X", 472>;
+def C472_Y : R600Reg <"C472.Y", 472>;
+def C472_Z : R600Reg <"C472.Z", 472>;
+def C472_W : R600Reg <"C472.W", 472>;
+def C473_X : R600Reg <"C473.X", 473>;
+def C473_Y : R600Reg <"C473.Y", 473>;
+def C473_Z : R600Reg <"C473.Z", 473>;
+def C473_W : R600Reg <"C473.W", 473>;
+def C474_X : R600Reg <"C474.X", 474>;
+def C474_Y : R600Reg <"C474.Y", 474>;
+def C474_Z : R600Reg <"C474.Z", 474>;
+def C474_W : R600Reg <"C474.W", 474>;
+def C475_X : R600Reg <"C475.X", 475>;
+def C475_Y : R600Reg <"C475.Y", 475>;
+def C475_Z : R600Reg <"C475.Z", 475>;
+def C475_W : R600Reg <"C475.W", 475>;
+def C476_X : R600Reg <"C476.X", 476>;
+def C476_Y : R600Reg <"C476.Y", 476>;
+def C476_Z : R600Reg <"C476.Z", 476>;
+def C476_W : R600Reg <"C476.W", 476>;
+def C477_X : R600Reg <"C477.X", 477>;
+def C477_Y : R600Reg <"C477.Y", 477>;
+def C477_Z : R600Reg <"C477.Z", 477>;
+def C477_W : R600Reg <"C477.W", 477>;
+def C478_X : R600Reg <"C478.X", 478>;
+def C478_Y : R600Reg <"C478.Y", 478>;
+def C478_Z : R600Reg <"C478.Z", 478>;
+def C478_W : R600Reg <"C478.W", 478>;
+def C479_X : R600Reg <"C479.X", 479>;
+def C479_Y : R600Reg <"C479.Y", 479>;
+def C479_Z : R600Reg <"C479.Z", 479>;
+def C479_W : R600Reg <"C479.W", 479>;
+def C480_X : R600Reg <"C480.X", 480>;
+def C480_Y : R600Reg <"C480.Y", 480>;
+def C480_Z : R600Reg <"C480.Z", 480>;
+def C480_W : R600Reg <"C480.W", 480>;
+def C481_X : R600Reg <"C481.X", 481>;
+def C481_Y : R600Reg <"C481.Y", 481>;
+def C481_Z : R600Reg <"C481.Z", 481>;
+def C481_W : R600Reg <"C481.W", 481>;
+def C482_X : R600Reg <"C482.X", 482>;
+def C482_Y : R600Reg <"C482.Y", 482>;
+def C482_Z : R600Reg <"C482.Z", 482>;
+def C482_W : R600Reg <"C482.W", 482>;
+def C483_X : R600Reg <"C483.X", 483>;
+def C483_Y : R600Reg <"C483.Y", 483>;
+def C483_Z : R600Reg <"C483.Z", 483>;
+def C483_W : R600Reg <"C483.W", 483>;
+def C484_X : R600Reg <"C484.X", 484>;
+def C484_Y : R600Reg <"C484.Y", 484>;
+def C484_Z : R600Reg <"C484.Z", 484>;
+def C484_W : R600Reg <"C484.W", 484>;
+def C485_X : R600Reg <"C485.X", 485>;
+def C485_Y : R600Reg <"C485.Y", 485>;
+def C485_Z : R600Reg <"C485.Z", 485>;
+def C485_W : R600Reg <"C485.W", 485>;
+def C486_X : R600Reg <"C486.X", 486>;
+def C486_Y : R600Reg <"C486.Y", 486>;
+def C486_Z : R600Reg <"C486.Z", 486>;
+def C486_W : R600Reg <"C486.W", 486>;
+def C487_X : R600Reg <"C487.X", 487>;
+def C487_Y : R600Reg <"C487.Y", 487>;
+def C487_Z : R600Reg <"C487.Z", 487>;
+def C487_W : R600Reg <"C487.W", 487>;
+def C488_X : R600Reg <"C488.X", 488>;
+def C488_Y : R600Reg <"C488.Y", 488>;
+def C488_Z : R600Reg <"C488.Z", 488>;
+def C488_W : R600Reg <"C488.W", 488>;
+def C489_X : R600Reg <"C489.X", 489>;
+def C489_Y : R600Reg <"C489.Y", 489>;
+def C489_Z : R600Reg <"C489.Z", 489>;
+def C489_W : R600Reg <"C489.W", 489>;
+def C490_X : R600Reg <"C490.X", 490>;
+def C490_Y : R600Reg <"C490.Y", 490>;
+def C490_Z : R600Reg <"C490.Z", 490>;
+def C490_W : R600Reg <"C490.W", 490>;
+def C491_X : R600Reg <"C491.X", 491>;
+def C491_Y : R600Reg <"C491.Y", 491>;
+def C491_Z : R600Reg <"C491.Z", 491>;
+def C491_W : R600Reg <"C491.W", 491>;
+def C492_X : R600Reg <"C492.X", 492>;
+def C492_Y : R600Reg <"C492.Y", 492>;
+def C492_Z : R600Reg <"C492.Z", 492>;
+def C492_W : R600Reg <"C492.W", 492>;
+def C493_X : R600Reg <"C493.X", 493>;
+def C493_Y : R600Reg <"C493.Y", 493>;
+def C493_Z : R600Reg <"C493.Z", 493>;
+def C493_W : R600Reg <"C493.W", 493>;
+def C494_X : R600Reg <"C494.X", 494>;
+def C494_Y : R600Reg <"C494.Y", 494>;
+def C494_Z : R600Reg <"C494.Z", 494>;
+def C494_W : R600Reg <"C494.W", 494>;
+def C495_X : R600Reg <"C495.X", 495>;
+def C495_Y : R600Reg <"C495.Y", 495>;
+def C495_Z : R600Reg <"C495.Z", 495>;
+def C495_W : R600Reg <"C495.W", 495>;
+def C496_X : R600Reg <"C496.X", 496>;
+def C496_Y : R600Reg <"C496.Y", 496>;
+def C496_Z : R600Reg <"C496.Z", 496>;
+def C496_W : R600Reg <"C496.W", 496>;
+def C497_X : R600Reg <"C497.X", 497>;
+def C497_Y : R600Reg <"C497.Y", 497>;
+def C497_Z : R600Reg <"C497.Z", 497>;
+def C497_W : R600Reg <"C497.W", 497>;
+def C498_X : R600Reg <"C498.X", 498>;
+def C498_Y : R600Reg <"C498.Y", 498>;
+def C498_Z : R600Reg <"C498.Z", 498>;
+def C498_W : R600Reg <"C498.W", 498>;
+def C499_X : R600Reg <"C499.X", 499>;
+def C499_Y : R600Reg <"C499.Y", 499>;
+def C499_Z : R600Reg <"C499.Z", 499>;
+def C499_W : R600Reg <"C499.W", 499>;
+def C500_X : R600Reg <"C500.X", 500>;
+def C500_Y : R600Reg <"C500.Y", 500>;
+def C500_Z : R600Reg <"C500.Z", 500>;
+def C500_W : R600Reg <"C500.W", 500>;
+def C501_X : R600Reg <"C501.X", 501>;
+def C501_Y : R600Reg <"C501.Y", 501>;
+def C501_Z : R600Reg <"C501.Z", 501>;
+def C501_W : R600Reg <"C501.W", 501>;
+def C502_X : R600Reg <"C502.X", 502>;
+def C502_Y : R600Reg <"C502.Y", 502>;
+def C502_Z : R600Reg <"C502.Z", 502>;
+def C502_W : R600Reg <"C502.W", 502>;
+def C503_X : R600Reg <"C503.X", 503>;
+def C503_Y : R600Reg <"C503.Y", 503>;
+def C503_Z : R600Reg <"C503.Z", 503>;
+def C503_W : R600Reg <"C503.W", 503>;
+def C504_X : R600Reg <"C504.X", 504>;
+def C504_Y : R600Reg <"C504.Y", 504>;
+def C504_Z : R600Reg <"C504.Z", 504>;
+def C504_W : R600Reg <"C504.W", 504>;
+def C505_X : R600Reg <"C505.X", 505>;
+def C505_Y : R600Reg <"C505.Y", 505>;
+def C505_Z : R600Reg <"C505.Z", 505>;
+def C505_W : R600Reg <"C505.W", 505>;
+def C506_X : R600Reg <"C506.X", 506>;
+def C506_Y : R600Reg <"C506.Y", 506>;
+def C506_Z : R600Reg <"C506.Z", 506>;
+def C506_W : R600Reg <"C506.W", 506>;
+def C507_X : R600Reg <"C507.X", 507>;
+def C507_Y : R600Reg <"C507.Y", 507>;
+def C507_Z : R600Reg <"C507.Z", 507>;
+def C507_W : R600Reg <"C507.W", 507>;
+def C508_X : R600Reg <"C508.X", 508>;
+def C508_Y : R600Reg <"C508.Y", 508>;
+def C508_Z : R600Reg <"C508.Z", 508>;
+def C508_W : R600Reg <"C508.W", 508>;
+def C509_X : R600Reg <"C509.X", 509>;
+def C509_Y : R600Reg <"C509.Y", 509>;
+def C509_Z : R600Reg <"C509.Z", 509>;
+def C509_W : R600Reg <"C509.W", 509>;
+def C510_X : R600Reg <"C510.X", 510>;
+def C510_Y : R600Reg <"C510.Y", 510>;
+def C510_Z : R600Reg <"C510.Z", 510>;
+def C510_W : R600Reg <"C510.W", 510>;
+def C511_X : R600Reg <"C511.X", 511>;
+def C511_Y : R600Reg <"C511.Y", 511>;
+def C511_Z : R600Reg <"C511.Z", 511>;
+def C511_W : R600Reg <"C511.W", 511>;
+def C512_X : R600Reg <"C512.X", 512>;
+def C512_Y : R600Reg <"C512.Y", 512>;
+def C512_Z : R600Reg <"C512.Z", 512>;
+def C512_W : R600Reg <"C512.W", 512>;
+def C513_X : R600Reg <"C513.X", 513>;
+def C513_Y : R600Reg <"C513.Y", 513>;
+def C513_Z : R600Reg <"C513.Z", 513>;
+def C513_W : R600Reg <"C513.W", 513>;
+def C514_X : R600Reg <"C514.X", 514>;
+def C514_Y : R600Reg <"C514.Y", 514>;
+def C514_Z : R600Reg <"C514.Z", 514>;
+def C514_W : R600Reg <"C514.W", 514>;
+def C515_X : R600Reg <"C515.X", 515>;
+def C515_Y : R600Reg <"C515.Y", 515>;
+def C515_Z : R600Reg <"C515.Z", 515>;
+def C515_W : R600Reg <"C515.W", 515>;
+def C516_X : R600Reg <"C516.X", 516>;
+def C516_Y : R600Reg <"C516.Y", 516>;
+def C516_Z : R600Reg <"C516.Z", 516>;
+def C516_W : R600Reg <"C516.W", 516>;
+def C517_X : R600Reg <"C517.X", 517>;
+def C517_Y : R600Reg <"C517.Y", 517>;
+def C517_Z : R600Reg <"C517.Z", 517>;
+def C517_W : R600Reg <"C517.W", 517>;
+def C518_X : R600Reg <"C518.X", 518>;
+def C518_Y : R600Reg <"C518.Y", 518>;
+def C518_Z : R600Reg <"C518.Z", 518>;
+def C518_W : R600Reg <"C518.W", 518>;
+def C519_X : R600Reg <"C519.X", 519>;
+def C519_Y : R600Reg <"C519.Y", 519>;
+def C519_Z : R600Reg <"C519.Z", 519>;
+def C519_W : R600Reg <"C519.W", 519>;
+def C520_X : R600Reg <"C520.X", 520>;
+def C520_Y : R600Reg <"C520.Y", 520>;
+def C520_Z : R600Reg <"C520.Z", 520>;
+def C520_W : R600Reg <"C520.W", 520>;
+def C521_X : R600Reg <"C521.X", 521>;
+def C521_Y : R600Reg <"C521.Y", 521>;
+def C521_Z : R600Reg <"C521.Z", 521>;
+def C521_W : R600Reg <"C521.W", 521>;
+def C522_X : R600Reg <"C522.X", 522>;
+def C522_Y : R600Reg <"C522.Y", 522>;
+def C522_Z : R600Reg <"C522.Z", 522>;
+def C522_W : R600Reg <"C522.W", 522>;
+def C523_X : R600Reg <"C523.X", 523>;
+def C523_Y : R600Reg <"C523.Y", 523>;
+def C523_Z : R600Reg <"C523.Z", 523>;
+def C523_W : R600Reg <"C523.W", 523>;
+def C524_X : R600Reg <"C524.X", 524>;
+def C524_Y : R600Reg <"C524.Y", 524>;
+def C524_Z : R600Reg <"C524.Z", 524>;
+def C524_W : R600Reg <"C524.W", 524>;
+def C525_X : R600Reg <"C525.X", 525>;
+def C525_Y : R600Reg <"C525.Y", 525>;
+def C525_Z : R600Reg <"C525.Z", 525>;
+def C525_W : R600Reg <"C525.W", 525>;
+def C526_X : R600Reg <"C526.X", 526>;
+def C526_Y : R600Reg <"C526.Y", 526>;
+def C526_Z : R600Reg <"C526.Z", 526>;
+def C526_W : R600Reg <"C526.W", 526>;
+def C527_X : R600Reg <"C527.X", 527>;
+def C527_Y : R600Reg <"C527.Y", 527>;
+def C527_Z : R600Reg <"C527.Z", 527>;
+def C527_W : R600Reg <"C527.W", 527>;
+def C528_X : R600Reg <"C528.X", 528>;
+def C528_Y : R600Reg <"C528.Y", 528>;
+def C528_Z : R600Reg <"C528.Z", 528>;
+def C528_W : R600Reg <"C528.W", 528>;
+def C529_X : R600Reg <"C529.X", 529>;
+def C529_Y : R600Reg <"C529.Y", 529>;
+def C529_Z : R600Reg <"C529.Z", 529>;
+def C529_W : R600Reg <"C529.W", 529>;
+def C530_X : R600Reg <"C530.X", 530>;
+def C530_Y : R600Reg <"C530.Y", 530>;
+def C530_Z : R600Reg <"C530.Z", 530>;
+def C530_W : R600Reg <"C530.W", 530>;
+def C531_X : R600Reg <"C531.X", 531>;
+def C531_Y : R600Reg <"C531.Y", 531>;
+def C531_Z : R600Reg <"C531.Z", 531>;
+def C531_W : R600Reg <"C531.W", 531>;
+def C532_X : R600Reg <"C532.X", 532>;
+def C532_Y : R600Reg <"C532.Y", 532>;
+def C532_Z : R600Reg <"C532.Z", 532>;
+def C532_W : R600Reg <"C532.W", 532>;
+def C533_X : R600Reg <"C533.X", 533>;
+def C533_Y : R600Reg <"C533.Y", 533>;
+def C533_Z : R600Reg <"C533.Z", 533>;
+def C533_W : R600Reg <"C533.W", 533>;
+def C534_X : R600Reg <"C534.X", 534>;
+def C534_Y : R600Reg <"C534.Y", 534>;
+def C534_Z : R600Reg <"C534.Z", 534>;
+def C534_W : R600Reg <"C534.W", 534>;
+def C535_X : R600Reg <"C535.X", 535>;
+def C535_Y : R600Reg <"C535.Y", 535>;
+def C535_Z : R600Reg <"C535.Z", 535>;
+def C535_W : R600Reg <"C535.W", 535>;
+def C536_X : R600Reg <"C536.X", 536>;
+def C536_Y : R600Reg <"C536.Y", 536>;
+def C536_Z : R600Reg <"C536.Z", 536>;
+def C536_W : R600Reg <"C536.W", 536>;
+def C537_X : R600Reg <"C537.X", 537>;
+def C537_Y : R600Reg <"C537.Y", 537>;
+def C537_Z : R600Reg <"C537.Z", 537>;
+def C537_W : R600Reg <"C537.W", 537>;
+def C538_X : R600Reg <"C538.X", 538>;
+def C538_Y : R600Reg <"C538.Y", 538>;
+def C538_Z : R600Reg <"C538.Z", 538>;
+def C538_W : R600Reg <"C538.W", 538>;
+def C539_X : R600Reg <"C539.X", 539>;
+def C539_Y : R600Reg <"C539.Y", 539>;
+def C539_Z : R600Reg <"C539.Z", 539>;
+def C539_W : R600Reg <"C539.W", 539>;
+def C540_X : R600Reg <"C540.X", 540>;
+def C540_Y : R600Reg <"C540.Y", 540>;
+def C540_Z : R600Reg <"C540.Z", 540>;
+def C540_W : R600Reg <"C540.W", 540>;
+def C541_X : R600Reg <"C541.X", 541>;
+def C541_Y : R600Reg <"C541.Y", 541>;
+def C541_Z : R600Reg <"C541.Z", 541>;
+def C541_W : R600Reg <"C541.W", 541>;
+def C542_X : R600Reg <"C542.X", 542>;
+def C542_Y : R600Reg <"C542.Y", 542>;
+def C542_Z : R600Reg <"C542.Z", 542>;
+def C542_W : R600Reg <"C542.W", 542>;
+def C543_X : R600Reg <"C543.X", 543>;
+def C543_Y : R600Reg <"C543.Y", 543>;
+def C543_Z : R600Reg <"C543.Z", 543>;
+def C543_W : R600Reg <"C543.W", 543>;
+def C544_X : R600Reg <"C544.X", 544>;
+def C544_Y : R600Reg <"C544.Y", 544>;
+def C544_Z : R600Reg <"C544.Z", 544>;
+def C544_W : R600Reg <"C544.W", 544>;
+def C545_X : R600Reg <"C545.X", 545>;
+def C545_Y : R600Reg <"C545.Y", 545>;
+def C545_Z : R600Reg <"C545.Z", 545>;
+def C545_W : R600Reg <"C545.W", 545>;
+def C546_X : R600Reg <"C546.X", 546>;
+def C546_Y : R600Reg <"C546.Y", 546>;
+def C546_Z : R600Reg <"C546.Z", 546>;
+def C546_W : R600Reg <"C546.W", 546>;
+def C547_X : R600Reg <"C547.X", 547>;
+def C547_Y : R600Reg <"C547.Y", 547>;
+def C547_Z : R600Reg <"C547.Z", 547>;
+def C547_W : R600Reg <"C547.W", 547>;
+def C548_X : R600Reg <"C548.X", 548>;
+def C548_Y : R600Reg <"C548.Y", 548>;
+def C548_Z : R600Reg <"C548.Z", 548>;
+def C548_W : R600Reg <"C548.W", 548>;
+def C549_X : R600Reg <"C549.X", 549>;
+def C549_Y : R600Reg <"C549.Y", 549>;
+def C549_Z : R600Reg <"C549.Z", 549>;
+def C549_W : R600Reg <"C549.W", 549>;
+def C550_X : R600Reg <"C550.X", 550>;
+def C550_Y : R600Reg <"C550.Y", 550>;
+def C550_Z : R600Reg <"C550.Z", 550>;
+def C550_W : R600Reg <"C550.W", 550>;
+def C551_X : R600Reg <"C551.X", 551>;
+def C551_Y : R600Reg <"C551.Y", 551>;
+def C551_Z : R600Reg <"C551.Z", 551>;
+def C551_W : R600Reg <"C551.W", 551>;
+def C552_X : R600Reg <"C552.X", 552>;
+def C552_Y : R600Reg <"C552.Y", 552>;
+def C552_Z : R600Reg <"C552.Z", 552>;
+def C552_W : R600Reg <"C552.W", 552>;
+def C553_X : R600Reg <"C553.X", 553>;
+def C553_Y : R600Reg <"C553.Y", 553>;
+def C553_Z : R600Reg <"C553.Z", 553>;
+def C553_W : R600Reg <"C553.W", 553>;
+def C554_X : R600Reg <"C554.X", 554>;
+def C554_Y : R600Reg <"C554.Y", 554>;
+def C554_Z : R600Reg <"C554.Z", 554>;
+def C554_W : R600Reg <"C554.W", 554>;
+def C555_X : R600Reg <"C555.X", 555>;
+def C555_Y : R600Reg <"C555.Y", 555>;
+def C555_Z : R600Reg <"C555.Z", 555>;
+def C555_W : R600Reg <"C555.W", 555>;
+def C556_X : R600Reg <"C556.X", 556>;
+def C556_Y : R600Reg <"C556.Y", 556>;
+def C556_Z : R600Reg <"C556.Z", 556>;
+def C556_W : R600Reg <"C556.W", 556>;
+def C557_X : R600Reg <"C557.X", 557>;
+def C557_Y : R600Reg <"C557.Y", 557>;
+def C557_Z : R600Reg <"C557.Z", 557>;
+def C557_W : R600Reg <"C557.W", 557>;
+def C558_X : R600Reg <"C558.X", 558>;
+def C558_Y : R600Reg <"C558.Y", 558>;
+def C558_Z : R600Reg <"C558.Z", 558>;
+def C558_W : R600Reg <"C558.W", 558>;
+def C559_X : R600Reg <"C559.X", 559>;
+def C559_Y : R600Reg <"C559.Y", 559>;
+def C559_Z : R600Reg <"C559.Z", 559>;
+def C559_W : R600Reg <"C559.W", 559>;
+def C560_X : R600Reg <"C560.X", 560>;
+def C560_Y : R600Reg <"C560.Y", 560>;
+def C560_Z : R600Reg <"C560.Z", 560>;
+def C560_W : R600Reg <"C560.W", 560>;
+def C561_X : R600Reg <"C561.X", 561>;
+def C561_Y : R600Reg <"C561.Y", 561>;
+def C561_Z : R600Reg <"C561.Z", 561>;
+def C561_W : R600Reg <"C561.W", 561>;
+def C562_X : R600Reg <"C562.X", 562>;
+def C562_Y : R600Reg <"C562.Y", 562>;
+def C562_Z : R600Reg <"C562.Z", 562>;
+def C562_W : R600Reg <"C562.W", 562>;
+def C563_X : R600Reg <"C563.X", 563>;
+def C563_Y : R600Reg <"C563.Y", 563>;
+def C563_Z : R600Reg <"C563.Z", 563>;
+def C563_W : R600Reg <"C563.W", 563>;
+def C564_X : R600Reg <"C564.X", 564>;
+def C564_Y : R600Reg <"C564.Y", 564>;
+def C564_Z : R600Reg <"C564.Z", 564>;
+def C564_W : R600Reg <"C564.W", 564>;
+def C565_X : R600Reg <"C565.X", 565>;
+def C565_Y : R600Reg <"C565.Y", 565>;
+def C565_Z : R600Reg <"C565.Z", 565>;
+def C565_W : R600Reg <"C565.W", 565>;
+def C566_X : R600Reg <"C566.X", 566>;
+def C566_Y : R600Reg <"C566.Y", 566>;
+def C566_Z : R600Reg <"C566.Z", 566>;
+def C566_W : R600Reg <"C566.W", 566>;
+def C567_X : R600Reg <"C567.X", 567>;
+def C567_Y : R600Reg <"C567.Y", 567>;
+def C567_Z : R600Reg <"C567.Z", 567>;
+def C567_W : R600Reg <"C567.W", 567>;
+def C568_X : R600Reg <"C568.X", 568>;
+def C568_Y : R600Reg <"C568.Y", 568>;
+def C568_Z : R600Reg <"C568.Z", 568>;
+def C568_W : R600Reg <"C568.W", 568>;
+def C569_X : R600Reg <"C569.X", 569>;
+def C569_Y : R600Reg <"C569.Y", 569>;
+def C569_Z : R600Reg <"C569.Z", 569>;
+def C569_W : R600Reg <"C569.W", 569>;
+def C570_X : R600Reg <"C570.X", 570>;
+def C570_Y : R600Reg <"C570.Y", 570>;
+def C570_Z : R600Reg <"C570.Z", 570>;
+def C570_W : R600Reg <"C570.W", 570>;
+def C571_X : R600Reg <"C571.X", 571>;
+def C571_Y : R600Reg <"C571.Y", 571>;
+def C571_Z : R600Reg <"C571.Z", 571>;
+def C571_W : R600Reg <"C571.W", 571>;
+def C572_X : R600Reg <"C572.X", 572>;
+def C572_Y : R600Reg <"C572.Y", 572>;
+def C572_Z : R600Reg <"C572.Z", 572>;
+def C572_W : R600Reg <"C572.W", 572>;
+def C573_X : R600Reg <"C573.X", 573>;
+def C573_Y : R600Reg <"C573.Y", 573>;
+def C573_Z : R600Reg <"C573.Z", 573>;
+def C573_W : R600Reg <"C573.W", 573>;
+def C574_X : R600Reg <"C574.X", 574>;
+def C574_Y : R600Reg <"C574.Y", 574>;
+def C574_Z : R600Reg <"C574.Z", 574>;
+def C574_W : R600Reg <"C574.W", 574>;
+def C575_X : R600Reg <"C575.X", 575>;
+def C575_Y : R600Reg <"C575.Y", 575>;
+def C575_Z : R600Reg <"C575.Z", 575>;
+def C575_W : R600Reg <"C575.W", 575>;
+def C576_X : R600Reg <"C576.X", 576>;
+def C576_Y : R600Reg <"C576.Y", 576>;
+def C576_Z : R600Reg <"C576.Z", 576>;
+def C576_W : R600Reg <"C576.W", 576>;
+def C577_X : R600Reg <"C577.X", 577>;
+def C577_Y : R600Reg <"C577.Y", 577>;
+def C577_Z : R600Reg <"C577.Z", 577>;
+def C577_W : R600Reg <"C577.W", 577>;
+def C578_X : R600Reg <"C578.X", 578>;
+def C578_Y : R600Reg <"C578.Y", 578>;
+def C578_Z : R600Reg <"C578.Z", 578>;
+def C578_W : R600Reg <"C578.W", 578>;
+def C579_X : R600Reg <"C579.X", 579>;
+def C579_Y : R600Reg <"C579.Y", 579>;
+def C579_Z : R600Reg <"C579.Z", 579>;
+def C579_W : R600Reg <"C579.W", 579>;
+def C580_X : R600Reg <"C580.X", 580>;
+def C580_Y : R600Reg <"C580.Y", 580>;
+def C580_Z : R600Reg <"C580.Z", 580>;
+def C580_W : R600Reg <"C580.W", 580>;
+def C581_X : R600Reg <"C581.X", 581>;
+def C581_Y : R600Reg <"C581.Y", 581>;
+def C581_Z : R600Reg <"C581.Z", 581>;
+def C581_W : R600Reg <"C581.W", 581>;
+def C582_X : R600Reg <"C582.X", 582>;
+def C582_Y : R600Reg <"C582.Y", 582>;
+def C582_Z : R600Reg <"C582.Z", 582>;
+def C582_W : R600Reg <"C582.W", 582>;
+def C583_X : R600Reg <"C583.X", 583>;
+def C583_Y : R600Reg <"C583.Y", 583>;
+def C583_Z : R600Reg <"C583.Z", 583>;
+def C583_W : R600Reg <"C583.W", 583>;
+def C584_X : R600Reg <"C584.X", 584>;
+def C584_Y : R600Reg <"C584.Y", 584>;
+def C584_Z : R600Reg <"C584.Z", 584>;
+def C584_W : R600Reg <"C584.W", 584>;
+def C585_X : R600Reg <"C585.X", 585>;
+def C585_Y : R600Reg <"C585.Y", 585>;
+def C585_Z : R600Reg <"C585.Z", 585>;
+def C585_W : R600Reg <"C585.W", 585>;
+def C586_X : R600Reg <"C586.X", 586>;
+def C586_Y : R600Reg <"C586.Y", 586>;
+def C586_Z : R600Reg <"C586.Z", 586>;
+def C586_W : R600Reg <"C586.W", 586>;
+def C587_X : R600Reg <"C587.X", 587>;
+def C587_Y : R600Reg <"C587.Y", 587>;
+def C587_Z : R600Reg <"C587.Z", 587>;
+def C587_W : R600Reg <"C587.W", 587>;
+def C588_X : R600Reg <"C588.X", 588>;
+def C588_Y : R600Reg <"C588.Y", 588>;
+def C588_Z : R600Reg <"C588.Z", 588>;
+def C588_W : R600Reg <"C588.W", 588>;
+def C589_X : R600Reg <"C589.X", 589>;
+def C589_Y : R600Reg <"C589.Y", 589>;
+def C589_Z : R600Reg <"C589.Z", 589>;
+def C589_W : R600Reg <"C589.W", 589>;
+def C590_X : R600Reg <"C590.X", 590>;
+def C590_Y : R600Reg <"C590.Y", 590>;
+def C590_Z : R600Reg <"C590.Z", 590>;
+def C590_W : R600Reg <"C590.W", 590>;
+def C591_X : R600Reg <"C591.X", 591>;
+def C591_Y : R600Reg <"C591.Y", 591>;
+def C591_Z : R600Reg <"C591.Z", 591>;
+def C591_W : R600Reg <"C591.W", 591>;
+def C592_X : R600Reg <"C592.X", 592>;
+def C592_Y : R600Reg <"C592.Y", 592>;
+def C592_Z : R600Reg <"C592.Z", 592>;
+def C592_W : R600Reg <"C592.W", 592>;
+def C593_X : R600Reg <"C593.X", 593>;
+def C593_Y : R600Reg <"C593.Y", 593>;
+def C593_Z : R600Reg <"C593.Z", 593>;
+def C593_W : R600Reg <"C593.W", 593>;
+def C594_X : R600Reg <"C594.X", 594>;
+def C594_Y : R600Reg <"C594.Y", 594>;
+def C594_Z : R600Reg <"C594.Z", 594>;
+def C594_W : R600Reg <"C594.W", 594>;
+def C595_X : R600Reg <"C595.X", 595>;
+def C595_Y : R600Reg <"C595.Y", 595>;
+def C595_Z : R600Reg <"C595.Z", 595>;
+def C595_W : R600Reg <"C595.W", 595>;
+def C596_X : R600Reg <"C596.X", 596>;
+def C596_Y : R600Reg <"C596.Y", 596>;
+def C596_Z : R600Reg <"C596.Z", 596>;
+def C596_W : R600Reg <"C596.W", 596>;
+def C597_X : R600Reg <"C597.X", 597>;
+def C597_Y : R600Reg <"C597.Y", 597>;
+def C597_Z : R600Reg <"C597.Z", 597>;
+def C597_W : R600Reg <"C597.W", 597>;
+def C598_X : R600Reg <"C598.X", 598>;
+def C598_Y : R600Reg <"C598.Y", 598>;
+def C598_Z : R600Reg <"C598.Z", 598>;
+def C598_W : R600Reg <"C598.W", 598>;
+def C599_X : R600Reg <"C599.X", 599>;
+def C599_Y : R600Reg <"C599.Y", 599>;
+def C599_Z : R600Reg <"C599.Z", 599>;
+def C599_W : R600Reg <"C599.W", 599>;
+def C600_X : R600Reg <"C600.X", 600>;
+def C600_Y : R600Reg <"C600.Y", 600>;
+def C600_Z : R600Reg <"C600.Z", 600>;
+def C600_W : R600Reg <"C600.W", 600>;
+def C601_X : R600Reg <"C601.X", 601>;
+def C601_Y : R600Reg <"C601.Y", 601>;
+def C601_Z : R600Reg <"C601.Z", 601>;
+def C601_W : R600Reg <"C601.W", 601>;
+def C602_X : R600Reg <"C602.X", 602>;
+def C602_Y : R600Reg <"C602.Y", 602>;
+def C602_Z : R600Reg <"C602.Z", 602>;
+def C602_W : R600Reg <"C602.W", 602>;
+def C603_X : R600Reg <"C603.X", 603>;
+def C603_Y : R600Reg <"C603.Y", 603>;
+def C603_Z : R600Reg <"C603.Z", 603>;
+def C603_W : R600Reg <"C603.W", 603>;
+def C604_X : R600Reg <"C604.X", 604>;
+def C604_Y : R600Reg <"C604.Y", 604>;
+def C604_Z : R600Reg <"C604.Z", 604>;
+def C604_W : R600Reg <"C604.W", 604>;
+def C605_X : R600Reg <"C605.X", 605>;
+def C605_Y : R600Reg <"C605.Y", 605>;
+def C605_Z : R600Reg <"C605.Z", 605>;
+def C605_W : R600Reg <"C605.W", 605>;
+def C606_X : R600Reg <"C606.X", 606>;
+def C606_Y : R600Reg <"C606.Y", 606>;
+def C606_Z : R600Reg <"C606.Z", 606>;
+def C606_W : R600Reg <"C606.W", 606>;
+def C607_X : R600Reg <"C607.X", 607>;
+def C607_Y : R600Reg <"C607.Y", 607>;
+def C607_Z : R600Reg <"C607.Z", 607>;
+def C607_W : R600Reg <"C607.W", 607>;
+def C608_X : R600Reg <"C608.X", 608>;
+def C608_Y : R600Reg <"C608.Y", 608>;
+def C608_Z : R600Reg <"C608.Z", 608>;
+def C608_W : R600Reg <"C608.W", 608>;
+def C609_X : R600Reg <"C609.X", 609>;
+def C609_Y : R600Reg <"C609.Y", 609>;
+def C609_Z : R600Reg <"C609.Z", 609>;
+def C609_W : R600Reg <"C609.W", 609>;
+def C610_X : R600Reg <"C610.X", 610>;
+def C610_Y : R600Reg <"C610.Y", 610>;
+def C610_Z : R600Reg <"C610.Z", 610>;
+def C610_W : R600Reg <"C610.W", 610>;
+def C611_X : R600Reg <"C611.X", 611>;
+def C611_Y : R600Reg <"C611.Y", 611>;
+def C611_Z : R600Reg <"C611.Z", 611>;
+def C611_W : R600Reg <"C611.W", 611>;
+def C612_X : R600Reg <"C612.X", 612>;
+def C612_Y : R600Reg <"C612.Y", 612>;
+def C612_Z : R600Reg <"C612.Z", 612>;
+def C612_W : R600Reg <"C612.W", 612>;
+def C613_X : R600Reg <"C613.X", 613>;
+def C613_Y : R600Reg <"C613.Y", 613>;
+def C613_Z : R600Reg <"C613.Z", 613>;
+def C613_W : R600Reg <"C613.W", 613>;
+def C614_X : R600Reg <"C614.X", 614>;
+def C614_Y : R600Reg <"C614.Y", 614>;
+def C614_Z : R600Reg <"C614.Z", 614>;
+def C614_W : R600Reg <"C614.W", 614>;
+def C615_X : R600Reg <"C615.X", 615>;
+def C615_Y : R600Reg <"C615.Y", 615>;
+def C615_Z : R600Reg <"C615.Z", 615>;
+def C615_W : R600Reg <"C615.W", 615>;
+def C616_X : R600Reg <"C616.X", 616>;
+def C616_Y : R600Reg <"C616.Y", 616>;
+def C616_Z : R600Reg <"C616.Z", 616>;
+def C616_W : R600Reg <"C616.W", 616>;
+def C617_X : R600Reg <"C617.X", 617>;
+def C617_Y : R600Reg <"C617.Y", 617>;
+def C617_Z : R600Reg <"C617.Z", 617>;
+def C617_W : R600Reg <"C617.W", 617>;
+def C618_X : R600Reg <"C618.X", 618>;
+def C618_Y : R600Reg <"C618.Y", 618>;
+def C618_Z : R600Reg <"C618.Z", 618>;
+def C618_W : R600Reg <"C618.W", 618>;
+def C619_X : R600Reg <"C619.X", 619>;
+def C619_Y : R600Reg <"C619.Y", 619>;
+def C619_Z : R600Reg <"C619.Z", 619>;
+def C619_W : R600Reg <"C619.W", 619>;
+def C620_X : R600Reg <"C620.X", 620>;
+def C620_Y : R600Reg <"C620.Y", 620>;
+def C620_Z : R600Reg <"C620.Z", 620>;
+def C620_W : R600Reg <"C620.W", 620>;
+def C621_X : R600Reg <"C621.X", 621>;
+def C621_Y : R600Reg <"C621.Y", 621>;
+def C621_Z : R600Reg <"C621.Z", 621>;
+def C621_W : R600Reg <"C621.W", 621>;
+def C622_X : R600Reg <"C622.X", 622>;
+def C622_Y : R600Reg <"C622.Y", 622>;
+def C622_Z : R600Reg <"C622.Z", 622>;
+def C622_W : R600Reg <"C622.W", 622>;
+def C623_X : R600Reg <"C623.X", 623>;
+def C623_Y : R600Reg <"C623.Y", 623>;
+def C623_Z : R600Reg <"C623.Z", 623>;
+def C623_W : R600Reg <"C623.W", 623>;
+def C624_X : R600Reg <"C624.X", 624>;
+def C624_Y : R600Reg <"C624.Y", 624>;
+def C624_Z : R600Reg <"C624.Z", 624>;
+def C624_W : R600Reg <"C624.W", 624>;
+def C625_X : R600Reg <"C625.X", 625>;
+def C625_Y : R600Reg <"C625.Y", 625>;
+def C625_Z : R600Reg <"C625.Z", 625>;
+def C625_W : R600Reg <"C625.W", 625>;
+def C626_X : R600Reg <"C626.X", 626>;
+def C626_Y : R600Reg <"C626.Y", 626>;
+def C626_Z : R600Reg <"C626.Z", 626>;
+def C626_W : R600Reg <"C626.W", 626>;
+def C627_X : R600Reg <"C627.X", 627>;
+def C627_Y : R600Reg <"C627.Y", 627>;
+def C627_Z : R600Reg <"C627.Z", 627>;
+def C627_W : R600Reg <"C627.W", 627>;
+def C628_X : R600Reg <"C628.X", 628>;
+def C628_Y : R600Reg <"C628.Y", 628>;
+def C628_Z : R600Reg <"C628.Z", 628>;
+def C628_W : R600Reg <"C628.W", 628>;
+def C629_X : R600Reg <"C629.X", 629>;
+def C629_Y : R600Reg <"C629.Y", 629>;
+def C629_Z : R600Reg <"C629.Z", 629>;
+def C629_W : R600Reg <"C629.W", 629>;
+def C630_X : R600Reg <"C630.X", 630>;
+def C630_Y : R600Reg <"C630.Y", 630>;
+def C630_Z : R600Reg <"C630.Z", 630>;
+def C630_W : R600Reg <"C630.W", 630>;
+def C631_X : R600Reg <"C631.X", 631>;
+def C631_Y : R600Reg <"C631.Y", 631>;
+def C631_Z : R600Reg <"C631.Z", 631>;
+def C631_W : R600Reg <"C631.W", 631>;
+def C632_X : R600Reg <"C632.X", 632>;
+def C632_Y : R600Reg <"C632.Y", 632>;
+def C632_Z : R600Reg <"C632.Z", 632>;
+def C632_W : R600Reg <"C632.W", 632>;
+def C633_X : R600Reg <"C633.X", 633>;
+def C633_Y : R600Reg <"C633.Y", 633>;
+def C633_Z : R600Reg <"C633.Z", 633>;
+def C633_W : R600Reg <"C633.W", 633>;
+def C634_X : R600Reg <"C634.X", 634>;
+def C634_Y : R600Reg <"C634.Y", 634>;
+def C634_Z : R600Reg <"C634.Z", 634>;
+def C634_W : R600Reg <"C634.W", 634>;
+def C635_X : R600Reg <"C635.X", 635>;
+def C635_Y : R600Reg <"C635.Y", 635>;
+def C635_Z : R600Reg <"C635.Z", 635>;
+def C635_W : R600Reg <"C635.W", 635>;
+def C636_X : R600Reg <"C636.X", 636>;
+def C636_Y : R600Reg <"C636.Y", 636>;
+def C636_Z : R600Reg <"C636.Z", 636>;
+def C636_W : R600Reg <"C636.W", 636>;
+def C637_X : R600Reg <"C637.X", 637>;
+def C637_Y : R600Reg <"C637.Y", 637>;
+def C637_Z : R600Reg <"C637.Z", 637>;
+def C637_W : R600Reg <"C637.W", 637>;
+def C638_X : R600Reg <"C638.X", 638>;
+def C638_Y : R600Reg <"C638.Y", 638>;
+def C638_Z : R600Reg <"C638.Z", 638>;
+def C638_W : R600Reg <"C638.W", 638>;
+def C639_X : R600Reg <"C639.X", 639>;
+def C639_Y : R600Reg <"C639.Y", 639>;
+def C639_Z : R600Reg <"C639.Z", 639>;
+def C639_W : R600Reg <"C639.W", 639>;
+def C640_X : R600Reg <"C640.X", 640>;
+def C640_Y : R600Reg <"C640.Y", 640>;
+def C640_Z : R600Reg <"C640.Z", 640>;
+def C640_W : R600Reg <"C640.W", 640>;
+def C641_X : R600Reg <"C641.X", 641>;
+def C641_Y : R600Reg <"C641.Y", 641>;
+def C641_Z : R600Reg <"C641.Z", 641>;
+def C641_W : R600Reg <"C641.W", 641>;
+def C642_X : R600Reg <"C642.X", 642>;
+def C642_Y : R600Reg <"C642.Y", 642>;
+def C642_Z : R600Reg <"C642.Z", 642>;
+def C642_W : R600Reg <"C642.W", 642>;
+def C643_X : R600Reg <"C643.X", 643>;
+def C643_Y : R600Reg <"C643.Y", 643>;
+def C643_Z : R600Reg <"C643.Z", 643>;
+def C643_W : R600Reg <"C643.W", 643>;
+def C644_X : R600Reg <"C644.X", 644>;
+def C644_Y : R600Reg <"C644.Y", 644>;
+def C644_Z : R600Reg <"C644.Z", 644>;
+def C644_W : R600Reg <"C644.W", 644>;
+def C645_X : R600Reg <"C645.X", 645>;
+def C645_Y : R600Reg <"C645.Y", 645>;
+def C645_Z : R600Reg <"C645.Z", 645>;
+def C645_W : R600Reg <"C645.W", 645>;
+def C646_X : R600Reg <"C646.X", 646>;
+def C646_Y : R600Reg <"C646.Y", 646>;
+def C646_Z : R600Reg <"C646.Z", 646>;
+def C646_W : R600Reg <"C646.W", 646>;
+def C647_X : R600Reg <"C647.X", 647>;
+def C647_Y : R600Reg <"C647.Y", 647>;
+def C647_Z : R600Reg <"C647.Z", 647>;
+def C647_W : R600Reg <"C647.W", 647>;
+def C648_X : R600Reg <"C648.X", 648>;
+def C648_Y : R600Reg <"C648.Y", 648>;
+def C648_Z : R600Reg <"C648.Z", 648>;
+def C648_W : R600Reg <"C648.W", 648>;
+def C649_X : R600Reg <"C649.X", 649>;
+def C649_Y : R600Reg <"C649.Y", 649>;
+def C649_Z : R600Reg <"C649.Z", 649>;
+def C649_W : R600Reg <"C649.W", 649>;
+def C650_X : R600Reg <"C650.X", 650>;
+def C650_Y : R600Reg <"C650.Y", 650>;
+def C650_Z : R600Reg <"C650.Z", 650>;
+def C650_W : R600Reg <"C650.W", 650>;
+def C651_X : R600Reg <"C651.X", 651>;
+def C651_Y : R600Reg <"C651.Y", 651>;
+def C651_Z : R600Reg <"C651.Z", 651>;
+def C651_W : R600Reg <"C651.W", 651>;
+def C652_X : R600Reg <"C652.X", 652>;
+def C652_Y : R600Reg <"C652.Y", 652>;
+def C652_Z : R600Reg <"C652.Z", 652>;
+def C652_W : R600Reg <"C652.W", 652>;
+def C653_X : R600Reg <"C653.X", 653>;
+def C653_Y : R600Reg <"C653.Y", 653>;
+def C653_Z : R600Reg <"C653.Z", 653>;
+def C653_W : R600Reg <"C653.W", 653>;
+def C654_X : R600Reg <"C654.X", 654>;
+def C654_Y : R600Reg <"C654.Y", 654>;
+def C654_Z : R600Reg <"C654.Z", 654>;
+def C654_W : R600Reg <"C654.W", 654>;
+def C655_X : R600Reg <"C655.X", 655>;
+def C655_Y : R600Reg <"C655.Y", 655>;
+def C655_Z : R600Reg <"C655.Z", 655>;
+def C655_W : R600Reg <"C655.W", 655>;
+def C656_X : R600Reg <"C656.X", 656>;
+def C656_Y : R600Reg <"C656.Y", 656>;
+def C656_Z : R600Reg <"C656.Z", 656>;
+def C656_W : R600Reg <"C656.W", 656>;
+def C657_X : R600Reg <"C657.X", 657>;
+def C657_Y : R600Reg <"C657.Y", 657>;
+def C657_Z : R600Reg <"C657.Z", 657>;
+def C657_W : R600Reg <"C657.W", 657>;
+def C658_X : R600Reg <"C658.X", 658>;
+def C658_Y : R600Reg <"C658.Y", 658>;
+def C658_Z : R600Reg <"C658.Z", 658>;
+def C658_W : R600Reg <"C658.W", 658>;
+def C659_X : R600Reg <"C659.X", 659>;
+def C659_Y : R600Reg <"C659.Y", 659>;
+def C659_Z : R600Reg <"C659.Z", 659>;
+def C659_W : R600Reg <"C659.W", 659>;
+def C660_X : R600Reg <"C660.X", 660>;
+def C660_Y : R600Reg <"C660.Y", 660>;
+def C660_Z : R600Reg <"C660.Z", 660>;
+def C660_W : R600Reg <"C660.W", 660>;
+def C661_X : R600Reg <"C661.X", 661>;
+def C661_Y : R600Reg <"C661.Y", 661>;
+def C661_Z : R600Reg <"C661.Z", 661>;
+def C661_W : R600Reg <"C661.W", 661>;
+def C662_X : R600Reg <"C662.X", 662>;
+def C662_Y : R600Reg <"C662.Y", 662>;
+def C662_Z : R600Reg <"C662.Z", 662>;
+def C662_W : R600Reg <"C662.W", 662>;
+def C663_X : R600Reg <"C663.X", 663>;
+def C663_Y : R600Reg <"C663.Y", 663>;
+def C663_Z : R600Reg <"C663.Z", 663>;
+def C663_W : R600Reg <"C663.W", 663>;
+def C664_X : R600Reg <"C664.X", 664>;
+def C664_Y : R600Reg <"C664.Y", 664>;
+def C664_Z : R600Reg <"C664.Z", 664>;
+def C664_W : R600Reg <"C664.W", 664>;
+def C665_X : R600Reg <"C665.X", 665>;
+def C665_Y : R600Reg <"C665.Y", 665>;
+def C665_Z : R600Reg <"C665.Z", 665>;
+def C665_W : R600Reg <"C665.W", 665>;
+def C666_X : R600Reg <"C666.X", 666>;
+def C666_Y : R600Reg <"C666.Y", 666>;
+def C666_Z : R600Reg <"C666.Z", 666>;
+def C666_W : R600Reg <"C666.W", 666>;
+def C667_X : R600Reg <"C667.X", 667>;
+def C667_Y : R600Reg <"C667.Y", 667>;
+def C667_Z : R600Reg <"C667.Z", 667>;
+def C667_W : R600Reg <"C667.W", 667>;
+def C668_X : R600Reg <"C668.X", 668>;
+def C668_Y : R600Reg <"C668.Y", 668>;
+def C668_Z : R600Reg <"C668.Z", 668>;
+def C668_W : R600Reg <"C668.W", 668>;
+def C669_X : R600Reg <"C669.X", 669>;
+def C669_Y : R600Reg <"C669.Y", 669>;
+def C669_Z : R600Reg <"C669.Z", 669>;
+def C669_W : R600Reg <"C669.W", 669>;
+def C670_X : R600Reg <"C670.X", 670>;
+def C670_Y : R600Reg <"C670.Y", 670>;
+def C670_Z : R600Reg <"C670.Z", 670>;
+def C670_W : R600Reg <"C670.W", 670>;
+def C671_X : R600Reg <"C671.X", 671>;
+def C671_Y : R600Reg <"C671.Y", 671>;
+def C671_Z : R600Reg <"C671.Z", 671>;
+def C671_W : R600Reg <"C671.W", 671>;
+def C672_X : R600Reg <"C672.X", 672>;
+def C672_Y : R600Reg <"C672.Y", 672>;
+def C672_Z : R600Reg <"C672.Z", 672>;
+def C672_W : R600Reg <"C672.W", 672>;
+def C673_X : R600Reg <"C673.X", 673>;
+def C673_Y : R600Reg <"C673.Y", 673>;
+def C673_Z : R600Reg <"C673.Z", 673>;
+def C673_W : R600Reg <"C673.W", 673>;
+def C674_X : R600Reg <"C674.X", 674>;
+def C674_Y : R600Reg <"C674.Y", 674>;
+def C674_Z : R600Reg <"C674.Z", 674>;
+def C674_W : R600Reg <"C674.W", 674>;
+def C675_X : R600Reg <"C675.X", 675>;
+def C675_Y : R600Reg <"C675.Y", 675>;
+def C675_Z : R600Reg <"C675.Z", 675>;
+def C675_W : R600Reg <"C675.W", 675>;
+def C676_X : R600Reg <"C676.X", 676>;
+def C676_Y : R600Reg <"C676.Y", 676>;
+def C676_Z : R600Reg <"C676.Z", 676>;
+def C676_W : R600Reg <"C676.W", 676>;
+def C677_X : R600Reg <"C677.X", 677>;
+def C677_Y : R600Reg <"C677.Y", 677>;
+def C677_Z : R600Reg <"C677.Z", 677>;
+def C677_W : R600Reg <"C677.W", 677>;
+def C678_X : R600Reg <"C678.X", 678>;
+def C678_Y : R600Reg <"C678.Y", 678>;
+def C678_Z : R600Reg <"C678.Z", 678>;
+def C678_W : R600Reg <"C678.W", 678>;
+def C679_X : R600Reg <"C679.X", 679>;
+def C679_Y : R600Reg <"C679.Y", 679>;
+def C679_Z : R600Reg <"C679.Z", 679>;
+def C679_W : R600Reg <"C679.W", 679>;
+def C680_X : R600Reg <"C680.X", 680>;
+def C680_Y : R600Reg <"C680.Y", 680>;
+def C680_Z : R600Reg <"C680.Z", 680>;
+def C680_W : R600Reg <"C680.W", 680>;
+def C681_X : R600Reg <"C681.X", 681>;
+def C681_Y : R600Reg <"C681.Y", 681>;
+def C681_Z : R600Reg <"C681.Z", 681>;
+def C681_W : R600Reg <"C681.W", 681>;
+def C682_X : R600Reg <"C682.X", 682>;
+def C682_Y : R600Reg <"C682.Y", 682>;
+def C682_Z : R600Reg <"C682.Z", 682>;
+def C682_W : R600Reg <"C682.W", 682>;
+def C683_X : R600Reg <"C683.X", 683>;
+def C683_Y : R600Reg <"C683.Y", 683>;
+def C683_Z : R600Reg <"C683.Z", 683>;
+def C683_W : R600Reg <"C683.W", 683>;
+def C684_X : R600Reg <"C684.X", 684>;
+def C684_Y : R600Reg <"C684.Y", 684>;
+def C684_Z : R600Reg <"C684.Z", 684>;
+def C684_W : R600Reg <"C684.W", 684>;
+def C685_X : R600Reg <"C685.X", 685>;
+def C685_Y : R600Reg <"C685.Y", 685>;
+def C685_Z : R600Reg <"C685.Z", 685>;
+def C685_W : R600Reg <"C685.W", 685>;
+def C686_X : R600Reg <"C686.X", 686>;
+def C686_Y : R600Reg <"C686.Y", 686>;
+def C686_Z : R600Reg <"C686.Z", 686>;
+def C686_W : R600Reg <"C686.W", 686>;
+def C687_X : R600Reg <"C687.X", 687>;
+def C687_Y : R600Reg <"C687.Y", 687>;
+def C687_Z : R600Reg <"C687.Z", 687>;
+def C687_W : R600Reg <"C687.W", 687>;
+def C688_X : R600Reg <"C688.X", 688>;
+def C688_Y : R600Reg <"C688.Y", 688>;
+def C688_Z : R600Reg <"C688.Z", 688>;
+def C688_W : R600Reg <"C688.W", 688>;
+def C689_X : R600Reg <"C689.X", 689>;
+def C689_Y : R600Reg <"C689.Y", 689>;
+def C689_Z : R600Reg <"C689.Z", 689>;
+def C689_W : R600Reg <"C689.W", 689>;
+def C690_X : R600Reg <"C690.X", 690>;
+def C690_Y : R600Reg <"C690.Y", 690>;
+def C690_Z : R600Reg <"C690.Z", 690>;
+def C690_W : R600Reg <"C690.W", 690>;
+def C691_X : R600Reg <"C691.X", 691>;
+def C691_Y : R600Reg <"C691.Y", 691>;
+def C691_Z : R600Reg <"C691.Z", 691>;
+def C691_W : R600Reg <"C691.W", 691>;
+def C692_X : R600Reg <"C692.X", 692>;
+def C692_Y : R600Reg <"C692.Y", 692>;
+def C692_Z : R600Reg <"C692.Z", 692>;
+def C692_W : R600Reg <"C692.W", 692>;
+def C693_X : R600Reg <"C693.X", 693>;
+def C693_Y : R600Reg <"C693.Y", 693>;
+def C693_Z : R600Reg <"C693.Z", 693>;
+def C693_W : R600Reg <"C693.W", 693>;
+def C694_X : R600Reg <"C694.X", 694>;
+def C694_Y : R600Reg <"C694.Y", 694>;
+def C694_Z : R600Reg <"C694.Z", 694>;
+def C694_W : R600Reg <"C694.W", 694>;
+def C695_X : R600Reg <"C695.X", 695>;
+def C695_Y : R600Reg <"C695.Y", 695>;
+def C695_Z : R600Reg <"C695.Z", 695>;
+def C695_W : R600Reg <"C695.W", 695>;
+def C696_X : R600Reg <"C696.X", 696>;
+def C696_Y : R600Reg <"C696.Y", 696>;
+def C696_Z : R600Reg <"C696.Z", 696>;
+def C696_W : R600Reg <"C696.W", 696>;
+def C697_X : R600Reg <"C697.X", 697>;
+def C697_Y : R600Reg <"C697.Y", 697>;
+def C697_Z : R600Reg <"C697.Z", 697>;
+def C697_W : R600Reg <"C697.W", 697>;
+def C698_X : R600Reg <"C698.X", 698>;
+def C698_Y : R600Reg <"C698.Y", 698>;
+def C698_Z : R600Reg <"C698.Z", 698>;
+def C698_W : R600Reg <"C698.W", 698>;
+def C699_X : R600Reg <"C699.X", 699>;
+def C699_Y : R600Reg <"C699.Y", 699>;
+def C699_Z : R600Reg <"C699.Z", 699>;
+def C699_W : R600Reg <"C699.W", 699>;
+def C700_X : R600Reg <"C700.X", 700>;
+def C700_Y : R600Reg <"C700.Y", 700>;
+def C700_Z : R600Reg <"C700.Z", 700>;
+def C700_W : R600Reg <"C700.W", 700>;
+def C701_X : R600Reg <"C701.X", 701>;
+def C701_Y : R600Reg <"C701.Y", 701>;
+def C701_Z : R600Reg <"C701.Z", 701>;
+def C701_W : R600Reg <"C701.W", 701>;
+def C702_X : R600Reg <"C702.X", 702>;
+def C702_Y : R600Reg <"C702.Y", 702>;
+def C702_Z : R600Reg <"C702.Z", 702>;
+def C702_W : R600Reg <"C702.W", 702>;
+def C703_X : R600Reg <"C703.X", 703>;
+def C703_Y : R600Reg <"C703.Y", 703>;
+def C703_Z : R600Reg <"C703.Z", 703>;
+def C703_W : R600Reg <"C703.W", 703>;
+def C704_X : R600Reg <"C704.X", 704>;
+def C704_Y : R600Reg <"C704.Y", 704>;
+def C704_Z : R600Reg <"C704.Z", 704>;
+def C704_W : R600Reg <"C704.W", 704>;
+def C705_X : R600Reg <"C705.X", 705>;
+def C705_Y : R600Reg <"C705.Y", 705>;
+def C705_Z : R600Reg <"C705.Z", 705>;
+def C705_W : R600Reg <"C705.W", 705>;
+def C706_X : R600Reg <"C706.X", 706>;
+def C706_Y : R600Reg <"C706.Y", 706>;
+def C706_Z : R600Reg <"C706.Z", 706>;
+def C706_W : R600Reg <"C706.W", 706>;
+def C707_X : R600Reg <"C707.X", 707>;
+def C707_Y : R600Reg <"C707.Y", 707>;
+def C707_Z : R600Reg <"C707.Z", 707>;
+def C707_W : R600Reg <"C707.W", 707>;
+def C708_X : R600Reg <"C708.X", 708>;
+def C708_Y : R600Reg <"C708.Y", 708>;
+def C708_Z : R600Reg <"C708.Z", 708>;
+def C708_W : R600Reg <"C708.W", 708>;
+def C709_X : R600Reg <"C709.X", 709>;
+def C709_Y : R600Reg <"C709.Y", 709>;
+def C709_Z : R600Reg <"C709.Z", 709>;
+def C709_W : R600Reg <"C709.W", 709>;
+def C710_X : R600Reg <"C710.X", 710>;
+def C710_Y : R600Reg <"C710.Y", 710>;
+def C710_Z : R600Reg <"C710.Z", 710>;
+def C710_W : R600Reg <"C710.W", 710>;
+def C711_X : R600Reg <"C711.X", 711>;
+def C711_Y : R600Reg <"C711.Y", 711>;
+def C711_Z : R600Reg <"C711.Z", 711>;
+def C711_W : R600Reg <"C711.W", 711>;
+def C712_X : R600Reg <"C712.X", 712>;
+def C712_Y : R600Reg <"C712.Y", 712>;
+def C712_Z : R600Reg <"C712.Z", 712>;
+def C712_W : R600Reg <"C712.W", 712>;
+def C713_X : R600Reg <"C713.X", 713>;
+def C713_Y : R600Reg <"C713.Y", 713>;
+def C713_Z : R600Reg <"C713.Z", 713>;
+def C713_W : R600Reg <"C713.W", 713>;
+def C714_X : R600Reg <"C714.X", 714>;
+def C714_Y : R600Reg <"C714.Y", 714>;
+def C714_Z : R600Reg <"C714.Z", 714>;
+def C714_W : R600Reg <"C714.W", 714>;
+def C715_X : R600Reg <"C715.X", 715>;
+def C715_Y : R600Reg <"C715.Y", 715>;
+def C715_Z : R600Reg <"C715.Z", 715>;
+def C715_W : R600Reg <"C715.W", 715>;
+def C716_X : R600Reg <"C716.X", 716>;
+def C716_Y : R600Reg <"C716.Y", 716>;
+def C716_Z : R600Reg <"C716.Z", 716>;
+def C716_W : R600Reg <"C716.W", 716>;
+def C717_X : R600Reg <"C717.X", 717>;
+def C717_Y : R600Reg <"C717.Y", 717>;
+def C717_Z : R600Reg <"C717.Z", 717>;
+def C717_W : R600Reg <"C717.W", 717>;
+def C718_X : R600Reg <"C718.X", 718>;
+def C718_Y : R600Reg <"C718.Y", 718>;
+def C718_Z : R600Reg <"C718.Z", 718>;
+def C718_W : R600Reg <"C718.W", 718>;
+def C719_X : R600Reg <"C719.X", 719>;
+def C719_Y : R600Reg <"C719.Y", 719>;
+def C719_Z : R600Reg <"C719.Z", 719>;
+def C719_W : R600Reg <"C719.W", 719>;
+def C720_X : R600Reg <"C720.X", 720>;
+def C720_Y : R600Reg <"C720.Y", 720>;
+def C720_Z : R600Reg <"C720.Z", 720>;
+def C720_W : R600Reg <"C720.W", 720>;
+def C721_X : R600Reg <"C721.X", 721>;
+def C721_Y : R600Reg <"C721.Y", 721>;
+def C721_Z : R600Reg <"C721.Z", 721>;
+def C721_W : R600Reg <"C721.W", 721>;
+def C722_X : R600Reg <"C722.X", 722>;
+def C722_Y : R600Reg <"C722.Y", 722>;
+def C722_Z : R600Reg <"C722.Z", 722>;
+def C722_W : R600Reg <"C722.W", 722>;
+def C723_X : R600Reg <"C723.X", 723>;
+def C723_Y : R600Reg <"C723.Y", 723>;
+def C723_Z : R600Reg <"C723.Z", 723>;
+def C723_W : R600Reg <"C723.W", 723>;
+def C724_X : R600Reg <"C724.X", 724>;
+def C724_Y : R600Reg <"C724.Y", 724>;
+def C724_Z : R600Reg <"C724.Z", 724>;
+def C724_W : R600Reg <"C724.W", 724>;
+def C725_X : R600Reg <"C725.X", 725>;
+def C725_Y : R600Reg <"C725.Y", 725>;
+def C725_Z : R600Reg <"C725.Z", 725>;
+def C725_W : R600Reg <"C725.W", 725>;
+def C726_X : R600Reg <"C726.X", 726>;
+def C726_Y : R600Reg <"C726.Y", 726>;
+def C726_Z : R600Reg <"C726.Z", 726>;
+def C726_W : R600Reg <"C726.W", 726>;
+def C727_X : R600Reg <"C727.X", 727>;
+def C727_Y : R600Reg <"C727.Y", 727>;
+def C727_Z : R600Reg <"C727.Z", 727>;
+def C727_W : R600Reg <"C727.W", 727>;
+def C728_X : R600Reg <"C728.X", 728>;
+def C728_Y : R600Reg <"C728.Y", 728>;
+def C728_Z : R600Reg <"C728.Z", 728>;
+def C728_W : R600Reg <"C728.W", 728>;
+def C729_X : R600Reg <"C729.X", 729>;
+def C729_Y : R600Reg <"C729.Y", 729>;
+def C729_Z : R600Reg <"C729.Z", 729>;
+def C729_W : R600Reg <"C729.W", 729>;
+def C730_X : R600Reg <"C730.X", 730>;
+def C730_Y : R600Reg <"C730.Y", 730>;
+def C730_Z : R600Reg <"C730.Z", 730>;
+def C730_W : R600Reg <"C730.W", 730>;
+def C731_X : R600Reg <"C731.X", 731>;
+def C731_Y : R600Reg <"C731.Y", 731>;
+def C731_Z : R600Reg <"C731.Z", 731>;
+def C731_W : R600Reg <"C731.W", 731>;
+def C732_X : R600Reg <"C732.X", 732>;
+def C732_Y : R600Reg <"C732.Y", 732>;
+def C732_Z : R600Reg <"C732.Z", 732>;
+def C732_W : R600Reg <"C732.W", 732>;
+def C733_X : R600Reg <"C733.X", 733>;
+def C733_Y : R600Reg <"C733.Y", 733>;
+def C733_Z : R600Reg <"C733.Z", 733>;
+def C733_W : R600Reg <"C733.W", 733>;
+def C734_X : R600Reg <"C734.X", 734>;
+def C734_Y : R600Reg <"C734.Y", 734>;
+def C734_Z : R600Reg <"C734.Z", 734>;
+def C734_W : R600Reg <"C734.W", 734>;
+def C735_X : R600Reg <"C735.X", 735>;
+def C735_Y : R600Reg <"C735.Y", 735>;
+def C735_Z : R600Reg <"C735.Z", 735>;
+def C735_W : R600Reg <"C735.W", 735>;
+def C736_X : R600Reg <"C736.X", 736>;
+def C736_Y : R600Reg <"C736.Y", 736>;
+def C736_Z : R600Reg <"C736.Z", 736>;
+def C736_W : R600Reg <"C736.W", 736>;
+def C737_X : R600Reg <"C737.X", 737>;
+def C737_Y : R600Reg <"C737.Y", 737>;
+def C737_Z : R600Reg <"C737.Z", 737>;
+def C737_W : R600Reg <"C737.W", 737>;
+def C738_X : R600Reg <"C738.X", 738>;
+def C738_Y : R600Reg <"C738.Y", 738>;
+def C738_Z : R600Reg <"C738.Z", 738>;
+def C738_W : R600Reg <"C738.W", 738>;
+def C739_X : R600Reg <"C739.X", 739>;
+def C739_Y : R600Reg <"C739.Y", 739>;
+def C739_Z : R600Reg <"C739.Z", 739>;
+def C739_W : R600Reg <"C739.W", 739>;
+def C740_X : R600Reg <"C740.X", 740>;
+def C740_Y : R600Reg <"C740.Y", 740>;
+def C740_Z : R600Reg <"C740.Z", 740>;
+def C740_W : R600Reg <"C740.W", 740>;
+def C741_X : R600Reg <"C741.X", 741>;
+def C741_Y : R600Reg <"C741.Y", 741>;
+def C741_Z : R600Reg <"C741.Z", 741>;
+def C741_W : R600Reg <"C741.W", 741>;
+def C742_X : R600Reg <"C742.X", 742>;
+def C742_Y : R600Reg <"C742.Y", 742>;
+def C742_Z : R600Reg <"C742.Z", 742>;
+def C742_W : R600Reg <"C742.W", 742>;
+def C743_X : R600Reg <"C743.X", 743>;
+def C743_Y : R600Reg <"C743.Y", 743>;
+def C743_Z : R600Reg <"C743.Z", 743>;
+def C743_W : R600Reg <"C743.W", 743>;
+def C744_X : R600Reg <"C744.X", 744>;
+def C744_Y : R600Reg <"C744.Y", 744>;
+def C744_Z : R600Reg <"C744.Z", 744>;
+def C744_W : R600Reg <"C744.W", 744>;
+def C745_X : R600Reg <"C745.X", 745>;
+def C745_Y : R600Reg <"C745.Y", 745>;
+def C745_Z : R600Reg <"C745.Z", 745>;
+def C745_W : R600Reg <"C745.W", 745>;
+def C746_X : R600Reg <"C746.X", 746>;
+def C746_Y : R600Reg <"C746.Y", 746>;
+def C746_Z : R600Reg <"C746.Z", 746>;
+def C746_W : R600Reg <"C746.W", 746>;
+def C747_X : R600Reg <"C747.X", 747>;
+def C747_Y : R600Reg <"C747.Y", 747>;
+def C747_Z : R600Reg <"C747.Z", 747>;
+def C747_W : R600Reg <"C747.W", 747>;
+def C748_X : R600Reg <"C748.X", 748>;
+def C748_Y : R600Reg <"C748.Y", 748>;
+def C748_Z : R600Reg <"C748.Z", 748>;
+def C748_W : R600Reg <"C748.W", 748>;
+def C749_X : R600Reg <"C749.X", 749>;
+def C749_Y : R600Reg <"C749.Y", 749>;
+def C749_Z : R600Reg <"C749.Z", 749>;
+def C749_W : R600Reg <"C749.W", 749>;
+def C750_X : R600Reg <"C750.X", 750>;
+def C750_Y : R600Reg <"C750.Y", 750>;
+def C750_Z : R600Reg <"C750.Z", 750>;
+def C750_W : R600Reg <"C750.W", 750>;
+def C751_X : R600Reg <"C751.X", 751>;
+def C751_Y : R600Reg <"C751.Y", 751>;
+def C751_Z : R600Reg <"C751.Z", 751>;
+def C751_W : R600Reg <"C751.W", 751>;
+def C752_X : R600Reg <"C752.X", 752>;
+def C752_Y : R600Reg <"C752.Y", 752>;
+def C752_Z : R600Reg <"C752.Z", 752>;
+def C752_W : R600Reg <"C752.W", 752>;
+def C753_X : R600Reg <"C753.X", 753>;
+def C753_Y : R600Reg <"C753.Y", 753>;
+def C753_Z : R600Reg <"C753.Z", 753>;
+def C753_W : R600Reg <"C753.W", 753>;
+def C754_X : R600Reg <"C754.X", 754>;
+def C754_Y : R600Reg <"C754.Y", 754>;
+def C754_Z : R600Reg <"C754.Z", 754>;
+def C754_W : R600Reg <"C754.W", 754>;
+def C755_X : R600Reg <"C755.X", 755>;
+def C755_Y : R600Reg <"C755.Y", 755>;
+def C755_Z : R600Reg <"C755.Z", 755>;
+def C755_W : R600Reg <"C755.W", 755>;
+def C756_X : R600Reg <"C756.X", 756>;
+def C756_Y : R600Reg <"C756.Y", 756>;
+def C756_Z : R600Reg <"C756.Z", 756>;
+def C756_W : R600Reg <"C756.W", 756>;
+def C757_X : R600Reg <"C757.X", 757>;
+def C757_Y : R600Reg <"C757.Y", 757>;
+def C757_Z : R600Reg <"C757.Z", 757>;
+def C757_W : R600Reg <"C757.W", 757>;
+def C758_X : R600Reg <"C758.X", 758>;
+def C758_Y : R600Reg <"C758.Y", 758>;
+def C758_Z : R600Reg <"C758.Z", 758>;
+def C758_W : R600Reg <"C758.W", 758>;
+def C759_X : R600Reg <"C759.X", 759>;
+def C759_Y : R600Reg <"C759.Y", 759>;
+def C759_Z : R600Reg <"C759.Z", 759>;
+def C759_W : R600Reg <"C759.W", 759>;
+def C760_X : R600Reg <"C760.X", 760>;
+def C760_Y : R600Reg <"C760.Y", 760>;
+def C760_Z : R600Reg <"C760.Z", 760>;
+def C760_W : R600Reg <"C760.W", 760>;
+def C761_X : R600Reg <"C761.X", 761>;
+def C761_Y : R600Reg <"C761.Y", 761>;
+def C761_Z : R600Reg <"C761.Z", 761>;
+def C761_W : R600Reg <"C761.W", 761>;
+def C762_X : R600Reg <"C762.X", 762>;
+def C762_Y : R600Reg <"C762.Y", 762>;
+def C762_Z : R600Reg <"C762.Z", 762>;
+def C762_W : R600Reg <"C762.W", 762>;
+def C763_X : R600Reg <"C763.X", 763>;
+def C763_Y : R600Reg <"C763.Y", 763>;
+def C763_Z : R600Reg <"C763.Z", 763>;
+def C763_W : R600Reg <"C763.W", 763>;
+def C764_X : R600Reg <"C764.X", 764>;
+def C764_Y : R600Reg <"C764.Y", 764>;
+def C764_Z : R600Reg <"C764.Z", 764>;
+def C764_W : R600Reg <"C764.W", 764>;
+def C765_X : R600Reg <"C765.X", 765>;
+def C765_Y : R600Reg <"C765.Y", 765>;
+def C765_Z : R600Reg <"C765.Z", 765>;
+def C765_W : R600Reg <"C765.W", 765>;
+def C766_X : R600Reg <"C766.X", 766>;
+def C766_Y : R600Reg <"C766.Y", 766>;
+def C766_Z : R600Reg <"C766.Z", 766>;
+def C766_W : R600Reg <"C766.W", 766>;
+def C767_X : R600Reg <"C767.X", 767>;
+def C767_Y : R600Reg <"C767.Y", 767>;
+def C767_Z : R600Reg <"C767.Z", 767>;
+def C767_W : R600Reg <"C767.W", 767>;
+def C768_X : R600Reg <"C768.X", 768>;
+def C768_Y : R600Reg <"C768.Y", 768>;
+def C768_Z : R600Reg <"C768.Z", 768>;
+def C768_W : R600Reg <"C768.W", 768>;
+def C769_X : R600Reg <"C769.X", 769>;
+def C769_Y : R600Reg <"C769.Y", 769>;
+def C769_Z : R600Reg <"C769.Z", 769>;
+def C769_W : R600Reg <"C769.W", 769>;
+def C770_X : R600Reg <"C770.X", 770>;
+def C770_Y : R600Reg <"C770.Y", 770>;
+def C770_Z : R600Reg <"C770.Z", 770>;
+def C770_W : R600Reg <"C770.W", 770>;
+def C771_X : R600Reg <"C771.X", 771>;
+def C771_Y : R600Reg <"C771.Y", 771>;
+def C771_Z : R600Reg <"C771.Z", 771>;
+def C771_W : R600Reg <"C771.W", 771>;
+def C772_X : R600Reg <"C772.X", 772>;
+def C772_Y : R600Reg <"C772.Y", 772>;
+def C772_Z : R600Reg <"C772.Z", 772>;
+def C772_W : R600Reg <"C772.W", 772>;
+def C773_X : R600Reg <"C773.X", 773>;
+def C773_Y : R600Reg <"C773.Y", 773>;
+def C773_Z : R600Reg <"C773.Z", 773>;
+def C773_W : R600Reg <"C773.W", 773>;
+def C774_X : R600Reg <"C774.X", 774>;
+def C774_Y : R600Reg <"C774.Y", 774>;
+def C774_Z : R600Reg <"C774.Z", 774>;
+def C774_W : R600Reg <"C774.W", 774>;
+def C775_X : R600Reg <"C775.X", 775>;
+def C775_Y : R600Reg <"C775.Y", 775>;
+def C775_Z : R600Reg <"C775.Z", 775>;
+def C775_W : R600Reg <"C775.W", 775>;
+def C776_X : R600Reg <"C776.X", 776>;
+def C776_Y : R600Reg <"C776.Y", 776>;
+def C776_Z : R600Reg <"C776.Z", 776>;
+def C776_W : R600Reg <"C776.W", 776>;
+def C777_X : R600Reg <"C777.X", 777>;
+def C777_Y : R600Reg <"C777.Y", 777>;
+def C777_Z : R600Reg <"C777.Z", 777>;
+def C777_W : R600Reg <"C777.W", 777>;
+def C778_X : R600Reg <"C778.X", 778>;
+def C778_Y : R600Reg <"C778.Y", 778>;
+def C778_Z : R600Reg <"C778.Z", 778>;
+def C778_W : R600Reg <"C778.W", 778>;
+def C779_X : R600Reg <"C779.X", 779>;
+def C779_Y : R600Reg <"C779.Y", 779>;
+def C779_Z : R600Reg <"C779.Z", 779>;
+def C779_W : R600Reg <"C779.W", 779>;
+def C780_X : R600Reg <"C780.X", 780>;
+def C780_Y : R600Reg <"C780.Y", 780>;
+def C780_Z : R600Reg <"C780.Z", 780>;
+def C780_W : R600Reg <"C780.W", 780>;
+def C781_X : R600Reg <"C781.X", 781>;
+def C781_Y : R600Reg <"C781.Y", 781>;
+def C781_Z : R600Reg <"C781.Z", 781>;
+def C781_W : R600Reg <"C781.W", 781>;
+def C782_X : R600Reg <"C782.X", 782>;
+def C782_Y : R600Reg <"C782.Y", 782>;
+def C782_Z : R600Reg <"C782.Z", 782>;
+def C782_W : R600Reg <"C782.W", 782>;
+def C783_X : R600Reg <"C783.X", 783>;
+def C783_Y : R600Reg <"C783.Y", 783>;
+def C783_Z : R600Reg <"C783.Z", 783>;
+def C783_W : R600Reg <"C783.W", 783>;
+def C784_X : R600Reg <"C784.X", 784>;
+def C784_Y : R600Reg <"C784.Y", 784>;
+def C784_Z : R600Reg <"C784.Z", 784>;
+def C784_W : R600Reg <"C784.W", 784>;
+def C785_X : R600Reg <"C785.X", 785>;
+def C785_Y : R600Reg <"C785.Y", 785>;
+def C785_Z : R600Reg <"C785.Z", 785>;
+def C785_W : R600Reg <"C785.W", 785>;
+def C786_X : R600Reg <"C786.X", 786>;
+def C786_Y : R600Reg <"C786.Y", 786>;
+def C786_Z : R600Reg <"C786.Z", 786>;
+def C786_W : R600Reg <"C786.W", 786>;
+def C787_X : R600Reg <"C787.X", 787>;
+def C787_Y : R600Reg <"C787.Y", 787>;
+def C787_Z : R600Reg <"C787.Z", 787>;
+def C787_W : R600Reg <"C787.W", 787>;
+def C788_X : R600Reg <"C788.X", 788>;
+def C788_Y : R600Reg <"C788.Y", 788>;
+def C788_Z : R600Reg <"C788.Z", 788>;
+def C788_W : R600Reg <"C788.W", 788>;
+def C789_X : R600Reg <"C789.X", 789>;
+def C789_Y : R600Reg <"C789.Y", 789>;
+def C789_Z : R600Reg <"C789.Z", 789>;
+def C789_W : R600Reg <"C789.W", 789>;
+def C790_X : R600Reg <"C790.X", 790>;
+def C790_Y : R600Reg <"C790.Y", 790>;
+def C790_Z : R600Reg <"C790.Z", 790>;
+def C790_W : R600Reg <"C790.W", 790>;
+def C791_X : R600Reg <"C791.X", 791>;
+def C791_Y : R600Reg <"C791.Y", 791>;
+def C791_Z : R600Reg <"C791.Z", 791>;
+def C791_W : R600Reg <"C791.W", 791>;
+def C792_X : R600Reg <"C792.X", 792>;
+def C792_Y : R600Reg <"C792.Y", 792>;
+def C792_Z : R600Reg <"C792.Z", 792>;
+def C792_W : R600Reg <"C792.W", 792>;
+def C793_X : R600Reg <"C793.X", 793>;
+def C793_Y : R600Reg <"C793.Y", 793>;
+def C793_Z : R600Reg <"C793.Z", 793>;
+def C793_W : R600Reg <"C793.W", 793>;
+def C794_X : R600Reg <"C794.X", 794>;
+def C794_Y : R600Reg <"C794.Y", 794>;
+def C794_Z : R600Reg <"C794.Z", 794>;
+def C794_W : R600Reg <"C794.W", 794>;
+def C795_X : R600Reg <"C795.X", 795>;
+def C795_Y : R600Reg <"C795.Y", 795>;
+def C795_Z : R600Reg <"C795.Z", 795>;
+def C795_W : R600Reg <"C795.W", 795>;
+def C796_X : R600Reg <"C796.X", 796>;
+def C796_Y : R600Reg <"C796.Y", 796>;
+def C796_Z : R600Reg <"C796.Z", 796>;
+def C796_W : R600Reg <"C796.W", 796>;
+def C797_X : R600Reg <"C797.X", 797>;
+def C797_Y : R600Reg <"C797.Y", 797>;
+def C797_Z : R600Reg <"C797.Z", 797>;
+def C797_W : R600Reg <"C797.W", 797>;
+def C798_X : R600Reg <"C798.X", 798>;
+def C798_Y : R600Reg <"C798.Y", 798>;
+def C798_Z : R600Reg <"C798.Z", 798>;
+def C798_W : R600Reg <"C798.W", 798>;
+def C799_X : R600Reg <"C799.X", 799>;
+def C799_Y : R600Reg <"C799.Y", 799>;
+def C799_Z : R600Reg <"C799.Z", 799>;
+def C799_W : R600Reg <"C799.W", 799>;
+def C800_X : R600Reg <"C800.X", 800>;
+def C800_Y : R600Reg <"C800.Y", 800>;
+def C800_Z : R600Reg <"C800.Z", 800>;
+def C800_W : R600Reg <"C800.W", 800>;
+def C801_X : R600Reg <"C801.X", 801>;
+def C801_Y : R600Reg <"C801.Y", 801>;
+def C801_Z : R600Reg <"C801.Z", 801>;
+def C801_W : R600Reg <"C801.W", 801>;
+def C802_X : R600Reg <"C802.X", 802>;
+def C802_Y : R600Reg <"C802.Y", 802>;
+def C802_Z : R600Reg <"C802.Z", 802>;
+def C802_W : R600Reg <"C802.W", 802>;
+def C803_X : R600Reg <"C803.X", 803>;
+def C803_Y : R600Reg <"C803.Y", 803>;
+def C803_Z : R600Reg <"C803.Z", 803>;
+def C803_W : R600Reg <"C803.W", 803>;
+def C804_X : R600Reg <"C804.X", 804>;
+def C804_Y : R600Reg <"C804.Y", 804>;
+def C804_Z : R600Reg <"C804.Z", 804>;
+def C804_W : R600Reg <"C804.W", 804>;
+def C805_X : R600Reg <"C805.X", 805>;
+def C805_Y : R600Reg <"C805.Y", 805>;
+def C805_Z : R600Reg <"C805.Z", 805>;
+def C805_W : R600Reg <"C805.W", 805>;
+def C806_X : R600Reg <"C806.X", 806>;
+def C806_Y : R600Reg <"C806.Y", 806>;
+def C806_Z : R600Reg <"C806.Z", 806>;
+def C806_W : R600Reg <"C806.W", 806>;
+def C807_X : R600Reg <"C807.X", 807>;
+def C807_Y : R600Reg <"C807.Y", 807>;
+def C807_Z : R600Reg <"C807.Z", 807>;
+def C807_W : R600Reg <"C807.W", 807>;
+def C808_X : R600Reg <"C808.X", 808>;
+def C808_Y : R600Reg <"C808.Y", 808>;
+def C808_Z : R600Reg <"C808.Z", 808>;
+def C808_W : R600Reg <"C808.W", 808>;
+def C809_X : R600Reg <"C809.X", 809>;
+def C809_Y : R600Reg <"C809.Y", 809>;
+def C809_Z : R600Reg <"C809.Z", 809>;
+def C809_W : R600Reg <"C809.W", 809>;
+def C810_X : R600Reg <"C810.X", 810>;
+def C810_Y : R600Reg <"C810.Y", 810>;
+def C810_Z : R600Reg <"C810.Z", 810>;
+def C810_W : R600Reg <"C810.W", 810>;
+def C811_X : R600Reg <"C811.X", 811>;
+def C811_Y : R600Reg <"C811.Y", 811>;
+def C811_Z : R600Reg <"C811.Z", 811>;
+def C811_W : R600Reg <"C811.W", 811>;
+def C812_X : R600Reg <"C812.X", 812>;
+def C812_Y : R600Reg <"C812.Y", 812>;
+def C812_Z : R600Reg <"C812.Z", 812>;
+def C812_W : R600Reg <"C812.W", 812>;
+def C813_X : R600Reg <"C813.X", 813>;
+def C813_Y : R600Reg <"C813.Y", 813>;
+def C813_Z : R600Reg <"C813.Z", 813>;
+def C813_W : R600Reg <"C813.W", 813>;
+def C814_X : R600Reg <"C814.X", 814>;
+def C814_Y : R600Reg <"C814.Y", 814>;
+def C814_Z : R600Reg <"C814.Z", 814>;
+def C814_W : R600Reg <"C814.W", 814>;
+def C815_X : R600Reg <"C815.X", 815>;
+def C815_Y : R600Reg <"C815.Y", 815>;
+def C815_Z : R600Reg <"C815.Z", 815>;
+def C815_W : R600Reg <"C815.W", 815>;
+def C816_X : R600Reg <"C816.X", 816>;
+def C816_Y : R600Reg <"C816.Y", 816>;
+def C816_Z : R600Reg <"C816.Z", 816>;
+def C816_W : R600Reg <"C816.W", 816>;
+def C817_X : R600Reg <"C817.X", 817>;
+def C817_Y : R600Reg <"C817.Y", 817>;
+def C817_Z : R600Reg <"C817.Z", 817>;
+def C817_W : R600Reg <"C817.W", 817>;
+def C818_X : R600Reg <"C818.X", 818>;
+def C818_Y : R600Reg <"C818.Y", 818>;
+def C818_Z : R600Reg <"C818.Z", 818>;
+def C818_W : R600Reg <"C818.W", 818>;
+def C819_X : R600Reg <"C819.X", 819>;
+def C819_Y : R600Reg <"C819.Y", 819>;
+def C819_Z : R600Reg <"C819.Z", 819>;
+def C819_W : R600Reg <"C819.W", 819>;
+def C820_X : R600Reg <"C820.X", 820>;
+def C820_Y : R600Reg <"C820.Y", 820>;
+def C820_Z : R600Reg <"C820.Z", 820>;
+def C820_W : R600Reg <"C820.W", 820>;
+def C821_X : R600Reg <"C821.X", 821>;
+def C821_Y : R600Reg <"C821.Y", 821>;
+def C821_Z : R600Reg <"C821.Z", 821>;
+def C821_W : R600Reg <"C821.W", 821>;
+def C822_X : R600Reg <"C822.X", 822>;
+def C822_Y : R600Reg <"C822.Y", 822>;
+def C822_Z : R600Reg <"C822.Z", 822>;
+def C822_W : R600Reg <"C822.W", 822>;
+def C823_X : R600Reg <"C823.X", 823>;
+def C823_Y : R600Reg <"C823.Y", 823>;
+def C823_Z : R600Reg <"C823.Z", 823>;
+def C823_W : R600Reg <"C823.W", 823>;
+def C824_X : R600Reg <"C824.X", 824>;
+def C824_Y : R600Reg <"C824.Y", 824>;
+def C824_Z : R600Reg <"C824.Z", 824>;
+def C824_W : R600Reg <"C824.W", 824>;
+def C825_X : R600Reg <"C825.X", 825>;
+def C825_Y : R600Reg <"C825.Y", 825>;
+def C825_Z : R600Reg <"C825.Z", 825>;
+def C825_W : R600Reg <"C825.W", 825>;
+def C826_X : R600Reg <"C826.X", 826>;
+def C826_Y : R600Reg <"C826.Y", 826>;
+def C826_Z : R600Reg <"C826.Z", 826>;
+def C826_W : R600Reg <"C826.W", 826>;
+def C827_X : R600Reg <"C827.X", 827>;
+def C827_Y : R600Reg <"C827.Y", 827>;
+def C827_Z : R600Reg <"C827.Z", 827>;
+def C827_W : R600Reg <"C827.W", 827>;
+def C828_X : R600Reg <"C828.X", 828>;
+def C828_Y : R600Reg <"C828.Y", 828>;
+def C828_Z : R600Reg <"C828.Z", 828>;
+def C828_W : R600Reg <"C828.W", 828>;
+def C829_X : R600Reg <"C829.X", 829>;
+def C829_Y : R600Reg <"C829.Y", 829>;
+def C829_Z : R600Reg <"C829.Z", 829>;
+def C829_W : R600Reg <"C829.W", 829>;
+def C830_X : R600Reg <"C830.X", 830>;
+def C830_Y : R600Reg <"C830.Y", 830>;
+def C830_Z : R600Reg <"C830.Z", 830>;
+def C830_W : R600Reg <"C830.W", 830>;
+def C831_X : R600Reg <"C831.X", 831>;
+def C831_Y : R600Reg <"C831.Y", 831>;
+def C831_Z : R600Reg <"C831.Z", 831>;
+def C831_W : R600Reg <"C831.W", 831>;
+def C832_X : R600Reg <"C832.X", 832>;
+def C832_Y : R600Reg <"C832.Y", 832>;
+def C832_Z : R600Reg <"C832.Z", 832>;
+def C832_W : R600Reg <"C832.W", 832>;
+def C833_X : R600Reg <"C833.X", 833>;
+def C833_Y : R600Reg <"C833.Y", 833>;
+def C833_Z : R600Reg <"C833.Z", 833>;
+def C833_W : R600Reg <"C833.W", 833>;
+def C834_X : R600Reg <"C834.X", 834>;
+def C834_Y : R600Reg <"C834.Y", 834>;
+def C834_Z : R600Reg <"C834.Z", 834>;
+def C834_W : R600Reg <"C834.W", 834>;
+def C835_X : R600Reg <"C835.X", 835>;
+def C835_Y : R600Reg <"C835.Y", 835>;
+def C835_Z : R600Reg <"C835.Z", 835>;
+def C835_W : R600Reg <"C835.W", 835>;
+def C836_X : R600Reg <"C836.X", 836>;
+def C836_Y : R600Reg <"C836.Y", 836>;
+def C836_Z : R600Reg <"C836.Z", 836>;
+def C836_W : R600Reg <"C836.W", 836>;
+def C837_X : R600Reg <"C837.X", 837>;
+def C837_Y : R600Reg <"C837.Y", 837>;
+def C837_Z : R600Reg <"C837.Z", 837>;
+def C837_W : R600Reg <"C837.W", 837>;
+def C838_X : R600Reg <"C838.X", 838>;
+def C838_Y : R600Reg <"C838.Y", 838>;
+def C838_Z : R600Reg <"C838.Z", 838>;
+def C838_W : R600Reg <"C838.W", 838>;
+def C839_X : R600Reg <"C839.X", 839>;
+def C839_Y : R600Reg <"C839.Y", 839>;
+def C839_Z : R600Reg <"C839.Z", 839>;
+def C839_W : R600Reg <"C839.W", 839>;
+def C840_X : R600Reg <"C840.X", 840>;
+def C840_Y : R600Reg <"C840.Y", 840>;
+def C840_Z : R600Reg <"C840.Z", 840>;
+def C840_W : R600Reg <"C840.W", 840>;
+def C841_X : R600Reg <"C841.X", 841>;
+def C841_Y : R600Reg <"C841.Y", 841>;
+def C841_Z : R600Reg <"C841.Z", 841>;
+def C841_W : R600Reg <"C841.W", 841>;
+def C842_X : R600Reg <"C842.X", 842>;
+def C842_Y : R600Reg <"C842.Y", 842>;
+def C842_Z : R600Reg <"C842.Z", 842>;
+def C842_W : R600Reg <"C842.W", 842>;
+def C843_X : R600Reg <"C843.X", 843>;
+def C843_Y : R600Reg <"C843.Y", 843>;
+def C843_Z : R600Reg <"C843.Z", 843>;
+def C843_W : R600Reg <"C843.W", 843>;
+def C844_X : R600Reg <"C844.X", 844>;
+def C844_Y : R600Reg <"C844.Y", 844>;
+def C844_Z : R600Reg <"C844.Z", 844>;
+def C844_W : R600Reg <"C844.W", 844>;
+def C845_X : R600Reg <"C845.X", 845>;
+def C845_Y : R600Reg <"C845.Y", 845>;
+def C845_Z : R600Reg <"C845.Z", 845>;
+def C845_W : R600Reg <"C845.W", 845>;
+def C846_X : R600Reg <"C846.X", 846>;
+def C846_Y : R600Reg <"C846.Y", 846>;
+def C846_Z : R600Reg <"C846.Z", 846>;
+def C846_W : R600Reg <"C846.W", 846>;
+def C847_X : R600Reg <"C847.X", 847>;
+def C847_Y : R600Reg <"C847.Y", 847>;
+def C847_Z : R600Reg <"C847.Z", 847>;
+def C847_W : R600Reg <"C847.W", 847>;
+def C848_X : R600Reg <"C848.X", 848>;
+def C848_Y : R600Reg <"C848.Y", 848>;
+def C848_Z : R600Reg <"C848.Z", 848>;
+def C848_W : R600Reg <"C848.W", 848>;
+def C849_X : R600Reg <"C849.X", 849>;
+def C849_Y : R600Reg <"C849.Y", 849>;
+def C849_Z : R600Reg <"C849.Z", 849>;
+def C849_W : R600Reg <"C849.W", 849>;
+def C850_X : R600Reg <"C850.X", 850>;
+def C850_Y : R600Reg <"C850.Y", 850>;
+def C850_Z : R600Reg <"C850.Z", 850>;
+def C850_W : R600Reg <"C850.W", 850>;
+def C851_X : R600Reg <"C851.X", 851>;
+def C851_Y : R600Reg <"C851.Y", 851>;
+def C851_Z : R600Reg <"C851.Z", 851>;
+def C851_W : R600Reg <"C851.W", 851>;
+def C852_X : R600Reg <"C852.X", 852>;
+def C852_Y : R600Reg <"C852.Y", 852>;
+def C852_Z : R600Reg <"C852.Z", 852>;
+def C852_W : R600Reg <"C852.W", 852>;
+def C853_X : R600Reg <"C853.X", 853>;
+def C853_Y : R600Reg <"C853.Y", 853>;
+def C853_Z : R600Reg <"C853.Z", 853>;
+def C853_W : R600Reg <"C853.W", 853>;
+def C854_X : R600Reg <"C854.X", 854>;
+def C854_Y : R600Reg <"C854.Y", 854>;
+def C854_Z : R600Reg <"C854.Z", 854>;
+def C854_W : R600Reg <"C854.W", 854>;
+def C855_X : R600Reg <"C855.X", 855>;
+def C855_Y : R600Reg <"C855.Y", 855>;
+def C855_Z : R600Reg <"C855.Z", 855>;
+def C855_W : R600Reg <"C855.W", 855>;
+def C856_X : R600Reg <"C856.X", 856>;
+def C856_Y : R600Reg <"C856.Y", 856>;
+def C856_Z : R600Reg <"C856.Z", 856>;
+def C856_W : R600Reg <"C856.W", 856>;
+def C857_X : R600Reg <"C857.X", 857>;
+def C857_Y : R600Reg <"C857.Y", 857>;
+def C857_Z : R600Reg <"C857.Z", 857>;
+def C857_W : R600Reg <"C857.W", 857>;
+def C858_X : R600Reg <"C858.X", 858>;
+def C858_Y : R600Reg <"C858.Y", 858>;
+def C858_Z : R600Reg <"C858.Z", 858>;
+def C858_W : R600Reg <"C858.W", 858>;
+def C859_X : R600Reg <"C859.X", 859>;
+def C859_Y : R600Reg <"C859.Y", 859>;
+def C859_Z : R600Reg <"C859.Z", 859>;
+def C859_W : R600Reg <"C859.W", 859>;
+def C860_X : R600Reg <"C860.X", 860>;
+def C860_Y : R600Reg <"C860.Y", 860>;
+def C860_Z : R600Reg <"C860.Z", 860>;
+def C860_W : R600Reg <"C860.W", 860>;
+def C861_X : R600Reg <"C861.X", 861>;
+def C861_Y : R600Reg <"C861.Y", 861>;
+def C861_Z : R600Reg <"C861.Z", 861>;
+def C861_W : R600Reg <"C861.W", 861>;
+def C862_X : R600Reg <"C862.X", 862>;
+def C862_Y : R600Reg <"C862.Y", 862>;
+def C862_Z : R600Reg <"C862.Z", 862>;
+def C862_W : R600Reg <"C862.W", 862>;
+def C863_X : R600Reg <"C863.X", 863>;
+def C863_Y : R600Reg <"C863.Y", 863>;
+def C863_Z : R600Reg <"C863.Z", 863>;
+def C863_W : R600Reg <"C863.W", 863>;
+def C864_X : R600Reg <"C864.X", 864>;
+def C864_Y : R600Reg <"C864.Y", 864>;
+def C864_Z : R600Reg <"C864.Z", 864>;
+def C864_W : R600Reg <"C864.W", 864>;
+def C865_X : R600Reg <"C865.X", 865>;
+def C865_Y : R600Reg <"C865.Y", 865>;
+def C865_Z : R600Reg <"C865.Z", 865>;
+def C865_W : R600Reg <"C865.W", 865>;
+def C866_X : R600Reg <"C866.X", 866>;
+def C866_Y : R600Reg <"C866.Y", 866>;
+def C866_Z : R600Reg <"C866.Z", 866>;
+def C866_W : R600Reg <"C866.W", 866>;
+def C867_X : R600Reg <"C867.X", 867>;
+def C867_Y : R600Reg <"C867.Y", 867>;
+def C867_Z : R600Reg <"C867.Z", 867>;
+def C867_W : R600Reg <"C867.W", 867>;
+def C868_X : R600Reg <"C868.X", 868>;
+def C868_Y : R600Reg <"C868.Y", 868>;
+def C868_Z : R600Reg <"C868.Z", 868>;
+def C868_W : R600Reg <"C868.W", 868>;
+def C869_X : R600Reg <"C869.X", 869>;
+def C869_Y : R600Reg <"C869.Y", 869>;
+def C869_Z : R600Reg <"C869.Z", 869>;
+def C869_W : R600Reg <"C869.W", 869>;
+def C870_X : R600Reg <"C870.X", 870>;
+def C870_Y : R600Reg <"C870.Y", 870>;
+def C870_Z : R600Reg <"C870.Z", 870>;
+def C870_W : R600Reg <"C870.W", 870>;
+def C871_X : R600Reg <"C871.X", 871>;
+def C871_Y : R600Reg <"C871.Y", 871>;
+def C871_Z : R600Reg <"C871.Z", 871>;
+def C871_W : R600Reg <"C871.W", 871>;
+def C872_X : R600Reg <"C872.X", 872>;
+def C872_Y : R600Reg <"C872.Y", 872>;
+def C872_Z : R600Reg <"C872.Z", 872>;
+def C872_W : R600Reg <"C872.W", 872>;
+def C873_X : R600Reg <"C873.X", 873>;
+def C873_Y : R600Reg <"C873.Y", 873>;
+def C873_Z : R600Reg <"C873.Z", 873>;
+def C873_W : R600Reg <"C873.W", 873>;
+def C874_X : R600Reg <"C874.X", 874>;
+def C874_Y : R600Reg <"C874.Y", 874>;
+def C874_Z : R600Reg <"C874.Z", 874>;
+def C874_W : R600Reg <"C874.W", 874>;
+def C875_X : R600Reg <"C875.X", 875>;
+def C875_Y : R600Reg <"C875.Y", 875>;
+def C875_Z : R600Reg <"C875.Z", 875>;
+def C875_W : R600Reg <"C875.W", 875>;
+def C876_X : R600Reg <"C876.X", 876>;
+def C876_Y : R600Reg <"C876.Y", 876>;
+def C876_Z : R600Reg <"C876.Z", 876>;
+def C876_W : R600Reg <"C876.W", 876>;
+def C877_X : R600Reg <"C877.X", 877>;
+def C877_Y : R600Reg <"C877.Y", 877>;
+def C877_Z : R600Reg <"C877.Z", 877>;
+def C877_W : R600Reg <"C877.W", 877>;
+def C878_X : R600Reg <"C878.X", 878>;
+def C878_Y : R600Reg <"C878.Y", 878>;
+def C878_Z : R600Reg <"C878.Z", 878>;
+def C878_W : R600Reg <"C878.W", 878>;
+def C879_X : R600Reg <"C879.X", 879>;
+def C879_Y : R600Reg <"C879.Y", 879>;
+def C879_Z : R600Reg <"C879.Z", 879>;
+def C879_W : R600Reg <"C879.W", 879>;
+def C880_X : R600Reg <"C880.X", 880>;
+def C880_Y : R600Reg <"C880.Y", 880>;
+def C880_Z : R600Reg <"C880.Z", 880>;
+def C880_W : R600Reg <"C880.W", 880>;
+def C881_X : R600Reg <"C881.X", 881>;
+def C881_Y : R600Reg <"C881.Y", 881>;
+def C881_Z : R600Reg <"C881.Z", 881>;
+def C881_W : R600Reg <"C881.W", 881>;
+def C882_X : R600Reg <"C882.X", 882>;
+def C882_Y : R600Reg <"C882.Y", 882>;
+def C882_Z : R600Reg <"C882.Z", 882>;
+def C882_W : R600Reg <"C882.W", 882>;
+def C883_X : R600Reg <"C883.X", 883>;
+def C883_Y : R600Reg <"C883.Y", 883>;
+def C883_Z : R600Reg <"C883.Z", 883>;
+def C883_W : R600Reg <"C883.W", 883>;
+def C884_X : R600Reg <"C884.X", 884>;
+def C884_Y : R600Reg <"C884.Y", 884>;
+def C884_Z : R600Reg <"C884.Z", 884>;
+def C884_W : R600Reg <"C884.W", 884>;
+def C885_X : R600Reg <"C885.X", 885>;
+def C885_Y : R600Reg <"C885.Y", 885>;
+def C885_Z : R600Reg <"C885.Z", 885>;
+def C885_W : R600Reg <"C885.W", 885>;
+def C886_X : R600Reg <"C886.X", 886>;
+def C886_Y : R600Reg <"C886.Y", 886>;
+def C886_Z : R600Reg <"C886.Z", 886>;
+def C886_W : R600Reg <"C886.W", 886>;
+def C887_X : R600Reg <"C887.X", 887>;
+def C887_Y : R600Reg <"C887.Y", 887>;
+def C887_Z : R600Reg <"C887.Z", 887>;
+def C887_W : R600Reg <"C887.W", 887>;
+def C888_X : R600Reg <"C888.X", 888>;
+def C888_Y : R600Reg <"C888.Y", 888>;
+def C888_Z : R600Reg <"C888.Z", 888>;
+def C888_W : R600Reg <"C888.W", 888>;
+def C889_X : R600Reg <"C889.X", 889>;
+def C889_Y : R600Reg <"C889.Y", 889>;
+def C889_Z : R600Reg <"C889.Z", 889>;
+def C889_W : R600Reg <"C889.W", 889>;
+def C890_X : R600Reg <"C890.X", 890>;
+def C890_Y : R600Reg <"C890.Y", 890>;
+def C890_Z : R600Reg <"C890.Z", 890>;
+def C890_W : R600Reg <"C890.W", 890>;
+def C891_X : R600Reg <"C891.X", 891>;
+def C891_Y : R600Reg <"C891.Y", 891>;
+def C891_Z : R600Reg <"C891.Z", 891>;
+def C891_W : R600Reg <"C891.W", 891>;
+def C892_X : R600Reg <"C892.X", 892>;
+def C892_Y : R600Reg <"C892.Y", 892>;
+def C892_Z : R600Reg <"C892.Z", 892>;
+def C892_W : R600Reg <"C892.W", 892>;
+def C893_X : R600Reg <"C893.X", 893>;
+def C893_Y : R600Reg <"C893.Y", 893>;
+def C893_Z : R600Reg <"C893.Z", 893>;
+def C893_W : R600Reg <"C893.W", 893>;
+def C894_X : R600Reg <"C894.X", 894>;
+def C894_Y : R600Reg <"C894.Y", 894>;
+def C894_Z : R600Reg <"C894.Z", 894>;
+def C894_W : R600Reg <"C894.W", 894>;
+def C895_X : R600Reg <"C895.X", 895>;
+def C895_Y : R600Reg <"C895.Y", 895>;
+def C895_Z : R600Reg <"C895.Z", 895>;
+def C895_W : R600Reg <"C895.W", 895>;
+def C896_X : R600Reg <"C896.X", 896>;
+def C896_Y : R600Reg <"C896.Y", 896>;
+def C896_Z : R600Reg <"C896.Z", 896>;
+def C896_W : R600Reg <"C896.W", 896>;
+def C897_X : R600Reg <"C897.X", 897>;
+def C897_Y : R600Reg <"C897.Y", 897>;
+def C897_Z : R600Reg <"C897.Z", 897>;
+def C897_W : R600Reg <"C897.W", 897>;
+def C898_X : R600Reg <"C898.X", 898>;
+def C898_Y : R600Reg <"C898.Y", 898>;
+def C898_Z : R600Reg <"C898.Z", 898>;
+def C898_W : R600Reg <"C898.W", 898>;
+def C899_X : R600Reg <"C899.X", 899>;
+def C899_Y : R600Reg <"C899.Y", 899>;
+def C899_Z : R600Reg <"C899.Z", 899>;
+def C899_W : R600Reg <"C899.W", 899>;
+def C900_X : R600Reg <"C900.X", 900>;
+def C900_Y : R600Reg <"C900.Y", 900>;
+def C900_Z : R600Reg <"C900.Z", 900>;
+def C900_W : R600Reg <"C900.W", 900>;
+def C901_X : R600Reg <"C901.X", 901>;
+def C901_Y : R600Reg <"C901.Y", 901>;
+def C901_Z : R600Reg <"C901.Z", 901>;
+def C901_W : R600Reg <"C901.W", 901>;
+def C902_X : R600Reg <"C902.X", 902>;
+def C902_Y : R600Reg <"C902.Y", 902>;
+def C902_Z : R600Reg <"C902.Z", 902>;
+def C902_W : R600Reg <"C902.W", 902>;
+def C903_X : R600Reg <"C903.X", 903>;
+def C903_Y : R600Reg <"C903.Y", 903>;
+def C903_Z : R600Reg <"C903.Z", 903>;
+def C903_W : R600Reg <"C903.W", 903>;
+def C904_X : R600Reg <"C904.X", 904>;
+def C904_Y : R600Reg <"C904.Y", 904>;
+def C904_Z : R600Reg <"C904.Z", 904>;
+def C904_W : R600Reg <"C904.W", 904>;
+def C905_X : R600Reg <"C905.X", 905>;
+def C905_Y : R600Reg <"C905.Y", 905>;
+def C905_Z : R600Reg <"C905.Z", 905>;
+def C905_W : R600Reg <"C905.W", 905>;
+def C906_X : R600Reg <"C906.X", 906>;
+def C906_Y : R600Reg <"C906.Y", 906>;
+def C906_Z : R600Reg <"C906.Z", 906>;
+def C906_W : R600Reg <"C906.W", 906>;
+def C907_X : R600Reg <"C907.X", 907>;
+def C907_Y : R600Reg <"C907.Y", 907>;
+def C907_Z : R600Reg <"C907.Z", 907>;
+def C907_W : R600Reg <"C907.W", 907>;
+def C908_X : R600Reg <"C908.X", 908>;
+def C908_Y : R600Reg <"C908.Y", 908>;
+def C908_Z : R600Reg <"C908.Z", 908>;
+def C908_W : R600Reg <"C908.W", 908>;
+def C909_X : R600Reg <"C909.X", 909>;
+def C909_Y : R600Reg <"C909.Y", 909>;
+def C909_Z : R600Reg <"C909.Z", 909>;
+def C909_W : R600Reg <"C909.W", 909>;
+def C910_X : R600Reg <"C910.X", 910>;
+def C910_Y : R600Reg <"C910.Y", 910>;
+def C910_Z : R600Reg <"C910.Z", 910>;
+def C910_W : R600Reg <"C910.W", 910>;
+def C911_X : R600Reg <"C911.X", 911>;
+def C911_Y : R600Reg <"C911.Y", 911>;
+def C911_Z : R600Reg <"C911.Z", 911>;
+def C911_W : R600Reg <"C911.W", 911>;
+def C912_X : R600Reg <"C912.X", 912>;
+def C912_Y : R600Reg <"C912.Y", 912>;
+def C912_Z : R600Reg <"C912.Z", 912>;
+def C912_W : R600Reg <"C912.W", 912>;
+def C913_X : R600Reg <"C913.X", 913>;
+def C913_Y : R600Reg <"C913.Y", 913>;
+def C913_Z : R600Reg <"C913.Z", 913>;
+def C913_W : R600Reg <"C913.W", 913>;
+def C914_X : R600Reg <"C914.X", 914>;
+def C914_Y : R600Reg <"C914.Y", 914>;
+def C914_Z : R600Reg <"C914.Z", 914>;
+def C914_W : R600Reg <"C914.W", 914>;
+def C915_X : R600Reg <"C915.X", 915>;
+def C915_Y : R600Reg <"C915.Y", 915>;
+def C915_Z : R600Reg <"C915.Z", 915>;
+def C915_W : R600Reg <"C915.W", 915>;
+def C916_X : R600Reg <"C916.X", 916>;
+def C916_Y : R600Reg <"C916.Y", 916>;
+def C916_Z : R600Reg <"C916.Z", 916>;
+def C916_W : R600Reg <"C916.W", 916>;
+def C917_X : R600Reg <"C917.X", 917>;
+def C917_Y : R600Reg <"C917.Y", 917>;
+def C917_Z : R600Reg <"C917.Z", 917>;
+def C917_W : R600Reg <"C917.W", 917>;
+def C918_X : R600Reg <"C918.X", 918>;
+def C918_Y : R600Reg <"C918.Y", 918>;
+def C918_Z : R600Reg <"C918.Z", 918>;
+def C918_W : R600Reg <"C918.W", 918>;
+def C919_X : R600Reg <"C919.X", 919>;
+def C919_Y : R600Reg <"C919.Y", 919>;
+def C919_Z : R600Reg <"C919.Z", 919>;
+def C919_W : R600Reg <"C919.W", 919>;
+def C920_X : R600Reg <"C920.X", 920>;
+def C920_Y : R600Reg <"C920.Y", 920>;
+def C920_Z : R600Reg <"C920.Z", 920>;
+def C920_W : R600Reg <"C920.W", 920>;
+def C921_X : R600Reg <"C921.X", 921>;
+def C921_Y : R600Reg <"C921.Y", 921>;
+def C921_Z : R600Reg <"C921.Z", 921>;
+def C921_W : R600Reg <"C921.W", 921>;
+def C922_X : R600Reg <"C922.X", 922>;
+def C922_Y : R600Reg <"C922.Y", 922>;
+def C922_Z : R600Reg <"C922.Z", 922>;
+def C922_W : R600Reg <"C922.W", 922>;
+def C923_X : R600Reg <"C923.X", 923>;
+def C923_Y : R600Reg <"C923.Y", 923>;
+def C923_Z : R600Reg <"C923.Z", 923>;
+def C923_W : R600Reg <"C923.W", 923>;
+def C924_X : R600Reg <"C924.X", 924>;
+def C924_Y : R600Reg <"C924.Y", 924>;
+def C924_Z : R600Reg <"C924.Z", 924>;
+def C924_W : R600Reg <"C924.W", 924>;
+def C925_X : R600Reg <"C925.X", 925>;
+def C925_Y : R600Reg <"C925.Y", 925>;
+def C925_Z : R600Reg <"C925.Z", 925>;
+def C925_W : R600Reg <"C925.W", 925>;
+def C926_X : R600Reg <"C926.X", 926>;
+def C926_Y : R600Reg <"C926.Y", 926>;
+def C926_Z : R600Reg <"C926.Z", 926>;
+def C926_W : R600Reg <"C926.W", 926>;
+def C927_X : R600Reg <"C927.X", 927>;
+def C927_Y : R600Reg <"C927.Y", 927>;
+def C927_Z : R600Reg <"C927.Z", 927>;
+def C927_W : R600Reg <"C927.W", 927>;
+def C928_X : R600Reg <"C928.X", 928>;
+def C928_Y : R600Reg <"C928.Y", 928>;
+def C928_Z : R600Reg <"C928.Z", 928>;
+def C928_W : R600Reg <"C928.W", 928>;
+def C929_X : R600Reg <"C929.X", 929>;
+def C929_Y : R600Reg <"C929.Y", 929>;
+def C929_Z : R600Reg <"C929.Z", 929>;
+def C929_W : R600Reg <"C929.W", 929>;
+def C930_X : R600Reg <"C930.X", 930>;
+def C930_Y : R600Reg <"C930.Y", 930>;
+def C930_Z : R600Reg <"C930.Z", 930>;
+def C930_W : R600Reg <"C930.W", 930>;
+def C931_X : R600Reg <"C931.X", 931>;
+def C931_Y : R600Reg <"C931.Y", 931>;
+def C931_Z : R600Reg <"C931.Z", 931>;
+def C931_W : R600Reg <"C931.W", 931>;
+def C932_X : R600Reg <"C932.X", 932>;
+def C932_Y : R600Reg <"C932.Y", 932>;
+def C932_Z : R600Reg <"C932.Z", 932>;
+def C932_W : R600Reg <"C932.W", 932>;
+def C933_X : R600Reg <"C933.X", 933>;
+def C933_Y : R600Reg <"C933.Y", 933>;
+def C933_Z : R600Reg <"C933.Z", 933>;
+def C933_W : R600Reg <"C933.W", 933>;
+def C934_X : R600Reg <"C934.X", 934>;
+def C934_Y : R600Reg <"C934.Y", 934>;
+def C934_Z : R600Reg <"C934.Z", 934>;
+def C934_W : R600Reg <"C934.W", 934>;
+def C935_X : R600Reg <"C935.X", 935>;
+def C935_Y : R600Reg <"C935.Y", 935>;
+def C935_Z : R600Reg <"C935.Z", 935>;
+def C935_W : R600Reg <"C935.W", 935>;
+def C936_X : R600Reg <"C936.X", 936>;
+def C936_Y : R600Reg <"C936.Y", 936>;
+def C936_Z : R600Reg <"C936.Z", 936>;
+def C936_W : R600Reg <"C936.W", 936>;
+def C937_X : R600Reg <"C937.X", 937>;
+def C937_Y : R600Reg <"C937.Y", 937>;
+def C937_Z : R600Reg <"C937.Z", 937>;
+def C937_W : R600Reg <"C937.W", 937>;
+def C938_X : R600Reg <"C938.X", 938>;
+def C938_Y : R600Reg <"C938.Y", 938>;
+def C938_Z : R600Reg <"C938.Z", 938>;
+def C938_W : R600Reg <"C938.W", 938>;
+def C939_X : R600Reg <"C939.X", 939>;
+def C939_Y : R600Reg <"C939.Y", 939>;
+def C939_Z : R600Reg <"C939.Z", 939>;
+def C939_W : R600Reg <"C939.W", 939>;
+def C940_X : R600Reg <"C940.X", 940>;
+def C940_Y : R600Reg <"C940.Y", 940>;
+def C940_Z : R600Reg <"C940.Z", 940>;
+def C940_W : R600Reg <"C940.W", 940>;
+def C941_X : R600Reg <"C941.X", 941>;
+def C941_Y : R600Reg <"C941.Y", 941>;
+def C941_Z : R600Reg <"C941.Z", 941>;
+def C941_W : R600Reg <"C941.W", 941>;
+def C942_X : R600Reg <"C942.X", 942>;
+def C942_Y : R600Reg <"C942.Y", 942>;
+def C942_Z : R600Reg <"C942.Z", 942>;
+def C942_W : R600Reg <"C942.W", 942>;
+def C943_X : R600Reg <"C943.X", 943>;
+def C943_Y : R600Reg <"C943.Y", 943>;
+def C943_Z : R600Reg <"C943.Z", 943>;
+def C943_W : R600Reg <"C943.W", 943>;
+def C944_X : R600Reg <"C944.X", 944>;
+def C944_Y : R600Reg <"C944.Y", 944>;
+def C944_Z : R600Reg <"C944.Z", 944>;
+def C944_W : R600Reg <"C944.W", 944>;
+def C945_X : R600Reg <"C945.X", 945>;
+def C945_Y : R600Reg <"C945.Y", 945>;
+def C945_Z : R600Reg <"C945.Z", 945>;
+def C945_W : R600Reg <"C945.W", 945>;
+def C946_X : R600Reg <"C946.X", 946>;
+def C946_Y : R600Reg <"C946.Y", 946>;
+def C946_Z : R600Reg <"C946.Z", 946>;
+def C946_W : R600Reg <"C946.W", 946>;
+def C947_X : R600Reg <"C947.X", 947>;
+def C947_Y : R600Reg <"C947.Y", 947>;
+def C947_Z : R600Reg <"C947.Z", 947>;
+def C947_W : R600Reg <"C947.W", 947>;
+def C948_X : R600Reg <"C948.X", 948>;
+def C948_Y : R600Reg <"C948.Y", 948>;
+def C948_Z : R600Reg <"C948.Z", 948>;
+def C948_W : R600Reg <"C948.W", 948>;
+def C949_X : R600Reg <"C949.X", 949>;
+def C949_Y : R600Reg <"C949.Y", 949>;
+def C949_Z : R600Reg <"C949.Z", 949>;
+def C949_W : R600Reg <"C949.W", 949>;
+def C950_X : R600Reg <"C950.X", 950>;
+def C950_Y : R600Reg <"C950.Y", 950>;
+def C950_Z : R600Reg <"C950.Z", 950>;
+def C950_W : R600Reg <"C950.W", 950>;
+def C951_X : R600Reg <"C951.X", 951>;
+def C951_Y : R600Reg <"C951.Y", 951>;
+def C951_Z : R600Reg <"C951.Z", 951>;
+def C951_W : R600Reg <"C951.W", 951>;
+def C952_X : R600Reg <"C952.X", 952>;
+def C952_Y : R600Reg <"C952.Y", 952>;
+def C952_Z : R600Reg <"C952.Z", 952>;
+def C952_W : R600Reg <"C952.W", 952>;
+def C953_X : R600Reg <"C953.X", 953>;
+def C953_Y : R600Reg <"C953.Y", 953>;
+def C953_Z : R600Reg <"C953.Z", 953>;
+def C953_W : R600Reg <"C953.W", 953>;
+def C954_X : R600Reg <"C954.X", 954>;
+def C954_Y : R600Reg <"C954.Y", 954>;
+def C954_Z : R600Reg <"C954.Z", 954>;
+def C954_W : R600Reg <"C954.W", 954>;
+def C955_X : R600Reg <"C955.X", 955>;
+def C955_Y : R600Reg <"C955.Y", 955>;
+def C955_Z : R600Reg <"C955.Z", 955>;
+def C955_W : R600Reg <"C955.W", 955>;
+def C956_X : R600Reg <"C956.X", 956>;
+def C956_Y : R600Reg <"C956.Y", 956>;
+def C956_Z : R600Reg <"C956.Z", 956>;
+def C956_W : R600Reg <"C956.W", 956>;
+def C957_X : R600Reg <"C957.X", 957>;
+def C957_Y : R600Reg <"C957.Y", 957>;
+def C957_Z : R600Reg <"C957.Z", 957>;
+def C957_W : R600Reg <"C957.W", 957>;
+def C958_X : R600Reg <"C958.X", 958>;
+def C958_Y : R600Reg <"C958.Y", 958>;
+def C958_Z : R600Reg <"C958.Z", 958>;
+def C958_W : R600Reg <"C958.W", 958>;
+def C959_X : R600Reg <"C959.X", 959>;
+def C959_Y : R600Reg <"C959.Y", 959>;
+def C959_Z : R600Reg <"C959.Z", 959>;
+def C959_W : R600Reg <"C959.W", 959>;
+def C960_X : R600Reg <"C960.X", 960>;
+def C960_Y : R600Reg <"C960.Y", 960>;
+def C960_Z : R600Reg <"C960.Z", 960>;
+def C960_W : R600Reg <"C960.W", 960>;
+def C961_X : R600Reg <"C961.X", 961>;
+def C961_Y : R600Reg <"C961.Y", 961>;
+def C961_Z : R600Reg <"C961.Z", 961>;
+def C961_W : R600Reg <"C961.W", 961>;
+def C962_X : R600Reg <"C962.X", 962>;
+def C962_Y : R600Reg <"C962.Y", 962>;
+def C962_Z : R600Reg <"C962.Z", 962>;
+def C962_W : R600Reg <"C962.W", 962>;
+def C963_X : R600Reg <"C963.X", 963>;
+def C963_Y : R600Reg <"C963.Y", 963>;
+def C963_Z : R600Reg <"C963.Z", 963>;
+def C963_W : R600Reg <"C963.W", 963>;
+def C964_X : R600Reg <"C964.X", 964>;
+def C964_Y : R600Reg <"C964.Y", 964>;
+def C964_Z : R600Reg <"C964.Z", 964>;
+def C964_W : R600Reg <"C964.W", 964>;
+def C965_X : R600Reg <"C965.X", 965>;
+def C965_Y : R600Reg <"C965.Y", 965>;
+def C965_Z : R600Reg <"C965.Z", 965>;
+def C965_W : R600Reg <"C965.W", 965>;
+def C966_X : R600Reg <"C966.X", 966>;
+def C966_Y : R600Reg <"C966.Y", 966>;
+def C966_Z : R600Reg <"C966.Z", 966>;
+def C966_W : R600Reg <"C966.W", 966>;
+def C967_X : R600Reg <"C967.X", 967>;
+def C967_Y : R600Reg <"C967.Y", 967>;
+def C967_Z : R600Reg <"C967.Z", 967>;
+def C967_W : R600Reg <"C967.W", 967>;
+def C968_X : R600Reg <"C968.X", 968>;
+def C968_Y : R600Reg <"C968.Y", 968>;
+def C968_Z : R600Reg <"C968.Z", 968>;
+def C968_W : R600Reg <"C968.W", 968>;
+def C969_X : R600Reg <"C969.X", 969>;
+def C969_Y : R600Reg <"C969.Y", 969>;
+def C969_Z : R600Reg <"C969.Z", 969>;
+def C969_W : R600Reg <"C969.W", 969>;
+def C970_X : R600Reg <"C970.X", 970>;
+def C970_Y : R600Reg <"C970.Y", 970>;
+def C970_Z : R600Reg <"C970.Z", 970>;
+def C970_W : R600Reg <"C970.W", 970>;
+def C971_X : R600Reg <"C971.X", 971>;
+def C971_Y : R600Reg <"C971.Y", 971>;
+def C971_Z : R600Reg <"C971.Z", 971>;
+def C971_W : R600Reg <"C971.W", 971>;
+def C972_X : R600Reg <"C972.X", 972>;
+def C972_Y : R600Reg <"C972.Y", 972>;
+def C972_Z : R600Reg <"C972.Z", 972>;
+def C972_W : R600Reg <"C972.W", 972>;
+def C973_X : R600Reg <"C973.X", 973>;
+def C973_Y : R600Reg <"C973.Y", 973>;
+def C973_Z : R600Reg <"C973.Z", 973>;
+def C973_W : R600Reg <"C973.W", 973>;
+def C974_X : R600Reg <"C974.X", 974>;
+def C974_Y : R600Reg <"C974.Y", 974>;
+def C974_Z : R600Reg <"C974.Z", 974>;
+def C974_W : R600Reg <"C974.W", 974>;
+def C975_X : R600Reg <"C975.X", 975>;
+def C975_Y : R600Reg <"C975.Y", 975>;
+def C975_Z : R600Reg <"C975.Z", 975>;
+def C975_W : R600Reg <"C975.W", 975>;
+def C976_X : R600Reg <"C976.X", 976>;
+def C976_Y : R600Reg <"C976.Y", 976>;
+def C976_Z : R600Reg <"C976.Z", 976>;
+def C976_W : R600Reg <"C976.W", 976>;
+def C977_X : R600Reg <"C977.X", 977>;
+def C977_Y : R600Reg <"C977.Y", 977>;
+def C977_Z : R600Reg <"C977.Z", 977>;
+def C977_W : R600Reg <"C977.W", 977>;
+def C978_X : R600Reg <"C978.X", 978>;
+def C978_Y : R600Reg <"C978.Y", 978>;
+def C978_Z : R600Reg <"C978.Z", 978>;
+def C978_W : R600Reg <"C978.W", 978>;
+def C979_X : R600Reg <"C979.X", 979>;
+def C979_Y : R600Reg <"C979.Y", 979>;
+def C979_Z : R600Reg <"C979.Z", 979>;
+def C979_W : R600Reg <"C979.W", 979>;
+def C980_X : R600Reg <"C980.X", 980>;
+def C980_Y : R600Reg <"C980.Y", 980>;
+def C980_Z : R600Reg <"C980.Z", 980>;
+def C980_W : R600Reg <"C980.W", 980>;
+def C981_X : R600Reg <"C981.X", 981>;
+def C981_Y : R600Reg <"C981.Y", 981>;
+def C981_Z : R600Reg <"C981.Z", 981>;
+def C981_W : R600Reg <"C981.W", 981>;
+def C982_X : R600Reg <"C982.X", 982>;
+def C982_Y : R600Reg <"C982.Y", 982>;
+def C982_Z : R600Reg <"C982.Z", 982>;
+def C982_W : R600Reg <"C982.W", 982>;
+def C983_X : R600Reg <"C983.X", 983>;
+def C983_Y : R600Reg <"C983.Y", 983>;
+def C983_Z : R600Reg <"C983.Z", 983>;
+def C983_W : R600Reg <"C983.W", 983>;
+def C984_X : R600Reg <"C984.X", 984>;
+def C984_Y : R600Reg <"C984.Y", 984>;
+def C984_Z : R600Reg <"C984.Z", 984>;
+def C984_W : R600Reg <"C984.W", 984>;
+def C985_X : R600Reg <"C985.X", 985>;
+def C985_Y : R600Reg <"C985.Y", 985>;
+def C985_Z : R600Reg <"C985.Z", 985>;
+def C985_W : R600Reg <"C985.W", 985>;
+def C986_X : R600Reg <"C986.X", 986>;
+def C986_Y : R600Reg <"C986.Y", 986>;
+def C986_Z : R600Reg <"C986.Z", 986>;
+def C986_W : R600Reg <"C986.W", 986>;
+def C987_X : R600Reg <"C987.X", 987>;
+def C987_Y : R600Reg <"C987.Y", 987>;
+def C987_Z : R600Reg <"C987.Z", 987>;
+def C987_W : R600Reg <"C987.W", 987>;
+def C988_X : R600Reg <"C988.X", 988>;
+def C988_Y : R600Reg <"C988.Y", 988>;
+def C988_Z : R600Reg <"C988.Z", 988>;
+def C988_W : R600Reg <"C988.W", 988>;
+def C989_X : R600Reg <"C989.X", 989>;
+def C989_Y : R600Reg <"C989.Y", 989>;
+def C989_Z : R600Reg <"C989.Z", 989>;
+def C989_W : R600Reg <"C989.W", 989>;
+def C990_X : R600Reg <"C990.X", 990>;
+def C990_Y : R600Reg <"C990.Y", 990>;
+def C990_Z : R600Reg <"C990.Z", 990>;
+def C990_W : R600Reg <"C990.W", 990>;
+def C991_X : R600Reg <"C991.X", 991>;
+def C991_Y : R600Reg <"C991.Y", 991>;
+def C991_Z : R600Reg <"C991.Z", 991>;
+def C991_W : R600Reg <"C991.W", 991>;
+def C992_X : R600Reg <"C992.X", 992>;
+def C992_Y : R600Reg <"C992.Y", 992>;
+def C992_Z : R600Reg <"C992.Z", 992>;
+def C992_W : R600Reg <"C992.W", 992>;
+def C993_X : R600Reg <"C993.X", 993>;
+def C993_Y : R600Reg <"C993.Y", 993>;
+def C993_Z : R600Reg <"C993.Z", 993>;
+def C993_W : R600Reg <"C993.W", 993>;
+def C994_X : R600Reg <"C994.X", 994>;
+def C994_Y : R600Reg <"C994.Y", 994>;
+def C994_Z : R600Reg <"C994.Z", 994>;
+def C994_W : R600Reg <"C994.W", 994>;
+def C995_X : R600Reg <"C995.X", 995>;
+def C995_Y : R600Reg <"C995.Y", 995>;
+def C995_Z : R600Reg <"C995.Z", 995>;
+def C995_W : R600Reg <"C995.W", 995>;
+def C996_X : R600Reg <"C996.X", 996>;
+def C996_Y : R600Reg <"C996.Y", 996>;
+def C996_Z : R600Reg <"C996.Z", 996>;
+def C996_W : R600Reg <"C996.W", 996>;
+def C997_X : R600Reg <"C997.X", 997>;
+def C997_Y : R600Reg <"C997.Y", 997>;
+def C997_Z : R600Reg <"C997.Z", 997>;
+def C997_W : R600Reg <"C997.W", 997>;
+def C998_X : R600Reg <"C998.X", 998>;
+def C998_Y : R600Reg <"C998.Y", 998>;
+def C998_Z : R600Reg <"C998.Z", 998>;
+def C998_W : R600Reg <"C998.W", 998>;
+def C999_X : R600Reg <"C999.X", 999>;
+def C999_Y : R600Reg <"C999.Y", 999>;
+def C999_Z : R600Reg <"C999.Z", 999>;
+def C999_W : R600Reg <"C999.W", 999>;
+def C1000_X : R600Reg <"C1000.X", 1000>;
+def C1000_Y : R600Reg <"C1000.Y", 1000>;
+def C1000_Z : R600Reg <"C1000.Z", 1000>;
+def C1000_W : R600Reg <"C1000.W", 1000>;
+def C1001_X : R600Reg <"C1001.X", 1001>;
+def C1001_Y : R600Reg <"C1001.Y", 1001>;
+def C1001_Z : R600Reg <"C1001.Z", 1001>;
+def C1001_W : R600Reg <"C1001.W", 1001>;
+def C1002_X : R600Reg <"C1002.X", 1002>;
+def C1002_Y : R600Reg <"C1002.Y", 1002>;
+def C1002_Z : R600Reg <"C1002.Z", 1002>;
+def C1002_W : R600Reg <"C1002.W", 1002>;
+def C1003_X : R600Reg <"C1003.X", 1003>;
+def C1003_Y : R600Reg <"C1003.Y", 1003>;
+def C1003_Z : R600Reg <"C1003.Z", 1003>;
+def C1003_W : R600Reg <"C1003.W", 1003>;
+def C1004_X : R600Reg <"C1004.X", 1004>;
+def C1004_Y : R600Reg <"C1004.Y", 1004>;
+def C1004_Z : R600Reg <"C1004.Z", 1004>;
+def C1004_W : R600Reg <"C1004.W", 1004>;
+def C1005_X : R600Reg <"C1005.X", 1005>;
+def C1005_Y : R600Reg <"C1005.Y", 1005>;
+def C1005_Z : R600Reg <"C1005.Z", 1005>;
+def C1005_W : R600Reg <"C1005.W", 1005>;
+def C1006_X : R600Reg <"C1006.X", 1006>;
+def C1006_Y : R600Reg <"C1006.Y", 1006>;
+def C1006_Z : R600Reg <"C1006.Z", 1006>;
+def C1006_W : R600Reg <"C1006.W", 1006>;
+def C1007_X : R600Reg <"C1007.X", 1007>;
+def C1007_Y : R600Reg <"C1007.Y", 1007>;
+def C1007_Z : R600Reg <"C1007.Z", 1007>;
+def C1007_W : R600Reg <"C1007.W", 1007>;
+def C1008_X : R600Reg <"C1008.X", 1008>;
+def C1008_Y : R600Reg <"C1008.Y", 1008>;
+def C1008_Z : R600Reg <"C1008.Z", 1008>;
+def C1008_W : R600Reg <"C1008.W", 1008>;
+def C1009_X : R600Reg <"C1009.X", 1009>;
+def C1009_Y : R600Reg <"C1009.Y", 1009>;
+def C1009_Z : R600Reg <"C1009.Z", 1009>;
+def C1009_W : R600Reg <"C1009.W", 1009>;
+def C1010_X : R600Reg <"C1010.X", 1010>;
+def C1010_Y : R600Reg <"C1010.Y", 1010>;
+def C1010_Z : R600Reg <"C1010.Z", 1010>;
+def C1010_W : R600Reg <"C1010.W", 1010>;
+def C1011_X : R600Reg <"C1011.X", 1011>;
+def C1011_Y : R600Reg <"C1011.Y", 1011>;
+def C1011_Z : R600Reg <"C1011.Z", 1011>;
+def C1011_W : R600Reg <"C1011.W", 1011>;
+def C1012_X : R600Reg <"C1012.X", 1012>;
+def C1012_Y : R600Reg <"C1012.Y", 1012>;
+def C1012_Z : R600Reg <"C1012.Z", 1012>;
+def C1012_W : R600Reg <"C1012.W", 1012>;
+def C1013_X : R600Reg <"C1013.X", 1013>;
+def C1013_Y : R600Reg <"C1013.Y", 1013>;
+def C1013_Z : R600Reg <"C1013.Z", 1013>;
+def C1013_W : R600Reg <"C1013.W", 1013>;
+def C1014_X : R600Reg <"C1014.X", 1014>;
+def C1014_Y : R600Reg <"C1014.Y", 1014>;
+def C1014_Z : R600Reg <"C1014.Z", 1014>;
+def C1014_W : R600Reg <"C1014.W", 1014>;
+def C1015_X : R600Reg <"C1015.X", 1015>;
+def C1015_Y : R600Reg <"C1015.Y", 1015>;
+def C1015_Z : R600Reg <"C1015.Z", 1015>;
+def C1015_W : R600Reg <"C1015.W", 1015>;
+def C1016_X : R600Reg <"C1016.X", 1016>;
+def C1016_Y : R600Reg <"C1016.Y", 1016>;
+def C1016_Z : R600Reg <"C1016.Z", 1016>;
+def C1016_W : R600Reg <"C1016.W", 1016>;
+def C1017_X : R600Reg <"C1017.X", 1017>;
+def C1017_Y : R600Reg <"C1017.Y", 1017>;
+def C1017_Z : R600Reg <"C1017.Z", 1017>;
+def C1017_W : R600Reg <"C1017.W", 1017>;
+def C1018_X : R600Reg <"C1018.X", 1018>;
+def C1018_Y : R600Reg <"C1018.Y", 1018>;
+def C1018_Z : R600Reg <"C1018.Z", 1018>;
+def C1018_W : R600Reg <"C1018.W", 1018>;
+def C1019_X : R600Reg <"C1019.X", 1019>;
+def C1019_Y : R600Reg <"C1019.Y", 1019>;
+def C1019_Z : R600Reg <"C1019.Z", 1019>;
+def C1019_W : R600Reg <"C1019.W", 1019>;
+def C1020_X : R600Reg <"C1020.X", 1020>;
+def C1020_Y : R600Reg <"C1020.Y", 1020>;
+def C1020_Z : R600Reg <"C1020.Z", 1020>;
+def C1020_W : R600Reg <"C1020.W", 1020>;
+def C1021_X : R600Reg <"C1021.X", 1021>;
+def C1021_Y : R600Reg <"C1021.Y", 1021>;
+def C1021_Z : R600Reg <"C1021.Z", 1021>;
+def C1021_W : R600Reg <"C1021.W", 1021>;
+def C1022_X : R600Reg <"C1022.X", 1022>;
+def C1022_Y : R600Reg <"C1022.Y", 1022>;
+def C1022_Z : R600Reg <"C1022.Z", 1022>;
+def C1022_W : R600Reg <"C1022.W", 1022>;
+def C1023_X : R600Reg <"C1023.X", 1023>;
+def C1023_Y : R600Reg <"C1023.Y", 1023>;
+def C1023_Z : R600Reg <"C1023.Z", 1023>;
+def C1023_W : R600Reg <"C1023.W", 1023>;
+// 32-bit temporary register channels T0.X .. T127.W.
+//
+// Replaces 512 hand-expanded defs with a foreach loop.  The generated
+// records are identical in name ("T<Index>_<Chan>"), asm name
+// ("T<Index>.<Chan>"), and hardware encoding (<Index>), and are emitted
+// in the same order as the original expansion (all four channels of T0,
+// then T1, ...), so the TableGen-assigned register enum values are
+// unchanged.
+foreach Index = 0-127 in {
+  foreach Chan = ["X", "Y", "Z", "W"] in {
+    def T#Index#_#Chan : R600Reg <"T"#Index#"."#Chan, Index>;
+  }
+}
+// 128-bit temporary registers T0.XYZW .. T127.XYZW.
+//
+// Replaces 128 hand-expanded defs with a foreach loop.  Each 128-bit
+// register aggregates the four 32-bit channel registers of the same
+// index; !cast<Register> resolves the previously-defined T<Index>_<Chan>
+// records by name.  Record names, asm names, sub-register lists, and
+// encodings are identical to the original expansion, and definition
+// order (T0_XYZW, T1_XYZW, ...) is preserved so generated enum values
+// do not change.
+foreach Index = 0-127 in {
+  def T#Index#_XYZW : R600Reg_128 <"T"#Index#".XYZW",
+    [!cast<Register>("T"#Index#"_X"), !cast<Register>("T"#Index#"_Y"),
+     !cast<Register>("T"#Index#"_Z"), !cast<Register>("T"#Index#"_W")],
+    Index>;
+}
+
+// Simple wrapper class holding a dag of registers, used to build
+// register-set lists for register class definitions.
+class RegSet <dag regs> {
+  dag set = regs;
+}
+
+// Special ALU source operands.  These are not general-purpose registers:
+// each encoding in the 248-254 range selects a fixed hardware ALU source
+// (inline constant, previous-vector result, or a literal slot) rather
+// than a register file entry.
+def ZERO : R600Reg<"0.0", 248>;
+def ONE : R600Reg<"1.0", 249>;
+// NEG_ONE deliberately shares encoding 249 with ONE: presumably the sign
+// is applied via the instruction's source neg bit rather than a distinct
+// source-select value -- TODO confirm against the R600 ISA document.
+def NEG_ONE : R600Reg<"-1.0", 249>;
+def ONE_INT : R600Reg<"1", 250>;
+def HALF : R600Reg<"0.5", 252>;
+// NEG_HALF likewise shares encoding 252 with HALF (see NEG_ONE note).
+def NEG_HALF : R600Reg<"-0.5", 252>;
+def PV_X : R600Reg<"pv.x", 254>;
+def ALU_LITERAL_X : R600Reg<"literal.x", 253>;
+
+def R600_CReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add
+    C0_X
+,C0_Y,C0_Z,C0_W,C1_X,C1_Y,C1_Z,C1_W,C2_X,C2_Y,C2_Z
+,C2_W,C3_X,C3_Y,C3_Z,C3_W,C4_X,C4_Y,C4_Z,C4_W,C5_X
+,C5_Y,C5_Z,C5_W,C6_X,C6_Y,C6_Z,C6_W,C7_X,C7_Y,C7_Z
+,C7_W,C8_X,C8_Y,C8_Z,C8_W,C9_X,C9_Y,C9_Z,C9_W,C10_X
+,C10_Y,C10_Z,C10_W,C11_X,C11_Y,C11_Z,C11_W,C12_X,C12_Y,C12_Z
+,C12_W,C13_X,C13_Y,C13_Z,C13_W,C14_X,C14_Y,C14_Z,C14_W,C15_X
+,C15_Y,C15_Z,C15_W,C16_X,C16_Y,C16_Z,C16_W,C17_X,C17_Y,C17_Z
+,C17_W,C18_X,C18_Y,C18_Z,C18_W,C19_X,C19_Y,C19_Z,C19_W,C20_X
+,C20_Y,C20_Z,C20_W,C21_X,C21_Y,C21_Z,C21_W,C22_X,C22_Y,C22_Z
+,C22_W,C23_X,C23_Y,C23_Z,C23_W,C24_X,C24_Y,C24_Z,C24_W,C25_X
+,C25_Y,C25_Z,C25_W,C26_X,C26_Y,C26_Z,C26_W,C27_X,C27_Y,C27_Z
+,C27_W,C28_X,C28_Y,C28_Z,C28_W,C29_X,C29_Y,C29_Z,C29_W,C30_X
+,C30_Y,C30_Z,C30_W,C31_X,C31_Y,C31_Z,C31_W,C32_X,C32_Y,C32_Z
+,C32_W,C33_X,C33_Y,C33_Z,C33_W,C34_X,C34_Y,C34_Z,C34_W,C35_X
+,C35_Y,C35_Z,C35_W,C36_X,C36_Y,C36_Z,C36_W,C37_X,C37_Y,C37_Z
+,C37_W,C38_X,C38_Y,C38_Z,C38_W,C39_X,C39_Y,C39_Z,C39_W,C40_X
+,C40_Y,C40_Z,C40_W,C41_X,C41_Y,C41_Z,C41_W,C42_X,C42_Y,C42_Z
+,C42_W,C43_X,C43_Y,C43_Z,C43_W,C44_X,C44_Y,C44_Z,C44_W,C45_X
+,C45_Y,C45_Z,C45_W,C46_X,C46_Y,C46_Z,C46_W,C47_X,C47_Y,C47_Z
+,C47_W,C48_X,C48_Y,C48_Z,C48_W,C49_X,C49_Y,C49_Z,C49_W,C50_X
+,C50_Y,C50_Z,C50_W,C51_X,C51_Y,C51_Z,C51_W,C52_X,C52_Y,C52_Z
+,C52_W,C53_X,C53_Y,C53_Z,C53_W,C54_X,C54_Y,C54_Z,C54_W,C55_X
+,C55_Y,C55_Z,C55_W,C56_X,C56_Y,C56_Z,C56_W,C57_X,C57_Y,C57_Z
+,C57_W,C58_X,C58_Y,C58_Z,C58_W,C59_X,C59_Y,C59_Z,C59_W,C60_X
+,C60_Y,C60_Z,C60_W,C61_X,C61_Y,C61_Z,C61_W,C62_X,C62_Y,C62_Z
+,C62_W,C63_X,C63_Y,C63_Z,C63_W,C64_X,C64_Y,C64_Z,C64_W,C65_X
+,C65_Y,C65_Z,C65_W,C66_X,C66_Y,C66_Z,C66_W,C67_X,C67_Y,C67_Z
+,C67_W,C68_X,C68_Y,C68_Z,C68_W,C69_X,C69_Y,C69_Z,C69_W,C70_X
+,C70_Y,C70_Z,C70_W,C71_X,C71_Y,C71_Z,C71_W,C72_X,C72_Y,C72_Z
+,C72_W,C73_X,C73_Y,C73_Z,C73_W,C74_X,C74_Y,C74_Z,C74_W,C75_X
+,C75_Y,C75_Z,C75_W,C76_X,C76_Y,C76_Z,C76_W,C77_X,C77_Y,C77_Z
+,C77_W,C78_X,C78_Y,C78_Z,C78_W,C79_X,C79_Y,C79_Z,C79_W,C80_X
+,C80_Y,C80_Z,C80_W,C81_X,C81_Y,C81_Z,C81_W,C82_X,C82_Y,C82_Z
+,C82_W,C83_X,C83_Y,C83_Z,C83_W,C84_X,C84_Y,C84_Z,C84_W,C85_X
+,C85_Y,C85_Z,C85_W,C86_X,C86_Y,C86_Z,C86_W,C87_X,C87_Y,C87_Z
+,C87_W,C88_X,C88_Y,C88_Z,C88_W,C89_X,C89_Y,C89_Z,C89_W,C90_X
+,C90_Y,C90_Z,C90_W,C91_X,C91_Y,C91_Z,C91_W,C92_X,C92_Y,C92_Z
+,C92_W,C93_X,C93_Y,C93_Z,C93_W,C94_X,C94_Y,C94_Z,C94_W,C95_X
+,C95_Y,C95_Z,C95_W,C96_X,C96_Y,C96_Z,C96_W,C97_X,C97_Y,C97_Z
+,C97_W,C98_X,C98_Y,C98_Z,C98_W,C99_X,C99_Y,C99_Z,C99_W,C100_X
+,C100_Y,C100_Z,C100_W,C101_X,C101_Y,C101_Z,C101_W,C102_X,C102_Y,C102_Z
+,C102_W,C103_X,C103_Y,C103_Z,C103_W,C104_X,C104_Y,C104_Z,C104_W,C105_X
+,C105_Y,C105_Z,C105_W,C106_X,C106_Y,C106_Z,C106_W,C107_X,C107_Y,C107_Z
+,C107_W,C108_X,C108_Y,C108_Z,C108_W,C109_X,C109_Y,C109_Z,C109_W,C110_X
+,C110_Y,C110_Z,C110_W,C111_X,C111_Y,C111_Z,C111_W,C112_X,C112_Y,C112_Z
+,C112_W,C113_X,C113_Y,C113_Z,C113_W,C114_X,C114_Y,C114_Z,C114_W,C115_X
+,C115_Y,C115_Z,C115_W,C116_X,C116_Y,C116_Z,C116_W,C117_X,C117_Y,C117_Z
+,C117_W,C118_X,C118_Y,C118_Z,C118_W,C119_X,C119_Y,C119_Z,C119_W,C120_X
+,C120_Y,C120_Z,C120_W,C121_X,C121_Y,C121_Z,C121_W,C122_X,C122_Y,C122_Z
+,C122_W,C123_X,C123_Y,C123_Z,C123_W,C124_X,C124_Y,C124_Z,C124_W,C125_X
+,C125_Y,C125_Z,C125_W,C126_X,C126_Y,C126_Z,C126_W,C127_X,C127_Y,C127_Z
+,C127_W,C128_X,C128_Y,C128_Z,C128_W,C129_X,C129_Y,C129_Z,C129_W,C130_X
+,C130_Y,C130_Z,C130_W,C131_X,C131_Y,C131_Z,C131_W,C132_X,C132_Y,C132_Z
+,C132_W,C133_X,C133_Y,C133_Z,C133_W,C134_X,C134_Y,C134_Z,C134_W,C135_X
+,C135_Y,C135_Z,C135_W,C136_X,C136_Y,C136_Z,C136_W,C137_X,C137_Y,C137_Z
+,C137_W,C138_X,C138_Y,C138_Z,C138_W,C139_X,C139_Y,C139_Z,C139_W,C140_X
+,C140_Y,C140_Z,C140_W,C141_X,C141_Y,C141_Z,C141_W,C142_X,C142_Y,C142_Z
+,C142_W,C143_X,C143_Y,C143_Z,C143_W,C144_X,C144_Y,C144_Z,C144_W,C145_X
+,C145_Y,C145_Z,C145_W,C146_X,C146_Y,C146_Z,C146_W,C147_X,C147_Y,C147_Z
+,C147_W,C148_X,C148_Y,C148_Z,C148_W,C149_X,C149_Y,C149_Z,C149_W,C150_X
+,C150_Y,C150_Z,C150_W,C151_X,C151_Y,C151_Z,C151_W,C152_X,C152_Y,C152_Z
+,C152_W,C153_X,C153_Y,C153_Z,C153_W,C154_X,C154_Y,C154_Z,C154_W,C155_X
+,C155_Y,C155_Z,C155_W,C156_X,C156_Y,C156_Z,C156_W,C157_X,C157_Y,C157_Z
+,C157_W,C158_X,C158_Y,C158_Z,C158_W,C159_X,C159_Y,C159_Z,C159_W,C160_X
+,C160_Y,C160_Z,C160_W,C161_X,C161_Y,C161_Z,C161_W,C162_X,C162_Y,C162_Z
+,C162_W,C163_X,C163_Y,C163_Z,C163_W,C164_X,C164_Y,C164_Z,C164_W,C165_X
+,C165_Y,C165_Z,C165_W,C166_X,C166_Y,C166_Z,C166_W,C167_X,C167_Y,C167_Z
+,C167_W,C168_X,C168_Y,C168_Z,C168_W,C169_X,C169_Y,C169_Z,C169_W,C170_X
+,C170_Y,C170_Z,C170_W,C171_X,C171_Y,C171_Z,C171_W,C172_X,C172_Y,C172_Z
+,C172_W,C173_X,C173_Y,C173_Z,C173_W,C174_X,C174_Y,C174_Z,C174_W,C175_X
+,C175_Y,C175_Z,C175_W,C176_X,C176_Y,C176_Z,C176_W,C177_X,C177_Y,C177_Z
+,C177_W,C178_X,C178_Y,C178_Z,C178_W,C179_X,C179_Y,C179_Z,C179_W,C180_X
+,C180_Y,C180_Z,C180_W,C181_X,C181_Y,C181_Z,C181_W,C182_X,C182_Y,C182_Z
+,C182_W,C183_X,C183_Y,C183_Z,C183_W,C184_X,C184_Y,C184_Z,C184_W,C185_X
+,C185_Y,C185_Z,C185_W,C186_X,C186_Y,C186_Z,C186_W,C187_X,C187_Y,C187_Z
+,C187_W,C188_X,C188_Y,C188_Z,C188_W,C189_X,C189_Y,C189_Z,C189_W,C190_X
+,C190_Y,C190_Z,C190_W,C191_X,C191_Y,C191_Z,C191_W,C192_X,C192_Y,C192_Z
+,C192_W,C193_X,C193_Y,C193_Z,C193_W,C194_X,C194_Y,C194_Z,C194_W,C195_X
+,C195_Y,C195_Z,C195_W,C196_X,C196_Y,C196_Z,C196_W,C197_X,C197_Y,C197_Z
+,C197_W,C198_X,C198_Y,C198_Z,C198_W,C199_X,C199_Y,C199_Z,C199_W,C200_X
+,C200_Y,C200_Z,C200_W,C201_X,C201_Y,C201_Z,C201_W,C202_X,C202_Y,C202_Z
+,C202_W,C203_X,C203_Y,C203_Z,C203_W,C204_X,C204_Y,C204_Z,C204_W,C205_X
+,C205_Y,C205_Z,C205_W,C206_X,C206_Y,C206_Z,C206_W,C207_X,C207_Y,C207_Z
+,C207_W,C208_X,C208_Y,C208_Z,C208_W,C209_X,C209_Y,C209_Z,C209_W,C210_X
+,C210_Y,C210_Z,C210_W,C211_X,C211_Y,C211_Z,C211_W,C212_X,C212_Y,C212_Z
+,C212_W,C213_X,C213_Y,C213_Z,C213_W,C214_X,C214_Y,C214_Z,C214_W,C215_X
+,C215_Y,C215_Z,C215_W,C216_X,C216_Y,C216_Z,C216_W,C217_X,C217_Y,C217_Z
+,C217_W,C218_X,C218_Y,C218_Z,C218_W,C219_X,C219_Y,C219_Z,C219_W,C220_X
+,C220_Y,C220_Z,C220_W,C221_X,C221_Y,C221_Z,C221_W,C222_X,C222_Y,C222_Z
+,C222_W,C223_X,C223_Y,C223_Z,C223_W,C224_X,C224_Y,C224_Z,C224_W,C225_X
+,C225_Y,C225_Z,C225_W,C226_X,C226_Y,C226_Z,C226_W,C227_X,C227_Y,C227_Z
+,C227_W,C228_X,C228_Y,C228_Z,C228_W,C229_X,C229_Y,C229_Z,C229_W,C230_X
+,C230_Y,C230_Z,C230_W,C231_X,C231_Y,C231_Z,C231_W,C232_X,C232_Y,C232_Z
+,C232_W,C233_X,C233_Y,C233_Z,C233_W,C234_X,C234_Y,C234_Z,C234_W,C235_X
+,C235_Y,C235_Z,C235_W,C236_X,C236_Y,C236_Z,C236_W,C237_X,C237_Y,C237_Z
+,C237_W,C238_X,C238_Y,C238_Z,C238_W,C239_X,C239_Y,C239_Z,C239_W,C240_X
+,C240_Y,C240_Z,C240_W,C241_X,C241_Y,C241_Z,C241_W,C242_X,C242_Y,C242_Z
+,C242_W,C243_X,C243_Y,C243_Z,C243_W,C244_X,C244_Y,C244_Z,C244_W,C245_X
+,C245_Y,C245_Z,C245_W,C246_X,C246_Y,C246_Z,C246_W,C247_X,C247_Y,C247_Z
+,C247_W,C248_X,C248_Y,C248_Z,C248_W,C249_X,C249_Y,C249_Z,C249_W,C250_X
+,C250_Y,C250_Z,C250_W,C251_X,C251_Y,C251_Z,C251_W,C252_X,C252_Y,C252_Z
+,C252_W,C253_X,C253_Y,C253_Z,C253_W,C254_X,C254_Y,C254_Z,C254_W,C255_X
+,C255_Y,C255_Z,C255_W,C256_X,C256_Y,C256_Z,C256_W,C257_X,C257_Y,C257_Z
+,C257_W,C258_X,C258_Y,C258_Z,C258_W,C259_X,C259_Y,C259_Z,C259_W,C260_X
+,C260_Y,C260_Z,C260_W,C261_X,C261_Y,C261_Z,C261_W,C262_X,C262_Y,C262_Z
+,C262_W,C263_X,C263_Y,C263_Z,C263_W,C264_X,C264_Y,C264_Z,C264_W,C265_X
+,C265_Y,C265_Z,C265_W,C266_X,C266_Y,C266_Z,C266_W,C267_X,C267_Y,C267_Z
+,C267_W,C268_X,C268_Y,C268_Z,C268_W,C269_X,C269_Y,C269_Z,C269_W,C270_X
+,C270_Y,C270_Z,C270_W,C271_X,C271_Y,C271_Z,C271_W,C272_X,C272_Y,C272_Z
+,C272_W,C273_X,C273_Y,C273_Z,C273_W,C274_X,C274_Y,C274_Z,C274_W,C275_X
+,C275_Y,C275_Z,C275_W,C276_X,C276_Y,C276_Z,C276_W,C277_X,C277_Y,C277_Z
+,C277_W,C278_X,C278_Y,C278_Z,C278_W,C279_X,C279_Y,C279_Z,C279_W,C280_X
+,C280_Y,C280_Z,C280_W,C281_X,C281_Y,C281_Z,C281_W,C282_X,C282_Y,C282_Z
+,C282_W,C283_X,C283_Y,C283_Z,C283_W,C284_X,C284_Y,C284_Z,C284_W,C285_X
+,C285_Y,C285_Z,C285_W,C286_X,C286_Y,C286_Z,C286_W,C287_X,C287_Y,C287_Z
+,C287_W,C288_X,C288_Y,C288_Z,C288_W,C289_X,C289_Y,C289_Z,C289_W,C290_X
+,C290_Y,C290_Z,C290_W,C291_X,C291_Y,C291_Z,C291_W,C292_X,C292_Y,C292_Z
+,C292_W,C293_X,C293_Y,C293_Z,C293_W,C294_X,C294_Y,C294_Z,C294_W,C295_X
+,C295_Y,C295_Z,C295_W,C296_X,C296_Y,C296_Z,C296_W,C297_X,C297_Y,C297_Z
+,C297_W,C298_X,C298_Y,C298_Z,C298_W,C299_X,C299_Y,C299_Z,C299_W,C300_X
+,C300_Y,C300_Z,C300_W,C301_X,C301_Y,C301_Z,C301_W,C302_X,C302_Y,C302_Z
+,C302_W,C303_X,C303_Y,C303_Z,C303_W,C304_X,C304_Y,C304_Z,C304_W,C305_X
+,C305_Y,C305_Z,C305_W,C306_X,C306_Y,C306_Z,C306_W,C307_X,C307_Y,C307_Z
+,C307_W,C308_X,C308_Y,C308_Z,C308_W,C309_X,C309_Y,C309_Z,C309_W,C310_X
+,C310_Y,C310_Z,C310_W,C311_X,C311_Y,C311_Z,C311_W,C312_X,C312_Y,C312_Z
+,C312_W,C313_X,C313_Y,C313_Z,C313_W,C314_X,C314_Y,C314_Z,C314_W,C315_X
+,C315_Y,C315_Z,C315_W,C316_X,C316_Y,C316_Z,C316_W,C317_X,C317_Y,C317_Z
+,C317_W,C318_X,C318_Y,C318_Z,C318_W,C319_X,C319_Y,C319_Z,C319_W,C320_X
+,C320_Y,C320_Z,C320_W,C321_X,C321_Y,C321_Z,C321_W,C322_X,C322_Y,C322_Z
+,C322_W,C323_X,C323_Y,C323_Z,C323_W,C324_X,C324_Y,C324_Z,C324_W,C325_X
+,C325_Y,C325_Z,C325_W,C326_X,C326_Y,C326_Z,C326_W,C327_X,C327_Y,C327_Z
+,C327_W,C328_X,C328_Y,C328_Z,C328_W,C329_X,C329_Y,C329_Z,C329_W,C330_X
+,C330_Y,C330_Z,C330_W,C331_X,C331_Y,C331_Z,C331_W,C332_X,C332_Y,C332_Z
+,C332_W,C333_X,C333_Y,C333_Z,C333_W,C334_X,C334_Y,C334_Z,C334_W,C335_X
+,C335_Y,C335_Z,C335_W,C336_X,C336_Y,C336_Z,C336_W,C337_X,C337_Y,C337_Z
+,C337_W,C338_X,C338_Y,C338_Z,C338_W,C339_X,C339_Y,C339_Z,C339_W,C340_X
+,C340_Y,C340_Z,C340_W,C341_X,C341_Y,C341_Z,C341_W,C342_X,C342_Y,C342_Z
+,C342_W,C343_X,C343_Y,C343_Z,C343_W,C344_X,C344_Y,C344_Z,C344_W,C345_X
+,C345_Y,C345_Z,C345_W,C346_X,C346_Y,C346_Z,C346_W,C347_X,C347_Y,C347_Z
+,C347_W,C348_X,C348_Y,C348_Z,C348_W,C349_X,C349_Y,C349_Z,C349_W,C350_X
+,C350_Y,C350_Z,C350_W,C351_X,C351_Y,C351_Z,C351_W,C352_X,C352_Y,C352_Z
+,C352_W,C353_X,C353_Y,C353_Z,C353_W,C354_X,C354_Y,C354_Z,C354_W,C355_X
+,C355_Y,C355_Z,C355_W,C356_X,C356_Y,C356_Z,C356_W,C357_X,C357_Y,C357_Z
+,C357_W,C358_X,C358_Y,C358_Z,C358_W,C359_X,C359_Y,C359_Z,C359_W,C360_X
+,C360_Y,C360_Z,C360_W,C361_X,C361_Y,C361_Z,C361_W,C362_X,C362_Y,C362_Z
+,C362_W,C363_X,C363_Y,C363_Z,C363_W,C364_X,C364_Y,C364_Z,C364_W,C365_X
+,C365_Y,C365_Z,C365_W,C366_X,C366_Y,C366_Z,C366_W,C367_X,C367_Y,C367_Z
+,C367_W,C368_X,C368_Y,C368_Z,C368_W,C369_X,C369_Y,C369_Z,C369_W,C370_X
+,C370_Y,C370_Z,C370_W,C371_X,C371_Y,C371_Z,C371_W,C372_X,C372_Y,C372_Z
+,C372_W,C373_X,C373_Y,C373_Z,C373_W,C374_X,C374_Y,C374_Z,C374_W,C375_X
+,C375_Y,C375_Z,C375_W,C376_X,C376_Y,C376_Z,C376_W,C377_X,C377_Y,C377_Z
+,C377_W,C378_X,C378_Y,C378_Z,C378_W,C379_X,C379_Y,C379_Z,C379_W,C380_X
+,C380_Y,C380_Z,C380_W,C381_X,C381_Y,C381_Z,C381_W,C382_X,C382_Y,C382_Z
+,C382_W,C383_X,C383_Y,C383_Z,C383_W,C384_X,C384_Y,C384_Z,C384_W,C385_X
+,C385_Y,C385_Z,C385_W,C386_X,C386_Y,C386_Z,C386_W,C387_X,C387_Y,C387_Z
+,C387_W,C388_X,C388_Y,C388_Z,C388_W,C389_X,C389_Y,C389_Z,C389_W,C390_X
+,C390_Y,C390_Z,C390_W,C391_X,C391_Y,C391_Z,C391_W,C392_X,C392_Y,C392_Z
+,C392_W,C393_X,C393_Y,C393_Z,C393_W,C394_X,C394_Y,C394_Z,C394_W,C395_X
+,C395_Y,C395_Z,C395_W,C396_X,C396_Y,C396_Z,C396_W,C397_X,C397_Y,C397_Z
+,C397_W,C398_X,C398_Y,C398_Z,C398_W,C399_X,C399_Y,C399_Z,C399_W,C400_X
+,C400_Y,C400_Z,C400_W,C401_X,C401_Y,C401_Z,C401_W,C402_X,C402_Y,C402_Z
+,C402_W,C403_X,C403_Y,C403_Z,C403_W,C404_X,C404_Y,C404_Z,C404_W,C405_X
+,C405_Y,C405_Z,C405_W,C406_X,C406_Y,C406_Z,C406_W,C407_X,C407_Y,C407_Z
+,C407_W,C408_X,C408_Y,C408_Z,C408_W,C409_X,C409_Y,C409_Z,C409_W,C410_X
+,C410_Y,C410_Z,C410_W,C411_X,C411_Y,C411_Z,C411_W,C412_X,C412_Y,C412_Z
+,C412_W,C413_X,C413_Y,C413_Z,C413_W,C414_X,C414_Y,C414_Z,C414_W,C415_X
+,C415_Y,C415_Z,C415_W,C416_X,C416_Y,C416_Z,C416_W,C417_X,C417_Y,C417_Z
+,C417_W,C418_X,C418_Y,C418_Z,C418_W,C419_X,C419_Y,C419_Z,C419_W,C420_X
+,C420_Y,C420_Z,C420_W,C421_X,C421_Y,C421_Z,C421_W,C422_X,C422_Y,C422_Z
+,C422_W,C423_X,C423_Y,C423_Z,C423_W,C424_X,C424_Y,C424_Z,C424_W,C425_X
+,C425_Y,C425_Z,C425_W,C426_X,C426_Y,C426_Z,C426_W,C427_X,C427_Y,C427_Z
+,C427_W,C428_X,C428_Y,C428_Z,C428_W,C429_X,C429_Y,C429_Z,C429_W,C430_X
+,C430_Y,C430_Z,C430_W,C431_X,C431_Y,C431_Z,C431_W,C432_X,C432_Y,C432_Z
+,C432_W,C433_X,C433_Y,C433_Z,C433_W,C434_X,C434_Y,C434_Z,C434_W,C435_X
+,C435_Y,C435_Z,C435_W,C436_X,C436_Y,C436_Z,C436_W,C437_X,C437_Y,C437_Z
+,C437_W,C438_X,C438_Y,C438_Z,C438_W,C439_X,C439_Y,C439_Z,C439_W,C440_X
+,C440_Y,C440_Z,C440_W,C441_X,C441_Y,C441_Z,C441_W,C442_X,C442_Y,C442_Z
+,C442_W,C443_X,C443_Y,C443_Z,C443_W,C444_X,C444_Y,C444_Z,C444_W,C445_X
+,C445_Y,C445_Z,C445_W,C446_X,C446_Y,C446_Z,C446_W,C447_X,C447_Y,C447_Z
+,C447_W,C448_X,C448_Y,C448_Z,C448_W,C449_X,C449_Y,C449_Z,C449_W,C450_X
+,C450_Y,C450_Z,C450_W,C451_X,C451_Y,C451_Z,C451_W,C452_X,C452_Y,C452_Z
+,C452_W,C453_X,C453_Y,C453_Z,C453_W,C454_X,C454_Y,C454_Z,C454_W,C455_X
+,C455_Y,C455_Z,C455_W,C456_X,C456_Y,C456_Z,C456_W,C457_X,C457_Y,C457_Z
+,C457_W,C458_X,C458_Y,C458_Z,C458_W,C459_X,C459_Y,C459_Z,C459_W,C460_X
+,C460_Y,C460_Z,C460_W,C461_X,C461_Y,C461_Z,C461_W,C462_X,C462_Y,C462_Z
+,C462_W,C463_X,C463_Y,C463_Z,C463_W,C464_X,C464_Y,C464_Z,C464_W,C465_X
+,C465_Y,C465_Z,C465_W,C466_X,C466_Y,C466_Z,C466_W,C467_X,C467_Y,C467_Z
+,C467_W,C468_X,C468_Y,C468_Z,C468_W,C469_X,C469_Y,C469_Z,C469_W,C470_X
+,C470_Y,C470_Z,C470_W,C471_X,C471_Y,C471_Z,C471_W,C472_X,C472_Y,C472_Z
+,C472_W,C473_X,C473_Y,C473_Z,C473_W,C474_X,C474_Y,C474_Z,C474_W,C475_X
+,C475_Y,C475_Z,C475_W,C476_X,C476_Y,C476_Z,C476_W,C477_X,C477_Y,C477_Z
+,C477_W,C478_X,C478_Y,C478_Z,C478_W,C479_X,C479_Y,C479_Z,C479_W,C480_X
+,C480_Y,C480_Z,C480_W,C481_X,C481_Y,C481_Z,C481_W,C482_X,C482_Y,C482_Z
+,C482_W,C483_X,C483_Y,C483_Z,C483_W,C484_X,C484_Y,C484_Z,C484_W,C485_X
+,C485_Y,C485_Z,C485_W,C486_X,C486_Y,C486_Z,C486_W,C487_X,C487_Y,C487_Z
+,C487_W,C488_X,C488_Y,C488_Z,C488_W,C489_X,C489_Y,C489_Z,C489_W,C490_X
+,C490_Y,C490_Z,C490_W,C491_X,C491_Y,C491_Z,C491_W,C492_X,C492_Y,C492_Z
+,C492_W,C493_X,C493_Y,C493_Z,C493_W,C494_X,C494_Y,C494_Z,C494_W,C495_X
+,C495_Y,C495_Z,C495_W,C496_X,C496_Y,C496_Z,C496_W,C497_X,C497_Y,C497_Z
+,C497_W,C498_X,C498_Y,C498_Z,C498_W,C499_X,C499_Y,C499_Z,C499_W,C500_X
+,C500_Y,C500_Z,C500_W,C501_X,C501_Y,C501_Z,C501_W,C502_X,C502_Y,C502_Z
+,C502_W,C503_X,C503_Y,C503_Z,C503_W,C504_X,C504_Y,C504_Z,C504_W,C505_X
+,C505_Y,C505_Z,C505_W,C506_X,C506_Y,C506_Z,C506_W,C507_X,C507_Y,C507_Z
+,C507_W,C508_X,C508_Y,C508_Z,C508_W,C509_X,C509_Y,C509_Z,C509_W,C510_X
+,C510_Y,C510_Z,C510_W,C511_X,C511_Y,C511_Z,C511_W,C512_X,C512_Y,C512_Z
+,C512_W,C513_X,C513_Y,C513_Z,C513_W,C514_X,C514_Y,C514_Z,C514_W,C515_X
+,C515_Y,C515_Z,C515_W,C516_X,C516_Y,C516_Z,C516_W,C517_X,C517_Y,C517_Z
+,C517_W,C518_X,C518_Y,C518_Z,C518_W,C519_X,C519_Y,C519_Z,C519_W,C520_X
+,C520_Y,C520_Z,C520_W,C521_X,C521_Y,C521_Z,C521_W,C522_X,C522_Y,C522_Z
+,C522_W,C523_X,C523_Y,C523_Z,C523_W,C524_X,C524_Y,C524_Z,C524_W,C525_X
+,C525_Y,C525_Z,C525_W,C526_X,C526_Y,C526_Z,C526_W,C527_X,C527_Y,C527_Z
+,C527_W,C528_X,C528_Y,C528_Z,C528_W,C529_X,C529_Y,C529_Z,C529_W,C530_X
+,C530_Y,C530_Z,C530_W,C531_X,C531_Y,C531_Z,C531_W,C532_X,C532_Y,C532_Z
+,C532_W,C533_X,C533_Y,C533_Z,C533_W,C534_X,C534_Y,C534_Z,C534_W,C535_X
+,C535_Y,C535_Z,C535_W,C536_X,C536_Y,C536_Z,C536_W,C537_X,C537_Y,C537_Z
+,C537_W,C538_X,C538_Y,C538_Z,C538_W,C539_X,C539_Y,C539_Z,C539_W,C540_X
+,C540_Y,C540_Z,C540_W,C541_X,C541_Y,C541_Z,C541_W,C542_X,C542_Y,C542_Z
+,C542_W,C543_X,C543_Y,C543_Z,C543_W,C544_X,C544_Y,C544_Z,C544_W,C545_X
+,C545_Y,C545_Z,C545_W,C546_X,C546_Y,C546_Z,C546_W,C547_X,C547_Y,C547_Z
+,C547_W,C548_X,C548_Y,C548_Z,C548_W,C549_X,C549_Y,C549_Z,C549_W,C550_X
+,C550_Y,C550_Z,C550_W,C551_X,C551_Y,C551_Z,C551_W,C552_X,C552_Y,C552_Z
+,C552_W,C553_X,C553_Y,C553_Z,C553_W,C554_X,C554_Y,C554_Z,C554_W,C555_X
+,C555_Y,C555_Z,C555_W,C556_X,C556_Y,C556_Z,C556_W,C557_X,C557_Y,C557_Z
+,C557_W,C558_X,C558_Y,C558_Z,C558_W,C559_X,C559_Y,C559_Z,C559_W,C560_X
+,C560_Y,C560_Z,C560_W,C561_X,C561_Y,C561_Z,C561_W,C562_X,C562_Y,C562_Z
+,C562_W,C563_X,C563_Y,C563_Z,C563_W,C564_X,C564_Y,C564_Z,C564_W,C565_X
+,C565_Y,C565_Z,C565_W,C566_X,C566_Y,C566_Z,C566_W,C567_X,C567_Y,C567_Z
+,C567_W,C568_X,C568_Y,C568_Z,C568_W,C569_X,C569_Y,C569_Z,C569_W,C570_X
+,C570_Y,C570_Z,C570_W,C571_X,C571_Y,C571_Z,C571_W,C572_X,C572_Y,C572_Z
+,C572_W,C573_X,C573_Y,C573_Z,C573_W,C574_X,C574_Y,C574_Z,C574_W,C575_X
+,C575_Y,C575_Z,C575_W,C576_X,C576_Y,C576_Z,C576_W,C577_X,C577_Y,C577_Z
+,C577_W,C578_X,C578_Y,C578_Z,C578_W,C579_X,C579_Y,C579_Z,C579_W,C580_X
+,C580_Y,C580_Z,C580_W,C581_X,C581_Y,C581_Z,C581_W,C582_X,C582_Y,C582_Z
+,C582_W,C583_X,C583_Y,C583_Z,C583_W,C584_X,C584_Y,C584_Z,C584_W,C585_X
+,C585_Y,C585_Z,C585_W,C586_X,C586_Y,C586_Z,C586_W,C587_X,C587_Y,C587_Z
+,C587_W,C588_X,C588_Y,C588_Z,C588_W,C589_X,C589_Y,C589_Z,C589_W,C590_X
+,C590_Y,C590_Z,C590_W,C591_X,C591_Y,C591_Z,C591_W,C592_X,C592_Y,C592_Z
+,C592_W,C593_X,C593_Y,C593_Z,C593_W,C594_X,C594_Y,C594_Z,C594_W,C595_X
+,C595_Y,C595_Z,C595_W,C596_X,C596_Y,C596_Z,C596_W,C597_X,C597_Y,C597_Z
+,C597_W,C598_X,C598_Y,C598_Z,C598_W,C599_X,C599_Y,C599_Z,C599_W,C600_X
+,C600_Y,C600_Z,C600_W,C601_X,C601_Y,C601_Z,C601_W,C602_X,C602_Y,C602_Z
+,C602_W,C603_X,C603_Y,C603_Z,C603_W,C604_X,C604_Y,C604_Z,C604_W,C605_X
+,C605_Y,C605_Z,C605_W,C606_X,C606_Y,C606_Z,C606_W,C607_X,C607_Y,C607_Z
+,C607_W,C608_X,C608_Y,C608_Z,C608_W,C609_X,C609_Y,C609_Z,C609_W,C610_X
+,C610_Y,C610_Z,C610_W,C611_X,C611_Y,C611_Z,C611_W,C612_X,C612_Y,C612_Z
+,C612_W,C613_X,C613_Y,C613_Z,C613_W,C614_X,C614_Y,C614_Z,C614_W,C615_X
+,C615_Y,C615_Z,C615_W,C616_X,C616_Y,C616_Z,C616_W,C617_X,C617_Y,C617_Z
+,C617_W,C618_X,C618_Y,C618_Z,C618_W,C619_X,C619_Y,C619_Z,C619_W,C620_X
+,C620_Y,C620_Z,C620_W,C621_X,C621_Y,C621_Z,C621_W,C622_X,C622_Y,C622_Z
+,C622_W,C623_X,C623_Y,C623_Z,C623_W,C624_X,C624_Y,C624_Z,C624_W,C625_X
+,C625_Y,C625_Z,C625_W,C626_X,C626_Y,C626_Z,C626_W,C627_X,C627_Y,C627_Z
+,C627_W,C628_X,C628_Y,C628_Z,C628_W,C629_X,C629_Y,C629_Z,C629_W,C630_X
+,C630_Y,C630_Z,C630_W,C631_X,C631_Y,C631_Z,C631_W,C632_X,C632_Y,C632_Z
+,C632_W,C633_X,C633_Y,C633_Z,C633_W,C634_X,C634_Y,C634_Z,C634_W,C635_X
+,C635_Y,C635_Z,C635_W,C636_X,C636_Y,C636_Z,C636_W,C637_X,C637_Y,C637_Z
+,C637_W,C638_X,C638_Y,C638_Z,C638_W,C639_X,C639_Y,C639_Z,C639_W,C640_X
+,C640_Y,C640_Z,C640_W,C641_X,C641_Y,C641_Z,C641_W,C642_X,C642_Y,C642_Z
+,C642_W,C643_X,C643_Y,C643_Z,C643_W,C644_X,C644_Y,C644_Z,C644_W,C645_X
+,C645_Y,C645_Z,C645_W,C646_X,C646_Y,C646_Z,C646_W,C647_X,C647_Y,C647_Z
+,C647_W,C648_X,C648_Y,C648_Z,C648_W,C649_X,C649_Y,C649_Z,C649_W,C650_X
+,C650_Y,C650_Z,C650_W,C651_X,C651_Y,C651_Z,C651_W,C652_X,C652_Y,C652_Z
+,C652_W,C653_X,C653_Y,C653_Z,C653_W,C654_X,C654_Y,C654_Z,C654_W,C655_X
+,C655_Y,C655_Z,C655_W,C656_X,C656_Y,C656_Z,C656_W,C657_X,C657_Y,C657_Z
+,C657_W,C658_X,C658_Y,C658_Z,C658_W,C659_X,C659_Y,C659_Z,C659_W,C660_X
+,C660_Y,C660_Z,C660_W,C661_X,C661_Y,C661_Z,C661_W,C662_X,C662_Y,C662_Z
+,C662_W,C663_X,C663_Y,C663_Z,C663_W,C664_X,C664_Y,C664_Z,C664_W,C665_X
+,C665_Y,C665_Z,C665_W,C666_X,C666_Y,C666_Z,C666_W,C667_X,C667_Y,C667_Z
+,C667_W,C668_X,C668_Y,C668_Z,C668_W,C669_X,C669_Y,C669_Z,C669_W,C670_X
+,C670_Y,C670_Z,C670_W,C671_X,C671_Y,C671_Z,C671_W,C672_X,C672_Y,C672_Z
+,C672_W,C673_X,C673_Y,C673_Z,C673_W,C674_X,C674_Y,C674_Z,C674_W,C675_X
+,C675_Y,C675_Z,C675_W,C676_X,C676_Y,C676_Z,C676_W,C677_X,C677_Y,C677_Z
+,C677_W,C678_X,C678_Y,C678_Z,C678_W,C679_X,C679_Y,C679_Z,C679_W,C680_X
+,C680_Y,C680_Z,C680_W,C681_X,C681_Y,C681_Z,C681_W,C682_X,C682_Y,C682_Z
+,C682_W,C683_X,C683_Y,C683_Z,C683_W,C684_X,C684_Y,C684_Z,C684_W,C685_X
+,C685_Y,C685_Z,C685_W,C686_X,C686_Y,C686_Z,C686_W,C687_X,C687_Y,C687_Z
+,C687_W,C688_X,C688_Y,C688_Z,C688_W,C689_X,C689_Y,C689_Z,C689_W,C690_X
+,C690_Y,C690_Z,C690_W,C691_X,C691_Y,C691_Z,C691_W,C692_X,C692_Y,C692_Z
+,C692_W,C693_X,C693_Y,C693_Z,C693_W,C694_X,C694_Y,C694_Z,C694_W,C695_X
+,C695_Y,C695_Z,C695_W,C696_X,C696_Y,C696_Z,C696_W,C697_X,C697_Y,C697_Z
+,C697_W,C698_X,C698_Y,C698_Z,C698_W,C699_X,C699_Y,C699_Z,C699_W,C700_X
+,C700_Y,C700_Z,C700_W,C701_X,C701_Y,C701_Z,C701_W,C702_X,C702_Y,C702_Z
+,C702_W,C703_X,C703_Y,C703_Z,C703_W,C704_X,C704_Y,C704_Z,C704_W,C705_X
+,C705_Y,C705_Z,C705_W,C706_X,C706_Y,C706_Z,C706_W,C707_X,C707_Y,C707_Z
+,C707_W,C708_X,C708_Y,C708_Z,C708_W,C709_X,C709_Y,C709_Z,C709_W,C710_X
+,C710_Y,C710_Z,C710_W,C711_X,C711_Y,C711_Z,C711_W,C712_X,C712_Y,C712_Z
+,C712_W,C713_X,C713_Y,C713_Z,C713_W,C714_X,C714_Y,C714_Z,C714_W,C715_X
+,C715_Y,C715_Z,C715_W,C716_X,C716_Y,C716_Z,C716_W,C717_X,C717_Y,C717_Z
+,C717_W,C718_X,C718_Y,C718_Z,C718_W,C719_X,C719_Y,C719_Z,C719_W,C720_X
+,C720_Y,C720_Z,C720_W,C721_X,C721_Y,C721_Z,C721_W,C722_X,C722_Y,C722_Z
+,C722_W,C723_X,C723_Y,C723_Z,C723_W,C724_X,C724_Y,C724_Z,C724_W,C725_X
+,C725_Y,C725_Z,C725_W,C726_X,C726_Y,C726_Z,C726_W,C727_X,C727_Y,C727_Z
+,C727_W,C728_X,C728_Y,C728_Z,C728_W,C729_X,C729_Y,C729_Z,C729_W,C730_X
+,C730_Y,C730_Z,C730_W,C731_X,C731_Y,C731_Z,C731_W,C732_X,C732_Y,C732_Z
+,C732_W,C733_X,C733_Y,C733_Z,C733_W,C734_X,C734_Y,C734_Z,C734_W,C735_X
+,C735_Y,C735_Z,C735_W,C736_X,C736_Y,C736_Z,C736_W,C737_X,C737_Y,C737_Z
+,C737_W,C738_X,C738_Y,C738_Z,C738_W,C739_X,C739_Y,C739_Z,C739_W,C740_X
+,C740_Y,C740_Z,C740_W,C741_X,C741_Y,C741_Z,C741_W,C742_X,C742_Y,C742_Z
+,C742_W,C743_X,C743_Y,C743_Z,C743_W,C744_X,C744_Y,C744_Z,C744_W,C745_X
+,C745_Y,C745_Z,C745_W,C746_X,C746_Y,C746_Z,C746_W,C747_X,C747_Y,C747_Z
+,C747_W,C748_X,C748_Y,C748_Z,C748_W,C749_X,C749_Y,C749_Z,C749_W,C750_X
+,C750_Y,C750_Z,C750_W,C751_X,C751_Y,C751_Z,C751_W,C752_X,C752_Y,C752_Z
+,C752_W,C753_X,C753_Y,C753_Z,C753_W,C754_X,C754_Y,C754_Z,C754_W,C755_X
+,C755_Y,C755_Z,C755_W,C756_X,C756_Y,C756_Z,C756_W,C757_X,C757_Y,C757_Z
+,C757_W,C758_X,C758_Y,C758_Z,C758_W,C759_X,C759_Y,C759_Z,C759_W,C760_X
+,C760_Y,C760_Z,C760_W,C761_X,C761_Y,C761_Z,C761_W,C762_X,C762_Y,C762_Z
+,C762_W,C763_X,C763_Y,C763_Z,C763_W,C764_X,C764_Y,C764_Z,C764_W,C765_X
+,C765_Y,C765_Z,C765_W,C766_X,C766_Y,C766_Z,C766_W,C767_X,C767_Y,C767_Z
+,C767_W,C768_X,C768_Y,C768_Z,C768_W,C769_X,C769_Y,C769_Z,C769_W,C770_X
+,C770_Y,C770_Z,C770_W,C771_X,C771_Y,C771_Z,C771_W,C772_X,C772_Y,C772_Z
+,C772_W,C773_X,C773_Y,C773_Z,C773_W,C774_X,C774_Y,C774_Z,C774_W,C775_X
+,C775_Y,C775_Z,C775_W,C776_X,C776_Y,C776_Z,C776_W,C777_X,C777_Y,C777_Z
+,C777_W,C778_X,C778_Y,C778_Z,C778_W,C779_X,C779_Y,C779_Z,C779_W,C780_X
+,C780_Y,C780_Z,C780_W,C781_X,C781_Y,C781_Z,C781_W,C782_X,C782_Y,C782_Z
+,C782_W,C783_X,C783_Y,C783_Z,C783_W,C784_X,C784_Y,C784_Z,C784_W,C785_X
+,C785_Y,C785_Z,C785_W,C786_X,C786_Y,C786_Z,C786_W,C787_X,C787_Y,C787_Z
+,C787_W,C788_X,C788_Y,C788_Z,C788_W,C789_X,C789_Y,C789_Z,C789_W,C790_X
+,C790_Y,C790_Z,C790_W,C791_X,C791_Y,C791_Z,C791_W,C792_X,C792_Y,C792_Z
+,C792_W,C793_X,C793_Y,C793_Z,C793_W,C794_X,C794_Y,C794_Z,C794_W,C795_X
+,C795_Y,C795_Z,C795_W,C796_X,C796_Y,C796_Z,C796_W,C797_X,C797_Y,C797_Z
+,C797_W,C798_X,C798_Y,C798_Z,C798_W,C799_X,C799_Y,C799_Z,C799_W,C800_X
+,C800_Y,C800_Z,C800_W,C801_X,C801_Y,C801_Z,C801_W,C802_X,C802_Y,C802_Z
+,C802_W,C803_X,C803_Y,C803_Z,C803_W,C804_X,C804_Y,C804_Z,C804_W,C805_X
+,C805_Y,C805_Z,C805_W,C806_X,C806_Y,C806_Z,C806_W,C807_X,C807_Y,C807_Z
+,C807_W,C808_X,C808_Y,C808_Z,C808_W,C809_X,C809_Y,C809_Z,C809_W,C810_X
+,C810_Y,C810_Z,C810_W,C811_X,C811_Y,C811_Z,C811_W,C812_X,C812_Y,C812_Z
+,C812_W,C813_X,C813_Y,C813_Z,C813_W,C814_X,C814_Y,C814_Z,C814_W,C815_X
+,C815_Y,C815_Z,C815_W,C816_X,C816_Y,C816_Z,C816_W,C817_X,C817_Y,C817_Z
+,C817_W,C818_X,C818_Y,C818_Z,C818_W,C819_X,C819_Y,C819_Z,C819_W,C820_X
+,C820_Y,C820_Z,C820_W,C821_X,C821_Y,C821_Z,C821_W,C822_X,C822_Y,C822_Z
+,C822_W,C823_X,C823_Y,C823_Z,C823_W,C824_X,C824_Y,C824_Z,C824_W,C825_X
+,C825_Y,C825_Z,C825_W,C826_X,C826_Y,C826_Z,C826_W,C827_X,C827_Y,C827_Z
+,C827_W,C828_X,C828_Y,C828_Z,C828_W,C829_X,C829_Y,C829_Z,C829_W,C830_X
+,C830_Y,C830_Z,C830_W,C831_X,C831_Y,C831_Z,C831_W,C832_X,C832_Y,C832_Z
+,C832_W,C833_X,C833_Y,C833_Z,C833_W,C834_X,C834_Y,C834_Z,C834_W,C835_X
+,C835_Y,C835_Z,C835_W,C836_X,C836_Y,C836_Z,C836_W,C837_X,C837_Y,C837_Z
+,C837_W,C838_X,C838_Y,C838_Z,C838_W,C839_X,C839_Y,C839_Z,C839_W,C840_X
+,C840_Y,C840_Z,C840_W,C841_X,C841_Y,C841_Z,C841_W,C842_X,C842_Y,C842_Z
+,C842_W,C843_X,C843_Y,C843_Z,C843_W,C844_X,C844_Y,C844_Z,C844_W,C845_X
+,C845_Y,C845_Z,C845_W,C846_X,C846_Y,C846_Z,C846_W,C847_X,C847_Y,C847_Z
+,C847_W,C848_X,C848_Y,C848_Z,C848_W,C849_X,C849_Y,C849_Z,C849_W,C850_X
+,C850_Y,C850_Z,C850_W,C851_X,C851_Y,C851_Z,C851_W,C852_X,C852_Y,C852_Z
+,C852_W,C853_X,C853_Y,C853_Z,C853_W,C854_X,C854_Y,C854_Z,C854_W,C855_X
+,C855_Y,C855_Z,C855_W,C856_X,C856_Y,C856_Z,C856_W,C857_X,C857_Y,C857_Z
+,C857_W,C858_X,C858_Y,C858_Z,C858_W,C859_X,C859_Y,C859_Z,C859_W,C860_X
+,C860_Y,C860_Z,C860_W,C861_X,C861_Y,C861_Z,C861_W,C862_X,C862_Y,C862_Z
+,C862_W,C863_X,C863_Y,C863_Z,C863_W,C864_X,C864_Y,C864_Z,C864_W,C865_X
+,C865_Y,C865_Z,C865_W,C866_X,C866_Y,C866_Z,C866_W,C867_X,C867_Y,C867_Z
+,C867_W,C868_X,C868_Y,C868_Z,C868_W,C869_X,C869_Y,C869_Z,C869_W,C870_X
+,C870_Y,C870_Z,C870_W,C871_X,C871_Y,C871_Z,C871_W,C872_X,C872_Y,C872_Z
+,C872_W,C873_X,C873_Y,C873_Z,C873_W,C874_X,C874_Y,C874_Z,C874_W,C875_X
+,C875_Y,C875_Z,C875_W,C876_X,C876_Y,C876_Z,C876_W,C877_X,C877_Y,C877_Z
+,C877_W,C878_X,C878_Y,C878_Z,C878_W,C879_X,C879_Y,C879_Z,C879_W,C880_X
+,C880_Y,C880_Z,C880_W,C881_X,C881_Y,C881_Z,C881_W,C882_X,C882_Y,C882_Z
+,C882_W,C883_X,C883_Y,C883_Z,C883_W,C884_X,C884_Y,C884_Z,C884_W,C885_X
+,C885_Y,C885_Z,C885_W,C886_X,C886_Y,C886_Z,C886_W,C887_X,C887_Y,C887_Z
+,C887_W,C888_X,C888_Y,C888_Z,C888_W,C889_X,C889_Y,C889_Z,C889_W,C890_X
+,C890_Y,C890_Z,C890_W,C891_X,C891_Y,C891_Z,C891_W,C892_X,C892_Y,C892_Z
+,C892_W,C893_X,C893_Y,C893_Z,C893_W,C894_X,C894_Y,C894_Z,C894_W,C895_X
+,C895_Y,C895_Z,C895_W,C896_X,C896_Y,C896_Z,C896_W,C897_X,C897_Y,C897_Z
+,C897_W,C898_X,C898_Y,C898_Z,C898_W,C899_X,C899_Y,C899_Z,C899_W,C900_X
+,C900_Y,C900_Z,C900_W,C901_X,C901_Y,C901_Z,C901_W,C902_X,C902_Y,C902_Z
+,C902_W,C903_X,C903_Y,C903_Z,C903_W,C904_X,C904_Y,C904_Z,C904_W,C905_X
+,C905_Y,C905_Z,C905_W,C906_X,C906_Y,C906_Z,C906_W,C907_X,C907_Y,C907_Z
+,C907_W,C908_X,C908_Y,C908_Z,C908_W,C909_X,C909_Y,C909_Z,C909_W,C910_X
+,C910_Y,C910_Z,C910_W,C911_X,C911_Y,C911_Z,C911_W,C912_X,C912_Y,C912_Z
+,C912_W,C913_X,C913_Y,C913_Z,C913_W,C914_X,C914_Y,C914_Z,C914_W,C915_X
+,C915_Y,C915_Z,C915_W,C916_X,C916_Y,C916_Z,C916_W,C917_X,C917_Y,C917_Z
+,C917_W,C918_X,C918_Y,C918_Z,C918_W,C919_X,C919_Y,C919_Z,C919_W,C920_X
+,C920_Y,C920_Z,C920_W,C921_X,C921_Y,C921_Z,C921_W,C922_X,C922_Y,C922_Z
+,C922_W,C923_X,C923_Y,C923_Z,C923_W,C924_X,C924_Y,C924_Z,C924_W,C925_X
+,C925_Y,C925_Z,C925_W,C926_X,C926_Y,C926_Z,C926_W,C927_X,C927_Y,C927_Z
+,C927_W,C928_X,C928_Y,C928_Z,C928_W,C929_X,C929_Y,C929_Z,C929_W,C930_X
+,C930_Y,C930_Z,C930_W,C931_X,C931_Y,C931_Z,C931_W,C932_X,C932_Y,C932_Z
+,C932_W,C933_X,C933_Y,C933_Z,C933_W,C934_X,C934_Y,C934_Z,C934_W,C935_X
+,C935_Y,C935_Z,C935_W,C936_X,C936_Y,C936_Z,C936_W,C937_X,C937_Y,C937_Z
+,C937_W,C938_X,C938_Y,C938_Z,C938_W,C939_X,C939_Y,C939_Z,C939_W,C940_X
+,C940_Y,C940_Z,C940_W,C941_X,C941_Y,C941_Z,C941_W,C942_X,C942_Y,C942_Z
+,C942_W,C943_X,C943_Y,C943_Z,C943_W,C944_X,C944_Y,C944_Z,C944_W,C945_X
+,C945_Y,C945_Z,C945_W,C946_X,C946_Y,C946_Z,C946_W,C947_X,C947_Y,C947_Z
+,C947_W,C948_X,C948_Y,C948_Z,C948_W,C949_X,C949_Y,C949_Z,C949_W,C950_X
+,C950_Y,C950_Z,C950_W,C951_X,C951_Y,C951_Z,C951_W,C952_X,C952_Y,C952_Z
+,C952_W,C953_X,C953_Y,C953_Z,C953_W,C954_X,C954_Y,C954_Z,C954_W,C955_X
+,C955_Y,C955_Z,C955_W,C956_X,C956_Y,C956_Z,C956_W,C957_X,C957_Y,C957_Z
+,C957_W,C958_X,C958_Y,C958_Z,C958_W,C959_X,C959_Y,C959_Z,C959_W,C960_X
+,C960_Y,C960_Z,C960_W,C961_X,C961_Y,C961_Z,C961_W,C962_X,C962_Y,C962_Z
+,C962_W,C963_X,C963_Y,C963_Z,C963_W,C964_X,C964_Y,C964_Z,C964_W,C965_X
+,C965_Y,C965_Z,C965_W,C966_X,C966_Y,C966_Z,C966_W,C967_X,C967_Y,C967_Z
+,C967_W,C968_X,C968_Y,C968_Z,C968_W,C969_X,C969_Y,C969_Z,C969_W,C970_X
+,C970_Y,C970_Z,C970_W,C971_X,C971_Y,C971_Z,C971_W,C972_X,C972_Y,C972_Z
+,C972_W,C973_X,C973_Y,C973_Z,C973_W,C974_X,C974_Y,C974_Z,C974_W,C975_X
+,C975_Y,C975_Z,C975_W,C976_X,C976_Y,C976_Z,C976_W,C977_X,C977_Y,C977_Z
+,C977_W,C978_X,C978_Y,C978_Z,C978_W,C979_X,C979_Y,C979_Z,C979_W,C980_X
+,C980_Y,C980_Z,C980_W,C981_X,C981_Y,C981_Z,C981_W,C982_X,C982_Y,C982_Z
+,C982_W,C983_X,C983_Y,C983_Z,C983_W,C984_X,C984_Y,C984_Z,C984_W,C985_X
+,C985_Y,C985_Z,C985_W,C986_X,C986_Y,C986_Z,C986_W,C987_X,C987_Y,C987_Z
+,C987_W,C988_X,C988_Y,C988_Z,C988_W,C989_X,C989_Y,C989_Z,C989_W,C990_X
+,C990_Y,C990_Z,C990_W,C991_X,C991_Y,C991_Z,C991_W,C992_X,C992_Y,C992_Z
+,C992_W,C993_X,C993_Y,C993_Z,C993_W,C994_X,C994_Y,C994_Z,C994_W,C995_X
+,C995_Y,C995_Z,C995_W,C996_X,C996_Y,C996_Z,C996_W,C997_X,C997_Y,C997_Z
+,C997_W,C998_X,C998_Y,C998_Z,C998_W,C999_X,C999_Y,C999_Z,C999_W,C1000_X
+,C1000_Y,C1000_Z,C1000_W,C1001_X,C1001_Y,C1001_Z,C1001_W,C1002_X,C1002_Y,C1002_Z
+,C1002_W,C1003_X,C1003_Y,C1003_Z,C1003_W,C1004_X,C1004_Y,C1004_Z,C1004_W,C1005_X
+,C1005_Y,C1005_Z,C1005_W,C1006_X,C1006_Y,C1006_Z,C1006_W,C1007_X,C1007_Y,C1007_Z
+,C1007_W,C1008_X,C1008_Y,C1008_Z,C1008_W,C1009_X,C1009_Y,C1009_Z,C1009_W,C1010_X
+,C1010_Y,C1010_Z,C1010_W,C1011_X,C1011_Y,C1011_Z,C1011_W,C1012_X,C1012_Y,C1012_Z
+,C1012_W,C1013_X,C1013_Y,C1013_Z,C1013_W,C1014_X,C1014_Y,C1014_Z,C1014_W,C1015_X
+,C1015_Y,C1015_Z,C1015_W,C1016_X,C1016_Y,C1016_Z,C1016_W,C1017_X,C1017_Y,C1017_Z
+,C1017_W,C1018_X,C1018_Y,C1018_Z,C1018_W,C1019_X,C1019_Y,C1019_Z,C1019_W,C1020_X
+,C1020_Y,C1020_Z,C1020_W,C1021_X,C1021_Y,C1021_Z,C1021_W,C1022_X,C1022_Y,C1022_Z
+,C1022_W,C1023_X,C1023_Y,C1023_Z,C1023_W)>;
+
+def R600_TReg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add  // All 512 temporary subregisters: T0..T127, channels X/Y/Z/W.
+    T0_X
+,T0_Y,T0_Z,T0_W,T1_X,T1_Y,T1_Z,T1_W,T2_X,T2_Y,T2_Z
+,T2_W,T3_X,T3_Y,T3_Z,T3_W,T4_X,T4_Y,T4_Z,T4_W,T5_X
+,T5_Y,T5_Z,T5_W,T6_X,T6_Y,T6_Z,T6_W,T7_X,T7_Y,T7_Z
+,T7_W,T8_X,T8_Y,T8_Z,T8_W,T9_X,T9_Y,T9_Z,T9_W,T10_X
+,T10_Y,T10_Z,T10_W,T11_X,T11_Y,T11_Z,T11_W,T12_X,T12_Y,T12_Z
+,T12_W,T13_X,T13_Y,T13_Z,T13_W,T14_X,T14_Y,T14_Z,T14_W,T15_X
+,T15_Y,T15_Z,T15_W,T16_X,T16_Y,T16_Z,T16_W,T17_X,T17_Y,T17_Z
+,T17_W,T18_X,T18_Y,T18_Z,T18_W,T19_X,T19_Y,T19_Z,T19_W,T20_X
+,T20_Y,T20_Z,T20_W,T21_X,T21_Y,T21_Z,T21_W,T22_X,T22_Y,T22_Z
+,T22_W,T23_X,T23_Y,T23_Z,T23_W,T24_X,T24_Y,T24_Z,T24_W,T25_X
+,T25_Y,T25_Z,T25_W,T26_X,T26_Y,T26_Z,T26_W,T27_X,T27_Y,T27_Z
+,T27_W,T28_X,T28_Y,T28_Z,T28_W,T29_X,T29_Y,T29_Z,T29_W,T30_X
+,T30_Y,T30_Z,T30_W,T31_X,T31_Y,T31_Z,T31_W,T32_X,T32_Y,T32_Z
+,T32_W,T33_X,T33_Y,T33_Z,T33_W,T34_X,T34_Y,T34_Z,T34_W,T35_X
+,T35_Y,T35_Z,T35_W,T36_X,T36_Y,T36_Z,T36_W,T37_X,T37_Y,T37_Z
+,T37_W,T38_X,T38_Y,T38_Z,T38_W,T39_X,T39_Y,T39_Z,T39_W,T40_X
+,T40_Y,T40_Z,T40_W,T41_X,T41_Y,T41_Z,T41_W,T42_X,T42_Y,T42_Z
+,T42_W,T43_X,T43_Y,T43_Z,T43_W,T44_X,T44_Y,T44_Z,T44_W,T45_X
+,T45_Y,T45_Z,T45_W,T46_X,T46_Y,T46_Z,T46_W,T47_X,T47_Y,T47_Z
+,T47_W,T48_X,T48_Y,T48_Z,T48_W,T49_X,T49_Y,T49_Z,T49_W,T50_X
+,T50_Y,T50_Z,T50_W,T51_X,T51_Y,T51_Z,T51_W,T52_X,T52_Y,T52_Z
+,T52_W,T53_X,T53_Y,T53_Z,T53_W,T54_X,T54_Y,T54_Z,T54_W,T55_X
+,T55_Y,T55_Z,T55_W,T56_X,T56_Y,T56_Z,T56_W,T57_X,T57_Y,T57_Z
+,T57_W,T58_X,T58_Y,T58_Z,T58_W,T59_X,T59_Y,T59_Z,T59_W,T60_X
+,T60_Y,T60_Z,T60_W,T61_X,T61_Y,T61_Z,T61_W,T62_X,T62_Y,T62_Z
+,T62_W,T63_X,T63_Y,T63_Z,T63_W,T64_X,T64_Y,T64_Z,T64_W,T65_X
+,T65_Y,T65_Z,T65_W,T66_X,T66_Y,T66_Z,T66_W,T67_X,T67_Y,T67_Z
+,T67_W,T68_X,T68_Y,T68_Z,T68_W,T69_X,T69_Y,T69_Z,T69_W,T70_X
+,T70_Y,T70_Z,T70_W,T71_X,T71_Y,T71_Z,T71_W,T72_X,T72_Y,T72_Z
+,T72_W,T73_X,T73_Y,T73_Z,T73_W,T74_X,T74_Y,T74_Z,T74_W,T75_X
+,T75_Y,T75_Z,T75_W,T76_X,T76_Y,T76_Z,T76_W,T77_X,T77_Y,T77_Z
+,T77_W,T78_X,T78_Y,T78_Z,T78_W,T79_X,T79_Y,T79_Z,T79_W,T80_X
+,T80_Y,T80_Z,T80_W,T81_X,T81_Y,T81_Z,T81_W,T82_X,T82_Y,T82_Z
+,T82_W,T83_X,T83_Y,T83_Z,T83_W,T84_X,T84_Y,T84_Z,T84_W,T85_X
+,T85_Y,T85_Z,T85_W,T86_X,T86_Y,T86_Z,T86_W,T87_X,T87_Y,T87_Z
+,T87_W,T88_X,T88_Y,T88_Z,T88_W,T89_X,T89_Y,T89_Z,T89_W,T90_X
+,T90_Y,T90_Z,T90_W,T91_X,T91_Y,T91_Z,T91_W,T92_X,T92_Y,T92_Z
+,T92_W,T93_X,T93_Y,T93_Z,T93_W,T94_X,T94_Y,T94_Z,T94_W,T95_X
+,T95_Y,T95_Z,T95_W,T96_X,T96_Y,T96_Z,T96_W,T97_X,T97_Y,T97_Z
+,T97_W,T98_X,T98_Y,T98_Z,T98_W,T99_X,T99_Y,T99_Z,T99_W,T100_X
+,T100_Y,T100_Z,T100_W,T101_X,T101_Y,T101_Z,T101_W,T102_X,T102_Y,T102_Z
+,T102_W,T103_X,T103_Y,T103_Z,T103_W,T104_X,T104_Y,T104_Z,T104_W,T105_X
+,T105_Y,T105_Z,T105_W,T106_X,T106_Y,T106_Z,T106_W,T107_X,T107_Y,T107_Z
+,T107_W,T108_X,T108_Y,T108_Z,T108_W,T109_X,T109_Y,T109_Z,T109_W,T110_X
+,T110_Y,T110_Z,T110_W,T111_X,T111_Y,T111_Z,T111_W,T112_X,T112_Y,T112_Z
+,T112_W,T113_X,T113_Y,T113_Z,T113_W,T114_X,T114_Y,T114_Z,T114_W,T115_X
+,T115_Y,T115_Z,T115_W,T116_X,T116_Y,T116_Z,T116_W,T117_X,T117_Y,T117_Z
+,T117_W,T118_X,T118_Y,T118_Z,T118_W,T119_X,T119_Y,T119_Z,T119_W,T120_X
+,T120_Y,T120_Z,T120_W,T121_X,T121_Y,T121_Z,T121_W,T122_X,T122_Y,T122_Z
+,T122_W,T123_X,T123_Y,T123_Z,T123_W,T124_X,T124_Y,T124_Z,T124_W,T125_X
+,T125_Y,T125_Z,T125_W,T126_X,T126_Y,T126_Z,T126_W,T127_X,T127_Y,T127_Z
+,T127_W)>;
+
+def R600_TReg32_X : RegisterClass <"AMDIL", [f32, i32], 32, (add  // X channel only of each temporary: T0_X..T127_X.
+    T0_X
+,T1_X,T2_X,T3_X,T4_X,T5_X,T6_X,T7_X,T8_X,T9_X,T10_X
+,T11_X,T12_X,T13_X,T14_X,T15_X,T16_X,T17_X,T18_X,T19_X,T20_X
+,T21_X,T22_X,T23_X,T24_X,T25_X,T26_X,T27_X,T28_X,T29_X,T30_X
+,T31_X,T32_X,T33_X,T34_X,T35_X,T36_X,T37_X,T38_X,T39_X,T40_X
+,T41_X,T42_X,T43_X,T44_X,T45_X,T46_X,T47_X,T48_X,T49_X,T50_X
+,T51_X,T52_X,T53_X,T54_X,T55_X,T56_X,T57_X,T58_X,T59_X,T60_X
+,T61_X,T62_X,T63_X,T64_X,T65_X,T66_X,T67_X,T68_X,T69_X,T70_X
+,T71_X,T72_X,T73_X,T74_X,T75_X,T76_X,T77_X,T78_X,T79_X,T80_X
+,T81_X,T82_X,T83_X,T84_X,T85_X,T86_X,T87_X,T88_X,T89_X,T90_X
+,T91_X,T92_X,T93_X,T94_X,T95_X,T96_X,T97_X,T98_X,T99_X,T100_X
+,T101_X,T102_X,T103_X,T104_X,T105_X,T106_X,T107_X,T108_X,T109_X,T110_X
+,T111_X,T112_X,T113_X,T114_X,T115_X,T116_X,T117_X,T118_X,T119_X,T120_X
+,T121_X,T122_X,T123_X,T124_X,T125_X,T126_X,T127_X)>;
+
+def R600_Reg32 : RegisterClass <"AMDIL", [f32, i32], 32, (add  // Every 32-bit register: temporaries, constants, and special values.
+    R600_TReg32,
+    R600_CReg32,
+    ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF)>;
+
+def R600_Reg128 : RegisterClass<"AMDIL", [v4f32, v4i32], 128, (add  // 128-bit vec4 views of the temporaries: T0_XYZW..T127_XYZW.
+    T0_XYZW
+,T1_XYZW,T2_XYZW,T3_XYZW,T4_XYZW,T5_XYZW,T6_XYZW,T7_XYZW,T8_XYZW,T9_XYZW,T10_XYZW
+,T11_XYZW,T12_XYZW,T13_XYZW,T14_XYZW,T15_XYZW,T16_XYZW,T17_XYZW,T18_XYZW,T19_XYZW,T20_XYZW
+,T21_XYZW,T22_XYZW,T23_XYZW,T24_XYZW,T25_XYZW,T26_XYZW,T27_XYZW,T28_XYZW,T29_XYZW,T30_XYZW
+,T31_XYZW,T32_XYZW,T33_XYZW,T34_XYZW,T35_XYZW,T36_XYZW,T37_XYZW,T38_XYZW,T39_XYZW,T40_XYZW
+,T41_XYZW,T42_XYZW,T43_XYZW,T44_XYZW,T45_XYZW,T46_XYZW,T47_XYZW,T48_XYZW,T49_XYZW,T50_XYZW
+,T51_XYZW,T52_XYZW,T53_XYZW,T54_XYZW,T55_XYZW,T56_XYZW,T57_XYZW,T58_XYZW,T59_XYZW,T60_XYZW
+,T61_XYZW,T62_XYZW,T63_XYZW,T64_XYZW,T65_XYZW,T66_XYZW,T67_XYZW,T68_XYZW,T69_XYZW,T70_XYZW
+,T71_XYZW,T72_XYZW,T73_XYZW,T74_XYZW,T75_XYZW,T76_XYZW,T77_XYZW,T78_XYZW,T79_XYZW,T80_XYZW
+,T81_XYZW,T82_XYZW,T83_XYZW,T84_XYZW,T85_XYZW,T86_XYZW,T87_XYZW,T88_XYZW,T89_XYZW,T90_XYZW
+,T91_XYZW,T92_XYZW,T93_XYZW,T94_XYZW,T95_XYZW,T96_XYZW,T97_XYZW,T98_XYZW,T99_XYZW,T100_XYZW
+,T101_XYZW,T102_XYZW,T103_XYZW,T104_XYZW,T105_XYZW,T106_XYZW,T107_XYZW,T108_XYZW,T109_XYZW,T110_XYZW
+,T111_XYZW,T112_XYZW,T113_XYZW,T114_XYZW,T115_XYZW,T116_XYZW,T117_XYZW,T118_XYZW,T119_XYZW,T120_XYZW
+,T121_XYZW,T122_XYZW,T123_XYZW,T124_XYZW,T125_XYZW,T126_XYZW,T127_XYZW)>
+{
+  let CopyCost = -1;  // Mark 128-bit copies as prohibitively expensive so the allocator avoids them.
+}
+
diff --git a/lib/Target/AMDGPU/R600Schedule.td b/lib/Target/AMDGPU/R600Schedule.td
new file mode 100644
index 0000000..d195790
--- /dev/null
+++ b/lib/Target/AMDGPU/R600Schedule.td
@@ -0,0 +1,36 @@
+//===-- R600Schedule.td - R600 Scheduling definitions ------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 has a VLIW architecture.  On pre-cayman cards there are 5 instruction
+// slots ALU.X, ALU.Y, ALU.Z, ALU.W, and TRANS.  For cayman cards, the TRANS
+// slot has been removed. 
+//
+//===----------------------------------------------------------------------===//
+
+
+def ALU_X : FuncUnit;  // Vector ALU slot X.
+def ALU_Y : FuncUnit;  // Vector ALU slot Y.
+def ALU_Z : FuncUnit;  // Vector ALU slot Z.
+def ALU_W : FuncUnit;  // Vector ALU slot W.
+def TRANS : FuncUnit;  // Transcendental slot; not present on Cayman (see header).
+
+
+def AnyALU : InstrItinClass;    // May issue to any slot, including TRANS.
+def VecALU : InstrItinClass;    // Restricted to the vector ALU slots (ALU.X/Y/Z/W).
+def TransALU : InstrItinClass;  // Must issue to the TRANS slot.
+
+def R600_EG_Itin : ProcessorItineraries <
+  [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS],
+  [],
+  [
+    InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>,
+    InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
+    InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>
+  ]
+>;
diff --git a/lib/Target/AMDGPU/SIAssignInterpRegs.cpp b/lib/Target/AMDGPU/SIAssignInterpRegs.cpp
new file mode 100644
index 0000000..8a25280
--- /dev/null
+++ b/lib/Target/AMDGPU/SIAssignInterpRegs.cpp
@@ -0,0 +1,117 @@
+//===-- SIAssignInterpRegs.cpp - Assign interpolation registers -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass maps the pseudo interpolation registers to the correct physical
+// registers.  Prior to executing a fragment shader, the GPU loads interpolation
+// parameters into physical registers.  The specific physical register that each
+// interpolation parameter ends up in depends on the type of the interpolation
+// parameter as well as how many interpolation parameters are used by the
+// shader.
+//
+//===----------------------------------------------------------------------===//
+
+
+
+#include "AMDGPU.h"
+#include "AMDGPUUtil.h"
+#include "AMDIL.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIAssignInterpRegsPass : public MachineFunctionPass {
+
+private:
+  static char ID;
+  TargetMachine &TM;
+
+public:
+  SIAssignInterpRegsPass(TargetMachine &tm) :
+    MachineFunctionPass(ID), TM(tm) { }
+
+  virtual bool runOnMachineFunction(MachineFunction &MF);
+
+  const char *getPassName() const { return "SI Assign interpolation registers"; } // fixed typo: "intrpolation"
+};
+
+} // End anonymous namespace
+
+char SIAssignInterpRegsPass::ID = 0;
+
+#define INTERP_VALUES 16 /* number of entries in the InterpUse table below */
+
+struct interp_info {
+  bool enabled;        /* set when a register of this group has uses */
+  unsigned regs[3];    /* pseudo interpolation registers in this group */
+  unsigned reg_count;  /* number of valid entries in regs[] */
+};
+
+
+FunctionPass *llvm::createSIAssignInterpRegsPass(TargetMachine &tm) { /* factory entry point for the pass */
+  return new SIAssignInterpRegsPass(tm);
+}
+
+bool SIAssignInterpRegsPass::runOnMachineFunction(MachineFunction &MF)
+{
+
+  struct interp_info InterpUse[INTERP_VALUES] = {
+    {false, {AMDIL::PERSP_SAMPLE_I, AMDIL::PERSP_SAMPLE_J}, 2},
+    {false, {AMDIL::PERSP_CENTER_I, AMDIL::PERSP_CENTER_J}, 2},
+    {false, {AMDIL::PERSP_CENTROID_I, AMDIL::PERSP_CENTROID_J}, 2},
+    {false, {AMDIL::PERSP_I_W, AMDIL::PERSP_J_W, AMDIL::PERSP_1_W}, 3},
+    {false, {AMDIL::LINEAR_SAMPLE_I, AMDIL::LINEAR_SAMPLE_J}, 2},
+    {false, {AMDIL::LINEAR_CENTER_I, AMDIL::LINEAR_CENTER_J}, 2},
+    {false, {AMDIL::LINEAR_CENTROID_I, AMDIL::LINEAR_CENTROID_J}, 2},
+    {false, {AMDIL::LINE_STIPPLE_TEX_COORD}, 1},
+    {false, {AMDIL::POS_X_FLOAT}, 1},
+    {false, {AMDIL::POS_Y_FLOAT}, 1},
+    {false, {AMDIL::POS_Z_FLOAT}, 1},
+    {false, {AMDIL::POS_W_FLOAT}, 1},
+    {false, {AMDIL::FRONT_FACE}, 1},
+    {false, {AMDIL::ANCILLARY}, 1},
+    {false, {AMDIL::SAMPLE_COVERAGE}, 1},
+    {false, {AMDIL::POS_FIXED_PT}, 1}
+  };
+
+  SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  /* First pass: a group is enabled if ANY of its registers has uses. */
+  for (unsigned interp_idx = 0; interp_idx < INTERP_VALUES; interp_idx++) {
+    for (unsigned reg_idx = 0; reg_idx < InterpUse[interp_idx].reg_count;
+                                                               reg_idx++) {
+      InterpUse[interp_idx].enabled = InterpUse[interp_idx].enabled ||
+                            !MRI.use_empty(InterpUse[interp_idx].regs[reg_idx]);
+    }
+  }
+
+  unsigned used_vgprs = 0;
+
+  /* Second pass, replace with VGPRs. */
+  for (unsigned interp_idx = 0; interp_idx < INTERP_VALUES; interp_idx++) {
+    if (!InterpUse[interp_idx].enabled) {
+      continue;
+    }
+    MFI->spi_ps_input_addr |= (1 << interp_idx);
+
+    for (unsigned reg_idx = 0; reg_idx < InterpUse[interp_idx].reg_count;
+                                                  reg_idx++, used_vgprs++) {
+      unsigned new_reg = AMDIL::VReg_32RegClass.getRegister(used_vgprs);
+      unsigned virt_reg = MRI.createVirtualRegister(&AMDIL::VReg_32RegClass);
+      MRI.replaceRegWith(InterpUse[interp_idx].regs[reg_idx], virt_reg);
+      AMDGPU::utilAddLiveIn(&MF, MRI, TM.getInstrInfo(), new_reg, virt_reg);
+    }
+  }
+
+  return false;
+}
diff --git a/lib/Target/AMDGPU/SICodeEmitter.cpp b/lib/Target/AMDGPU/SICodeEmitter.cpp
new file mode 100644
index 0000000..b441b9a
--- /dev/null
+++ b/lib/Target/AMDGPU/SICodeEmitter.cpp
@@ -0,0 +1,321 @@
+//===-- SICodeEmitter.cpp - SI Code Emitter -------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The SI code emitter produces machine code that can be executed directly on
+// the GPU device.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "AMDGPU.h"
+#include "AMDGPUUtil.h"
+#include "AMDILCodeEmitter.h"
+#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <stdio.h>
+
+#define LITERAL_REG 255
+#define VGPR_BIT(src_idx) (1ULL << (9 * (src_idx) - 1)) // parenthesize the argument so expression arguments expand correctly
+using namespace llvm;
+
+namespace {
+
+  class SICodeEmitter : public MachineFunctionPass, public AMDILCodeEmitter {
+
+  private:
+    static char ID;
+    formatted_raw_ostream &_OS;   // stream receiving the raw machine code bytes
+    const TargetMachine *TM;      // set in runOnMachineFunction
+    void emitState(MachineFunction & MF);
+    void emitInstr(MachineInstr &MI);
+
+    void outputBytes(uint64_t value, unsigned bytes);
+    unsigned GPRAlign(const MachineInstr &MI, unsigned OpNo, unsigned shift)
+                                                                      const;
+
+  public:
+    SICodeEmitter(formatted_raw_ostream &OS) : MachineFunctionPass(ID),
+        _OS(OS), TM(NULL) { }
+    const char *getPassName() const { return "SI Code Emitter"; }
+    bool runOnMachineFunction(MachineFunction &MF);
+
+    /// getMachineOpValue - Return the encoding for MO
+    virtual uint64_t getMachineOpValue(const MachineInstr &MI,
+                                       const MachineOperand &MO) const;
+
+    /// GPR4AlignEncode - Encoding for when 4 consecutive registers are used
+    virtual unsigned GPR4AlignEncode(const MachineInstr  &MI, unsigned OpNo)
+                                                                      const;
+
+    /// GPR2AlignEncode - Encoding for when 2 consecutive registers are used
+    virtual unsigned GPR2AlignEncode(const MachineInstr &MI, unsigned OpNo)
+                                                                      const;
+    /// i32LiteralEncode - Encode an i32 literal that is used as an operand
+    /// for an instruction in place of a register.
+    virtual uint64_t i32LiteralEncode(const MachineInstr &MI, unsigned OpNo)
+                                                                      const;
+    /// SMRDmemriEncode - Encoding for SMRD indexed loads
+    virtual uint32_t SMRDmemriEncode(const MachineInstr &MI, unsigned OpNo)
+                                                                     const;
+
+    /// VOPPostEncode - Post-Encoder method for VOP instructions
+    virtual uint64_t VOPPostEncode(const MachineInstr &MI,
+                                   uint64_t Value) const;
+  };
+}
+
+char SICodeEmitter::ID = 0;
+
+FunctionPass *llvm::createSICodeEmitterPass(formatted_raw_ostream &OS) { // factory entry point for the pass
+  return new SICodeEmitter(OS);
+}
+
+void SICodeEmitter::emitState(MachineFunction & MF)
+{
+  unsigned maxSGPR = 0;   // highest scalar GPR index seen
+  unsigned maxVGPR = 0;   // highest vector GPR index seen
+  bool VCCUsed = false;
+  const SIRegisterInfo * RI =
+                static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());
+  SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+                                                  BB != BB_E; ++BB) {
+    MachineBasicBlock &MBB = *BB;
+    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+                                                      I != E; ++I) {
+      MachineInstr &MI = *I;
+      unsigned numOperands = MI.getNumOperands();
+      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
+        MachineOperand & MO = MI.getOperand(op_idx);
+        unsigned maxUsed;
+        unsigned width = 0;   // register width in dwords
+        bool isSGPR = false;
+        unsigned reg;
+        unsigned hwReg;
+        if (!MO.isReg()) {
+          continue;
+        }
+        reg = MO.getReg();
+        if (reg == AMDIL::VCC) {
+          VCCUsed = true;
+          continue;
+        }
+        if (AMDIL::SReg_32RegClass.contains(reg)) {
+          isSGPR = true;
+          width = 1;
+        } else if (AMDIL::VReg_32RegClass.contains(reg)) {
+          isSGPR = false;
+          width = 1;
+        } else if (AMDIL::SReg_64RegClass.contains(reg)) {
+          isSGPR = true;
+          width = 2;
+        } else if (AMDIL::VReg_64RegClass.contains(reg)) {
+          isSGPR = false;
+          width = 2;
+        } else if (AMDIL::SReg_128RegClass.contains(reg)) {
+          isSGPR = true;
+          width = 4;
+        } else if (AMDIL::VReg_128RegClass.contains(reg)) {
+          isSGPR = false;
+          width = 4;
+        } else if (AMDIL::SReg_256RegClass.contains(reg)) {
+          isSGPR = true;
+          width = 8;
+        } else {
+          assert(!"Unknown register class"); // was assert("!...") -- a string literal is always true, so it never fired
+        }
+        hwReg = RI->getEncodingValue(reg);
+        maxUsed = ((hwReg + 1) * width) - 1;   // index of the last dword the register covers
+        if (isSGPR) {
+          maxSGPR = maxUsed > maxSGPR ? maxUsed : maxSGPR;
+        } else {
+          maxVGPR = maxUsed > maxVGPR ? maxUsed : maxVGPR;
+        }
+      }
+    }
+  }
+  if (VCCUsed) {
+    maxSGPR += 2;   // VCC occupies two additional scalar registers
+  }
+  outputBytes(maxSGPR + 1, 4);
+  outputBytes(maxVGPR + 1, 4);
+  outputBytes(MFI->spi_ps_input_addr, 4);
+}
+
+bool SICodeEmitter::runOnMachineFunction(MachineFunction &MF)
+{
+  TM = &MF.getTarget();
+  const AMDILSubtarget &STM = TM->getSubtarget<AMDILSubtarget>();
+
+  if (STM.dumpCode()) {
+    MF.dump();   // debug aid: dump the MachineFunction before emission
+  }
+
+  emitState(MF);   // emit GPR counts and SPI_PS_INPUT_ADDR header first
+
+  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+                                                  BB != BB_E; ++BB) {
+    MachineBasicBlock &MBB = *BB;
+    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+                                                      I != E; ++I) {
+      MachineInstr &MI = *I;
+      if (MI.getOpcode() != AMDIL::KILL && MI.getOpcode() != AMDIL::RETURN) {   // skip KILL/RETURN pseudos
+        emitInstr(MI);
+      }
+    }
+  }
+  // Emit a trailing S_ENDPGM to terminate the program.
+  MachineInstr * End = BuildMI(MF, DebugLoc(),
+                               TM->getInstrInfo()->get(AMDIL::S_ENDPGM));
+  emitInstr(*End);
+  return false;
+}
+
+void SICodeEmitter::emitInstr(MachineInstr &MI)
+{
+  const SIInstrInfo * SII = static_cast<const SIInstrInfo*>(TM->getInstrInfo());
+
+  uint64_t hwInst = getBinaryCodeForInstr(MI);
+
+  // All-ones in the low dword is the sentinel for an unencodable instruction.
+  if ((hwInst & 0xffffffff) == 0xffffffff) {
+    fprintf(stderr, "Unsupported Instruction: \n");
+    MI.dump();
+    abort();
+  }
+
+  unsigned bytes = SII->getEncodingBytes(MI);
+  outputBytes(hwInst, bytes);
+}
+
+uint64_t SICodeEmitter::getMachineOpValue(const MachineInstr &MI,
+                                          const MachineOperand &MO) const
+{
+  const SIRegisterInfo * RI =
+                static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());
+
+  switch(MO.getType()) {
+  case MachineOperand::MO_Register:
+    return RI->getEncodingValue(MO.getReg());
+
+  case MachineOperand::MO_Immediate:
+    return MO.getImm();
+
+  case MachineOperand::MO_FPImmediate:
+    // XXX: Not all instructions can use inline literals
+    // XXX: We should make sure this is a 32-bit constant
+    // LITERAL_REG marker in the low bits, raw float bit pattern in the high dword.
+    return LITERAL_REG | (MO.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue() << 32);
+  default:
+    llvm_unreachable("Encoding of this operand type is not supported yet.");
+    break;
+  }
+}
+
+unsigned SICodeEmitter::GPRAlign(const MachineInstr &MI, unsigned OpNo,
+    unsigned shift) const
+{
+  const SIRegisterInfo * RI =
+                static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());
+  unsigned regCode = RI->getEncodingValue(MI.getOperand(OpNo).getReg());
+  return regCode >> shift;   // hw encoding divided by 2^shift (register group alignment)
+}
+
+unsigned SICodeEmitter::GPR4AlignEncode(const MachineInstr &MI,
+    unsigned OpNo) const
+{
+  return GPRAlign(MI, OpNo, 2);   // 4-register groups: encoding / 4
+}
+
+unsigned SICodeEmitter::GPR2AlignEncode(const MachineInstr &MI,
+    unsigned OpNo) const
+{
+  return GPRAlign(MI, OpNo, 1);   // 2-register groups: encoding / 2
+}
+
+uint64_t SICodeEmitter::i32LiteralEncode(const MachineInstr &MI,
+    unsigned OpNo) const
+{
+  return LITERAL_REG | (MI.getOperand(OpNo).getImm() << 32);   // marker low, 32-bit literal payload high
+}
+
+#define SMRD_OFFSET_MASK 0xff
+#define SMRD_IMM_SHIFT 8
+#define SMRD_SBASE_MASK 0x3f
+#define SMRD_SBASE_SHIFT 9
+/// SMRDmemriEncode - This function is responsible for encoding the offset
+/// and the base ptr for SMRD instructions it should return a bit string in
+/// this format:
+///
+/// OFFSET = bits{7-0}
+/// IMM    = bits{8}
+/// SBASE  = bits{14-9}
+///
+uint32_t SICodeEmitter::SMRDmemriEncode(const MachineInstr &MI,
+    unsigned OpNo) const
+{
+  uint32_t encoding;
+
+  // OpNo is the base pointer; the offset is the following operand.
+  const MachineOperand &OffsetOp = MI.getOperand(OpNo + 1);
+
+  //XXX: Use this function for SMRD loads with register offsets
+  assert(OffsetOp.isImm());
+
+  encoding =
+      (getMachineOpValue(MI, OffsetOp) & SMRD_OFFSET_MASK)
+    | (1 << SMRD_IMM_SHIFT) //XXX If the Offset is a register we shouldn't set this bit
+    | ((GPR2AlignEncode(MI, OpNo) & SMRD_SBASE_MASK) << SMRD_SBASE_SHIFT)
+    ;
+
+  return encoding;
+}
+
+/// Set the "VGPR" bit for VOP args that can take either a VGPR or a SGPR.
+/// XXX: It would be nice if we could handle this without a PostEncode function.
+uint64_t SICodeEmitter::VOPPostEncode(const MachineInstr &MI,
+    uint64_t Value) const
+{
+  const SIInstrInfo * SII = static_cast<const SIInstrInfo*>(TM->getInstrInfo());
+  unsigned encodingType = SII->getEncodingType(MI);
+  unsigned numSrcOps;
+  unsigned vgprBitOffset;
+
+  if (encodingType == SIInstrEncodingType::VOP3) {
+    numSrcOps = 3;
+    vgprBitOffset = 32;   // VOP3 source fields live in the high dword of the 64-bit encoding
+  } else {
+    numSrcOps = 1;
+    vgprBitOffset = 0;
+  }
+
+  // Add one to skip over the destination reg operand.
+  for (unsigned opIdx = 1; opIdx < numSrcOps + 1; opIdx++) {
+    if (!MI.getOperand(opIdx).isReg()) {
+      continue;   // immediates keep their encoding unchanged
+    }
+    unsigned reg = MI.getOperand(opIdx).getReg();
+    if (AMDIL::VReg_32RegClass.contains(reg)
+        || AMDIL::VReg_64RegClass.contains(reg)) {
+      Value |= (VGPR_BIT(opIdx)) << vgprBitOffset;   // top bit of each 9-bit src field marks a VGPR
+    }
+  }
+  return Value;
+}
+
+
+void SICodeEmitter::outputBytes(uint64_t value, unsigned bytes)
+{
+  // Write the low `bytes` bytes of value to the stream, least significant first.
+  for (unsigned i = 0; i < bytes; i++) {
+    _OS.write((uint8_t) ((value >> (8 * i)) & 0xff));
+  }
+}
diff --git a/lib/Target/AMDGPU/SIGenRegisterInfo.pl b/lib/Target/AMDGPU/SIGenRegisterInfo.pl
new file mode 100644
index 0000000..d1a1c3b
--- /dev/null
+++ b/lib/Target/AMDGPU/SIGenRegisterInfo.pl
@@ -0,0 +1,270 @@
+#===-- SIGenRegisterInfo.pl - Script for generating register info files ----===#
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+#===------------------------------------------------------------------------===#
+#
+# This perl script prints to stdout .td code to be used as SIRegisterInfo.td
+# it also generates a file called SIHwRegInfo.include, which contains helper
+# functions for determining the hw encoding of registers.
+#
+#===------------------------------------------------------------------------===#
+
+use strict;
+use warnings;
+
+my $SGPR_COUNT = 104;   # number of 32-bit scalar GPR defs to generate
+my $VGPR_COUNT = 256;   # number of 32-bit vector GPR defs to generate
+
+my $SGPR_MAX_IDX = $SGPR_COUNT - 1;
+my $VGPR_MAX_IDX = $VGPR_COUNT - 1;
+
+my $INDEX_FILE = defined($ARGV[0]) ? $ARGV[0] : '';   # optional path for the getHWRegNum() include file
+
+print <<STRING;
+
+let Namespace = "AMDIL" in {
+  def low : SubRegIndex;
+  def high : SubRegIndex;
+
+  def sub0 : SubRegIndex;
+  def sub1 : SubRegIndex;
+  def sub2 : SubRegIndex;
+  def sub3 : SubRegIndex;
+  def sub4 : SubRegIndex;
+  def sub5 : SubRegIndex;
+  def sub6 : SubRegIndex;
+  def sub7 : SubRegIndex;
+}
+
+class SIReg <string n, bits<16> encoding = 0> : Register<n> {
+  let Namespace = "AMDIL";
+  let HWEncoding = encoding;
+}
+
+class SI_64 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [low, high];
+  let HWEncoding = encoding;
+}
+
+class SI_128 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sel_x, sel_y, sel_z, sel_w];
+  let HWEncoding = encoding;
+}
+
+class SI_256 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7];
+  let HWEncoding = encoding;
+}
+
+class SGPR_32 <bits<16> num, string name> : SIReg<name, num>;
+
+class VGPR_32 <bits<16> num, string name> : SIReg<name, num>;
+
+class SGPR_64 <bits<16> num, string name, list<Register> subregs> :
+    SI_64 <name, subregs, num>;
+
+class VGPR_64 <bits<16> num, string name, list<Register> subregs> :
+    SI_64 <name, subregs, num>;
+
+class SGPR_128 <bits<16> num, string name, list<Register> subregs> :
+    SI_128 <name, subregs, num>;
+
+class VGPR_128 <bits<16> num, string name, list<Register> subregs> :
+    SI_128 <name, subregs, num>;
+
+class SGPR_256 <bits<16> num, string name, list<Register> subregs> :
+    SI_256 <name, subregs, num>;
+
+def VCC : SIReg<"VCC">;
+def SCC : SIReg<"SCC">;
+def SREG_LIT_0 : SIReg <"S LIT 0", 128>;
+
+def M0 : SIReg <"M0", 124>;
+
+//Interpolation registers
+
+def PERSP_SAMPLE_I : SIReg <"PERSP_SAMPLE_I">;
+def PERSP_SAMPLE_J : SIReg <"PERSP_SAMPLE_J">;
+def PERSP_CENTER_I : SIReg <"PERSP_CENTER_I">;
+def PERSP_CENTER_J : SIReg <"PERSP_CENTER_J">;
+def PERSP_CENTROID_I : SIReg <"PERSP_CENTROID_I">;
+def PERSP_CENTROID_J : SIReg <"PERSP_CENTROID_J">;
+def PERSP_I_W : SIReg <"PERSP_I_W">;
+def PERSP_J_W : SIReg <"PERSP_J_W">;
+def PERSP_1_W : SIReg <"PERSP_1_W">;
+def LINEAR_SAMPLE_I : SIReg <"LINEAR_SAMPLE_I">;
+def LINEAR_SAMPLE_J : SIReg <"LINEAR_SAMPLE_J">;
+def LINEAR_CENTER_I : SIReg <"LINEAR_CENTER_I">;
+def LINEAR_CENTER_J : SIReg <"LINEAR_CENTER_J">;
+def LINEAR_CENTROID_I : SIReg <"LINEAR_CENTROID_I">;
+def LINEAR_CENTROID_J : SIReg <"LINEAR_CENTROID_J">;
+def LINE_STIPPLE_TEX_COORD : SIReg <"LINE_STIPPLE_TEX_COORD">;
+def POS_X_FLOAT : SIReg <"POS_X_FLOAT">;
+def POS_Y_FLOAT : SIReg <"POS_Y_FLOAT">;
+def POS_Z_FLOAT : SIReg <"POS_Z_FLOAT">;
+def POS_W_FLOAT : SIReg <"POS_W_FLOAT">;
+def FRONT_FACE : SIReg <"FRONT_FACE">;
+def ANCILLARY : SIReg <"ANCILLARY">;
+def SAMPLE_COVERAGE : SIReg <"SAMPLE_COVERAGE">;
+def POS_FIXED_PT : SIReg <"POS_FIXED_PT">;
+
+STRING
+
+#32 bit register
+
+my @SGPR;
+for (my $i = 0; $i < $SGPR_COUNT; $i++) {   # emit SGPR0..SGPR103 defs and remember the names
+  print "def SGPR$i : SGPR_32 <$i, \"SGPR$i\">;\n";
+  $SGPR[$i] = "SGPR$i";
+}
+
+my @VGPR;
+for (my $i = 0; $i < $VGPR_COUNT; $i++) {   # emit VGPR0..VGPR255 defs and remember the names
+  print "def VGPR$i : VGPR_32 <$i, \"VGPR$i\">;\n";
+  $VGPR[$i] = "VGPR$i";
+}
+
+print <<STRING;
+
+def SReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add (sequence "SGPR%u", 0, $SGPR_MAX_IDX),  SREG_LIT_0, M0)
+>;
+
+def VReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add (sequence "VGPR%u", 0, $VGPR_MAX_IDX),
+    PERSP_SAMPLE_I, PERSP_SAMPLE_J,
+    PERSP_CENTER_I, PERSP_CENTER_J,
+    PERSP_CENTROID_I, PERSP_CENTROID_J,
+    PERSP_I_W, PERSP_J_W, PERSP_1_W,
+    LINEAR_SAMPLE_I, LINEAR_SAMPLE_J,
+    LINEAR_CENTER_I, LINEAR_CENTER_J,
+    LINEAR_CENTROID_I, LINEAR_CENTROID_J,
+    LINE_STIPPLE_TEX_COORD,
+    POS_X_FLOAT,
+    POS_Y_FLOAT,
+    POS_Z_FLOAT,
+    POS_W_FLOAT,
+    FRONT_FACE,
+    ANCILLARY,
+    SAMPLE_COVERAGE,
+    POS_FIXED_PT
+    )
+>;
+
+def AllReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add VReg_32, SReg_32) 
+>;
+
+def CCReg : RegisterClass<"AMDIL", [f32], 32, (add VCC, SCC)>;
+
+STRING
+
+my @subregs_64 = ('low', 'high');
+my @subregs_128 = ('sel_x', 'sel_y', 'sel_z', 'sel_w');
+my @subregs_256 = ('sub0', 'sub1', 'sub2', 'sub3', 'sub4', 'sub5', 'sub6', 'sub7');
+
+my @SGPR64 = print_sgpr_class(64, \@subregs_64, ('i64'));
+my @SGPR128 = print_sgpr_class(128, \@subregs_128, ('v4f32', 'v4i32'));
+my @SGPR256 = print_sgpr_class(256, \@subregs_256, ('v8i32'));
+
+my @VGPR64 = print_vgpr_class(64, \@subregs_64, ('i64'));
+my @VGPR128 = print_vgpr_class(128, \@subregs_128, ('v4f32'));
+
+
+my $sgpr64_list = join(',', @SGPR64);
+my $vgpr64_list = join(',', @VGPR64);
+print <<STRING;
+
+def AllReg_64 : RegisterClass<"AMDIL", [f64, i64], 64,
+    (add $sgpr64_list, $vgpr64_list)
+>;
+
+STRING
+
+if ($INDEX_FILE ne '') {
+  open(my $fh, ">", $INDEX_FILE) or die "Can't open $INDEX_FILE: $!";  # unchecked open silently dropped all output
+  my %hw_values;
+
+  for (my $i = 0; $i <= $#SGPR; $i++) {
+    push (@{$hw_values{$i}}, $SGPR[$i]);
+  }
+
+  for (my $i = 0; $i <= $#SGPR64; $i++) {
+    push (@{$hw_values{$i * 2}}, $SGPR64[$i])
+  }
+
+  for (my $i = 0; $i <= $#SGPR128; $i++) {
+    push (@{$hw_values{$i * 4}}, $SGPR128[$i]);
+  }
+
+  for (my $i = 0; $i <= $#SGPR256; $i++) {
+    push (@{$hw_values{$i * 8}}, $SGPR256[$i]);
+  }
+
+  for (my $i = 0; $i <= $#VGPR; $i++) {
+    push (@{$hw_values{$i}}, $VGPR[$i]);
+  }
+  for (my $i = 0; $i <= $#VGPR64; $i++) {
+    push (@{$hw_values{$i * 2}}, $VGPR64[$i]);
+  }
+
+  for (my $i = 0; $i <= $#VGPR128; $i++) {
+    push (@{$hw_values{$i * 4}}, $VGPR128[$i]);
+  }
+
+
+  print $fh "unsigned SIRegisterInfo::getHWRegNum(unsigned reg) const\n{\n  switch(reg) {\n";
+  for my $key (keys(%hw_values)) {
+    my @names = @{$hw_values{$key}};
+    for my $regname (@names) {
+      print $fh "  case AMDIL::$regname:\n"
+    }
+    print $fh "    return $key;\n";
+  }
+  print $fh "  default: return 0;\n  }\n}\n"; close($fh);  # flush the include file explicitly
+}
+
+
+
+
+sub print_sgpr_class {   # emit scalar register defs/class of the given width; returns the generated names
+  my ($reg_width, $sub_reg_ref, @types) = @_;
+  return print_reg_class('SReg', 'SGPR', $reg_width, $SGPR_COUNT, $sub_reg_ref, @types);
+}
+
+sub print_vgpr_class {   # emit vector register defs/class of the given width; returns the generated names
+  my ($reg_width, $sub_reg_ref, @types) = @_;
+  return print_reg_class('VReg', 'VGPR', $reg_width, $VGPR_COUNT, $sub_reg_ref, @types);
+}
+
+sub print_reg_class {   # emit one register def per aligned group plus its RegisterClass; returns the def names
+  my ($class_prefix, $reg_prefix, $reg_width, $reg_count, $sub_reg_ref, @types) = @_;
+  my @registers;
+  my $component_count = $reg_width / 32;   # 32-bit sub-registers per register
+
+  for (my $i = 0; $i < $reg_count; $i += $component_count) {
+    my $reg_name = $reg_prefix . $i . '_' . $reg_width;
+    my @sub_regs;
+    for (my $idx = 0; $idx < $component_count; $idx++) {
+      my $sub_idx = $i + $idx;
+      push(@sub_regs, $reg_prefix . $sub_idx);
+    }
+    print "def $reg_name : $reg_prefix\_$reg_width <$i, \"$reg_name\", [ ", join(',', @sub_regs) , "]>;\n";
+    if ($i % 10 == 0) {
+      $reg_name .= "\n";   # newline every 10th name to wrap the generated (add ...) list; note it also reaches the returned names
+    }
+    push (@registers, $reg_name);
+  }
+  my $reg_list = join(', ', @registers);
+
+  print "def $class_prefix\_$reg_width : RegisterClass<\"AMDIL\", [" . join (', ', @types) . "], $reg_width,\n  (add $reg_list)\n>{\n";
+  print "}\n";
+  return @registers;
+}
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
new file mode 100644
index 0000000..5b1959d
--- /dev/null
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -0,0 +1,195 @@
+//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Most of the DAG lowering is handled in AMDGPUISelLowering.cpp.  This file is
+// mostly EmitInstrWithCustomInserter().
+//
+//===----------------------------------------------------------------------===//
+
+#include "SIISelLowering.h"
+#include "SIInstrInfo.h"
+#include "SIRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+SITargetLowering::SITargetLowering(TargetMachine &TM) :
+    AMDGPUTargetLowering(TM),
+    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo()))
+{
+  // Scalar and small vector value types live in vector registers by default.
+  addRegisterClass(MVT::v4f32, &AMDIL::VReg_128RegClass);
+  addRegisterClass(MVT::f32, &AMDIL::VReg_32RegClass);
+  addRegisterClass(MVT::i32, &AMDIL::VReg_32RegClass);
+  addRegisterClass(MVT::i64, &AMDIL::VReg_64RegClass);
+
+  // Wide integer vectors are mapped to scalar register classes.
+  addRegisterClass(MVT::v4i32, &AMDIL::SReg_128RegClass);
+  addRegisterClass(MVT::v8i32, &AMDIL::SReg_256RegClass);
+
+  computeRegisterProperties();
+
+  setOperationAction(ISD::ADD, MVT::i64, Legal);
+  setOperationAction(ISD::ADD, MVT::i32, Legal);
+
+}
+
+MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
+    MachineInstr * MI, MachineBasicBlock * BB) const
+{
+  const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
+  MachineRegisterInfo & MRI = BB->getParent()->getRegInfo();
+  MachineBasicBlock::iterator I = MI;
+
+  // Instructions flagged NEED_WAIT get an S_WAITCNT inserted right after them.
+  if (TII->get(MI->getOpcode()).TSFlags & SIInstrFlags::NEED_WAIT) {
+    AppendS_WAITCNT(MI, *BB, llvm::next(I));
+    return BB;
+  }
+
+  switch (MI->getOpcode()) {
+  default:
+    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+
+  case AMDIL::CLAMP_SI:
+    // Expand to V_MOV_B32_e64 with the CLAMP modifier set.
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+           .addOperand(MI->getOperand(0))
+           .addOperand(MI->getOperand(1))
+           // VSRC1-2 are unused, but we still need to fill all the
+           // operand slots, so we just reuse the VSRC0 operand
+           .addOperand(MI->getOperand(1))
+           .addOperand(MI->getOperand(1))
+           .addImm(0) // ABS
+           .addImm(1) // CLAMP
+           .addImm(0) // OMOD
+           .addImm(0); // NEG
+    MI->eraseFromParent();
+    break;
+
+  case AMDIL::FABS_SI:
+    // Expand to V_MOV_B32_e64 with the ABS modifier set.
+    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDIL::V_MOV_B32_e64))
+                 .addOperand(MI->getOperand(0))
+                 .addOperand(MI->getOperand(1))
+                 // VSRC1-2 are unused, but we still need to fill all the
+                 // operand slots, so we just reuse the VSRC0 operand
+                 .addOperand(MI->getOperand(1))
+                 .addOperand(MI->getOperand(1))
+                 .addImm(1) // ABS
+                 .addImm(0) // CLAMP
+                 .addImm(0) // OMOD
+                 .addImm(0); // NEG
+    MI->eraseFromParent();
+    break;
+
+  case AMDIL::SI_INTERP:
+    LowerSI_INTERP(MI, *BB, I, MRI);
+    break;
+  case AMDIL::SI_INTERP_CONST:
+    LowerSI_INTERP_CONST(MI, *BB, I);
+    break;
+  case AMDIL::SI_V_CNDLT:
+    LowerSI_V_CNDLT(MI, *BB, I, MRI);
+    break;
+  case AMDIL::USE_SGPR_32:
+  case AMDIL::USE_SGPR_64:
+    lowerUSE_SGPR(MI, BB->getParent(), MRI);
+    MI->eraseFromParent();
+    break;
+  case AMDIL::VS_LOAD_BUFFER_INDEX:
+    addLiveIn(MI, BB->getParent(), MRI, TII, AMDIL::VGPR0);
+    MI->eraseFromParent();
+    break;
+  }
+  return BB;
+}
+
+void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
+    MachineBasicBlock::iterator I) const
+{
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_WAITCNT))
+          .addImm(0);   // immediate 0 -- presumably waits on all counters; confirm against the ISA
+}
+
+void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
+{
+  unsigned tmp = MRI.createVirtualRegister(&AMDIL::VReg_32RegClass);   // holds the P1 partial result
+  MachineOperand dst = MI->getOperand(0);
+  MachineOperand iReg = MI->getOperand(1);
+  MachineOperand jReg = MI->getOperand(2);
+  MachineOperand attr_chan = MI->getOperand(3);
+  MachineOperand attr = MI->getOperand(4);
+  MachineOperand params = MI->getOperand(5);
+
+  // Load the interpolation parameters into M0 before the V_INTERP pair.
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
+          .addReg(AMDIL::M0)
+          .addOperand(params);
+
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P1_F32), tmp)
+          .addOperand(iReg)
+          .addOperand(attr_chan)
+          .addOperand(attr);
+
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_P2_F32))
+          .addOperand(dst)
+          .addReg(tmp)
+          .addOperand(jReg)
+          .addOperand(attr_chan)
+          .addOperand(attr);
+
+  MI->eraseFromParent();
+}
+
+void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
+    MachineBasicBlock &BB, MachineBasicBlock::iterator I) const
+{
+  MachineOperand dst = MI->getOperand(0);
+  MachineOperand attr_chan = MI->getOperand(1);
+  MachineOperand attr = MI->getOperand(2);
+  MachineOperand params = MI->getOperand(3);
+
+  // Load the interpolation parameters into M0 before V_INTERP_MOV.
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::S_MOV_B32))
+          .addReg(AMDIL::M0)
+          .addOperand(params);
+
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_INTERP_MOV_F32))
+          .addOperand(dst)
+          .addOperand(attr_chan)
+          .addOperand(attr);
+
+  MI->eraseFromParent();
+}
+
+void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
+    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
+{
+  // Compare the condition operand against the 0.0 literal register.
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CMP_LT_F32_e32))
+          .addOperand(MI->getOperand(1))
+          .addReg(AMDIL::SREG_LIT_0);
+
+  // Select between the two value operands based on the comparison result.
+  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDIL::V_CNDMASK_B32))
+          .addOperand(MI->getOperand(0))
+          .addOperand(MI->getOperand(2))
+          .addOperand(MI->getOperand(3));
+
+  MI->eraseFromParent();
+}
+
+// Map a USE_SGPR pseudo to the physical SGPR group selected by its
+// dword index, and mark that register live-in.
+void SITargetLowering::lowerUSE_SGPR(MachineInstr *MI,
+    MachineFunction * MF, MachineRegisterInfo & MRI) const
+{
+  const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
+  unsigned dstReg = MI->getOperand(0).getReg();
+  int64_t newIndex = MI->getOperand(1).getImm();   // index in dwords
+  const TargetRegisterClass * dstClass = MRI.getRegClass(dstReg);
+  unsigned DwordWidth = dstClass->getSize() / 4;
+  assert(newIndex % DwordWidth == 0 && "USE_SGPR not properly aligned"); // message said "USER_SGPR"; opcodes are USE_SGPR_32/64
+  newIndex = newIndex / DwordWidth;   // convert dword index to register-class index
+
+  unsigned newReg = dstClass->getRegister(newIndex);
+  addLiveIn(MI, MF, MRI, TII, newReg);
+}
+
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
new file mode 100644
index 0000000..4a1bc38
--- /dev/null
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -0,0 +1,48 @@
+//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SI DAG Lowering interface definition
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SIISELLOWERING_H
+#define SIISELLOWERING_H
+
+#include "AMDGPUISelLowering.h"
+#include "SIInstrInfo.h"
+
+namespace llvm {
+
+class SITargetLowering : public AMDGPUTargetLowering
+{
+  const SIInstrInfo * TII;
+
+  /// AppendS_WAITCNT - Memory reads and writes are synchronized using the
+  /// S_WAITCNT instruction.  This function takes the most conservative
+  /// approach and inserts an S_WAITCNT instruction after every read and
+  /// write.
+  void AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
+              MachineBasicBlock::iterator I) const;
+  void LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,          /// expand SI_INTERP to M0 setup + V_INTERP_P1/P2
+              MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const;
+  void LowerSI_INTERP_CONST(MachineInstr *MI, MachineBasicBlock &BB,    /// expand SI_INTERP_CONST to M0 setup + V_INTERP_MOV
+              MachineBasicBlock::iterator I) const;
+  void LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,         /// expand SI_V_CNDLT to V_CMP_LT + V_CNDMASK
+              MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const;
+  void lowerUSE_SGPR(MachineInstr *MI, MachineFunction * MF,            /// map USE_SGPR pseudos to physical SGPRs
+                     MachineRegisterInfo & MRI) const;
+public:
+  SITargetLowering(TargetMachine &tm);
+  virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
+                                              MachineBasicBlock * BB) const;
+};
+
+} // End namespace llvm
+
+#endif //SIISELLOWERING_H
diff --git a/lib/Target/AMDGPU/SIInstrFormats.td b/lib/Target/AMDGPU/SIInstrFormats.td
new file mode 100644
index 0000000..ac8465c
--- /dev/null
+++ b/lib/Target/AMDGPU/SIInstrFormats.td
@@ -0,0 +1,128 @@
+//===-- SIInstrFormats.td - SI Instruction Formats ------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SI Instruction format definitions.
+//
+// Instructions with _32 take 32-bit operands.
+// Instructions with _64 take 64-bit operands.
+//
+// VOP_* instructions can use either a 32-bit or 64-bit encoding.  The 32-bit
+// encoding is the standard encoding, but instructions that make use of
+// any of the instruction modifiers must use the 64-bit encoding.
+//
+// Instructions with _e32 use the 32-bit encoding.
+// Instructions with _e64 use the 64-bit encoding.
+//
+//===----------------------------------------------------------------------===//
+
+
+class VOP3_32 <bits<9> op, string opName, list<dag> pattern> // VOP3 (64-bit) encoding with 32-bit operands; the trailing i32imm operands carry instruction modifiers.
+  : VOP3 <op, (outs VReg_32:$dst), (ins AllReg_32:$src0, AllReg_32:$src1, AllReg_32:$src2, i32imm:$src3, i32imm:$src4, i32imm:$src5, i32imm:$src6), opName, pattern>;
+
+class VOP3_64 <bits<9> op, string opName, list<dag> pattern> // Same shape as VOP3_32 but with 64-bit register operands.
+  : VOP3 <op, (outs VReg_64:$dst), (ins AllReg_64:$src0, AllReg_64:$src1, AllReg_64:$src2, i32imm:$src3, i32imm:$src4, i32imm:$src5, i32imm:$src6), opName, pattern>;
+
+
+class SOP1_32 <bits<8> op, string opName, list<dag> pattern> // Scalar one-source op, 32-bit registers.
+  : SOP1 <op, (outs SReg_32:$dst), (ins SReg_32:$src0), opName, pattern>;
+
+class SOP1_64 <bits<8> op, string opName, list<dag> pattern> // Scalar one-source op, 64-bit registers.
+  : SOP1 <op, (outs SReg_64:$dst), (ins SReg_64:$src0), opName, pattern>;
+
+class SOP2_32 <bits<7> op, string opName, list<dag> pattern> // Scalar two-source op, 32-bit registers.
+  : SOP2 <op, (outs SReg_32:$dst), (ins SReg_32:$src0, SReg_32:$src1), opName, pattern>;
+
+class SOP2_64 <bits<7> op, string opName, list<dag> pattern> // Scalar two-source op, 64-bit registers.
+  : SOP2 <op, (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1), opName, pattern>;
+
+class VOP1_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc, // Shared VOP1 body, parameterized on dst/src register classes.
+                   string opName, list<dag> pattern> : 
+  VOP1 <
+    op, (outs vrc:$dst), (ins arc:$src0), opName, pattern
+  >;
+
+multiclass VOP1_32 <bits<8> op, string opName, list<dag> pattern> { // Emits both the 32-bit (_e32) and VOP3 (_e64) encodings.
+  def _e32: VOP1_Helper <op, VReg_32, AllReg_32, opName, pattern>;
+  def _e64 : VOP3_32 <{1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, // VOP3 opcode = {1,1} prefix ++ low 7 bits of the VOP1 opcode.
+                      opName, []
+  >;
+}
+
+multiclass VOP1_64 <bits<8> op, string opName, list<dag> pattern> { // 64-bit variant of VOP1_32.
+
+  def _e32 : VOP1_Helper <op, VReg_64, AllReg_64, opName, pattern>;
+
+  def _e64 : VOP3_64 <
+    {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+    opName, []
+  >;
+}
+
+class VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc, // Shared VOP2 body; $src1 always comes from the vector class.
+                   string opName, list<dag> pattern> :
+  VOP2 <
+    op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1), opName, pattern
+  >;
+
+multiclass VOP2_32 <bits<6> op, string opName, list<dag> pattern> { // Emits both the 32-bit (_e32) and VOP3 (_e64) encodings.
+
+  def _e32 : VOP2_Helper <op, VReg_32, AllReg_32, opName, pattern>;
+
+  def _e64 : VOP3_32 <{1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, // VOP3 opcode = {1,0,0} prefix ++ the 6-bit VOP2 opcode.
+                      opName, []
+  >;
+}
+
+multiclass VOP2_64 <bits<6> op, string opName, list<dag> pattern> { // 64-bit variant of VOP2_32.
+  def _e32: VOP2_Helper <op, VReg_64, AllReg_64, opName, pattern>;
+
+  def _e64 : VOP3_64 <
+    {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+    opName, []
+  >;
+}
+
+class SOPK_32 <bits<5> op, string opName, list<dag> pattern> // Scalar op taking a 16-bit inline constant.
+  : SOPK <op, (outs SReg_32:$dst), (ins i16imm:$src0), opName, pattern>;
+
+class SOPK_64 <bits<5> op, string opName, list<dag> pattern> // 64-bit destination variant of SOPK_32.
+  : SOPK <op, (outs SReg_64:$dst), (ins i16imm:$src0), opName, pattern>;
+
+class VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc, // Vector compare: no explicit dst (the VOPC format defines VCC).
+                 string opName, list<dag> pattern> :
+  VOPC <
+    op, (outs), (ins arc:$src0, vrc:$src1), opName, pattern
+  >;
+
+multiclass VOPC_32 <bits<8> op, string opName, list<dag> pattern> { // Emits both the 32-bit (_e32) and VOP3 (_e64) encodings.
+
+  def _e32 : VOPC_Helper <op, VReg_32, AllReg_32, opName, pattern>;
+
+  def _e64 : VOP3_32 <
+    {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, // VOP3 opcode = 0 prefix ++ the full 8-bit VOPC opcode.
+    opName, []
+  >;
+}
+
+multiclass VOPC_64 <bits<8> op, string opName, list<dag> pattern> { // 64-bit variant of VOPC_32.
+
+  def _e32 : VOPC_Helper <op, VReg_64, AllReg_64, opName, pattern>;
+
+  def _e64 : VOP3_64 <
+    {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}},
+    opName, []
+  >;
+}
+
+class SOPC_32 <bits<7> op, string opName, list<dag> pattern> // Scalar compare writing the condition-code register.
+  : SOPC <op, (outs CCReg:$dst), (ins SReg_32:$src0, SReg_32:$src1), opName, pattern>;
+
+class SOPC_64 <bits<7> op, string opName, list<dag> pattern> // 64-bit source variant of SOPC_32.
+  : SOPC <op, (outs CCReg:$dst), (ins SReg_64:$src0, SReg_64:$src1), opName, pattern>;
+
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
new file mode 100644
index 0000000..1d464fe
--- /dev/null
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -0,0 +1,104 @@
+//===-- SIInstrInfo.cpp - SI Instruction Information  ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SI Implementation of TargetInstrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "SIInstrInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/MC/MCInstrDesc.h"
+
+#include <stdio.h>
+
+using namespace llvm;
+
+SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm) // Constructs SI instruction info; RI is the register info bound to this object.
+  : AMDGPUInstrInfo(tm),
+    RI(tm, *this),
+    TM(tm)
+    { }
+
+const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const // Accessor for the register info constructed in the ctor.
+{
+  return RI;
+}
+
+void
+SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, // Emits a V_MOV_B32_e32 copy DestReg <- SrcReg before MI.
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const
+{
+  BuildMI(MBB, MI, DL, get(AMDIL::V_MOV_B32_e32), DestReg) // NOTE(review): the VALU move is used for all copies -- confirm intended for SGPR dests.
+   .addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+unsigned SIInstrInfo::getEncodingType(const MachineInstr &MI) const // Extracts the SIInstrEncodingType value from the low 4 bits of TSFlags.
+{
+  return get(MI.getOpcode()).TSFlags & SI_INSTR_FLAGS_ENCODING_MASK;
+}
+
+unsigned SIInstrInfo::getEncodingBytes(const MachineInstr &MI) const // Size of MI's encoding in bytes: 8 for 64-bit forms/literals, else 4.
+{
+
+  /* Instructions with literal constants are expanded to 64-bits, and
+   * the constant is stored in bits [63:32] */
+  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
+    if (MI.getOperand(i).getType() == MachineOperand::MO_FPImmediate) {
+      return 8;
+    }
+  }
+
+  /* This instruction always has a literal */
+  if (MI.getOpcode() == AMDIL::S_MOV_IMM_I32) {
+    return 8;
+  }
+
+  unsigned encoding_type = getEncodingType(MI);
+  switch (encoding_type) {
+    case SIInstrEncodingType::EXP:
+    case SIInstrEncodingType::LDS:
+    case SIInstrEncodingType::MUBUF:
+    case SIInstrEncodingType::MTBUF:
+    case SIInstrEncodingType::MIMG:
+    case SIInstrEncodingType::VOP3:
+      return 8; // 64-bit encodings.
+    default:
+      return 4; // All remaining encoding types are 32 bits wide.
+  }
+}
+
+MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg, // Creates (but does not insert) a V_MOV_IMM_I32 DstReg <- Imm.
+                                           int64_t Imm) const
+{
+  MachineInstr * MI = MF->CreateMachineInstr(get(AMDIL::V_MOV_IMM_I32), DebugLoc());
+  MachineInstrBuilder(MI).addReg(DstReg, RegState::Define);
+  MachineInstrBuilder(MI).addImm(Imm);
+
+  return MI;
+
+}
+
+bool SIInstrInfo::isMov(unsigned Opcode) const // True for every SI scalar/vector move opcode, including the immediate pseudos.
+{
+  switch(Opcode) {
+  default: return false;
+  case AMDIL::S_MOV_B32:
+  case AMDIL::S_MOV_B64:
+  case AMDIL::V_MOV_B32_e32:
+  case AMDIL::V_MOV_B32_e64:
+  case AMDIL::V_MOV_IMM_F32:
+  case AMDIL::V_MOV_IMM_I32:
+  case AMDIL::S_MOV_IMM_I32:
+    return true;
+  }
+}
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
new file mode 100644
index 0000000..aa567b6
--- /dev/null
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -0,0 +1,90 @@
+//===-- SIInstrInfo.h - SI Instruction Info Interface ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface definition for SIInstrInfo.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef SIINSTRINFO_H
+#define SIINSTRINFO_H
+
+#include "AMDGPUInstrInfo.h"
+#include "SIRegisterInfo.h"
+
+namespace llvm {
+
+class SIInstrInfo : public AMDGPUInstrInfo { // SI-specific TargetInstrInfo implementation.
+private:
+  const SIRegisterInfo RI; // Owned register info, returned by getRegisterInfo().
+  AMDGPUTargetMachine &TM;
+
+public:
+  explicit SIInstrInfo(AMDGPUTargetMachine &tm);
+
+  const SIRegisterInfo &getRegisterInfo() const;
+
+  virtual void copyPhysReg(MachineBasicBlock &MBB, // Register-to-register copy; implemented in SIInstrInfo.cpp.
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const;
+
+  /// getEncodingType - Returns the encoding type of this instruction.
+  unsigned getEncodingType(const MachineInstr &MI) const;
+
+  /// getEncodingBytes - Returns the size of this instruction's encoding in
+  /// number of bytes.
+  unsigned getEncodingBytes(const MachineInstr &MI) const;
+
+  virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg, // Creates a move-immediate instruction (not inserted).
+                                        int64_t Imm) const;
+
+  virtual unsigned getIEQOpcode() const { assert(!"Implement"); return 0;} // Not implemented for SI yet.
+  virtual bool isMov(unsigned Opcode) const;
+
+  };
+
+} // End namespace llvm
+
+// These must be kept in sync with SIInstructions.td and also the
+// InstrEncodingInfo array in SIInstrInfo.cpp.
+//
+// NOTE: This enum is only used to identify the encoding type within LLVM;
+// the actual encoding type that is part of the instruction format is different
+namespace SIInstrEncodingType {
+  enum Encoding { // Stored in the low 4 bits of TSFlags (see InstSI in SIInstrInfo.td).
+    EXP = 0,
+    LDS = 1,
+    MIMG = 2,
+    MTBUF = 3,
+    MUBUF = 4,
+    SMRD = 5,
+    SOP1 = 6,
+    SOP2 = 7,
+    SOPC = 8,
+    SOPK = 9,
+    SOPP = 10,
+    VINTRP = 11,
+    VOP1 = 12,
+    VOP2 = 13,
+    VOP3 = 14,
+    VOPC = 15
+  };
+}
+
+#define SI_INSTR_FLAGS_ENCODING_MASK 0xf // Mask isolating the Encoding value within TSFlags.
+
+namespace SIInstrFlags {
+  enum Flags {
+    // First 4 bits are the instruction encoding
+    NEED_WAIT = 1 << 4 // Instruction must be followed by an S_WAITCNT.
+  };
+}
+
+#endif //SIINSTRINFO_H
diff --git a/lib/Target/AMDGPU/SIInstrInfo.td b/lib/Target/AMDGPU/SIInstrInfo.td
new file mode 100644
index 0000000..30c9c33
--- /dev/null
+++ b/lib/Target/AMDGPU/SIInstrInfo.td
@@ -0,0 +1,477 @@
+//===-- SIInstrInfo.td - SI Instruction Encodings ---------*- tablegen -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+
+class InstSI <dag outs, dag ins, string asm, list<dag> pattern> : // Common base of all SI instructions; records encoding type and wait flag in TSFlags.
+    AMDGPUInst<outs, ins, asm, pattern> {
+
+  field bits<4> EncodingType = 0; // Must stay in sync with SIInstrEncodingType (SIInstrInfo.h).
+  field bits<1> NeedWait = 0; // Set on instructions that require an S_WAITCNT (SIInstrFlags::NEED_WAIT).
+
+  let TSFlags{3-0} = EncodingType;
+  let TSFlags{4} = NeedWait;
+
+}
+
+class Enc32 <dag outs, dag ins, string asm, list<dag> pattern> : // SI instruction with a 32-bit encoding.
+    InstSI <outs, ins, asm, pattern> {
+
+  field bits<32> Inst;
+}
+
+class Enc64 <dag outs, dag ins, string asm, list<dag> pattern> : // SI instruction with a 64-bit encoding.
+    InstSI <outs, ins, asm, pattern> {
+
+  field bits<64> Inst;
+}
+
+class SIOperand <ValueType vt, dag opInfo>: Operand <vt> { // Operand wrapper that routes through the custom "encodeOperand" encoder.
+  let EncoderMethod = "encodeOperand";
+  let MIOperandInfo = opInfo;
+}
+
+def IMM8bit : ImmLeaf <
+  i32,
+  [{return (int32_t)Imm >= 0 && (int32_t)Imm <= 0xff;}] // Unsigned 8-bit immediate (0..0xff).
+>;
+
+def IMM12bit : ImmLeaf <
+  i16,
+  [{return (int16_t)Imm >= 0 && (int16_t)Imm <= 0xfff;}] // Unsigned 12-bit immediate (0..0xfff).
+>;
+
+class GPR4Align <RegisterClass rc> : Operand <vAny> { // Register operand encoded via the GPR4AlignEncode method.
+  let EncoderMethod = "GPR4AlignEncode";
+  let MIOperandInfo = (ops rc:$reg); 
+}
+
+class GPR2Align <RegisterClass rc, ValueType vt> : Operand <vt> { // Register operand encoded via the GPR2AlignEncode method.
+  let EncoderMethod = "GPR2AlignEncode";
+  let MIOperandInfo = (ops rc:$reg);
+}
+
+def i32Literal : Operand <i32> { // 32-bit literal operand (custom encoder).
+  let EncoderMethod = "i32LiteralEncode";
+}
+
+def SMRDmemrr : Operand<iPTR> { // SMRD address: 64-bit base register plus 32-bit register offset.
+  let MIOperandInfo = (ops SReg_64, SReg_32);
+  let EncoderMethod = "GPR2AlignEncode";
+}
+
+def SMRDmemri : Operand<iPTR> { // SMRD address: 64-bit base register plus immediate offset.
+  let MIOperandInfo = (ops SReg_64, i32imm);
+  let EncoderMethod = "SMRDmemriEncode";
+}
+
+def ADDR_Reg     : ComplexPattern<i64, 2, "SelectADDRReg", [], []>; // Matches base + register-offset addressing.
+def ADDR_Offset8 : ComplexPattern<i64, 2, "SelectADDR8BitOffset", [], []>; // Matches base + 8-bit-offset addressing.
+
+def EXP : Enc64< // Export instruction: sends $src0-$src3 to export target $tgt.
+  (outs),
+  (ins i32imm:$en, i32imm:$tgt, i32imm:$compr, i32imm:$done, i32imm:$vm,
+       VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
+  "EXP $en, $tgt, $compr, $done, $vm, $src0, $src1, $src2, $src3",
+  [] > {
+
+  bits<4> EN;
+  bits<6> TGT;
+  bits<1> COMPR;
+  bits<1> DONE;
+  bits<1> VM;
+  bits<8> VSRC0;
+  bits<8> VSRC1;
+  bits<8> VSRC2;
+  bits<8> VSRC3;
+
+  let Inst{3-0} = EN;
+  let Inst{9-4} = TGT;
+  let Inst{10} = COMPR;
+  let Inst{11} = DONE;
+  let Inst{12} = VM;
+  let Inst{31-26} = 0x3e; //encoding
+  let Inst{39-32} = VSRC0;
+  let Inst{47-40} = VSRC1;
+  let Inst{55-48} = VSRC2;
+  let Inst{63-56} = VSRC3;
+  let EncodingType = 0; //SIInstrEncodingType::EXP
+
+  let NeedWait = 1; // Export must be followed by an S_WAITCNT.
+  let usesCustomInserter = 1;
+}
+
+class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : // Image memory operation, 64-bit encoding (major opcode 0x3c).
+    Enc64 <outs, ins, asm, pattern> {
+
+  bits<8> VDATA;
+  bits<4> DMASK;
+  bits<1> UNORM;
+  bits<1> GLC;
+  bits<1> DA;
+  bits<1> R128;
+  bits<1> TFE;
+  bits<1> LWE;
+  bits<1> SLC;
+  bits<8> VADDR;
+  bits<5> SRSRC;
+  bits<5> SSAMP; 
+
+  let Inst{11-8} = DMASK;
+  let Inst{12} = UNORM;
+  let Inst{13} = GLC;
+  let Inst{14} = DA;
+  let Inst{15} = R128;
+  let Inst{16} = TFE;
+  let Inst{17} = LWE;
+  let Inst{24-18} = op;
+  let Inst{25} = SLC;
+  let Inst{31-26} = 0x3c;
+  let Inst{39-32} = VADDR;
+  let Inst{47-40} = VDATA;
+  let Inst{52-48} = SRSRC;
+  let Inst{57-53} = SSAMP;
+
+  let EncodingType = 2; //SIInstrEncodingType::MIMG
+
+  let NeedWait = 1; // Memory op: needs a following S_WAITCNT.
+  let usesCustomInserter = 1;
+}
+
+class MTBUF <bits<3> op, dag outs, dag ins, string asm, list<dag> pattern> : // Typed buffer memory operation, 64-bit encoding (major opcode 0x3a).
+    Enc64<outs, ins, asm, pattern> {
+
+  bits<8> VDATA;
+  bits<12> OFFSET;
+  bits<1> OFFEN;
+  bits<1> IDXEN;
+  bits<1> GLC;
+  bits<1> ADDR64;
+  bits<4> DFMT;
+  bits<3> NFMT;
+  bits<8> VADDR;
+  bits<5> SRSRC;
+  bits<1> SLC;
+  bits<1> TFE;
+  bits<8> SOFFSET;
+
+  let Inst{11-0} = OFFSET;
+  let Inst{12} = OFFEN;
+  let Inst{13} = IDXEN;
+  let Inst{14} = GLC;
+  let Inst{15} = ADDR64;
+  let Inst{18-16} = op;
+  let Inst{22-19} = DFMT;
+  let Inst{25-23} = NFMT;
+  let Inst{31-26} = 0x3a; //encoding
+  let Inst{39-32} = VADDR;
+  let Inst{47-40} = VDATA;
+  let Inst{52-48} = SRSRC;
+  let Inst{54} = SLC;
+  let Inst{55} = TFE;
+  let Inst{63-56} = SOFFSET;
+  let EncodingType = 3; //SIInstrEncodingType::MTBUF
+
+  let NeedWait = 1; // Memory op: needs a following S_WAITCNT.
+  let usesCustomInserter = 1;
+  let neverHasSideEffects = 1;
+}
+
+class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : // Untyped buffer memory operation, 64-bit encoding (major opcode 0x38).
+    Enc64<outs, ins, asm, pattern> {
+
+  bits<8> VDATA;
+  bits<12> OFFSET;
+  bits<1> OFFEN;
+  bits<1> IDXEN;
+  bits<1> GLC;
+  bits<1> ADDR64;
+  bits<1> LDS;
+  bits<8> VADDR;
+  bits<5> SRSRC;
+  bits<1> SLC;
+  bits<1> TFE;
+  bits<8> SOFFSET;
+
+  let Inst{11-0} = OFFSET;
+  let Inst{12} = OFFEN;
+  let Inst{13} = IDXEN;
+  let Inst{14} = GLC;
+  let Inst{15} = ADDR64;
+  let Inst{16} = LDS;
+  let Inst{24-18} = op;
+  let Inst{31-26} = 0x38; //encoding
+  let Inst{39-32} = VADDR;
+  let Inst{47-40} = VDATA;
+  let Inst{52-48} = SRSRC;
+  let Inst{54} = SLC;
+  let Inst{55} = TFE;
+  let Inst{63-56} = SOFFSET;
+  let EncodingType = 4; //SIInstrEncodingType::MUBUF
+
+  let NeedWait = 1; // Memory op: needs a following S_WAITCNT.
+  let usesCustomInserter = 1;
+  let neverHasSideEffects = 1;
+}
+
+class SMRD <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> : // Scalar memory read, 32-bit encoding; PTR is split into OFFSET/IMM/SBASE below.
+    Enc32<outs, ins, asm, pattern> {
+
+  bits<7> SDST;
+  bits<15> PTR;
+  bits<8> OFFSET = PTR{7-0};
+  bits<1> IMM    = PTR{8};
+  bits<6> SBASE  = PTR{14-9};
+  
+  let Inst{7-0} = OFFSET;
+  let Inst{8} = IMM;
+  let Inst{14-9} = SBASE;
+  let Inst{21-15} = SDST;
+  let Inst{26-22} = op;
+  let Inst{31-27} = 0x18; //encoding
+  let EncodingType = 5; //SIInstrEncodingType::SMRD
+
+  let NeedWait = 1; // Memory op: needs a following S_WAITCNT.
+  let usesCustomInserter = 1;
+}
+
+class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> : // Scalar ALU, one source.
+    Enc32<outs, ins, asm, pattern> {
+
+  bits<7> SDST;
+  bits<8> SSRC0;
+
+  let Inst{7-0} = SSRC0;
+  let Inst{15-8} = op;
+  let Inst{22-16} = SDST;
+  let Inst{31-23} = 0x17d; //encoding;
+  let EncodingType = 6; //SIInstrEncodingType::SOP1
+}
+
+class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : // Scalar ALU, two sources.
+    Enc32 <outs, ins, asm, pattern> {
+  
+  bits<7> SDST;
+  bits<8> SSRC0;
+  bits<8> SSRC1;
+
+  let Inst{7-0} = SSRC0;
+  let Inst{15-8} = SSRC1;
+  let Inst{22-16} = SDST;
+  let Inst{29-23} = op;
+  let Inst{31-30} = 0x2; // encoding
+  let EncodingType = 7; // SIInstrEncodingType::SOP2
+}
+
+class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> : // Scalar compare; the encoding has no destination field.
+  Enc32<outs, ins, asm, pattern> {
+
+  bits<8> SSRC0;
+  bits<8> SSRC1;
+
+  let Inst{7-0} = SSRC0;
+  let Inst{15-8} = SSRC1;
+  let Inst{22-16} = op;
+  let Inst{31-23} = 0x17e; //encoding
+  let EncodingType = 8; // SIInstrEncodingType::SOPC
+}
+
+class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> : // Scalar ALU with a 16-bit inline constant (SIMM16).
+   Enc32 <outs, ins , asm, pattern> {
+
+  bits <7> SDST;
+  bits <16> SIMM16;
+  
+  let Inst{15-0} = SIMM16;
+  let Inst{22-16} = SDST;
+  let Inst{27-23} = op;
+  let Inst{31-28} = 0xb; //encoding
+  let EncodingType = 9; // SIInstrEncodingType::SOPK
+}
+
+class SOPP <bits<7> op, dag ins, string asm> : Enc32 < // Scalar program-control instruction; produces no outputs.
+  (outs),
+  ins,
+  asm,
+  [] > {
+
+  bits <16> SIMM16;
+
+  let Inst{15-0} = SIMM16;
+  let Inst{22-16} = op;
+  let Inst{31-23} = 0x17f; // encoding
+  let EncodingType = 10; // SIInstrEncodingType::SOPP
+}
+    
+
+class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> : // Vector interpolation; implicitly reads M0 (see Uses below).
+    Enc32 <outs, ins, asm, pattern> {
+
+  bits<8> VDST;
+  bits<8> VSRC;
+  bits<2> ATTRCHAN;
+  bits<6> ATTR;
+
+  let Inst{7-0} = VSRC;
+  let Inst{9-8} = ATTRCHAN;
+  let Inst{15-10} = ATTR;
+  let Inst{17-16} = op;
+  let Inst{25-18} = VDST;
+  let Inst{31-26} = 0x32; // encoding
+  let EncodingType = 11; // SIInstrEncodingType::VINTRP
+
+  let Uses = [M0];
+}
+
+class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> : // Vector ALU, one source, 32-bit encoding.
+    Enc32 <outs, ins, asm, pattern> {
+
+  bits<8> VDST;
+  bits<9> SRC0;
+  
+  let Inst{8-0} = SRC0;
+  let Inst{16-9} = op;
+  let Inst{24-17} = VDST;
+  let Inst{31-25} = 0x3f; //encoding
+  
+  let EncodingType = 12; // SIInstrEncodingType::VOP1
+  let PostEncoderMethod = "VOPPostEncode";
+}
+
+class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> : // Vector ALU, two sources, 32-bit encoding (top bit 0).
+    Enc32 <outs, ins, asm, pattern> {
+
+  bits<8> VDST;
+  bits<9> SRC0;
+  bits<8> VSRC1;
+  
+  let Inst{8-0} = SRC0;
+  let Inst{16-9} = VSRC1;
+  let Inst{24-17} = VDST;
+  let Inst{30-25} = op;
+  let Inst{31} = 0x0; //encoding
+  
+  let EncodingType = 13; // SIInstrEncodingType::VOP2
+  let PostEncoderMethod = "VOPPostEncode";
+}
+
+class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> : // Vector ALU, three sources with ABS/CLAMP/OMOD/NEG modifiers; 64-bit encoding.
+    Enc64 <outs, ins, asm, pattern> {
+
+  bits<8> VDST;
+  bits<9> SRC0;
+  bits<9> SRC1;
+  bits<9> SRC2;
+  bits<3> ABS; 
+  bits<1> CLAMP;
+  bits<2> OMOD;
+  bits<3> NEG;
+
+  let Inst{7-0} = VDST;
+  let Inst{10-8} = ABS;
+  let Inst{11} = CLAMP;
+  let Inst{25-17} = op;
+  let Inst{31-26} = 0x34; //encoding
+  let Inst{40-32} = SRC0;
+  let Inst{49-41} = SRC1;
+  let Inst{58-50} = SRC2;
+  let Inst{60-59} = OMOD;
+  let Inst{63-61} = NEG;
+  
+  let EncodingType = 14; // SIInstrEncodingType::VOP3
+  let PostEncoderMethod = "VOPPostEncode";
+}
+
+class VOPC <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> : // Vector compare; writes VCC (see Defs below).
+    Enc32 <outs, ins, asm, pattern> {
+
+  bits<9> SRC0;
+  bits<8> VSRC1;
+
+  let Inst{8-0} = SRC0;
+  let Inst{16-9} = VSRC1;
+  let Inst{24-17} = op;
+  let Inst{31-25} = 0x3e; //encoding
+ 
+  let EncodingType = 15; //SIInstrEncodingType::VOPC
+  let PostEncoderMethod = "VOPPostEncode";
+
+  let Defs = [VCC];
+}
+
+class MIMG_Load_Helper <bits<7> op, string asm> : MIMG < // Image load: 128-bit result, 128-bit address vector, 256-bit resource, 128-bit sampler.
+  op,
+  (outs VReg_128:$vdata),
+  (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
+       i1imm:$tfe, i1imm:$lwe, i1imm:$slc, VReg_128:$vaddr,
+       GPR4Align<SReg_256>:$srsrc, GPR4Align<SReg_128>:$ssamp),
+  asm,
+  []
+>; 
+
+class MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> : MUBUF < // Untyped buffer load into regClass; marked mayLoad.
+  op,
+  (outs regClass:$dst),
+  (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
+       i1imm:$lds, VReg_32:$vaddr, GPR4Align<SReg_128>:$srsrc, i1imm:$slc,
+       i1imm:$tfe, SReg_32:$soffset),
+  asm,
+  []> {
+  let mayLoad = 1;
+}
+
+class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF < // Typed buffer load into regClass; marked mayLoad.
+  op,
+  (outs regClass:$dst),
+  (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
+       i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, GPR4Align<SReg_128>:$srsrc,
+       i1imm:$slc, i1imm:$tfe, SReg_32:$soffset),
+  asm,
+  []> {
+  let mayLoad = 1;
+}
+
+class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF < // Typed buffer store of $vdata; marked mayStore.
+  op,
+  (outs),
+  (ins regClass:$vdata, i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc,
+   i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr,
+   GPR4Align<SReg_128>:$srsrc, i1imm:$slc, i1imm:$tfe, SReg_32:$soffset),
+  asm,
+  []> {
+  let mayStore = 1;
+}
+
+/*XXX: We should be able to infer the imm bit based on the arg types */
+multiclass SMRD_Helper <bits<5> op, string asm, RegisterClass dstClass, // Emits register-offset (_SGPR) and immediate-offset (_IMM) SMRD variants.
+			ValueType vt> {
+
+  def _SGPR : SMRD <
+              op,
+              (outs dstClass:$dst),
+              (ins SMRDmemrr:$src0),
+              asm,
+              [(set (vt dstClass:$dst), (constant_load ADDR_Reg:$src0))]
+  > {
+    let IMM = 0; // Offset comes from a register.
+  }
+
+  def _IMM : SMRD <
+              op,
+              (outs dstClass:$dst),
+              (ins SMRDmemri:$src0),
+              asm,
+              [(set (vt dstClass:$dst), (constant_load ADDR_Offset8:$src0))]
+  > {
+    let IMM = 1; // Offset is an immediate.
+  }
+}
+
+include "SIInstrFormats.td"
+include "SIInstructions.td"
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
new file mode 100644
index 0000000..a96d783
--- /dev/null
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -0,0 +1,964 @@
+//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// PatFrag that matches only loads from the USER_SGPR address space.  The
+// C++ predicate inspects the load's source Value: if it carries a pointer
+// type, match iff that pointer's address space is
+// AMDILAS::USER_SGPR_ADDRESS; loads with no source value never match.
+def load_user_sgpr : PatFrag<(ops node:$ptr),
+  (load node:$ptr),
+  [{
+    const Value *Src = cast<LoadSDNode>(N)->getSrcValue();
+    if (Src) {
+      PointerType * PT = dyn_cast<PointerType>(Src->getType());
+      return PT && PT->getAddressSpace() == AMDILAS::USER_SGPR_ADDRESS;
+    }
+    return false;
+  }]
+>;
+
+
+// Gates the Southern Islands instruction definitions below on the target
+// device generation.  NOTE(review): HD7XXX is used here as the SI
+// generation marker in AMDILDeviceInfo -- confirm against the device table
+// in AMDILDeviceInfo.h.  (The two string literals concatenate into one
+// C++ expression.)
+def isSI : Predicate<"Subtarget.device()"
+                            "->getGeneration() == AMDILDeviceInfo::HD7XXX">;
+
+let Predicates = [isSI] in {
+
+def S_MOV_B32 : SOP1_32 <0x00000003, "S_MOV_B32", []>;
+def S_MOV_B64 : SOP1_64 <0x00000004, "S_MOV_B64", []>;
+def S_CMOV_B32 : SOP1_32 <0x00000005, "S_CMOV_B32", []>;
+def S_CMOV_B64 : SOP1_64 <0x00000006, "S_CMOV_B64", []>;
+def S_NOT_B32 : SOP1_32 <0x00000007, "S_NOT_B32", []>;
+def S_NOT_B64 : SOP1_64 <0x00000008, "S_NOT_B64", []>;
+def S_WQM_B32 : SOP1_32 <0x00000009, "S_WQM_B32", []>;
+def S_WQM_B64 : SOP1_64 <0x0000000a, "S_WQM_B64", []>;
+def S_BREV_B32 : SOP1_32 <0x0000000b, "S_BREV_B32", []>;
+def S_BREV_B64 : SOP1_64 <0x0000000c, "S_BREV_B64", []>;
+////def S_BCNT0_I32_B32 : SOP1_BCNT0 <0x0000000d, "S_BCNT0_I32_B32", []>;
+////def S_BCNT0_I32_B64 : SOP1_BCNT0 <0x0000000e, "S_BCNT0_I32_B64", []>;
+////def S_BCNT1_I32_B32 : SOP1_BCNT1 <0x0000000f, "S_BCNT1_I32_B32", []>;
+////def S_BCNT1_I32_B64 : SOP1_BCNT1 <0x00000010, "S_BCNT1_I32_B64", []>;
+////def S_FF0_I32_B32 : SOP1_FF0 <0x00000011, "S_FF0_I32_B32", []>;
+////def S_FF0_I32_B64 : SOP1_FF0 <0x00000012, "S_FF0_I32_B64", []>;
+////def S_FF1_I32_B32 : SOP1_FF1 <0x00000013, "S_FF1_I32_B32", []>;
+////def S_FF1_I32_B64 : SOP1_FF1 <0x00000014, "S_FF1_I32_B64", []>;
+//def S_FLBIT_I32_B32 : SOP1_32 <0x00000015, "S_FLBIT_I32_B32", []>;
+//def S_FLBIT_I32_B64 : SOP1_32 <0x00000016, "S_FLBIT_I32_B64", []>;
+def S_FLBIT_I32 : SOP1_32 <0x00000017, "S_FLBIT_I32", []>;
+//def S_FLBIT_I32_I64 : SOP1_32 <0x00000018, "S_FLBIT_I32_I64", []>;
+//def S_SEXT_I32_I8 : SOP1_32 <0x00000019, "S_SEXT_I32_I8", []>;
+//def S_SEXT_I32_I16 : SOP1_32 <0x0000001a, "S_SEXT_I32_I16", []>;
+////def S_BITSET0_B32 : SOP1_BITSET0 <0x0000001b, "S_BITSET0_B32", []>;
+////def S_BITSET0_B64 : SOP1_BITSET0 <0x0000001c, "S_BITSET0_B64", []>;
+////def S_BITSET1_B32 : SOP1_BITSET1 <0x0000001d, "S_BITSET1_B32", []>;
+////def S_BITSET1_B64 : SOP1_BITSET1 <0x0000001e, "S_BITSET1_B64", []>;
+def S_GETPC_B64 : SOP1_64 <0x0000001f, "S_GETPC_B64", []>;
+def S_SETPC_B64 : SOP1_64 <0x00000020, "S_SETPC_B64", []>;
+def S_SWAPPC_B64 : SOP1_64 <0x00000021, "S_SWAPPC_B64", []>;
+def S_RFE_B64 : SOP1_64 <0x00000022, "S_RFE_B64", []>;
+def S_AND_SAVEEXEC_B64 : SOP1_64 <0x00000024, "S_AND_SAVEEXEC_B64", []>;
+def S_OR_SAVEEXEC_B64 : SOP1_64 <0x00000025, "S_OR_SAVEEXEC_B64", []>;
+def S_XOR_SAVEEXEC_B64 : SOP1_64 <0x00000026, "S_XOR_SAVEEXEC_B64", []>;
+////def S_ANDN2_SAVEEXEC_B64 : SOP1_ANDN2 <0x00000027, "S_ANDN2_SAVEEXEC_B64", []>;
+////def S_ORN2_SAVEEXEC_B64 : SOP1_ORN2 <0x00000028, "S_ORN2_SAVEEXEC_B64", []>;
+def S_NAND_SAVEEXEC_B64 : SOP1_64 <0x00000029, "S_NAND_SAVEEXEC_B64", []>;
+def S_NOR_SAVEEXEC_B64 : SOP1_64 <0x0000002a, "S_NOR_SAVEEXEC_B64", []>;
+def S_XNOR_SAVEEXEC_B64 : SOP1_64 <0x0000002b, "S_XNOR_SAVEEXEC_B64", []>;
+def S_QUADMASK_B32 : SOP1_32 <0x0000002c, "S_QUADMASK_B32", []>;
+def S_QUADMASK_B64 : SOP1_64 <0x0000002d, "S_QUADMASK_B64", []>;
+def S_MOVRELS_B32 : SOP1_32 <0x0000002e, "S_MOVRELS_B32", []>;
+def S_MOVRELS_B64 : SOP1_64 <0x0000002f, "S_MOVRELS_B64", []>;
+def S_MOVRELD_B32 : SOP1_32 <0x00000030, "S_MOVRELD_B32", []>;
+def S_MOVRELD_B64 : SOP1_64 <0x00000031, "S_MOVRELD_B64", []>;
+//def S_CBRANCH_JOIN : SOP1_ <0x00000032, "S_CBRANCH_JOIN", []>;
+def S_MOV_REGRD_B32 : SOP1_32 <0x00000033, "S_MOV_REGRD_B32", []>;
+def S_ABS_I32 : SOP1_32 <0x00000034, "S_ABS_I32", []>;
+def S_MOV_FED_B32 : SOP1_32 <0x00000035, "S_MOV_FED_B32", []>;
+def S_MOVK_I32 : SOPK_32 <0x00000000, "S_MOVK_I32", []>;
+def S_CMOVK_I32 : SOPK_32 <0x00000002, "S_CMOVK_I32", []>;
+def S_CMPK_EQ_I32 : SOPK_32 <0x00000003, "S_CMPK_EQ_I32", []>;
+def S_CMPK_LG_I32 : SOPK_32 <0x00000004, "S_CMPK_LG_I32", []>;
+def S_CMPK_GT_I32 : SOPK_32 <0x00000005, "S_CMPK_GT_I32", []>;
+def S_CMPK_GE_I32 : SOPK_32 <0x00000006, "S_CMPK_GE_I32", []>;
+def S_CMPK_LT_I32 : SOPK_32 <0x00000007, "S_CMPK_LT_I32", []>;
+def S_CMPK_LE_I32 : SOPK_32 <0x00000008, "S_CMPK_LE_I32", []>;
+def S_CMPK_EQ_U32 : SOPK_32 <0x00000009, "S_CMPK_EQ_U32", []>;
+def S_CMPK_LG_U32 : SOPK_32 <0x0000000a, "S_CMPK_LG_U32", []>;
+def S_CMPK_GT_U32 : SOPK_32 <0x0000000b, "S_CMPK_GT_U32", []>;
+def S_CMPK_GE_U32 : SOPK_32 <0x0000000c, "S_CMPK_GE_U32", []>;
+def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>;
+def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>;
+def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
+def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
+//def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "S_CBRANCH_I_FORK", []>;
+def S_GETREG_B32 : SOPK_32 <0x00000012, "S_GETREG_B32", []>;
+def S_SETREG_B32 : SOPK_32 <0x00000013, "S_SETREG_B32", []>;
+def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>;
+//def S_SETREG_IMM32_B32 : SOPK_32 <0x00000015, "S_SETREG_IMM32_B32", []>;
+//def EXP : EXP_ <0x00000000, "EXP", []>;
+defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32", []>;
+defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", []>;
+defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", []>;
+defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", []>;
+defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", []>;
+defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32", []>;
+defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", []>;
+defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32", []>;
+defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32", []>;
+defm V_CMP_NGE_F32 : VOPC_32 <0x00000009, "V_CMP_NGE_F32", []>;
+defm V_CMP_NLG_F32 : VOPC_32 <0x0000000a, "V_CMP_NLG_F32", []>;
+defm V_CMP_NGT_F32 : VOPC_32 <0x0000000b, "V_CMP_NGT_F32", []>;
+defm V_CMP_NLE_F32 : VOPC_32 <0x0000000c, "V_CMP_NLE_F32", []>;
+defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", []>;
+defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32", []>;
+defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32", []>;
+defm V_CMPX_F_F32 : VOPC_32 <0x00000010, "V_CMPX_F_F32", []>;
+defm V_CMPX_LT_F32 : VOPC_32 <0x00000011, "V_CMPX_LT_F32", []>;
+defm V_CMPX_EQ_F32 : VOPC_32 <0x00000012, "V_CMPX_EQ_F32", []>;
+defm V_CMPX_LE_F32 : VOPC_32 <0x00000013, "V_CMPX_LE_F32", []>;
+defm V_CMPX_GT_F32 : VOPC_32 <0x00000014, "V_CMPX_GT_F32", []>;
+defm V_CMPX_LG_F32 : VOPC_32 <0x00000015, "V_CMPX_LG_F32", []>;
+defm V_CMPX_GE_F32 : VOPC_32 <0x00000016, "V_CMPX_GE_F32", []>;
+defm V_CMPX_O_F32 : VOPC_32 <0x00000017, "V_CMPX_O_F32", []>;
+defm V_CMPX_U_F32 : VOPC_32 <0x00000018, "V_CMPX_U_F32", []>;
+defm V_CMPX_NGE_F32 : VOPC_32 <0x00000019, "V_CMPX_NGE_F32", []>;
+defm V_CMPX_NLG_F32 : VOPC_32 <0x0000001a, "V_CMPX_NLG_F32", []>;
+defm V_CMPX_NGT_F32 : VOPC_32 <0x0000001b, "V_CMPX_NGT_F32", []>;
+defm V_CMPX_NLE_F32 : VOPC_32 <0x0000001c, "V_CMPX_NLE_F32", []>;
+defm V_CMPX_NEQ_F32 : VOPC_32 <0x0000001d, "V_CMPX_NEQ_F32", []>;
+defm V_CMPX_NLT_F32 : VOPC_32 <0x0000001e, "V_CMPX_NLT_F32", []>;
+defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32", []>;
+defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64", []>;
+defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", []>;
+defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", []>;
+defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", []>;
+defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", []>;
+defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64", []>;
+defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", []>;
+defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64", []>;
+defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64", []>;
+defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64", []>;
+defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64", []>;
+defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64", []>;
+defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64", []>;
+defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", []>;
+defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64", []>;
+defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64", []>;
+defm V_CMPX_F_F64 : VOPC_64 <0x00000030, "V_CMPX_F_F64", []>;
+defm V_CMPX_LT_F64 : VOPC_64 <0x00000031, "V_CMPX_LT_F64", []>;
+defm V_CMPX_EQ_F64 : VOPC_64 <0x00000032, "V_CMPX_EQ_F64", []>;
+defm V_CMPX_LE_F64 : VOPC_64 <0x00000033, "V_CMPX_LE_F64", []>;
+defm V_CMPX_GT_F64 : VOPC_64 <0x00000034, "V_CMPX_GT_F64", []>;
+defm V_CMPX_LG_F64 : VOPC_64 <0x00000035, "V_CMPX_LG_F64", []>;
+defm V_CMPX_GE_F64 : VOPC_64 <0x00000036, "V_CMPX_GE_F64", []>;
+defm V_CMPX_O_F64 : VOPC_64 <0x00000037, "V_CMPX_O_F64", []>;
+defm V_CMPX_U_F64 : VOPC_64 <0x00000038, "V_CMPX_U_F64", []>;
+defm V_CMPX_NGE_F64 : VOPC_64 <0x00000039, "V_CMPX_NGE_F64", []>;
+defm V_CMPX_NLG_F64 : VOPC_64 <0x0000003a, "V_CMPX_NLG_F64", []>;
+defm V_CMPX_NGT_F64 : VOPC_64 <0x0000003b, "V_CMPX_NGT_F64", []>;
+defm V_CMPX_NLE_F64 : VOPC_64 <0x0000003c, "V_CMPX_NLE_F64", []>;
+defm V_CMPX_NEQ_F64 : VOPC_64 <0x0000003d, "V_CMPX_NEQ_F64", []>;
+defm V_CMPX_NLT_F64 : VOPC_64 <0x0000003e, "V_CMPX_NLT_F64", []>;
+defm V_CMPX_TRU_F64 : VOPC_64 <0x0000003f, "V_CMPX_TRU_F64", []>;
+defm V_CMPS_F_F32 : VOPC_32 <0x00000040, "V_CMPS_F_F32", []>;
+defm V_CMPS_LT_F32 : VOPC_32 <0x00000041, "V_CMPS_LT_F32", []>;
+defm V_CMPS_EQ_F32 : VOPC_32 <0x00000042, "V_CMPS_EQ_F32", []>;
+defm V_CMPS_LE_F32 : VOPC_32 <0x00000043, "V_CMPS_LE_F32", []>;
+defm V_CMPS_GT_F32 : VOPC_32 <0x00000044, "V_CMPS_GT_F32", []>;
+defm V_CMPS_LG_F32 : VOPC_32 <0x00000045, "V_CMPS_LG_F32", []>;
+defm V_CMPS_GE_F32 : VOPC_32 <0x00000046, "V_CMPS_GE_F32", []>;
+defm V_CMPS_O_F32 : VOPC_32 <0x00000047, "V_CMPS_O_F32", []>;
+defm V_CMPS_U_F32 : VOPC_32 <0x00000048, "V_CMPS_U_F32", []>;
+defm V_CMPS_NGE_F32 : VOPC_32 <0x00000049, "V_CMPS_NGE_F32", []>;
+defm V_CMPS_NLG_F32 : VOPC_32 <0x0000004a, "V_CMPS_NLG_F32", []>;
+defm V_CMPS_NGT_F32 : VOPC_32 <0x0000004b, "V_CMPS_NGT_F32", []>;
+defm V_CMPS_NLE_F32 : VOPC_32 <0x0000004c, "V_CMPS_NLE_F32", []>;
+defm V_CMPS_NEQ_F32 : VOPC_32 <0x0000004d, "V_CMPS_NEQ_F32", []>;
+defm V_CMPS_NLT_F32 : VOPC_32 <0x0000004e, "V_CMPS_NLT_F32", []>;
+defm V_CMPS_TRU_F32 : VOPC_32 <0x0000004f, "V_CMPS_TRU_F32", []>;
+defm V_CMPSX_F_F32 : VOPC_32 <0x00000050, "V_CMPSX_F_F32", []>;
+defm V_CMPSX_LT_F32 : VOPC_32 <0x00000051, "V_CMPSX_LT_F32", []>;
+defm V_CMPSX_EQ_F32 : VOPC_32 <0x00000052, "V_CMPSX_EQ_F32", []>;
+defm V_CMPSX_LE_F32 : VOPC_32 <0x00000053, "V_CMPSX_LE_F32", []>;
+defm V_CMPSX_GT_F32 : VOPC_32 <0x00000054, "V_CMPSX_GT_F32", []>;
+defm V_CMPSX_LG_F32 : VOPC_32 <0x00000055, "V_CMPSX_LG_F32", []>;
+defm V_CMPSX_GE_F32 : VOPC_32 <0x00000056, "V_CMPSX_GE_F32", []>;
+defm V_CMPSX_O_F32 : VOPC_32 <0x00000057, "V_CMPSX_O_F32", []>;
+defm V_CMPSX_U_F32 : VOPC_32 <0x00000058, "V_CMPSX_U_F32", []>;
+defm V_CMPSX_NGE_F32 : VOPC_32 <0x00000059, "V_CMPSX_NGE_F32", []>;
+defm V_CMPSX_NLG_F32 : VOPC_32 <0x0000005a, "V_CMPSX_NLG_F32", []>;
+defm V_CMPSX_NGT_F32 : VOPC_32 <0x0000005b, "V_CMPSX_NGT_F32", []>;
+defm V_CMPSX_NLE_F32 : VOPC_32 <0x0000005c, "V_CMPSX_NLE_F32", []>;
+defm V_CMPSX_NEQ_F32 : VOPC_32 <0x0000005d, "V_CMPSX_NEQ_F32", []>;
+defm V_CMPSX_NLT_F32 : VOPC_32 <0x0000005e, "V_CMPSX_NLT_F32", []>;
+defm V_CMPSX_TRU_F32 : VOPC_32 <0x0000005f, "V_CMPSX_TRU_F32", []>;
+defm V_CMPS_F_F64 : VOPC_64 <0x00000060, "V_CMPS_F_F64", []>;
+defm V_CMPS_LT_F64 : VOPC_64 <0x00000061, "V_CMPS_LT_F64", []>;
+defm V_CMPS_EQ_F64 : VOPC_64 <0x00000062, "V_CMPS_EQ_F64", []>;
+defm V_CMPS_LE_F64 : VOPC_64 <0x00000063, "V_CMPS_LE_F64", []>;
+defm V_CMPS_GT_F64 : VOPC_64 <0x00000064, "V_CMPS_GT_F64", []>;
+defm V_CMPS_LG_F64 : VOPC_64 <0x00000065, "V_CMPS_LG_F64", []>;
+defm V_CMPS_GE_F64 : VOPC_64 <0x00000066, "V_CMPS_GE_F64", []>;
+defm V_CMPS_O_F64 : VOPC_64 <0x00000067, "V_CMPS_O_F64", []>;
+defm V_CMPS_U_F64 : VOPC_64 <0x00000068, "V_CMPS_U_F64", []>;
+defm V_CMPS_NGE_F64 : VOPC_64 <0x00000069, "V_CMPS_NGE_F64", []>;
+defm V_CMPS_NLG_F64 : VOPC_64 <0x0000006a, "V_CMPS_NLG_F64", []>;
+defm V_CMPS_NGT_F64 : VOPC_64 <0x0000006b, "V_CMPS_NGT_F64", []>;
+defm V_CMPS_NLE_F64 : VOPC_64 <0x0000006c, "V_CMPS_NLE_F64", []>;
+defm V_CMPS_NEQ_F64 : VOPC_64 <0x0000006d, "V_CMPS_NEQ_F64", []>;
+defm V_CMPS_NLT_F64 : VOPC_64 <0x0000006e, "V_CMPS_NLT_F64", []>;
+defm V_CMPS_TRU_F64 : VOPC_64 <0x0000006f, "V_CMPS_TRU_F64", []>;
+defm V_CMPSX_F_F64 : VOPC_64 <0x00000070, "V_CMPSX_F_F64", []>;
+defm V_CMPSX_LT_F64 : VOPC_64 <0x00000071, "V_CMPSX_LT_F64", []>;
+defm V_CMPSX_EQ_F64 : VOPC_64 <0x00000072, "V_CMPSX_EQ_F64", []>;
+defm V_CMPSX_LE_F64 : VOPC_64 <0x00000073, "V_CMPSX_LE_F64", []>;
+defm V_CMPSX_GT_F64 : VOPC_64 <0x00000074, "V_CMPSX_GT_F64", []>;
+defm V_CMPSX_LG_F64 : VOPC_64 <0x00000075, "V_CMPSX_LG_F64", []>;
+defm V_CMPSX_GE_F64 : VOPC_64 <0x00000076, "V_CMPSX_GE_F64", []>;
+defm V_CMPSX_O_F64 : VOPC_64 <0x00000077, "V_CMPSX_O_F64", []>;
+defm V_CMPSX_U_F64 : VOPC_64 <0x00000078, "V_CMPSX_U_F64", []>;
+defm V_CMPSX_NGE_F64 : VOPC_64 <0x00000079, "V_CMPSX_NGE_F64", []>;
+defm V_CMPSX_NLG_F64 : VOPC_64 <0x0000007a, "V_CMPSX_NLG_F64", []>;
+defm V_CMPSX_NGT_F64 : VOPC_64 <0x0000007b, "V_CMPSX_NGT_F64", []>;
+defm V_CMPSX_NLE_F64 : VOPC_64 <0x0000007c, "V_CMPSX_NLE_F64", []>;
+defm V_CMPSX_NEQ_F64 : VOPC_64 <0x0000007d, "V_CMPSX_NEQ_F64", []>;
+defm V_CMPSX_NLT_F64 : VOPC_64 <0x0000007e, "V_CMPSX_NLT_F64", []>;
+defm V_CMPSX_TRU_F64 : VOPC_64 <0x0000007f, "V_CMPSX_TRU_F64", []>;
+defm V_CMP_F_I32 : VOPC_32 <0x00000080, "V_CMP_F_I32", []>;
+defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", []>;
+defm V_CMP_EQ_I32 : VOPC_32 <0x00000082, "V_CMP_EQ_I32", []>;
+defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", []>;
+defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", []>;
+defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", []>;
+defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", []>;
+defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32", []>;
+defm V_CMPX_F_I32 : VOPC_32 <0x00000090, "V_CMPX_F_I32", []>;
+defm V_CMPX_LT_I32 : VOPC_32 <0x00000091, "V_CMPX_LT_I32", []>;
+defm V_CMPX_EQ_I32 : VOPC_32 <0x00000092, "V_CMPX_EQ_I32", []>;
+defm V_CMPX_LE_I32 : VOPC_32 <0x00000093, "V_CMPX_LE_I32", []>;
+defm V_CMPX_GT_I32 : VOPC_32 <0x00000094, "V_CMPX_GT_I32", []>;
+defm V_CMPX_NE_I32 : VOPC_32 <0x00000095, "V_CMPX_NE_I32", []>;
+defm V_CMPX_GE_I32 : VOPC_32 <0x00000096, "V_CMPX_GE_I32", []>;
+defm V_CMPX_T_I32 : VOPC_32 <0x00000097, "V_CMPX_T_I32", []>;
+defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64", []>;
+defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", []>;
+defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64", []>;
+defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64", []>;
+defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64", []>;
+defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", []>;
+defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", []>;
+defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64", []>;
+defm V_CMPX_F_I64 : VOPC_64 <0x000000b0, "V_CMPX_F_I64", []>;
+defm V_CMPX_LT_I64 : VOPC_64 <0x000000b1, "V_CMPX_LT_I64", []>;
+defm V_CMPX_EQ_I64 : VOPC_64 <0x000000b2, "V_CMPX_EQ_I64", []>;
+defm V_CMPX_LE_I64 : VOPC_64 <0x000000b3, "V_CMPX_LE_I64", []>;
+defm V_CMPX_GT_I64 : VOPC_64 <0x000000b4, "V_CMPX_GT_I64", []>;
+defm V_CMPX_NE_I64 : VOPC_64 <0x000000b5, "V_CMPX_NE_I64", []>;
+defm V_CMPX_GE_I64 : VOPC_64 <0x000000b6, "V_CMPX_GE_I64", []>;
+defm V_CMPX_T_I64 : VOPC_64 <0x000000b7, "V_CMPX_T_I64", []>;
+defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32", []>;
+defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32", []>;
+defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32", []>;
+defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32", []>;
+defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32", []>;
+defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32", []>;
+defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32", []>;
+defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32", []>;
+defm V_CMPX_F_U32 : VOPC_32 <0x000000d0, "V_CMPX_F_U32", []>;
+defm V_CMPX_LT_U32 : VOPC_32 <0x000000d1, "V_CMPX_LT_U32", []>;
+defm V_CMPX_EQ_U32 : VOPC_32 <0x000000d2, "V_CMPX_EQ_U32", []>;
+defm V_CMPX_LE_U32 : VOPC_32 <0x000000d3, "V_CMPX_LE_U32", []>;
+defm V_CMPX_GT_U32 : VOPC_32 <0x000000d4, "V_CMPX_GT_U32", []>;
+defm V_CMPX_NE_U32 : VOPC_32 <0x000000d5, "V_CMPX_NE_U32", []>;
+defm V_CMPX_GE_U32 : VOPC_32 <0x000000d6, "V_CMPX_GE_U32", []>;
+defm V_CMPX_T_U32 : VOPC_32 <0x000000d7, "V_CMPX_T_U32", []>;
+defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64", []>;
+defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", []>;
+defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64", []>;
+defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64", []>;
+defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64", []>;
+defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", []>;
+defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", []>;
+defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64", []>;
+defm V_CMPX_F_U64 : VOPC_64 <0x000000f0, "V_CMPX_F_U64", []>;
+defm V_CMPX_LT_U64 : VOPC_64 <0x000000f1, "V_CMPX_LT_U64", []>;
+defm V_CMPX_EQ_U64 : VOPC_64 <0x000000f2, "V_CMPX_EQ_U64", []>;
+defm V_CMPX_LE_U64 : VOPC_64 <0x000000f3, "V_CMPX_LE_U64", []>;
+defm V_CMPX_GT_U64 : VOPC_64 <0x000000f4, "V_CMPX_GT_U64", []>;
+defm V_CMPX_NE_U64 : VOPC_64 <0x000000f5, "V_CMPX_NE_U64", []>;
+defm V_CMPX_GE_U64 : VOPC_64 <0x000000f6, "V_CMPX_GE_U64", []>;
+defm V_CMPX_T_U64 : VOPC_64 <0x000000f7, "V_CMPX_T_U64", []>;
+defm V_CMP_CLASS_F32 : VOPC_32 <0x00000088, "V_CMP_CLASS_F32", []>;
+defm V_CMPX_CLASS_F32 : VOPC_32 <0x00000098, "V_CMPX_CLASS_F32", []>;
+defm V_CMP_CLASS_F64 : VOPC_64 <0x000000a8, "V_CMP_CLASS_F64", []>;
+defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64", []>;
+//def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>;
+//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>;
+//def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>;
+def BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>;
+//def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "BUFFER_STORE_FORMAT_X", []>;
+//def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>;
+//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>;
+//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>;
+//def BUFFER_LOAD_UBYTE : MUBUF_ <0x00000008, "BUFFER_LOAD_UBYTE", []>;
+//def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>;
+//def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>;
+//def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>;
+//def BUFFER_LOAD_DWORD : MUBUF_ <0x0000000c, "BUFFER_LOAD_DWORD", []>;
+//def BUFFER_LOAD_DWORDX2 : MUBUF_DWORDX2 <0x0000000d, "BUFFER_LOAD_DWORDX2", []>;
+//def BUFFER_LOAD_DWORDX4 : MUBUF_DWORDX4 <0x0000000e, "BUFFER_LOAD_DWORDX4", []>;
+//def BUFFER_STORE_BYTE : MUBUF_ <0x00000018, "BUFFER_STORE_BYTE", []>;
+//def BUFFER_STORE_SHORT : MUBUF_ <0x0000001a, "BUFFER_STORE_SHORT", []>;
+//def BUFFER_STORE_DWORD : MUBUF_ <0x0000001c, "BUFFER_STORE_DWORD", []>;
+//def BUFFER_STORE_DWORDX2 : MUBUF_DWORDX2 <0x0000001d, "BUFFER_STORE_DWORDX2", []>;
+//def BUFFER_STORE_DWORDX4 : MUBUF_DWORDX4 <0x0000001e, "BUFFER_STORE_DWORDX4", []>;
+//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
+//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
+//def BUFFER_ATOMIC_ADD : MUBUF_ <0x00000032, "BUFFER_ATOMIC_ADD", []>;
+//def BUFFER_ATOMIC_SUB : MUBUF_ <0x00000033, "BUFFER_ATOMIC_SUB", []>;
+//def BUFFER_ATOMIC_RSUB : MUBUF_ <0x00000034, "BUFFER_ATOMIC_RSUB", []>;
+//def BUFFER_ATOMIC_SMIN : MUBUF_ <0x00000035, "BUFFER_ATOMIC_SMIN", []>;
+//def BUFFER_ATOMIC_UMIN : MUBUF_ <0x00000036, "BUFFER_ATOMIC_UMIN", []>;
+//def BUFFER_ATOMIC_SMAX : MUBUF_ <0x00000037, "BUFFER_ATOMIC_SMAX", []>;
+//def BUFFER_ATOMIC_UMAX : MUBUF_ <0x00000038, "BUFFER_ATOMIC_UMAX", []>;
+//def BUFFER_ATOMIC_AND : MUBUF_ <0x00000039, "BUFFER_ATOMIC_AND", []>;
+//def BUFFER_ATOMIC_OR : MUBUF_ <0x0000003a, "BUFFER_ATOMIC_OR", []>;
+//def BUFFER_ATOMIC_XOR : MUBUF_ <0x0000003b, "BUFFER_ATOMIC_XOR", []>;
+//def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "BUFFER_ATOMIC_INC", []>;
+//def BUFFER_ATOMIC_DEC : MUBUF_ <0x0000003d, "BUFFER_ATOMIC_DEC", []>;
+//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <0x0000003e, "BUFFER_ATOMIC_FCMPSWAP", []>;
+//def BUFFER_ATOMIC_FMIN : MUBUF_ <0x0000003f, "BUFFER_ATOMIC_FMIN", []>;
+//def BUFFER_ATOMIC_FMAX : MUBUF_ <0x00000040, "BUFFER_ATOMIC_FMAX", []>;
+//def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <0x00000050, "BUFFER_ATOMIC_SWAP_X2", []>;
+//def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <0x00000051, "BUFFER_ATOMIC_CMPSWAP_X2", []>;
+//def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <0x00000052, "BUFFER_ATOMIC_ADD_X2", []>;
+//def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <0x00000053, "BUFFER_ATOMIC_SUB_X2", []>;
+//def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <0x00000054, "BUFFER_ATOMIC_RSUB_X2", []>;
+//def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <0x00000055, "BUFFER_ATOMIC_SMIN_X2", []>;
+//def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <0x00000056, "BUFFER_ATOMIC_UMIN_X2", []>;
+//def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <0x00000057, "BUFFER_ATOMIC_SMAX_X2", []>;
+//def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <0x00000058, "BUFFER_ATOMIC_UMAX_X2", []>;
+//def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <0x00000059, "BUFFER_ATOMIC_AND_X2", []>;
+//def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <0x0000005a, "BUFFER_ATOMIC_OR_X2", []>;
+//def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <0x0000005b, "BUFFER_ATOMIC_XOR_X2", []>;
+//def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <0x0000005c, "BUFFER_ATOMIC_INC_X2", []>;
+//def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <0x0000005d, "BUFFER_ATOMIC_DEC_X2", []>;
+//def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <0x0000005e, "BUFFER_ATOMIC_FCMPSWAP_X2", []>;
+//def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <0x0000005f, "BUFFER_ATOMIC_FMIN_X2", []>;
+//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <0x00000060, "BUFFER_ATOMIC_FMAX_X2", []>;
+//def BUFFER_WBINVL1_SC : MUBUF_WBINVL1 <0x00000070, "BUFFER_WBINVL1_SC", []>;
+//def BUFFER_WBINVL1 : MUBUF_WBINVL1 <0x00000071, "BUFFER_WBINVL1", []>;
+//def TBUFFER_LOAD_FORMAT_X : MTBUF_ <0x00000000, "TBUFFER_LOAD_FORMAT_X", []>;
+//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "TBUFFER_LOAD_FORMAT_XY", []>;
+//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "TBUFFER_LOAD_FORMAT_XYZ", []>;
+def TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "TBUFFER_LOAD_FORMAT_XYZW", VReg_128>;
+//def TBUFFER_STORE_FORMAT_X : MTBUF_ <0x00000004, "TBUFFER_STORE_FORMAT_X", []>;
+//def TBUFFER_STORE_FORMAT_XY : MTBUF_ <0x00000005, "TBUFFER_STORE_FORMAT_XY", []>;
+//def TBUFFER_STORE_FORMAT_XYZ : MTBUF_ <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", []>;
+//def TBUFFER_STORE_FORMAT_XYZW : MTBUF_ <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", []>;
+
+defm S_LOAD_DWORD : SMRD_Helper <0x00000000, "S_LOAD_DWORD", SReg_32, f32>;
+//def S_LOAD_DWORDX2 : SMRD_DWORDX2 <0x00000001, "S_LOAD_DWORDX2", []>;
+defm S_LOAD_DWORDX4 : SMRD_Helper <0x00000002, "S_LOAD_DWORDX4", SReg_128, v4i32>;
+defm S_LOAD_DWORDX8 : SMRD_Helper <0x00000003, "S_LOAD_DWORDX8", SReg_256, v8i32>;
+//def S_LOAD_DWORDX16 : SMRD_DWORDX16 <0x00000004, "S_LOAD_DWORDX16", []>;
+//def S_BUFFER_LOAD_DWORD : SMRD_ <0x00000008, "S_BUFFER_LOAD_DWORD", []>;
+//def S_BUFFER_LOAD_DWORDX2 : SMRD_DWORDX2 <0x00000009, "S_BUFFER_LOAD_DWORDX2", []>;
+//def S_BUFFER_LOAD_DWORDX4 : SMRD_DWORDX4 <0x0000000a, "S_BUFFER_LOAD_DWORDX4", []>;
+//def S_BUFFER_LOAD_DWORDX8 : SMRD_DWORDX8 <0x0000000b, "S_BUFFER_LOAD_DWORDX8", []>;
+//def S_BUFFER_LOAD_DWORDX16 : SMRD_DWORDX16 <0x0000000c, "S_BUFFER_LOAD_DWORDX16", []>;
+
+//def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>;
+//def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>;
+//def IMAGE_LOAD : MIMG_NoPattern_ <"IMAGE_LOAD", 0x00000000>;
+//def IMAGE_LOAD_MIP : MIMG_NoPattern_ <"IMAGE_LOAD_MIP", 0x00000001>;
+//def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>;
+//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_PCK_SGN", 0x00000003>;
+//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK", 0x00000004>;
+//def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK_SGN", 0x00000005>;
+//def IMAGE_STORE : MIMG_NoPattern_ <"IMAGE_STORE", 0x00000008>;
+//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"IMAGE_STORE_MIP", 0x00000009>;
+//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"IMAGE_STORE_PCK", 0x0000000a>;
+//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"IMAGE_STORE_MIP_PCK", 0x0000000b>;
+//def IMAGE_GET_RESINFO : MIMG_NoPattern_ <"IMAGE_GET_RESINFO", 0x0000000e>;
+//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_SWAP", 0x0000000f>;
+//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_CMPSWAP", 0x00000010>;
+//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"IMAGE_ATOMIC_ADD", 0x00000011>;
+//def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"IMAGE_ATOMIC_SUB", 0x00000012>;
+//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"IMAGE_ATOMIC_RSUB", 0x00000013>;
+//def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_SMIN", 0x00000014>;
+//def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_UMIN", 0x00000015>;
+//def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_SMAX", 0x00000016>;
+//def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_UMAX", 0x00000017>;
+//def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"IMAGE_ATOMIC_AND", 0x00000018>;
+//def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"IMAGE_ATOMIC_OR", 0x00000019>;
+//def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"IMAGE_ATOMIC_XOR", 0x0000001a>;
+//def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"IMAGE_ATOMIC_INC", 0x0000001b>;
+//def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"IMAGE_ATOMIC_DEC", 0x0000001c>;
+//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>;
+//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>;
+//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>;
+def IMAGE_SAMPLE : MIMG_Load_Helper <0x00000020, "IMAGE_SAMPLE">; 
+//def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>;
+//def IMAGE_SAMPLE_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_D", 0x00000022>;
+//def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>;
+//def IMAGE_SAMPLE_L : MIMG_NoPattern_ <"IMAGE_SAMPLE_L", 0x00000024>;
+//def IMAGE_SAMPLE_B : MIMG_NoPattern_ <"IMAGE_SAMPLE_B", 0x00000025>;
+//def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>;
+//def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>;
+//def IMAGE_SAMPLE_C : MIMG_NoPattern_ <"IMAGE_SAMPLE_C", 0x00000028>;
+//def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>;
+//def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>;
+//def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>;
+//def IMAGE_SAMPLE_C_L : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_L", 0x0000002c>;
+//def IMAGE_SAMPLE_C_B : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B", 0x0000002d>;
+//def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>;
+//def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>;
+//def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>;
+//def IMAGE_SAMPLE_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL_O", 0x00000031>;
+//def IMAGE_SAMPLE_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_O", 0x00000032>;
+//def IMAGE_SAMPLE_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL_O", 0x00000033>;
+//def IMAGE_SAMPLE_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_L_O", 0x00000034>;
+//def IMAGE_SAMPLE_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_O", 0x00000035>;
+//def IMAGE_SAMPLE_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL_O", 0x00000036>;
+//def IMAGE_SAMPLE_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ_O", 0x00000037>;
+//def IMAGE_SAMPLE_C_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_O", 0x00000038>;
+//def IMAGE_SAMPLE_C_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL_O", 0x00000039>;
+//def IMAGE_SAMPLE_C_D_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_O", 0x0000003a>;
+//def IMAGE_SAMPLE_C_D_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL_O", 0x0000003b>;
+//def IMAGE_SAMPLE_C_L_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_L_O", 0x0000003c>;
+//def IMAGE_SAMPLE_C_B_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_O", 0x0000003d>;
+//def IMAGE_SAMPLE_C_B_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL_O", 0x0000003e>;
+//def IMAGE_SAMPLE_C_LZ_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ_O", 0x0000003f>;
+//def IMAGE_GATHER4 : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4", 0x00000040>;
+//def IMAGE_GATHER4_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_CL", 0x00000041>;
+//def IMAGE_GATHER4_L : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_L", 0x00000044>;
+//def IMAGE_GATHER4_B : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B", 0x00000045>;
+//def IMAGE_GATHER4_B_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_CL", 0x00000046>;
+//def IMAGE_GATHER4_LZ : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_LZ", 0x00000047>;
+//def IMAGE_GATHER4_C : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C", 0x00000048>;
+//def IMAGE_GATHER4_C_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_CL", 0x00000049>;
+//def IMAGE_GATHER4_C_L : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_L", 0x0000004c>;
+//def IMAGE_GATHER4_C_B : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B", 0x0000004d>;
+//def IMAGE_GATHER4_C_B_CL : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_CL", 0x0000004e>;
+//def IMAGE_GATHER4_C_LZ : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_LZ", 0x0000004f>;
+//def IMAGE_GATHER4_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_O", 0x00000050>;
+//def IMAGE_GATHER4_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_CL_O", 0x00000051>;
+//def IMAGE_GATHER4_L_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_L_O", 0x00000054>;
+//def IMAGE_GATHER4_B_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_O", 0x00000055>;
+//def IMAGE_GATHER4_B_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_B_CL_O", 0x00000056>;
+//def IMAGE_GATHER4_LZ_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_LZ_O", 0x00000057>;
+//def IMAGE_GATHER4_C_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_O", 0x00000058>;
+//def IMAGE_GATHER4_C_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_CL_O", 0x00000059>;
+//def IMAGE_GATHER4_C_L_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_L_O", 0x0000005c>;
+//def IMAGE_GATHER4_C_B_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_O", 0x0000005d>;
+//def IMAGE_GATHER4_C_B_CL_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_B_CL_O", 0x0000005e>;
+//def IMAGE_GATHER4_C_LZ_O : MIMG_NoPattern_GATHER4 <"IMAGE_GATHER4_C_LZ_O", 0x0000005f>;
+//def IMAGE_GET_LOD : MIMG_NoPattern_ <"IMAGE_GET_LOD", 0x00000060>;
+//def IMAGE_SAMPLE_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD", 0x00000068>;
+//def IMAGE_SAMPLE_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL", 0x00000069>;
+//def IMAGE_SAMPLE_C_CD : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD", 0x0000006a>;
+//def IMAGE_SAMPLE_C_CD_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL", 0x0000006b>;
+//def IMAGE_SAMPLE_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_O", 0x0000006c>;
+//def IMAGE_SAMPLE_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_CD_CL_O", 0x0000006d>;
+//def IMAGE_SAMPLE_C_CD_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_O", 0x0000006e>;
+//def IMAGE_SAMPLE_C_CD_CL_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CD_CL_O", 0x0000006f>;
+//def IMAGE_RSRC256 : MIMG_NoPattern_RSRC256 <"IMAGE_RSRC256", 0x0000007e>;
+//def IMAGE_SAMPLER : MIMG_NoPattern_ <"IMAGE_SAMPLER", 0x0000007f>;
+//def V_NOP : VOP1_ <0x00000000, "V_NOP", []>;
+
+let neverHasSideEffects = 1 in {
+defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
+}  // End neverHasSideEffects
+defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>;
+//defm V_CVT_I32_F64 : VOP1_32 <0x00000003, "V_CVT_I32_F64", []>;
+//defm V_CVT_F64_I32 : VOP1_64 <0x00000004, "V_CVT_F64_I32", []>;
+//defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32", []>;
+//defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32", []>;
+//defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>;
+//defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32", []>;
+defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>;
+////def V_CVT_F16_F32 : VOP1_F16 <0x0000000a, "V_CVT_F16_F32", []>;
+//defm V_CVT_F32_F16 : VOP1_32 <0x0000000b, "V_CVT_F32_F16", []>;
+//defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>;
+//defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>;
+//defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>;
+//defm V_CVT_F32_F64 : VOP1_32 <0x0000000f, "V_CVT_F32_F64", []>;
+//defm V_CVT_F64_F32 : VOP1_64 <0x00000010, "V_CVT_F64_F32", []>;
+//defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", []>;
+//defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", []>;
+//defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", []>;
+//defm V_CVT_F32_UBYTE3 : VOP1_32 <0x00000014, "V_CVT_F32_UBYTE3", []>;
+//defm V_CVT_U32_F64 : VOP1_32 <0x00000015, "V_CVT_U32_F64", []>;
+//defm V_CVT_F64_U32 : VOP1_64 <0x00000016, "V_CVT_F64_U32", []>;
+defm V_FRACT_F32 : VOP1_32 <0x00000020, "V_FRACT_F32", []>;
+defm V_TRUNC_F32 : VOP1_32 <0x00000021, "V_TRUNC_F32", []>;
+defm V_CEIL_F32 : VOP1_32 <0x00000022, "V_CEIL_F32", []>;
+defm V_RNDNE_F32 : VOP1_32 <0x00000023, "V_RNDNE_F32", []>;
+defm V_FLOOR_F32 : VOP1_32 <0x00000024, "V_FLOOR_F32", []>;
+defm V_EXP_F32 : VOP1_32 <0x00000025, "V_EXP_F32", []>;
+defm V_LOG_CLAMP_F32 : VOP1_32 <0x00000026, "V_LOG_CLAMP_F32", []>;
+defm V_LOG_F32 : VOP1_32 <0x00000027, "V_LOG_F32", []>;
+defm V_RCP_CLAMP_F32 : VOP1_32 <0x00000028, "V_RCP_CLAMP_F32", []>;
+defm V_RCP_LEGACY_F32 : VOP1_32 <0x00000029, "V_RCP_LEGACY_F32", []>;
+defm V_RCP_F32 : VOP1_32 <0x0000002a, "V_RCP_F32", []>;
+defm V_RCP_IFLAG_F32 : VOP1_32 <0x0000002b, "V_RCP_IFLAG_F32", []>;
+defm V_RSQ_CLAMP_F32 : VOP1_32 <0x0000002c, "V_RSQ_CLAMP_F32", []>;
+defm V_RSQ_LEGACY_F32 : VOP1_32 <
+  0x0000002d, "V_RSQ_LEGACY_F32",
+  [(set VReg_32:$dst, (int_AMDGPU_rsq AllReg_32:$src0))]
+>;
+defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>;
+defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", []>;
+defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>;
+defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", []>;
+defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", []>;
+defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", []>;
+defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", []>;
+defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>;
+defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>;
+defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>;
+defm V_BFREV_B32 : VOP1_32 <0x00000038, "V_BFREV_B32", []>;
+defm V_FFBH_U32 : VOP1_32 <0x00000039, "V_FFBH_U32", []>;
+defm V_FFBL_B32 : VOP1_32 <0x0000003a, "V_FFBL_B32", []>;
+defm V_FFBH_I32 : VOP1_32 <0x0000003b, "V_FFBH_I32", []>;
+//defm V_FREXP_EXP_I32_F64 : VOP1_32 <0x0000003c, "V_FREXP_EXP_I32_F64", []>;
+defm V_FREXP_MANT_F64 : VOP1_64 <0x0000003d, "V_FREXP_MANT_F64", []>;
+defm V_FRACT_F64 : VOP1_64 <0x0000003e, "V_FRACT_F64", []>;
+//defm V_FREXP_EXP_I32_F32 : VOP1_32 <0x0000003f, "V_FREXP_EXP_I32_F32", []>;
+defm V_FREXP_MANT_F32 : VOP1_32 <0x00000040, "V_FREXP_MANT_F32", []>;
+//def V_CLREXCP : VOP1_ <0x00000041, "V_CLREXCP", []>;
+defm V_MOVRELD_B32 : VOP1_32 <0x00000042, "V_MOVRELD_B32", []>;
+defm V_MOVRELS_B32 : VOP1_32 <0x00000043, "V_MOVRELS_B32", []>;
+defm V_MOVRELSD_B32 : VOP1_32 <0x00000044, "V_MOVRELSD_B32", []>;
+
+def V_INTERP_P1_F32 : VINTRP <
+  0x00000000,
+  (outs VReg_32:$dst),
+  (ins VReg_32:$i, i32imm:$attr_chan, i32imm:$attr),
+  "V_INTERP_P1_F32",
+  []
+>;
+
+def V_INTERP_P2_F32 : VINTRP <
+  0x00000001,
+  (outs VReg_32:$dst),
+  (ins VReg_32:$src0, VReg_32:$j, i32imm:$attr_chan, i32imm:$attr),
+  "V_INTERP_P2_F32",
+  []> {
+
+  let Constraints = "$src0 = $dst";
+  let DisableEncoding = "$src0";
+
+}
+
+def V_INTERP_MOV_F32 : VINTRP <
+  0x00000002,
+  (outs VReg_32:$dst),
+  (ins i32imm:$attr_chan, i32imm:$attr),
+  "V_INTERP_MOV_F32",
+  []> {
+  let VSRC = 0;
+}
+
+//def V_INTERP_MOV_F32 : VINTRP_32 <0x00000002, "V_INTERP_MOV_F32", []>;
+//def S_NOP : SOPP_ <0x00000000, "S_NOP", []>;
+def S_ENDPGM : SOPP <0x00000001, (ins), "S_ENDPGM"> {
+  let SIMM16 = 0;
+  let isTerminator = 1;
+}
+//def S_BRANCH : SOPP_ <0x00000002, "S_BRANCH", []>;
+//def S_CBRANCH_SCC0 : SOPP_SCC0 <0x00000004, "S_CBRANCH_SCC0", []>;
+//def S_CBRANCH_SCC1 : SOPP_SCC1 <0x00000005, "S_CBRANCH_SCC1", []>;
+//def S_CBRANCH_VCCZ : SOPP_ <0x00000006, "S_CBRANCH_VCCZ", []>;
+//def S_CBRANCH_VCCNZ : SOPP_ <0x00000007, "S_CBRANCH_VCCNZ", []>;
+//def S_CBRANCH_EXECZ : SOPP_ <0x00000008, "S_CBRANCH_EXECZ", []>;
+//def S_CBRANCH_EXECNZ : SOPP_ <0x00000009, "S_CBRANCH_EXECNZ", []>;
+//def S_BARRIER : SOPP_ <0x0000000a, "S_BARRIER", []>;
+def S_WAITCNT : SOPP <0x0000000c, (ins i32imm:$simm16), "S_WAITCNT $simm16">;
+//def S_SETHALT : SOPP_ <0x0000000d, "S_SETHALT", []>;
+//def S_SLEEP : SOPP_ <0x0000000e, "S_SLEEP", []>;
+//def S_SETPRIO : SOPP_ <0x0000000f, "S_SETPRIO", []>;
+//def S_SENDMSG : SOPP_ <0x00000010, "S_SENDMSG", []>;
+//def S_SENDMSGHALT : SOPP_ <0x00000011, "S_SENDMSGHALT", []>;
+//def S_TRAP : SOPP_ <0x00000012, "S_TRAP", []>;
+//def S_ICACHE_INV : SOPP_ <0x00000013, "S_ICACHE_INV", []>;
+//def S_INCPERFLEVEL : SOPP_ <0x00000014, "S_INCPERFLEVEL", []>;
+//def S_DECPERFLEVEL : SOPP_ <0x00000015, "S_DECPERFLEVEL", []>;
+//def S_TTRACEDATA : SOPP_ <0x00000016, "S_TTRACEDATA", []>;
+
+/* XXX: No VOP3 version of this instruction yet */
+def V_CNDMASK_B32 : VOP2_Helper <
+  0x00000000, VReg_32, AllReg_32, "V_CNDMASK_B32", []> {
+  let VDST = 0;
+  let Uses = [VCC];
+}
+defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
+defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;
+
+defm V_ADD_F32 : VOP2_32 <
+  0x00000003, "V_ADD_F32",
+  [(set VReg_32:$dst, (fadd AllReg_32:$src0, VReg_32:$src1))]
+>;
+
+defm V_SUB_F32 : VOP2_32 <0x00000004, "V_SUB_F32",
+  [(set VReg_32:$dst, (fsub AllReg_32:$src0, VReg_32:$src1))]
+>;
+defm V_SUBREV_F32 : VOP2_32 <0x00000005, "V_SUBREV_F32", []>;
+defm V_MAC_LEGACY_F32 : VOP2_32 <0x00000006, "V_MAC_LEGACY_F32", []>;
+defm V_MUL_LEGACY_F32 : VOP2_32 <
+  0x00000007, "V_MUL_LEGACY_F32",
+  [(set VReg_32:$dst, (int_AMDGPU_mul AllReg_32:$src0, VReg_32:$src1))]
+>;
+defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32", []>;
+//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
+//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
+//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
+//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
+defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32", []>;
+
+defm V_MAX_LEGACY_F32 : VOP2_32 <0x0000000e, "V_MAX_LEGACY_F32",
+  [(set VReg_32:$dst, (AMDGPUfmax AllReg_32:$src0, VReg_32:$src1))]
+>;
+defm V_MIN_F32 : VOP2_32 <0x0000000f, "V_MIN_F32", []>;
+defm V_MAX_F32 : VOP2_32 <0x00000010, "V_MAX_F32", []>;
+defm V_MIN_I32 : VOP2_32 <0x00000011, "V_MIN_I32", []>;
+defm V_MAX_I32 : VOP2_32 <0x00000012, "V_MAX_I32", []>;
+defm V_MIN_U32 : VOP2_32 <0x00000013, "V_MIN_U32", []>;
+defm V_MAX_U32 : VOP2_32 <0x00000014, "V_MAX_U32", []>;
+defm V_LSHR_B32 : VOP2_32 <0x00000015, "V_LSHR_B32", []>;
+defm V_LSHRREV_B32 : VOP2_32 <0x00000016, "V_LSHRREV_B32", []>;
+defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32", []>;
+defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", []>;
+defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32", []>;
+defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", []>;
+defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32", []>;
+defm V_OR_B32 : VOP2_32 <0x0000001c, "V_OR_B32", []>;
+defm V_XOR_B32 : VOP2_32 <0x0000001d, "V_XOR_B32", []>;
+defm V_BFM_B32 : VOP2_32 <0x0000001e, "V_BFM_B32", []>;
+defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
+defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
+defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
+//defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>;
+//defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
+//defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
+defm V_ADD_I32 : VOP2_32 <0x00000025, "V_ADD_I32", []>;
+defm V_SUB_I32 : VOP2_32 <0x00000026, "V_SUB_I32", []>;
+defm V_SUBREV_I32 : VOP2_32 <0x00000027, "V_SUBREV_I32", []>;
+defm V_ADDC_U32 : VOP2_32 <0x00000028, "V_ADDC_U32", []>;
+defm V_SUBB_U32 : VOP2_32 <0x00000029, "V_SUBB_U32", []>;
+defm V_SUBBREV_U32 : VOP2_32 <0x0000002a, "V_SUBBREV_U32", []>;
+defm V_LDEXP_F32 : VOP2_32 <0x0000002b, "V_LDEXP_F32", []>;
+////def V_CVT_PKACCUM_U8_F32 : VOP2_U8 <0x0000002c, "V_CVT_PKACCUM_U8_F32", []>;
+////def V_CVT_PKNORM_I16_F32 : VOP2_I16 <0x0000002d, "V_CVT_PKNORM_I16_F32", []>;
+////def V_CVT_PKNORM_U16_F32 : VOP2_U16 <0x0000002e, "V_CVT_PKNORM_U16_F32", []>;
+////def V_CVT_PKRTZ_F16_F32 : VOP2_F16 <0x0000002f, "V_CVT_PKRTZ_F16_F32", []>;
+////def V_CVT_PK_U16_U32 : VOP2_U16 <0x00000030, "V_CVT_PK_U16_U32", []>;
+////def V_CVT_PK_I16_I32 : VOP2_I16 <0x00000031, "V_CVT_PK_I16_I32", []>;
+def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "S_CMP_EQ_I32", []>;
+def S_CMP_LG_I32 : SOPC_32 <0x00000001, "S_CMP_LG_I32", []>;
+def S_CMP_GT_I32 : SOPC_32 <0x00000002, "S_CMP_GT_I32", []>;
+def S_CMP_GE_I32 : SOPC_32 <0x00000003, "S_CMP_GE_I32", []>;
+def S_CMP_LT_I32 : SOPC_32 <0x00000004, "S_CMP_LT_I32", []>;
+def S_CMP_LE_I32 : SOPC_32 <0x00000005, "S_CMP_LE_I32", []>;
+def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "S_CMP_EQ_U32", []>;
+def S_CMP_LG_U32 : SOPC_32 <0x00000007, "S_CMP_LG_U32", []>;
+def S_CMP_GT_U32 : SOPC_32 <0x00000008, "S_CMP_GT_U32", []>;
+def S_CMP_GE_U32 : SOPC_32 <0x00000009, "S_CMP_GE_U32", []>;
+def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "S_CMP_LT_U32", []>;
+def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "S_CMP_LE_U32", []>;
+////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "S_BITCMP0_B32", []>;
+////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "S_BITCMP1_B32", []>;
+////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "S_BITCMP0_B64", []>;
+////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "S_BITCMP1_B64", []>;
+//def S_SETVSKIP : SOPC_ <0x00000010, "S_SETVSKIP", []>;
+
+let neverHasSideEffects = 1 in {
+
+def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
+def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>;
+//def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", []>;
+//def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", []>;
+
+} // End neverHasSideEffects
+def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
+def V_CUBESC_F32 : VOP3_32 <0x00000145, "V_CUBESC_F32", []>;
+def V_CUBETC_F32 : VOP3_32 <0x00000146, "V_CUBETC_F32", []>;
+def V_CUBEMA_F32 : VOP3_32 <0x00000147, "V_CUBEMA_F32", []>;
+def V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", []>;
+def V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", []>;
+def V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", []>;
+def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32", []>;
+def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", []>;
+//def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
+def V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
+def V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
+def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
+////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
+////def V_MIN3_I32 : VOP3_MIN3 <0x00000152, "V_MIN3_I32", []>;
+////def V_MIN3_U32 : VOP3_MIN3 <0x00000153, "V_MIN3_U32", []>;
+////def V_MAX3_F32 : VOP3_MAX3 <0x00000154, "V_MAX3_F32", []>;
+////def V_MAX3_I32 : VOP3_MAX3 <0x00000155, "V_MAX3_I32", []>;
+////def V_MAX3_U32 : VOP3_MAX3 <0x00000156, "V_MAX3_U32", []>;
+////def V_MED3_F32 : VOP3_MED3 <0x00000157, "V_MED3_F32", []>;
+////def V_MED3_I32 : VOP3_MED3 <0x00000158, "V_MED3_I32", []>;
+////def V_MED3_U32 : VOP3_MED3 <0x00000159, "V_MED3_U32", []>;
+//def V_SAD_U8 : VOP3_U8 <0x0000015a, "V_SAD_U8", []>;
+//def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "V_SAD_HI_U8", []>;
+//def V_SAD_U16 : VOP3_U16 <0x0000015c, "V_SAD_U16", []>;
+def V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
+////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
+def V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", []>;
+def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", []>;
+def V_LSHL_B64 : VOP3_64 <0x00000161, "V_LSHL_B64", []>;
+def V_LSHR_B64 : VOP3_64 <0x00000162, "V_LSHR_B64", []>;
+def V_ASHR_I64 : VOP3_64 <0x00000163, "V_ASHR_I64", []>;
+def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>;
+def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>;
+def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>;
+def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
+def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
+def V_MUL_LO_U32 : VOP3_32 <0x00000169, "V_MUL_LO_U32", []>;
+def V_MUL_HI_U32 : VOP3_32 <0x0000016a, "V_MUL_HI_U32", []>;
+def V_MUL_LO_I32 : VOP3_32 <0x0000016b, "V_MUL_LO_I32", []>;
+def V_MUL_HI_I32 : VOP3_32 <0x0000016c, "V_MUL_HI_I32", []>;
+def V_DIV_SCALE_F32 : VOP3_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
+def V_DIV_SCALE_F64 : VOP3_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
+def V_DIV_FMAS_F32 : VOP3_32 <0x0000016f, "V_DIV_FMAS_F32", []>;
+def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64", []>;
+//def V_MSAD_U8 : VOP3_U8 <0x00000171, "V_MSAD_U8", []>;
+//def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
+//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
+def V_TRIG_PREOP_F64 : VOP3_64 <0x00000174, "V_TRIG_PREOP_F64", []>;
+def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>;
+def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>;
+def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32", []>;
+def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32", []>;
+def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32", []>;
+def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32", []>;
+def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32", []>;
+def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32", []>;
+def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32", []>;
+def S_MAX_U32 : SOP2_32 <0x00000009, "S_MAX_U32", []>;
+def S_CSELECT_B32 : SOP2_32 <0x0000000a, "S_CSELECT_B32", []>;
+def S_CSELECT_B64 : SOP2_64 <0x0000000b, "S_CSELECT_B64", []>;
+def S_AND_B32 : SOP2_32 <0x0000000e, "S_AND_B32", []>;
+def S_AND_B64 : SOP2_64 <0x0000000f, "S_AND_B64", []>;
+def S_OR_B32 : SOP2_32 <0x00000010, "S_OR_B32", []>;
+def S_OR_B64 : SOP2_64 <0x00000011, "S_OR_B64", []>;
+def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>;
+def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", []>;
+////def S_ANDN2_B32 : SOP2_ANDN2 <0x00000014, "S_ANDN2_B32", []>;
+////def S_ANDN2_B64 : SOP2_ANDN2 <0x00000015, "S_ANDN2_B64", []>;
+////def S_ORN2_B32 : SOP2_ORN2 <0x00000016, "S_ORN2_B32", []>;
+////def S_ORN2_B64 : SOP2_ORN2 <0x00000017, "S_ORN2_B64", []>;
+def S_NAND_B32 : SOP2_32 <0x00000018, "S_NAND_B32", []>;
+def S_NAND_B64 : SOP2_64 <0x00000019, "S_NAND_B64", []>;
+def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>;
+def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>;
+def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>;
+def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>;
+def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32", []>;
+def S_LSHL_B64 : SOP2_64 <0x0000001f, "S_LSHL_B64", []>;
+def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32", []>;
+def S_LSHR_B64 : SOP2_64 <0x00000021, "S_LSHR_B64", []>;
+def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32", []>;
+def S_ASHR_I64 : SOP2_64 <0x00000023, "S_ASHR_I64", []>;
+def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
+def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
+def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
+def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>;
+def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>;
+def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>;
+def S_BFE_I64 : SOP2_64 <0x0000002a, "S_BFE_I64", []>;
+//def S_CBRANCH_G_FORK : SOP2_ <0x0000002b, "S_CBRANCH_G_FORK", []>;
+def S_ABSDIFF_I32 : SOP2_32 <0x0000002c, "S_ABSDIFF_I32", []>;
+
+class V_MOV_IMM <Operand immType, SDNode immNode> : VOP1 <
+  0x1,
+  (outs VReg_32:$dst),
+  (ins immType:$src0),
+  "V_MOV_IMM",
+   [(set VReg_32:$dst, (immNode:$src0))]
+>;
+
+def V_MOV_IMM_I32 : V_MOV_IMM<i32imm, imm>;
+def V_MOV_IMM_F32 : V_MOV_IMM<f32imm, fpimm>;
+
+def S_MOV_IMM_I32 : SOP1 <
+  0x3,
+  (outs SReg_32:$dst),
+  (ins i32Literal:$src0),
+  "S_MOV_IMM_I32",
+  [(set SReg_32:$dst, (imm:$src0))]
+>;
+
+
+let isCodeGenOnly = 1, isPseudo = 1 in {
+
+def SET_M0 : InstSI <
+  (outs SReg_32:$dst),
+  (ins i32imm:$src0),
+  "SET_M0",
+  [(set SReg_32:$dst, (int_SI_set_M0 imm:$src0))]
+>;
+
+def CONFIG_WRITE : InstSI <
+  (outs i32imm:$reg),
+  (ins i32imm:$val),
+  "CONFIG_WRITE $reg, $val",
+  [] > {
+  field bits<32> Inst = 0;
+}
+
+def LOAD_CONST : AMDGPUShaderInst <
+  (outs GPRF32:$dst),
+  (ins i32imm:$src),
+  "LOAD_CONST $dst, $src",
+  [(set GPRF32:$dst, (int_AMDGPU_load_const imm:$src))]
+>;
+
+let usesCustomInserter = 1 in {
+
+def SI_V_CNDLT : InstSI <
+	(outs VReg_32:$dst),
+	(ins VReg_32:$src0, VReg_32:$src1, VReg_32:$src2),
+	"SI_V_CNDLT $dst, $src0, $src1, $src2",
+	[(set VReg_32:$dst, (int_AMDGPU_cndlt VReg_32:$src0, VReg_32:$src1, VReg_32:$src2))]
+>;
+
+def SI_INTERP : InstSI <
+  (outs VReg_32:$dst),
+  (ins VReg_32:$i, VReg_32:$j, i32imm:$attr_chan, i32imm:$attr, SReg_32:$params),
+  "SI_INTERP $dst, $i, $j, $attr_chan, $attr, $params",
+  []
+>;
+
+def SI_INTERP_CONST : InstSI <
+  (outs VReg_32:$dst),
+  (ins i32imm:$attr_chan, i32imm:$attr, SReg_32:$params),
+  "SI_INTERP_CONST $dst, $attr_chan, $attr, $params",
+  [(set VReg_32:$dst, (int_SI_fs_interp_constant imm:$attr_chan,
+                                                 imm:$attr, SReg_32:$params))]
+>;
+
+def USE_SGPR_32 : InstSI <
+  (outs SReg_32:$dst),
+  (ins i32imm:$src0),
+  "USE_SGPR_32",
+  [(set (i32 SReg_32:$dst), (load_user_sgpr imm:$src0))]
+>;
+
+def USE_SGPR_64 : InstSI <
+  (outs SReg_64:$dst),
+  (ins i32imm:$src0),
+  "USE_SGPR_64",
+  [(set (i64 SReg_64:$dst), (load_user_sgpr imm:$src0))]
+>;
+
+def VS_LOAD_BUFFER_INDEX : InstSI <
+  (outs VReg_32:$dst),
+  (ins),
+  "VS_LOAD_BUFFER_INDEX",
+  [(set VReg_32:$dst, (int_SI_vs_load_buffer_index))]> {
+
+  field bits<32> Inst = 0;
+}
+
+} // end usesCustomInserter
+
+} // end isCodeGenOnly, isPseudo
+
+/* int_SI_vs_load_input */
+def : Pat<
+  (int_SI_vs_load_input SReg_128:$tlst, IMM12bit:$attr_offset,
+                        VReg_32:$buf_idx_vgpr),
+  (BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0,
+                           VReg_32:$buf_idx_vgpr, SReg_128:$tlst,
+                           0, 0, (i32 SREG_LIT_0))
+>;
+
+/* int_SI_export */
+def : Pat <
+  (int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr,
+                 VReg_32:$src0,VReg_32:$src1, VReg_32:$src2, VReg_32:$src3),
+  (EXP imm:$en, imm:$tgt, imm:$compr, imm:$done, imm:$vm,
+       VReg_32:$src0, VReg_32:$src1, VReg_32:$src2, VReg_32:$src3)
+>;
+
+/* int_SI_sample */
+def : Pat <
+  (int_SI_sample imm:$writemask, VReg_128:$coord, SReg_256:$rsrc, SReg_128:$sampler),
+  (IMAGE_SAMPLE imm:$writemask, 0, 0, 0, 0, 0, 0, 0, VReg_128:$coord,
+                SReg_256:$rsrc, SReg_128:$sampler)
+>;
+
+def CLAMP_SI : CLAMP<VReg_32>;
+def FABS_SI : FABS<VReg_32>;
+
+def : Extract_Element <f32, v4f32, VReg_128, 0, sel_x>;
+def : Extract_Element <f32, v4f32, VReg_128, 1, sel_y>;
+def : Extract_Element <f32, v4f32, VReg_128, 2, sel_z>;
+def : Extract_Element <f32, v4f32, VReg_128, 3, sel_w>;
+
+def : Insert_Element <f32, v4f32, VReg_32, VReg_128, 4, sel_x>;
+def : Insert_Element <f32, v4f32, VReg_32, VReg_128, 5, sel_y>;
+def : Insert_Element <f32, v4f32, VReg_32, VReg_128, 6, sel_z>;
+def : Insert_Element <f32, v4f32, VReg_32, VReg_128, 7, sel_w>;
+
+def : Vector_Build <v4f32, VReg_32>;
+def : Vector_Build <v4i32, SReg_32>;
+
+/*
+def : Pat<
+  (int_SI_vs_load_buffer_index),
+  (COPY_TO_REGCLASS (f32 VGPR0), VReg_32)
+>; 
+*/ 
+
+/********** ===================== **********/
+/********** Interpolation Patterns **********/
+/********** ===================== **********/
+
+def : Pat <
+  (int_SI_fs_interp_linear_center imm:$attr_chan, imm:$attr, SReg_32:$params),
+  (SI_INTERP (f32 LINEAR_CENTER_I), (f32 LINEAR_CENTER_J), imm:$attr_chan,
+             imm:$attr, SReg_32:$params)
+>;
+
+def : Pat <
+  (int_SI_fs_interp_linear_centroid imm:$attr_chan, imm:$attr, SReg_32:$params),
+  (SI_INTERP (f32 LINEAR_CENTROID_I), (f32 LINEAR_CENTROID_J), imm:$attr_chan,
+             imm:$attr, SReg_32:$params)
+>;
+
+def : Pat <
+  (int_SI_fs_interp_persp_center imm:$attr_chan, imm:$attr, SReg_32:$params),
+  (SI_INTERP (f32 PERSP_CENTER_I), (f32 PERSP_CENTER_J), imm:$attr_chan,
+             imm:$attr, SReg_32:$params)
+>;
+
+def : Pat <
+  (int_SI_fs_interp_persp_centroid imm:$attr_chan, imm:$attr, SReg_32:$params),
+  (SI_INTERP (f32 PERSP_CENTROID_I), (f32 PERSP_CENTROID_J), imm:$attr_chan,
+             imm:$attr, SReg_32:$params)
+>;
+
+/********** ================== **********/
+/********** Intrinsic Patterns **********/
+/********** ================== **********/
+
+/* llvm.AMDGPU.pow */
+/* XXX: We are using IEEE MUL, not the 0 * anything = 0 MUL, is this correct? */
+def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_F32_e32, VReg_32>;
+
+/********** ================== **********/
+/**********   VOP3 Patterns    **********/
+/********** ================== **********/
+
+def : Pat <(f32 (IL_mad AllReg_32:$src0, AllReg_32:$src1, AllReg_32:$src2)),
+           (V_MAD_LEGACY_F32 AllReg_32:$src0, AllReg_32:$src1, AllReg_32:$src2,
+            0, 0, 0, 0)>;
+
+} // End isSI predicate
diff --git a/lib/Target/AMDGPU/SIIntrinsics.td b/lib/Target/AMDGPU/SIIntrinsics.td
new file mode 100644
index 0000000..95273a2
--- /dev/null
+++ b/lib/Target/AMDGPU/SIIntrinsics.td
@@ -0,0 +1,35 @@
+//===-- SIIntrinsics.td - SI Intrinsic defs ----------------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SI Intrinsic Definitions
+//
+//===----------------------------------------------------------------------===//
+
+
+let TargetPrefix = "SI", isTarget = 1 in {
+
+  def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>;
+  /* XXX: We may need a separate intrinsic here for loading integer values */
+  def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+  def int_SI_vs_load_buffer_index : Intrinsic <[llvm_i32_ty], [], []>;
+  def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i16_ty, llvm_i32_ty], []> ;
+
+  def int_SI_sample : Intrinsic <[llvm_v4f32_ty], [llvm_i32_ty, llvm_v4f32_ty, llvm_v8i32_ty, llvm_v4i32_ty]>;
+
+  /* Interpolation Intrinsics */
+
+  def int_SI_set_M0 : Intrinsic <[llvm_i32_ty], [llvm_i32_ty]>;
+  class Interp : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+  def int_SI_fs_interp_linear_center : Interp;
+  def int_SI_fs_interp_linear_centroid : Interp;
+  def int_SI_fs_interp_persp_center : Interp;
+  def int_SI_fs_interp_persp_centroid : Interp;
+  def int_SI_fs_interp_constant : Interp;
+}
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
new file mode 100644
index 0000000..40ba76f
--- /dev/null
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -0,0 +1,18 @@
+//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "SIMachineFunctionInfo.h"
+
+using namespace llvm;
+
+SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
+  : MachineFunctionInfo(),
+    spi_ps_input_addr(0)
+  { }
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
new file mode 100644
index 0000000..46a021f
--- /dev/null
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -0,0 +1,37 @@
+//===- SIMachineFunctionInfo.h - SIMachineFunctionInfo interface -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SIMachineFunctionInfo is used to keep track of the spi_ps_input_addr config
+// register, which is to tell the hardware which interpolation parameters to
+// load.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef _SIMACHINEFUNCTIONINFO_H_
+#define _SIMACHINEFUNCTIONINFO_H_
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+class SIMachineFunctionInfo : public MachineFunctionInfo {
+
+  private:
+
+  public:
+    SIMachineFunctionInfo(const MachineFunction &MF);
+    unsigned spi_ps_input_addr;
+
+};
+
+} // End namespace llvm
+
+
+#endif //_SIMACHINEFUNCTIONINFO_H_
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
new file mode 100644
index 0000000..7c87eaf
--- /dev/null
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -0,0 +1,51 @@
+//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SI implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#include "SIRegisterInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "AMDGPUUtil.h"
+
+using namespace llvm;
+
+SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm,
+    const TargetInstrInfo &tii)
+: AMDGPURegisterInfo(tm, tii),
+  TM(tm),
+  TII(tii)
+  { }
+
+BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const
+{
+  BitVector Reserved(getNumRegs());
+  return Reserved;
+}
+
+const TargetRegisterClass *
+SIRegisterInfo::getISARegClass(const TargetRegisterClass * rc) const
+{
+  switch (rc->getID()) {
+  case AMDIL::GPRF32RegClassID:
+    return &AMDIL::VReg_32RegClass;
+  default: return rc;
+  }
+}
+
+const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
+                                                                   MVT VT) const
+{
+  switch(VT.SimpleTy) {
+    default:
+    case MVT::i32: return &AMDIL::VReg_32RegClass;
+  }
+}
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.h b/lib/Target/AMDGPU/SIRegisterInfo.h
new file mode 100644
index 0000000..b571da9
--- /dev/null
+++ b/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -0,0 +1,47 @@
+//===-- SIRegisterInfo.h - SI Register Info Interface ----------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface definition for SIRegisterInfo
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef SIREGISTERINFO_H_
+#define SIREGISTERINFO_H_
+
+#include "AMDGPURegisterInfo.h"
+
+namespace llvm {
+
+class AMDGPUTargetMachine;
+class TargetInstrInfo;
+
+struct SIRegisterInfo : public AMDGPURegisterInfo
+{
+  AMDGPUTargetMachine &TM;
+  const TargetInstrInfo &TII;
+
+  SIRegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+
+  virtual BitVector getReservedRegs(const MachineFunction &MF) const;
+
+  /// getISARegClass - rc is an AMDIL reg class.  This function returns the
+  /// SI register class that is equivalent to the given AMDIL register class.
+  virtual const TargetRegisterClass *
+    getISARegClass(const TargetRegisterClass * rc) const;
+
+  /// getCFGStructurizerRegClass - get the register class of the specified
+  /// type to use in the CFGStructurizer
+  virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+
+};
+
+} // End namespace llvm
+
+#endif // SIREGISTERINFO_H_
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.td b/lib/Target/AMDGPU/SIRegisterInfo.td
new file mode 100644
index 0000000..7ae2c8a
--- /dev/null
+++ b/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -0,0 +1,886 @@
+
+let Namespace = "AMDIL" in {
+  def low : SubRegIndex;
+  def high : SubRegIndex;
+
+  def sub0 : SubRegIndex;
+  def sub1 : SubRegIndex;
+  def sub2 : SubRegIndex;
+  def sub3 : SubRegIndex;
+  def sub4 : SubRegIndex;
+  def sub5 : SubRegIndex;
+  def sub6 : SubRegIndex;
+  def sub7 : SubRegIndex;
+}
+
+class SIReg <string n, bits<16> encoding = 0> : Register<n> {
+  let Namespace = "AMDIL";
+  let HWEncoding = encoding;
+}
+
+class SI_64 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [low, high];
+  let HWEncoding = encoding;
+}
+
+class SI_128 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sel_x, sel_y, sel_z, sel_w]; // NOTE(review): sel_* are not defined in this file (only low/high/sub0-7 above) — presumably from AMDGPURegisterInfo.td; confirm
+  let HWEncoding = encoding;
+}
+
+class SI_256 <string n, list<Register> subregs, bits<16> encoding> : RegisterWithSubRegs<n, subregs> {
+  let Namespace = "AMDIL";
+  let SubRegIndices = [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7];
+  let HWEncoding = encoding;
+}
+
+class SGPR_32 <bits<16> num, string name> : SIReg<name, num>;
+
+class VGPR_32 <bits<16> num, string name> : SIReg<name, num>;
+
+class SGPR_64 <bits<16> num, string name, list<Register> subregs> :
+    SI_64 <name, subregs, num>;
+
+class VGPR_64 <bits<16> num, string name, list<Register> subregs> :
+    SI_64 <name, subregs, num>;
+
+class SGPR_128 <bits<16> num, string name, list<Register> subregs> :
+    SI_128 <name, subregs, num>;
+
+class VGPR_128 <bits<16> num, string name, list<Register> subregs> :
+    SI_128 <name, subregs, num>;
+
+class SGPR_256 <bits<16> num, string name, list<Register> subregs> :
+    SI_256 <name, subregs, num>;
+
+def VCC : SIReg<"VCC">;
+def SCC : SIReg<"SCC">;
+def SREG_LIT_0 : SIReg <"S LIT 0", 128>;
+
+def M0 : SIReg <"M0", 124>;
+
+// Interpolation registers
+
+def PERSP_SAMPLE_I : SIReg <"PERSP_SAMPLE_I">;
+def PERSP_SAMPLE_J : SIReg <"PERSP_SAMPLE_J">;
+def PERSP_CENTER_I : SIReg <"PERSP_CENTER_I">;
+def PERSP_CENTER_J : SIReg <"PERSP_CENTER_J">;
+def PERSP_CENTROID_I : SIReg <"PERSP_CENTROID_I">;
+def PERSP_CENTROID_J : SIReg <"PERSP_CENTROID_J">; // Fixed asm-name typo: was "PERP_CENTROID_J", inconsistent with the def name and PERSP_CENTROID_I.
+def PERSP_I_W : SIReg <"PERSP_I_W">;
+def PERSP_J_W : SIReg <"PERSP_J_W">;
+def PERSP_1_W : SIReg <"PERSP_1_W">;
+def LINEAR_SAMPLE_I : SIReg <"LINEAR_SAMPLE_I">;
+def LINEAR_SAMPLE_J : SIReg <"LINEAR_SAMPLE_J">;
+def LINEAR_CENTER_I : SIReg <"LINEAR_CENTER_I">;
+def LINEAR_CENTER_J : SIReg <"LINEAR_CENTER_J">;
+def LINEAR_CENTROID_I : SIReg <"LINEAR_CENTROID_I">;
+def LINEAR_CENTROID_J : SIReg <"LINEAR_CENTROID_J">;
+def LINE_STIPPLE_TEX_COORD : SIReg <"LINE_STIPPLE_TEX_COORD">;
+def POS_X_FLOAT : SIReg <"POS_X_FLOAT">;
+def POS_Y_FLOAT : SIReg <"POS_Y_FLOAT">;
+def POS_Z_FLOAT : SIReg <"POS_Z_FLOAT">;
+def POS_W_FLOAT : SIReg <"POS_W_FLOAT">;
+def FRONT_FACE : SIReg <"FRONT_FACE">;
+def ANCILLARY : SIReg <"ANCILLARY">;
+def SAMPLE_COVERAGE : SIReg <"SAMPLE_COVERAGE">;
+def POS_FIXED_PT : SIReg <"POS_FIXED_PT">;
+
+def SGPR0 : SGPR_32 <0, "SGPR0">;
+def SGPR1 : SGPR_32 <1, "SGPR1">;
+def SGPR2 : SGPR_32 <2, "SGPR2">;
+def SGPR3 : SGPR_32 <3, "SGPR3">;
+def SGPR4 : SGPR_32 <4, "SGPR4">;
+def SGPR5 : SGPR_32 <5, "SGPR5">;
+def SGPR6 : SGPR_32 <6, "SGPR6">;
+def SGPR7 : SGPR_32 <7, "SGPR7">;
+def SGPR8 : SGPR_32 <8, "SGPR8">;
+def SGPR9 : SGPR_32 <9, "SGPR9">;
+def SGPR10 : SGPR_32 <10, "SGPR10">;
+def SGPR11 : SGPR_32 <11, "SGPR11">;
+def SGPR12 : SGPR_32 <12, "SGPR12">;
+def SGPR13 : SGPR_32 <13, "SGPR13">;
+def SGPR14 : SGPR_32 <14, "SGPR14">;
+def SGPR15 : SGPR_32 <15, "SGPR15">;
+def SGPR16 : SGPR_32 <16, "SGPR16">;
+def SGPR17 : SGPR_32 <17, "SGPR17">;
+def SGPR18 : SGPR_32 <18, "SGPR18">;
+def SGPR19 : SGPR_32 <19, "SGPR19">;
+def SGPR20 : SGPR_32 <20, "SGPR20">;
+def SGPR21 : SGPR_32 <21, "SGPR21">;
+def SGPR22 : SGPR_32 <22, "SGPR22">;
+def SGPR23 : SGPR_32 <23, "SGPR23">;
+def SGPR24 : SGPR_32 <24, "SGPR24">;
+def SGPR25 : SGPR_32 <25, "SGPR25">;
+def SGPR26 : SGPR_32 <26, "SGPR26">;
+def SGPR27 : SGPR_32 <27, "SGPR27">;
+def SGPR28 : SGPR_32 <28, "SGPR28">;
+def SGPR29 : SGPR_32 <29, "SGPR29">;
+def SGPR30 : SGPR_32 <30, "SGPR30">;
+def SGPR31 : SGPR_32 <31, "SGPR31">;
+def SGPR32 : SGPR_32 <32, "SGPR32">;
+def SGPR33 : SGPR_32 <33, "SGPR33">;
+def SGPR34 : SGPR_32 <34, "SGPR34">;
+def SGPR35 : SGPR_32 <35, "SGPR35">;
+def SGPR36 : SGPR_32 <36, "SGPR36">;
+def SGPR37 : SGPR_32 <37, "SGPR37">;
+def SGPR38 : SGPR_32 <38, "SGPR38">;
+def SGPR39 : SGPR_32 <39, "SGPR39">;
+def SGPR40 : SGPR_32 <40, "SGPR40">;
+def SGPR41 : SGPR_32 <41, "SGPR41">;
+def SGPR42 : SGPR_32 <42, "SGPR42">;
+def SGPR43 : SGPR_32 <43, "SGPR43">;
+def SGPR44 : SGPR_32 <44, "SGPR44">;
+def SGPR45 : SGPR_32 <45, "SGPR45">;
+def SGPR46 : SGPR_32 <46, "SGPR46">;
+def SGPR47 : SGPR_32 <47, "SGPR47">;
+def SGPR48 : SGPR_32 <48, "SGPR48">;
+def SGPR49 : SGPR_32 <49, "SGPR49">;
+def SGPR50 : SGPR_32 <50, "SGPR50">;
+def SGPR51 : SGPR_32 <51, "SGPR51">;
+def SGPR52 : SGPR_32 <52, "SGPR52">;
+def SGPR53 : SGPR_32 <53, "SGPR53">;
+def SGPR54 : SGPR_32 <54, "SGPR54">;
+def SGPR55 : SGPR_32 <55, "SGPR55">;
+def SGPR56 : SGPR_32 <56, "SGPR56">;
+def SGPR57 : SGPR_32 <57, "SGPR57">;
+def SGPR58 : SGPR_32 <58, "SGPR58">;
+def SGPR59 : SGPR_32 <59, "SGPR59">;
+def SGPR60 : SGPR_32 <60, "SGPR60">;
+def SGPR61 : SGPR_32 <61, "SGPR61">;
+def SGPR62 : SGPR_32 <62, "SGPR62">;
+def SGPR63 : SGPR_32 <63, "SGPR63">;
+def SGPR64 : SGPR_32 <64, "SGPR64">;
+def SGPR65 : SGPR_32 <65, "SGPR65">;
+def SGPR66 : SGPR_32 <66, "SGPR66">;
+def SGPR67 : SGPR_32 <67, "SGPR67">;
+def SGPR68 : SGPR_32 <68, "SGPR68">;
+def SGPR69 : SGPR_32 <69, "SGPR69">;
+def SGPR70 : SGPR_32 <70, "SGPR70">;
+def SGPR71 : SGPR_32 <71, "SGPR71">;
+def SGPR72 : SGPR_32 <72, "SGPR72">;
+def SGPR73 : SGPR_32 <73, "SGPR73">;
+def SGPR74 : SGPR_32 <74, "SGPR74">;
+def SGPR75 : SGPR_32 <75, "SGPR75">;
+def SGPR76 : SGPR_32 <76, "SGPR76">;
+def SGPR77 : SGPR_32 <77, "SGPR77">;
+def SGPR78 : SGPR_32 <78, "SGPR78">;
+def SGPR79 : SGPR_32 <79, "SGPR79">;
+def SGPR80 : SGPR_32 <80, "SGPR80">;
+def SGPR81 : SGPR_32 <81, "SGPR81">;
+def SGPR82 : SGPR_32 <82, "SGPR82">;
+def SGPR83 : SGPR_32 <83, "SGPR83">;
+def SGPR84 : SGPR_32 <84, "SGPR84">;
+def SGPR85 : SGPR_32 <85, "SGPR85">;
+def SGPR86 : SGPR_32 <86, "SGPR86">;
+def SGPR87 : SGPR_32 <87, "SGPR87">;
+def SGPR88 : SGPR_32 <88, "SGPR88">;
+def SGPR89 : SGPR_32 <89, "SGPR89">;
+def SGPR90 : SGPR_32 <90, "SGPR90">;
+def SGPR91 : SGPR_32 <91, "SGPR91">;
+def SGPR92 : SGPR_32 <92, "SGPR92">;
+def SGPR93 : SGPR_32 <93, "SGPR93">;
+def SGPR94 : SGPR_32 <94, "SGPR94">;
+def SGPR95 : SGPR_32 <95, "SGPR95">;
+def SGPR96 : SGPR_32 <96, "SGPR96">;
+def SGPR97 : SGPR_32 <97, "SGPR97">;
+def SGPR98 : SGPR_32 <98, "SGPR98">;
+def SGPR99 : SGPR_32 <99, "SGPR99">;
+def SGPR100 : SGPR_32 <100, "SGPR100">;
+def SGPR101 : SGPR_32 <101, "SGPR101">;
+def SGPR102 : SGPR_32 <102, "SGPR102">;
+def SGPR103 : SGPR_32 <103, "SGPR103">;
+def VGPR0 : VGPR_32 <0, "VGPR0">;
+def VGPR1 : VGPR_32 <1, "VGPR1">;
+def VGPR2 : VGPR_32 <2, "VGPR2">;
+def VGPR3 : VGPR_32 <3, "VGPR3">;
+def VGPR4 : VGPR_32 <4, "VGPR4">;
+def VGPR5 : VGPR_32 <5, "VGPR5">;
+def VGPR6 : VGPR_32 <6, "VGPR6">;
+def VGPR7 : VGPR_32 <7, "VGPR7">;
+def VGPR8 : VGPR_32 <8, "VGPR8">;
+def VGPR9 : VGPR_32 <9, "VGPR9">;
+def VGPR10 : VGPR_32 <10, "VGPR10">;
+def VGPR11 : VGPR_32 <11, "VGPR11">;
+def VGPR12 : VGPR_32 <12, "VGPR12">;
+def VGPR13 : VGPR_32 <13, "VGPR13">;
+def VGPR14 : VGPR_32 <14, "VGPR14">;
+def VGPR15 : VGPR_32 <15, "VGPR15">;
+def VGPR16 : VGPR_32 <16, "VGPR16">;
+def VGPR17 : VGPR_32 <17, "VGPR17">;
+def VGPR18 : VGPR_32 <18, "VGPR18">;
+def VGPR19 : VGPR_32 <19, "VGPR19">;
+def VGPR20 : VGPR_32 <20, "VGPR20">;
+def VGPR21 : VGPR_32 <21, "VGPR21">;
+def VGPR22 : VGPR_32 <22, "VGPR22">;
+def VGPR23 : VGPR_32 <23, "VGPR23">;
+def VGPR24 : VGPR_32 <24, "VGPR24">;
+def VGPR25 : VGPR_32 <25, "VGPR25">;
+def VGPR26 : VGPR_32 <26, "VGPR26">;
+def VGPR27 : VGPR_32 <27, "VGPR27">;
+def VGPR28 : VGPR_32 <28, "VGPR28">;
+def VGPR29 : VGPR_32 <29, "VGPR29">;
+def VGPR30 : VGPR_32 <30, "VGPR30">;
+def VGPR31 : VGPR_32 <31, "VGPR31">;
+def VGPR32 : VGPR_32 <32, "VGPR32">;
+def VGPR33 : VGPR_32 <33, "VGPR33">;
+def VGPR34 : VGPR_32 <34, "VGPR34">;
+def VGPR35 : VGPR_32 <35, "VGPR35">;
+def VGPR36 : VGPR_32 <36, "VGPR36">;
+def VGPR37 : VGPR_32 <37, "VGPR37">;
+def VGPR38 : VGPR_32 <38, "VGPR38">;
+def VGPR39 : VGPR_32 <39, "VGPR39">;
+def VGPR40 : VGPR_32 <40, "VGPR40">;
+def VGPR41 : VGPR_32 <41, "VGPR41">;
+def VGPR42 : VGPR_32 <42, "VGPR42">;
+def VGPR43 : VGPR_32 <43, "VGPR43">;
+def VGPR44 : VGPR_32 <44, "VGPR44">;
+def VGPR45 : VGPR_32 <45, "VGPR45">;
+def VGPR46 : VGPR_32 <46, "VGPR46">;
+def VGPR47 : VGPR_32 <47, "VGPR47">;
+def VGPR48 : VGPR_32 <48, "VGPR48">;
+def VGPR49 : VGPR_32 <49, "VGPR49">;
+def VGPR50 : VGPR_32 <50, "VGPR50">;
+def VGPR51 : VGPR_32 <51, "VGPR51">;
+def VGPR52 : VGPR_32 <52, "VGPR52">;
+def VGPR53 : VGPR_32 <53, "VGPR53">;
+def VGPR54 : VGPR_32 <54, "VGPR54">;
+def VGPR55 : VGPR_32 <55, "VGPR55">;
+def VGPR56 : VGPR_32 <56, "VGPR56">;
+def VGPR57 : VGPR_32 <57, "VGPR57">;
+def VGPR58 : VGPR_32 <58, "VGPR58">;
+def VGPR59 : VGPR_32 <59, "VGPR59">;
+def VGPR60 : VGPR_32 <60, "VGPR60">;
+def VGPR61 : VGPR_32 <61, "VGPR61">;
+def VGPR62 : VGPR_32 <62, "VGPR62">;
+def VGPR63 : VGPR_32 <63, "VGPR63">;
+def VGPR64 : VGPR_32 <64, "VGPR64">;
+def VGPR65 : VGPR_32 <65, "VGPR65">;
+def VGPR66 : VGPR_32 <66, "VGPR66">;
+def VGPR67 : VGPR_32 <67, "VGPR67">;
+def VGPR68 : VGPR_32 <68, "VGPR68">;
+def VGPR69 : VGPR_32 <69, "VGPR69">;
+def VGPR70 : VGPR_32 <70, "VGPR70">;
+def VGPR71 : VGPR_32 <71, "VGPR71">;
+def VGPR72 : VGPR_32 <72, "VGPR72">;
+def VGPR73 : VGPR_32 <73, "VGPR73">;
+def VGPR74 : VGPR_32 <74, "VGPR74">;
+def VGPR75 : VGPR_32 <75, "VGPR75">;
+def VGPR76 : VGPR_32 <76, "VGPR76">;
+def VGPR77 : VGPR_32 <77, "VGPR77">;
+def VGPR78 : VGPR_32 <78, "VGPR78">;
+def VGPR79 : VGPR_32 <79, "VGPR79">;
+def VGPR80 : VGPR_32 <80, "VGPR80">;
+def VGPR81 : VGPR_32 <81, "VGPR81">;
+def VGPR82 : VGPR_32 <82, "VGPR82">;
+def VGPR83 : VGPR_32 <83, "VGPR83">;
+def VGPR84 : VGPR_32 <84, "VGPR84">;
+def VGPR85 : VGPR_32 <85, "VGPR85">;
+def VGPR86 : VGPR_32 <86, "VGPR86">;
+def VGPR87 : VGPR_32 <87, "VGPR87">;
+def VGPR88 : VGPR_32 <88, "VGPR88">;
+def VGPR89 : VGPR_32 <89, "VGPR89">;
+def VGPR90 : VGPR_32 <90, "VGPR90">;
+def VGPR91 : VGPR_32 <91, "VGPR91">;
+def VGPR92 : VGPR_32 <92, "VGPR92">;
+def VGPR93 : VGPR_32 <93, "VGPR93">;
+def VGPR94 : VGPR_32 <94, "VGPR94">;
+def VGPR95 : VGPR_32 <95, "VGPR95">;
+def VGPR96 : VGPR_32 <96, "VGPR96">;
+def VGPR97 : VGPR_32 <97, "VGPR97">;
+def VGPR98 : VGPR_32 <98, "VGPR98">;
+def VGPR99 : VGPR_32 <99, "VGPR99">;
+def VGPR100 : VGPR_32 <100, "VGPR100">;
+def VGPR101 : VGPR_32 <101, "VGPR101">;
+def VGPR102 : VGPR_32 <102, "VGPR102">;
+def VGPR103 : VGPR_32 <103, "VGPR103">;
+def VGPR104 : VGPR_32 <104, "VGPR104">;
+def VGPR105 : VGPR_32 <105, "VGPR105">;
+def VGPR106 : VGPR_32 <106, "VGPR106">;
+def VGPR107 : VGPR_32 <107, "VGPR107">;
+def VGPR108 : VGPR_32 <108, "VGPR108">;
+def VGPR109 : VGPR_32 <109, "VGPR109">;
+def VGPR110 : VGPR_32 <110, "VGPR110">;
+def VGPR111 : VGPR_32 <111, "VGPR111">;
+def VGPR112 : VGPR_32 <112, "VGPR112">;
+def VGPR113 : VGPR_32 <113, "VGPR113">;
+def VGPR114 : VGPR_32 <114, "VGPR114">;
+def VGPR115 : VGPR_32 <115, "VGPR115">;
+def VGPR116 : VGPR_32 <116, "VGPR116">;
+def VGPR117 : VGPR_32 <117, "VGPR117">;
+def VGPR118 : VGPR_32 <118, "VGPR118">;
+def VGPR119 : VGPR_32 <119, "VGPR119">;
+def VGPR120 : VGPR_32 <120, "VGPR120">;
+def VGPR121 : VGPR_32 <121, "VGPR121">;
+def VGPR122 : VGPR_32 <122, "VGPR122">;
+def VGPR123 : VGPR_32 <123, "VGPR123">;
+def VGPR124 : VGPR_32 <124, "VGPR124">;
+def VGPR125 : VGPR_32 <125, "VGPR125">;
+def VGPR126 : VGPR_32 <126, "VGPR126">;
+def VGPR127 : VGPR_32 <127, "VGPR127">;
+def VGPR128 : VGPR_32 <128, "VGPR128">;
+def VGPR129 : VGPR_32 <129, "VGPR129">;
+def VGPR130 : VGPR_32 <130, "VGPR130">;
+def VGPR131 : VGPR_32 <131, "VGPR131">;
+def VGPR132 : VGPR_32 <132, "VGPR132">;
+def VGPR133 : VGPR_32 <133, "VGPR133">;
+def VGPR134 : VGPR_32 <134, "VGPR134">;
+def VGPR135 : VGPR_32 <135, "VGPR135">;
+def VGPR136 : VGPR_32 <136, "VGPR136">;
+def VGPR137 : VGPR_32 <137, "VGPR137">;
+def VGPR138 : VGPR_32 <138, "VGPR138">;
+def VGPR139 : VGPR_32 <139, "VGPR139">;
+def VGPR140 : VGPR_32 <140, "VGPR140">;
+def VGPR141 : VGPR_32 <141, "VGPR141">;
+def VGPR142 : VGPR_32 <142, "VGPR142">;
+def VGPR143 : VGPR_32 <143, "VGPR143">;
+def VGPR144 : VGPR_32 <144, "VGPR144">;
+def VGPR145 : VGPR_32 <145, "VGPR145">;
+def VGPR146 : VGPR_32 <146, "VGPR146">;
+def VGPR147 : VGPR_32 <147, "VGPR147">;
+def VGPR148 : VGPR_32 <148, "VGPR148">;
+def VGPR149 : VGPR_32 <149, "VGPR149">;
+def VGPR150 : VGPR_32 <150, "VGPR150">;
+def VGPR151 : VGPR_32 <151, "VGPR151">;
+def VGPR152 : VGPR_32 <152, "VGPR152">;
+def VGPR153 : VGPR_32 <153, "VGPR153">;
+def VGPR154 : VGPR_32 <154, "VGPR154">;
+def VGPR155 : VGPR_32 <155, "VGPR155">;
+def VGPR156 : VGPR_32 <156, "VGPR156">;
+def VGPR157 : VGPR_32 <157, "VGPR157">;
+def VGPR158 : VGPR_32 <158, "VGPR158">;
+def VGPR159 : VGPR_32 <159, "VGPR159">;
+def VGPR160 : VGPR_32 <160, "VGPR160">;
+def VGPR161 : VGPR_32 <161, "VGPR161">;
+def VGPR162 : VGPR_32 <162, "VGPR162">;
+def VGPR163 : VGPR_32 <163, "VGPR163">;
+def VGPR164 : VGPR_32 <164, "VGPR164">;
+def VGPR165 : VGPR_32 <165, "VGPR165">;
+def VGPR166 : VGPR_32 <166, "VGPR166">;
+def VGPR167 : VGPR_32 <167, "VGPR167">;
+def VGPR168 : VGPR_32 <168, "VGPR168">;
+def VGPR169 : VGPR_32 <169, "VGPR169">;
+def VGPR170 : VGPR_32 <170, "VGPR170">;
+def VGPR171 : VGPR_32 <171, "VGPR171">;
+def VGPR172 : VGPR_32 <172, "VGPR172">;
+def VGPR173 : VGPR_32 <173, "VGPR173">;
+def VGPR174 : VGPR_32 <174, "VGPR174">;
+def VGPR175 : VGPR_32 <175, "VGPR175">;
+def VGPR176 : VGPR_32 <176, "VGPR176">;
+def VGPR177 : VGPR_32 <177, "VGPR177">;
+def VGPR178 : VGPR_32 <178, "VGPR178">;
+def VGPR179 : VGPR_32 <179, "VGPR179">;
+def VGPR180 : VGPR_32 <180, "VGPR180">;
+def VGPR181 : VGPR_32 <181, "VGPR181">;
+def VGPR182 : VGPR_32 <182, "VGPR182">;
+def VGPR183 : VGPR_32 <183, "VGPR183">;
+def VGPR184 : VGPR_32 <184, "VGPR184">;
+def VGPR185 : VGPR_32 <185, "VGPR185">;
+def VGPR186 : VGPR_32 <186, "VGPR186">;
+def VGPR187 : VGPR_32 <187, "VGPR187">;
+def VGPR188 : VGPR_32 <188, "VGPR188">;
+def VGPR189 : VGPR_32 <189, "VGPR189">;
+def VGPR190 : VGPR_32 <190, "VGPR190">;
+def VGPR191 : VGPR_32 <191, "VGPR191">;
+def VGPR192 : VGPR_32 <192, "VGPR192">;
+def VGPR193 : VGPR_32 <193, "VGPR193">;
+def VGPR194 : VGPR_32 <194, "VGPR194">;
+def VGPR195 : VGPR_32 <195, "VGPR195">;
+def VGPR196 : VGPR_32 <196, "VGPR196">;
+def VGPR197 : VGPR_32 <197, "VGPR197">;
+def VGPR198 : VGPR_32 <198, "VGPR198">;
+def VGPR199 : VGPR_32 <199, "VGPR199">;
+def VGPR200 : VGPR_32 <200, "VGPR200">;
+def VGPR201 : VGPR_32 <201, "VGPR201">;
+def VGPR202 : VGPR_32 <202, "VGPR202">;
+def VGPR203 : VGPR_32 <203, "VGPR203">;
+def VGPR204 : VGPR_32 <204, "VGPR204">;
+def VGPR205 : VGPR_32 <205, "VGPR205">;
+def VGPR206 : VGPR_32 <206, "VGPR206">;
+def VGPR207 : VGPR_32 <207, "VGPR207">;
+def VGPR208 : VGPR_32 <208, "VGPR208">;
+def VGPR209 : VGPR_32 <209, "VGPR209">;
+def VGPR210 : VGPR_32 <210, "VGPR210">;
+def VGPR211 : VGPR_32 <211, "VGPR211">;
+def VGPR212 : VGPR_32 <212, "VGPR212">;
+def VGPR213 : VGPR_32 <213, "VGPR213">;
+def VGPR214 : VGPR_32 <214, "VGPR214">;
+def VGPR215 : VGPR_32 <215, "VGPR215">;
+def VGPR216 : VGPR_32 <216, "VGPR216">;
+def VGPR217 : VGPR_32 <217, "VGPR217">;
+def VGPR218 : VGPR_32 <218, "VGPR218">;
+def VGPR219 : VGPR_32 <219, "VGPR219">;
+def VGPR220 : VGPR_32 <220, "VGPR220">;
+def VGPR221 : VGPR_32 <221, "VGPR221">;
+def VGPR222 : VGPR_32 <222, "VGPR222">;
+def VGPR223 : VGPR_32 <223, "VGPR223">;
+def VGPR224 : VGPR_32 <224, "VGPR224">;
+def VGPR225 : VGPR_32 <225, "VGPR225">;
+def VGPR226 : VGPR_32 <226, "VGPR226">;
+def VGPR227 : VGPR_32 <227, "VGPR227">;
+def VGPR228 : VGPR_32 <228, "VGPR228">;
+def VGPR229 : VGPR_32 <229, "VGPR229">;
+def VGPR230 : VGPR_32 <230, "VGPR230">;
+def VGPR231 : VGPR_32 <231, "VGPR231">;
+def VGPR232 : VGPR_32 <232, "VGPR232">;
+def VGPR233 : VGPR_32 <233, "VGPR233">;
+def VGPR234 : VGPR_32 <234, "VGPR234">;
+def VGPR235 : VGPR_32 <235, "VGPR235">;
+def VGPR236 : VGPR_32 <236, "VGPR236">;
+def VGPR237 : VGPR_32 <237, "VGPR237">;
+def VGPR238 : VGPR_32 <238, "VGPR238">;
+def VGPR239 : VGPR_32 <239, "VGPR239">;
+def VGPR240 : VGPR_32 <240, "VGPR240">;
+def VGPR241 : VGPR_32 <241, "VGPR241">;
+def VGPR242 : VGPR_32 <242, "VGPR242">;
+def VGPR243 : VGPR_32 <243, "VGPR243">;
+def VGPR244 : VGPR_32 <244, "VGPR244">;
+def VGPR245 : VGPR_32 <245, "VGPR245">;
+def VGPR246 : VGPR_32 <246, "VGPR246">;
+def VGPR247 : VGPR_32 <247, "VGPR247">;
+def VGPR248 : VGPR_32 <248, "VGPR248">;
+def VGPR249 : VGPR_32 <249, "VGPR249">;
+def VGPR250 : VGPR_32 <250, "VGPR250">;
+def VGPR251 : VGPR_32 <251, "VGPR251">;
+def VGPR252 : VGPR_32 <252, "VGPR252">;
+def VGPR253 : VGPR_32 <253, "VGPR253">;
+def VGPR254 : VGPR_32 <254, "VGPR254">;
+def VGPR255 : VGPR_32 <255, "VGPR255">;
+
+def SReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add (sequence "SGPR%u", 0, 103),  SREG_LIT_0, M0)
+>;
+
+def VReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add (sequence "VGPR%u", 0, 255),
+    PERSP_SAMPLE_I, PERSP_SAMPLE_J,
+    PERSP_CENTER_I, PERSP_CENTER_J,
+    PERSP_CENTROID_I, PERSP_CENTROID_J,
+    PERSP_I_W, PERSP_J_W, PERSP_1_W,
+    LINEAR_SAMPLE_I, LINEAR_SAMPLE_J,
+    LINEAR_CENTER_I, LINEAR_CENTER_J,
+    LINEAR_CENTROID_I, LINEAR_CENTROID_J,
+    LINE_STIPPLE_TEX_COORD,
+    POS_X_FLOAT,
+    POS_Y_FLOAT,
+    POS_Z_FLOAT,
+    POS_W_FLOAT,
+    FRONT_FACE,
+    ANCILLARY,
+    SAMPLE_COVERAGE,
+    POS_FIXED_PT
+    )
+>;
+
+// Union class: any 32-bit register, vector or scalar (trailing whitespace removed).
+def AllReg_32 : RegisterClass<"AMDIL", [f32, i32], 32,
+    (add VReg_32, SReg_32)
+>;
+
+def CCReg : RegisterClass<"AMDIL", [f32], 32, (add VCC, SCC)>;
+
+def SGPR0_64 : SGPR_64 <0, "SGPR0_64", [ SGPR0,SGPR1]>;
+def SGPR2_64 : SGPR_64 <2, "SGPR2_64", [ SGPR2,SGPR3]>;
+def SGPR4_64 : SGPR_64 <4, "SGPR4_64", [ SGPR4,SGPR5]>;
+def SGPR6_64 : SGPR_64 <6, "SGPR6_64", [ SGPR6,SGPR7]>;
+def SGPR8_64 : SGPR_64 <8, "SGPR8_64", [ SGPR8,SGPR9]>;
+def SGPR10_64 : SGPR_64 <10, "SGPR10_64", [ SGPR10,SGPR11]>;
+def SGPR12_64 : SGPR_64 <12, "SGPR12_64", [ SGPR12,SGPR13]>;
+def SGPR14_64 : SGPR_64 <14, "SGPR14_64", [ SGPR14,SGPR15]>;
+def SGPR16_64 : SGPR_64 <16, "SGPR16_64", [ SGPR16,SGPR17]>;
+def SGPR18_64 : SGPR_64 <18, "SGPR18_64", [ SGPR18,SGPR19]>;
+def SGPR20_64 : SGPR_64 <20, "SGPR20_64", [ SGPR20,SGPR21]>;
+def SGPR22_64 : SGPR_64 <22, "SGPR22_64", [ SGPR22,SGPR23]>;
+def SGPR24_64 : SGPR_64 <24, "SGPR24_64", [ SGPR24,SGPR25]>;
+def SGPR26_64 : SGPR_64 <26, "SGPR26_64", [ SGPR26,SGPR27]>;
+def SGPR28_64 : SGPR_64 <28, "SGPR28_64", [ SGPR28,SGPR29]>;
+def SGPR30_64 : SGPR_64 <30, "SGPR30_64", [ SGPR30,SGPR31]>;
+def SGPR32_64 : SGPR_64 <32, "SGPR32_64", [ SGPR32,SGPR33]>;
+def SGPR34_64 : SGPR_64 <34, "SGPR34_64", [ SGPR34,SGPR35]>;
+def SGPR36_64 : SGPR_64 <36, "SGPR36_64", [ SGPR36,SGPR37]>;
+def SGPR38_64 : SGPR_64 <38, "SGPR38_64", [ SGPR38,SGPR39]>;
+def SGPR40_64 : SGPR_64 <40, "SGPR40_64", [ SGPR40,SGPR41]>;
+def SGPR42_64 : SGPR_64 <42, "SGPR42_64", [ SGPR42,SGPR43]>;
+def SGPR44_64 : SGPR_64 <44, "SGPR44_64", [ SGPR44,SGPR45]>;
+def SGPR46_64 : SGPR_64 <46, "SGPR46_64", [ SGPR46,SGPR47]>;
+def SGPR48_64 : SGPR_64 <48, "SGPR48_64", [ SGPR48,SGPR49]>;
+def SGPR50_64 : SGPR_64 <50, "SGPR50_64", [ SGPR50,SGPR51]>;
+def SGPR52_64 : SGPR_64 <52, "SGPR52_64", [ SGPR52,SGPR53]>;
+def SGPR54_64 : SGPR_64 <54, "SGPR54_64", [ SGPR54,SGPR55]>;
+def SGPR56_64 : SGPR_64 <56, "SGPR56_64", [ SGPR56,SGPR57]>;
+def SGPR58_64 : SGPR_64 <58, "SGPR58_64", [ SGPR58,SGPR59]>;
+def SGPR60_64 : SGPR_64 <60, "SGPR60_64", [ SGPR60,SGPR61]>;
+def SGPR62_64 : SGPR_64 <62, "SGPR62_64", [ SGPR62,SGPR63]>;
+def SGPR64_64 : SGPR_64 <64, "SGPR64_64", [ SGPR64,SGPR65]>;
+def SGPR66_64 : SGPR_64 <66, "SGPR66_64", [ SGPR66,SGPR67]>;
+def SGPR68_64 : SGPR_64 <68, "SGPR68_64", [ SGPR68,SGPR69]>;
+def SGPR70_64 : SGPR_64 <70, "SGPR70_64", [ SGPR70,SGPR71]>;
+def SGPR72_64 : SGPR_64 <72, "SGPR72_64", [ SGPR72,SGPR73]>;
+def SGPR74_64 : SGPR_64 <74, "SGPR74_64", [ SGPR74,SGPR75]>;
+def SGPR76_64 : SGPR_64 <76, "SGPR76_64", [ SGPR76,SGPR77]>;
+def SGPR78_64 : SGPR_64 <78, "SGPR78_64", [ SGPR78,SGPR79]>;
+def SGPR80_64 : SGPR_64 <80, "SGPR80_64", [ SGPR80,SGPR81]>;
+def SGPR82_64 : SGPR_64 <82, "SGPR82_64", [ SGPR82,SGPR83]>;
+def SGPR84_64 : SGPR_64 <84, "SGPR84_64", [ SGPR84,SGPR85]>;
+def SGPR86_64 : SGPR_64 <86, "SGPR86_64", [ SGPR86,SGPR87]>;
+def SGPR88_64 : SGPR_64 <88, "SGPR88_64", [ SGPR88,SGPR89]>;
+def SGPR90_64 : SGPR_64 <90, "SGPR90_64", [ SGPR90,SGPR91]>;
+def SGPR92_64 : SGPR_64 <92, "SGPR92_64", [ SGPR92,SGPR93]>;
+def SGPR94_64 : SGPR_64 <94, "SGPR94_64", [ SGPR94,SGPR95]>;
+def SGPR96_64 : SGPR_64 <96, "SGPR96_64", [ SGPR96,SGPR97]>;
+def SGPR98_64 : SGPR_64 <98, "SGPR98_64", [ SGPR98,SGPR99]>;
+def SGPR100_64 : SGPR_64 <100, "SGPR100_64", [ SGPR100,SGPR101]>;
+def SGPR102_64 : SGPR_64 <102, "SGPR102_64", [ SGPR102,SGPR103]>;
+def SReg_64 : RegisterClass<"AMDIL", [i64], 64,
+  (add SGPR0_64
+, SGPR2_64, SGPR4_64, SGPR6_64, SGPR8_64, SGPR10_64
+, SGPR12_64, SGPR14_64, SGPR16_64, SGPR18_64, SGPR20_64
+, SGPR22_64, SGPR24_64, SGPR26_64, SGPR28_64, SGPR30_64
+, SGPR32_64, SGPR34_64, SGPR36_64, SGPR38_64, SGPR40_64
+, SGPR42_64, SGPR44_64, SGPR46_64, SGPR48_64, SGPR50_64
+, SGPR52_64, SGPR54_64, SGPR56_64, SGPR58_64, SGPR60_64
+, SGPR62_64, SGPR64_64, SGPR66_64, SGPR68_64, SGPR70_64
+, SGPR72_64, SGPR74_64, SGPR76_64, SGPR78_64, SGPR80_64
+, SGPR82_64, SGPR84_64, SGPR86_64, SGPR88_64, SGPR90_64
+, SGPR92_64, SGPR94_64, SGPR96_64, SGPR98_64, SGPR100_64
+, SGPR102_64)
+>{
+}
+def SGPR0_128 : SGPR_128 <0, "SGPR0_128", [ SGPR0,SGPR1,SGPR2,SGPR3]>;
+def SGPR4_128 : SGPR_128 <4, "SGPR4_128", [ SGPR4,SGPR5,SGPR6,SGPR7]>;
+def SGPR8_128 : SGPR_128 <8, "SGPR8_128", [ SGPR8,SGPR9,SGPR10,SGPR11]>;
+def SGPR12_128 : SGPR_128 <12, "SGPR12_128", [ SGPR12,SGPR13,SGPR14,SGPR15]>;
+def SGPR16_128 : SGPR_128 <16, "SGPR16_128", [ SGPR16,SGPR17,SGPR18,SGPR19]>;
+def SGPR20_128 : SGPR_128 <20, "SGPR20_128", [ SGPR20,SGPR21,SGPR22,SGPR23]>;
+def SGPR24_128 : SGPR_128 <24, "SGPR24_128", [ SGPR24,SGPR25,SGPR26,SGPR27]>;
+def SGPR28_128 : SGPR_128 <28, "SGPR28_128", [ SGPR28,SGPR29,SGPR30,SGPR31]>;
+def SGPR32_128 : SGPR_128 <32, "SGPR32_128", [ SGPR32,SGPR33,SGPR34,SGPR35]>;
+def SGPR36_128 : SGPR_128 <36, "SGPR36_128", [ SGPR36,SGPR37,SGPR38,SGPR39]>;
+def SGPR40_128 : SGPR_128 <40, "SGPR40_128", [ SGPR40,SGPR41,SGPR42,SGPR43]>;
+def SGPR44_128 : SGPR_128 <44, "SGPR44_128", [ SGPR44,SGPR45,SGPR46,SGPR47]>;
+def SGPR48_128 : SGPR_128 <48, "SGPR48_128", [ SGPR48,SGPR49,SGPR50,SGPR51]>;
+def SGPR52_128 : SGPR_128 <52, "SGPR52_128", [ SGPR52,SGPR53,SGPR54,SGPR55]>;
+def SGPR56_128 : SGPR_128 <56, "SGPR56_128", [ SGPR56,SGPR57,SGPR58,SGPR59]>;
+def SGPR60_128 : SGPR_128 <60, "SGPR60_128", [ SGPR60,SGPR61,SGPR62,SGPR63]>;
+def SGPR64_128 : SGPR_128 <64, "SGPR64_128", [ SGPR64,SGPR65,SGPR66,SGPR67]>;
+def SGPR68_128 : SGPR_128 <68, "SGPR68_128", [ SGPR68,SGPR69,SGPR70,SGPR71]>;
+def SGPR72_128 : SGPR_128 <72, "SGPR72_128", [ SGPR72,SGPR73,SGPR74,SGPR75]>;
+def SGPR76_128 : SGPR_128 <76, "SGPR76_128", [ SGPR76,SGPR77,SGPR78,SGPR79]>;
+def SGPR80_128 : SGPR_128 <80, "SGPR80_128", [ SGPR80,SGPR81,SGPR82,SGPR83]>;
+def SGPR84_128 : SGPR_128 <84, "SGPR84_128", [ SGPR84,SGPR85,SGPR86,SGPR87]>;
+def SGPR88_128 : SGPR_128 <88, "SGPR88_128", [ SGPR88,SGPR89,SGPR90,SGPR91]>;
+def SGPR92_128 : SGPR_128 <92, "SGPR92_128", [ SGPR92,SGPR93,SGPR94,SGPR95]>;
+def SGPR96_128 : SGPR_128 <96, "SGPR96_128", [ SGPR96,SGPR97,SGPR98,SGPR99]>;
+def SGPR100_128 : SGPR_128 <100, "SGPR100_128", [ SGPR100,SGPR101,SGPR102,SGPR103]>;
+def SReg_128 : RegisterClass<"AMDIL", [v4f32, v4i32], 128,
+  (add SGPR0_128
+, SGPR4_128, SGPR8_128, SGPR12_128, SGPR16_128, SGPR20_128
+, SGPR24_128, SGPR28_128, SGPR32_128, SGPR36_128, SGPR40_128
+, SGPR44_128, SGPR48_128, SGPR52_128, SGPR56_128, SGPR60_128
+, SGPR64_128, SGPR68_128, SGPR72_128, SGPR76_128, SGPR80_128
+, SGPR84_128, SGPR88_128, SGPR92_128, SGPR96_128, SGPR100_128
+)
+>{
+}
+def SGPR0_256 : SGPR_256 <0, "SGPR0_256", [ SGPR0,SGPR1,SGPR2,SGPR3,SGPR4,SGPR5,SGPR6,SGPR7]>;
+def SGPR8_256 : SGPR_256 <8, "SGPR8_256", [ SGPR8,SGPR9,SGPR10,SGPR11,SGPR12,SGPR13,SGPR14,SGPR15]>;
+def SGPR16_256 : SGPR_256 <16, "SGPR16_256", [ SGPR16,SGPR17,SGPR18,SGPR19,SGPR20,SGPR21,SGPR22,SGPR23]>;
+def SGPR24_256 : SGPR_256 <24, "SGPR24_256", [ SGPR24,SGPR25,SGPR26,SGPR27,SGPR28,SGPR29,SGPR30,SGPR31]>;
+def SGPR32_256 : SGPR_256 <32, "SGPR32_256", [ SGPR32,SGPR33,SGPR34,SGPR35,SGPR36,SGPR37,SGPR38,SGPR39]>;
+def SGPR40_256 : SGPR_256 <40, "SGPR40_256", [ SGPR40,SGPR41,SGPR42,SGPR43,SGPR44,SGPR45,SGPR46,SGPR47]>;
+def SGPR48_256 : SGPR_256 <48, "SGPR48_256", [ SGPR48,SGPR49,SGPR50,SGPR51,SGPR52,SGPR53,SGPR54,SGPR55]>;
+def SGPR56_256 : SGPR_256 <56, "SGPR56_256", [ SGPR56,SGPR57,SGPR58,SGPR59,SGPR60,SGPR61,SGPR62,SGPR63]>;
+def SGPR64_256 : SGPR_256 <64, "SGPR64_256", [ SGPR64,SGPR65,SGPR66,SGPR67,SGPR68,SGPR69,SGPR70,SGPR71]>;
+def SGPR72_256 : SGPR_256 <72, "SGPR72_256", [ SGPR72,SGPR73,SGPR74,SGPR75,SGPR76,SGPR77,SGPR78,SGPR79]>;
+def SGPR80_256 : SGPR_256 <80, "SGPR80_256", [ SGPR80,SGPR81,SGPR82,SGPR83,SGPR84,SGPR85,SGPR86,SGPR87]>;
+def SGPR88_256 : SGPR_256 <88, "SGPR88_256", [ SGPR88,SGPR89,SGPR90,SGPR91,SGPR92,SGPR93,SGPR94,SGPR95]>;
+def SGPR96_256 : SGPR_256 <96, "SGPR96_256", [ SGPR96,SGPR97,SGPR98,SGPR99,SGPR100,SGPR101,SGPR102,SGPR103]>;
+def SReg_256 : RegisterClass<"AMDIL", [v8i32], 256,
+  (add SGPR0_256
+, SGPR8_256, SGPR16_256, SGPR24_256, SGPR32_256, SGPR40_256
+, SGPR48_256, SGPR56_256, SGPR64_256, SGPR72_256, SGPR80_256
+, SGPR88_256, SGPR96_256)
+>{
+}
+def VGPR0_64 : VGPR_64 <0, "VGPR0_64", [ VGPR0,VGPR1]>;
+def VGPR2_64 : VGPR_64 <2, "VGPR2_64", [ VGPR2,VGPR3]>;
+def VGPR4_64 : VGPR_64 <4, "VGPR4_64", [ VGPR4,VGPR5]>;
+def VGPR6_64 : VGPR_64 <6, "VGPR6_64", [ VGPR6,VGPR7]>;
+def VGPR8_64 : VGPR_64 <8, "VGPR8_64", [ VGPR8,VGPR9]>;
+def VGPR10_64 : VGPR_64 <10, "VGPR10_64", [ VGPR10,VGPR11]>;
+def VGPR12_64 : VGPR_64 <12, "VGPR12_64", [ VGPR12,VGPR13]>;
+def VGPR14_64 : VGPR_64 <14, "VGPR14_64", [ VGPR14,VGPR15]>;
+def VGPR16_64 : VGPR_64 <16, "VGPR16_64", [ VGPR16,VGPR17]>;
+def VGPR18_64 : VGPR_64 <18, "VGPR18_64", [ VGPR18,VGPR19]>;
+def VGPR20_64 : VGPR_64 <20, "VGPR20_64", [ VGPR20,VGPR21]>;
+def VGPR22_64 : VGPR_64 <22, "VGPR22_64", [ VGPR22,VGPR23]>;
+def VGPR24_64 : VGPR_64 <24, "VGPR24_64", [ VGPR24,VGPR25]>;
+def VGPR26_64 : VGPR_64 <26, "VGPR26_64", [ VGPR26,VGPR27]>;
+def VGPR28_64 : VGPR_64 <28, "VGPR28_64", [ VGPR28,VGPR29]>;
+def VGPR30_64 : VGPR_64 <30, "VGPR30_64", [ VGPR30,VGPR31]>;
+def VGPR32_64 : VGPR_64 <32, "VGPR32_64", [ VGPR32,VGPR33]>;
+def VGPR34_64 : VGPR_64 <34, "VGPR34_64", [ VGPR34,VGPR35]>;
+def VGPR36_64 : VGPR_64 <36, "VGPR36_64", [ VGPR36,VGPR37]>;
+def VGPR38_64 : VGPR_64 <38, "VGPR38_64", [ VGPR38,VGPR39]>;
+def VGPR40_64 : VGPR_64 <40, "VGPR40_64", [ VGPR40,VGPR41]>;
+def VGPR42_64 : VGPR_64 <42, "VGPR42_64", [ VGPR42,VGPR43]>;
+def VGPR44_64 : VGPR_64 <44, "VGPR44_64", [ VGPR44,VGPR45]>;
+def VGPR46_64 : VGPR_64 <46, "VGPR46_64", [ VGPR46,VGPR47]>;
+def VGPR48_64 : VGPR_64 <48, "VGPR48_64", [ VGPR48,VGPR49]>;
+def VGPR50_64 : VGPR_64 <50, "VGPR50_64", [ VGPR50,VGPR51]>;
+def VGPR52_64 : VGPR_64 <52, "VGPR52_64", [ VGPR52,VGPR53]>;
+def VGPR54_64 : VGPR_64 <54, "VGPR54_64", [ VGPR54,VGPR55]>;
+def VGPR56_64 : VGPR_64 <56, "VGPR56_64", [ VGPR56,VGPR57]>;
+def VGPR58_64 : VGPR_64 <58, "VGPR58_64", [ VGPR58,VGPR59]>;
+def VGPR60_64 : VGPR_64 <60, "VGPR60_64", [ VGPR60,VGPR61]>;
+def VGPR62_64 : VGPR_64 <62, "VGPR62_64", [ VGPR62,VGPR63]>;
+def VGPR64_64 : VGPR_64 <64, "VGPR64_64", [ VGPR64,VGPR65]>;
+def VGPR66_64 : VGPR_64 <66, "VGPR66_64", [ VGPR66,VGPR67]>;
+def VGPR68_64 : VGPR_64 <68, "VGPR68_64", [ VGPR68,VGPR69]>;
+def VGPR70_64 : VGPR_64 <70, "VGPR70_64", [ VGPR70,VGPR71]>;
+def VGPR72_64 : VGPR_64 <72, "VGPR72_64", [ VGPR72,VGPR73]>;
+def VGPR74_64 : VGPR_64 <74, "VGPR74_64", [ VGPR74,VGPR75]>;
+def VGPR76_64 : VGPR_64 <76, "VGPR76_64", [ VGPR76,VGPR77]>;
+def VGPR78_64 : VGPR_64 <78, "VGPR78_64", [ VGPR78,VGPR79]>;
+def VGPR80_64 : VGPR_64 <80, "VGPR80_64", [ VGPR80,VGPR81]>;
+def VGPR82_64 : VGPR_64 <82, "VGPR82_64", [ VGPR82,VGPR83]>;
+def VGPR84_64 : VGPR_64 <84, "VGPR84_64", [ VGPR84,VGPR85]>;
+def VGPR86_64 : VGPR_64 <86, "VGPR86_64", [ VGPR86,VGPR87]>;
+def VGPR88_64 : VGPR_64 <88, "VGPR88_64", [ VGPR88,VGPR89]>;
+def VGPR90_64 : VGPR_64 <90, "VGPR90_64", [ VGPR90,VGPR91]>;
+def VGPR92_64 : VGPR_64 <92, "VGPR92_64", [ VGPR92,VGPR93]>;
+def VGPR94_64 : VGPR_64 <94, "VGPR94_64", [ VGPR94,VGPR95]>;
+def VGPR96_64 : VGPR_64 <96, "VGPR96_64", [ VGPR96,VGPR97]>;
+def VGPR98_64 : VGPR_64 <98, "VGPR98_64", [ VGPR98,VGPR99]>;
+def VGPR100_64 : VGPR_64 <100, "VGPR100_64", [ VGPR100,VGPR101]>;
+def VGPR102_64 : VGPR_64 <102, "VGPR102_64", [ VGPR102,VGPR103]>;
+def VGPR104_64 : VGPR_64 <104, "VGPR104_64", [ VGPR104,VGPR105]>;
+def VGPR106_64 : VGPR_64 <106, "VGPR106_64", [ VGPR106,VGPR107]>;
+def VGPR108_64 : VGPR_64 <108, "VGPR108_64", [ VGPR108,VGPR109]>;
+def VGPR110_64 : VGPR_64 <110, "VGPR110_64", [ VGPR110,VGPR111]>;
+def VGPR112_64 : VGPR_64 <112, "VGPR112_64", [ VGPR112,VGPR113]>;
+def VGPR114_64 : VGPR_64 <114, "VGPR114_64", [ VGPR114,VGPR115]>;
+def VGPR116_64 : VGPR_64 <116, "VGPR116_64", [ VGPR116,VGPR117]>;
+def VGPR118_64 : VGPR_64 <118, "VGPR118_64", [ VGPR118,VGPR119]>;
+def VGPR120_64 : VGPR_64 <120, "VGPR120_64", [ VGPR120,VGPR121]>;
+def VGPR122_64 : VGPR_64 <122, "VGPR122_64", [ VGPR122,VGPR123]>;
+def VGPR124_64 : VGPR_64 <124, "VGPR124_64", [ VGPR124,VGPR125]>;
+def VGPR126_64 : VGPR_64 <126, "VGPR126_64", [ VGPR126,VGPR127]>;
+def VGPR128_64 : VGPR_64 <128, "VGPR128_64", [ VGPR128,VGPR129]>;
+def VGPR130_64 : VGPR_64 <130, "VGPR130_64", [ VGPR130,VGPR131]>;
+def VGPR132_64 : VGPR_64 <132, "VGPR132_64", [ VGPR132,VGPR133]>;
+def VGPR134_64 : VGPR_64 <134, "VGPR134_64", [ VGPR134,VGPR135]>;
+def VGPR136_64 : VGPR_64 <136, "VGPR136_64", [ VGPR136,VGPR137]>;
+def VGPR138_64 : VGPR_64 <138, "VGPR138_64", [ VGPR138,VGPR139]>;
+def VGPR140_64 : VGPR_64 <140, "VGPR140_64", [ VGPR140,VGPR141]>;
+def VGPR142_64 : VGPR_64 <142, "VGPR142_64", [ VGPR142,VGPR143]>;
+def VGPR144_64 : VGPR_64 <144, "VGPR144_64", [ VGPR144,VGPR145]>;
+def VGPR146_64 : VGPR_64 <146, "VGPR146_64", [ VGPR146,VGPR147]>;
+def VGPR148_64 : VGPR_64 <148, "VGPR148_64", [ VGPR148,VGPR149]>;
+def VGPR150_64 : VGPR_64 <150, "VGPR150_64", [ VGPR150,VGPR151]>;
+def VGPR152_64 : VGPR_64 <152, "VGPR152_64", [ VGPR152,VGPR153]>;
+def VGPR154_64 : VGPR_64 <154, "VGPR154_64", [ VGPR154,VGPR155]>;
+def VGPR156_64 : VGPR_64 <156, "VGPR156_64", [ VGPR156,VGPR157]>;
+def VGPR158_64 : VGPR_64 <158, "VGPR158_64", [ VGPR158,VGPR159]>;
+def VGPR160_64 : VGPR_64 <160, "VGPR160_64", [ VGPR160,VGPR161]>;
+def VGPR162_64 : VGPR_64 <162, "VGPR162_64", [ VGPR162,VGPR163]>;
+def VGPR164_64 : VGPR_64 <164, "VGPR164_64", [ VGPR164,VGPR165]>;
+def VGPR166_64 : VGPR_64 <166, "VGPR166_64", [ VGPR166,VGPR167]>;
+def VGPR168_64 : VGPR_64 <168, "VGPR168_64", [ VGPR168,VGPR169]>;
+def VGPR170_64 : VGPR_64 <170, "VGPR170_64", [ VGPR170,VGPR171]>;
+def VGPR172_64 : VGPR_64 <172, "VGPR172_64", [ VGPR172,VGPR173]>;
+def VGPR174_64 : VGPR_64 <174, "VGPR174_64", [ VGPR174,VGPR175]>;
+def VGPR176_64 : VGPR_64 <176, "VGPR176_64", [ VGPR176,VGPR177]>;
+def VGPR178_64 : VGPR_64 <178, "VGPR178_64", [ VGPR178,VGPR179]>;
+def VGPR180_64 : VGPR_64 <180, "VGPR180_64", [ VGPR180,VGPR181]>;
+def VGPR182_64 : VGPR_64 <182, "VGPR182_64", [ VGPR182,VGPR183]>;
+def VGPR184_64 : VGPR_64 <184, "VGPR184_64", [ VGPR184,VGPR185]>;
+def VGPR186_64 : VGPR_64 <186, "VGPR186_64", [ VGPR186,VGPR187]>;
+def VGPR188_64 : VGPR_64 <188, "VGPR188_64", [ VGPR188,VGPR189]>;
+def VGPR190_64 : VGPR_64 <190, "VGPR190_64", [ VGPR190,VGPR191]>;
+def VGPR192_64 : VGPR_64 <192, "VGPR192_64", [ VGPR192,VGPR193]>;
+def VGPR194_64 : VGPR_64 <194, "VGPR194_64", [ VGPR194,VGPR195]>;
+def VGPR196_64 : VGPR_64 <196, "VGPR196_64", [ VGPR196,VGPR197]>;
+def VGPR198_64 : VGPR_64 <198, "VGPR198_64", [ VGPR198,VGPR199]>;
+def VGPR200_64 : VGPR_64 <200, "VGPR200_64", [ VGPR200,VGPR201]>;
+def VGPR202_64 : VGPR_64 <202, "VGPR202_64", [ VGPR202,VGPR203]>;
+def VGPR204_64 : VGPR_64 <204, "VGPR204_64", [ VGPR204,VGPR205]>;
+def VGPR206_64 : VGPR_64 <206, "VGPR206_64", [ VGPR206,VGPR207]>;
+def VGPR208_64 : VGPR_64 <208, "VGPR208_64", [ VGPR208,VGPR209]>;
+def VGPR210_64 : VGPR_64 <210, "VGPR210_64", [ VGPR210,VGPR211]>;
+def VGPR212_64 : VGPR_64 <212, "VGPR212_64", [ VGPR212,VGPR213]>;
+def VGPR214_64 : VGPR_64 <214, "VGPR214_64", [ VGPR214,VGPR215]>;
+def VGPR216_64 : VGPR_64 <216, "VGPR216_64", [ VGPR216,VGPR217]>;
+def VGPR218_64 : VGPR_64 <218, "VGPR218_64", [ VGPR218,VGPR219]>;
+def VGPR220_64 : VGPR_64 <220, "VGPR220_64", [ VGPR220,VGPR221]>;
+def VGPR222_64 : VGPR_64 <222, "VGPR222_64", [ VGPR222,VGPR223]>;
+def VGPR224_64 : VGPR_64 <224, "VGPR224_64", [ VGPR224,VGPR225]>;
+def VGPR226_64 : VGPR_64 <226, "VGPR226_64", [ VGPR226,VGPR227]>;
+def VGPR228_64 : VGPR_64 <228, "VGPR228_64", [ VGPR228,VGPR229]>;
+def VGPR230_64 : VGPR_64 <230, "VGPR230_64", [ VGPR230,VGPR231]>;
+def VGPR232_64 : VGPR_64 <232, "VGPR232_64", [ VGPR232,VGPR233]>;
+def VGPR234_64 : VGPR_64 <234, "VGPR234_64", [ VGPR234,VGPR235]>;
+def VGPR236_64 : VGPR_64 <236, "VGPR236_64", [ VGPR236,VGPR237]>;
+def VGPR238_64 : VGPR_64 <238, "VGPR238_64", [ VGPR238,VGPR239]>;
+def VGPR240_64 : VGPR_64 <240, "VGPR240_64", [ VGPR240,VGPR241]>;
+def VGPR242_64 : VGPR_64 <242, "VGPR242_64", [ VGPR242,VGPR243]>;
+def VGPR244_64 : VGPR_64 <244, "VGPR244_64", [ VGPR244,VGPR245]>;
+def VGPR246_64 : VGPR_64 <246, "VGPR246_64", [ VGPR246,VGPR247]>;
+def VGPR248_64 : VGPR_64 <248, "VGPR248_64", [ VGPR248,VGPR249]>;
+def VGPR250_64 : VGPR_64 <250, "VGPR250_64", [ VGPR250,VGPR251]>;
+def VGPR252_64 : VGPR_64 <252, "VGPR252_64", [ VGPR252,VGPR253]>;
+def VGPR254_64 : VGPR_64 <254, "VGPR254_64", [ VGPR254,VGPR255]>;
+def VReg_64 : RegisterClass<"AMDIL", [i64], 64,
+  (add VGPR0_64
+, VGPR2_64, VGPR4_64, VGPR6_64, VGPR8_64, VGPR10_64
+, VGPR12_64, VGPR14_64, VGPR16_64, VGPR18_64, VGPR20_64
+, VGPR22_64, VGPR24_64, VGPR26_64, VGPR28_64, VGPR30_64
+, VGPR32_64, VGPR34_64, VGPR36_64, VGPR38_64, VGPR40_64
+, VGPR42_64, VGPR44_64, VGPR46_64, VGPR48_64, VGPR50_64
+, VGPR52_64, VGPR54_64, VGPR56_64, VGPR58_64, VGPR60_64
+, VGPR62_64, VGPR64_64, VGPR66_64, VGPR68_64, VGPR70_64
+, VGPR72_64, VGPR74_64, VGPR76_64, VGPR78_64, VGPR80_64
+, VGPR82_64, VGPR84_64, VGPR86_64, VGPR88_64, VGPR90_64
+, VGPR92_64, VGPR94_64, VGPR96_64, VGPR98_64, VGPR100_64
+, VGPR102_64, VGPR104_64, VGPR106_64, VGPR108_64, VGPR110_64
+, VGPR112_64, VGPR114_64, VGPR116_64, VGPR118_64, VGPR120_64
+, VGPR122_64, VGPR124_64, VGPR126_64, VGPR128_64, VGPR130_64
+, VGPR132_64, VGPR134_64, VGPR136_64, VGPR138_64, VGPR140_64
+, VGPR142_64, VGPR144_64, VGPR146_64, VGPR148_64, VGPR150_64
+, VGPR152_64, VGPR154_64, VGPR156_64, VGPR158_64, VGPR160_64
+, VGPR162_64, VGPR164_64, VGPR166_64, VGPR168_64, VGPR170_64
+, VGPR172_64, VGPR174_64, VGPR176_64, VGPR178_64, VGPR180_64
+, VGPR182_64, VGPR184_64, VGPR186_64, VGPR188_64, VGPR190_64
+, VGPR192_64, VGPR194_64, VGPR196_64, VGPR198_64, VGPR200_64
+, VGPR202_64, VGPR204_64, VGPR206_64, VGPR208_64, VGPR210_64
+, VGPR212_64, VGPR214_64, VGPR216_64, VGPR218_64, VGPR220_64
+, VGPR222_64, VGPR224_64, VGPR226_64, VGPR228_64, VGPR230_64
+, VGPR232_64, VGPR234_64, VGPR236_64, VGPR238_64, VGPR240_64
+, VGPR242_64, VGPR244_64, VGPR246_64, VGPR248_64, VGPR250_64
+, VGPR252_64, VGPR254_64)
+>{
+}
+def VGPR0_128 : VGPR_128 <0, "VGPR0_128", [ VGPR0,VGPR1,VGPR2,VGPR3]>;
+def VGPR4_128 : VGPR_128 <4, "VGPR4_128", [ VGPR4,VGPR5,VGPR6,VGPR7]>;
+def VGPR8_128 : VGPR_128 <8, "VGPR8_128", [ VGPR8,VGPR9,VGPR10,VGPR11]>;
+def VGPR12_128 : VGPR_128 <12, "VGPR12_128", [ VGPR12,VGPR13,VGPR14,VGPR15]>;
+def VGPR16_128 : VGPR_128 <16, "VGPR16_128", [ VGPR16,VGPR17,VGPR18,VGPR19]>;
+def VGPR20_128 : VGPR_128 <20, "VGPR20_128", [ VGPR20,VGPR21,VGPR22,VGPR23]>;
+def VGPR24_128 : VGPR_128 <24, "VGPR24_128", [ VGPR24,VGPR25,VGPR26,VGPR27]>;
+def VGPR28_128 : VGPR_128 <28, "VGPR28_128", [ VGPR28,VGPR29,VGPR30,VGPR31]>;
+def VGPR32_128 : VGPR_128 <32, "VGPR32_128", [ VGPR32,VGPR33,VGPR34,VGPR35]>;
+def VGPR36_128 : VGPR_128 <36, "VGPR36_128", [ VGPR36,VGPR37,VGPR38,VGPR39]>;
+def VGPR40_128 : VGPR_128 <40, "VGPR40_128", [ VGPR40,VGPR41,VGPR42,VGPR43]>;
+def VGPR44_128 : VGPR_128 <44, "VGPR44_128", [ VGPR44,VGPR45,VGPR46,VGPR47]>;
+def VGPR48_128 : VGPR_128 <48, "VGPR48_128", [ VGPR48,VGPR49,VGPR50,VGPR51]>;
+def VGPR52_128 : VGPR_128 <52, "VGPR52_128", [ VGPR52,VGPR53,VGPR54,VGPR55]>;
+def VGPR56_128 : VGPR_128 <56, "VGPR56_128", [ VGPR56,VGPR57,VGPR58,VGPR59]>;
+def VGPR60_128 : VGPR_128 <60, "VGPR60_128", [ VGPR60,VGPR61,VGPR62,VGPR63]>;
+def VGPR64_128 : VGPR_128 <64, "VGPR64_128", [ VGPR64,VGPR65,VGPR66,VGPR67]>;
+def VGPR68_128 : VGPR_128 <68, "VGPR68_128", [ VGPR68,VGPR69,VGPR70,VGPR71]>;
+def VGPR72_128 : VGPR_128 <72, "VGPR72_128", [ VGPR72,VGPR73,VGPR74,VGPR75]>;
+def VGPR76_128 : VGPR_128 <76, "VGPR76_128", [ VGPR76,VGPR77,VGPR78,VGPR79]>;
+def VGPR80_128 : VGPR_128 <80, "VGPR80_128", [ VGPR80,VGPR81,VGPR82,VGPR83]>;
+def VGPR84_128 : VGPR_128 <84, "VGPR84_128", [ VGPR84,VGPR85,VGPR86,VGPR87]>;
+def VGPR88_128 : VGPR_128 <88, "VGPR88_128", [ VGPR88,VGPR89,VGPR90,VGPR91]>;
+def VGPR92_128 : VGPR_128 <92, "VGPR92_128", [ VGPR92,VGPR93,VGPR94,VGPR95]>;
+def VGPR96_128 : VGPR_128 <96, "VGPR96_128", [ VGPR96,VGPR97,VGPR98,VGPR99]>;
+def VGPR100_128 : VGPR_128 <100, "VGPR100_128", [ VGPR100,VGPR101,VGPR102,VGPR103]>;
+def VGPR104_128 : VGPR_128 <104, "VGPR104_128", [ VGPR104,VGPR105,VGPR106,VGPR107]>;
+def VGPR108_128 : VGPR_128 <108, "VGPR108_128", [ VGPR108,VGPR109,VGPR110,VGPR111]>;
+def VGPR112_128 : VGPR_128 <112, "VGPR112_128", [ VGPR112,VGPR113,VGPR114,VGPR115]>;
+def VGPR116_128 : VGPR_128 <116, "VGPR116_128", [ VGPR116,VGPR117,VGPR118,VGPR119]>;
+def VGPR120_128 : VGPR_128 <120, "VGPR120_128", [ VGPR120,VGPR121,VGPR122,VGPR123]>;
+def VGPR124_128 : VGPR_128 <124, "VGPR124_128", [ VGPR124,VGPR125,VGPR126,VGPR127]>;
+def VGPR128_128 : VGPR_128 <128, "VGPR128_128", [ VGPR128,VGPR129,VGPR130,VGPR131]>;
+def VGPR132_128 : VGPR_128 <132, "VGPR132_128", [ VGPR132,VGPR133,VGPR134,VGPR135]>;
+def VGPR136_128 : VGPR_128 <136, "VGPR136_128", [ VGPR136,VGPR137,VGPR138,VGPR139]>;
+def VGPR140_128 : VGPR_128 <140, "VGPR140_128", [ VGPR140,VGPR141,VGPR142,VGPR143]>;
+def VGPR144_128 : VGPR_128 <144, "VGPR144_128", [ VGPR144,VGPR145,VGPR146,VGPR147]>;
+def VGPR148_128 : VGPR_128 <148, "VGPR148_128", [ VGPR148,VGPR149,VGPR150,VGPR151]>;
+def VGPR152_128 : VGPR_128 <152, "VGPR152_128", [ VGPR152,VGPR153,VGPR154,VGPR155]>;
+def VGPR156_128 : VGPR_128 <156, "VGPR156_128", [ VGPR156,VGPR157,VGPR158,VGPR159]>;
+def VGPR160_128 : VGPR_128 <160, "VGPR160_128", [ VGPR160,VGPR161,VGPR162,VGPR163]>;
+def VGPR164_128 : VGPR_128 <164, "VGPR164_128", [ VGPR164,VGPR165,VGPR166,VGPR167]>;
+def VGPR168_128 : VGPR_128 <168, "VGPR168_128", [ VGPR168,VGPR169,VGPR170,VGPR171]>;
+def VGPR172_128 : VGPR_128 <172, "VGPR172_128", [ VGPR172,VGPR173,VGPR174,VGPR175]>;
+def VGPR176_128 : VGPR_128 <176, "VGPR176_128", [ VGPR176,VGPR177,VGPR178,VGPR179]>;
+def VGPR180_128 : VGPR_128 <180, "VGPR180_128", [ VGPR180,VGPR181,VGPR182,VGPR183]>;
+def VGPR184_128 : VGPR_128 <184, "VGPR184_128", [ VGPR184,VGPR185,VGPR186,VGPR187]>;
+def VGPR188_128 : VGPR_128 <188, "VGPR188_128", [ VGPR188,VGPR189,VGPR190,VGPR191]>;
+def VGPR192_128 : VGPR_128 <192, "VGPR192_128", [ VGPR192,VGPR193,VGPR194,VGPR195]>;
+def VGPR196_128 : VGPR_128 <196, "VGPR196_128", [ VGPR196,VGPR197,VGPR198,VGPR199]>;
+def VGPR200_128 : VGPR_128 <200, "VGPR200_128", [ VGPR200,VGPR201,VGPR202,VGPR203]>;
+def VGPR204_128 : VGPR_128 <204, "VGPR204_128", [ VGPR204,VGPR205,VGPR206,VGPR207]>;
+def VGPR208_128 : VGPR_128 <208, "VGPR208_128", [ VGPR208,VGPR209,VGPR210,VGPR211]>;
+def VGPR212_128 : VGPR_128 <212, "VGPR212_128", [ VGPR212,VGPR213,VGPR214,VGPR215]>;
+def VGPR216_128 : VGPR_128 <216, "VGPR216_128", [ VGPR216,VGPR217,VGPR218,VGPR219]>;
+def VGPR220_128 : VGPR_128 <220, "VGPR220_128", [ VGPR220,VGPR221,VGPR222,VGPR223]>;
+def VGPR224_128 : VGPR_128 <224, "VGPR224_128", [ VGPR224,VGPR225,VGPR226,VGPR227]>;
+def VGPR228_128 : VGPR_128 <228, "VGPR228_128", [ VGPR228,VGPR229,VGPR230,VGPR231]>;
+def VGPR232_128 : VGPR_128 <232, "VGPR232_128", [ VGPR232,VGPR233,VGPR234,VGPR235]>;
+def VGPR236_128 : VGPR_128 <236, "VGPR236_128", [ VGPR236,VGPR237,VGPR238,VGPR239]>;
+def VGPR240_128 : VGPR_128 <240, "VGPR240_128", [ VGPR240,VGPR241,VGPR242,VGPR243]>;
+def VGPR244_128 : VGPR_128 <244, "VGPR244_128", [ VGPR244,VGPR245,VGPR246,VGPR247]>;
+def VGPR248_128 : VGPR_128 <248, "VGPR248_128", [ VGPR248,VGPR249,VGPR250,VGPR251]>;
+def VGPR252_128 : VGPR_128 <252, "VGPR252_128", [ VGPR252,VGPR253,VGPR254,VGPR255]>;
+def VReg_128 : RegisterClass<"AMDIL", [v4f32], 128,
+  (add VGPR0_128
+, VGPR4_128, VGPR8_128, VGPR12_128, VGPR16_128, VGPR20_128
+, VGPR24_128, VGPR28_128, VGPR32_128, VGPR36_128, VGPR40_128
+, VGPR44_128, VGPR48_128, VGPR52_128, VGPR56_128, VGPR60_128
+, VGPR64_128, VGPR68_128, VGPR72_128, VGPR76_128, VGPR80_128
+, VGPR84_128, VGPR88_128, VGPR92_128, VGPR96_128, VGPR100_128
+, VGPR104_128, VGPR108_128, VGPR112_128, VGPR116_128, VGPR120_128
+, VGPR124_128, VGPR128_128, VGPR132_128, VGPR136_128, VGPR140_128
+, VGPR144_128, VGPR148_128, VGPR152_128, VGPR156_128, VGPR160_128
+, VGPR164_128, VGPR168_128, VGPR172_128, VGPR176_128, VGPR180_128
+, VGPR184_128, VGPR188_128, VGPR192_128, VGPR196_128, VGPR200_128
+, VGPR204_128, VGPR208_128, VGPR212_128, VGPR216_128, VGPR220_128
+, VGPR224_128, VGPR228_128, VGPR232_128, VGPR236_128, VGPR240_128
+, VGPR244_128, VGPR248_128, VGPR252_128)
+>{
+}
+
+def AllReg_64 : RegisterClass<"AMDIL", [f64, i64], 64,
+    (add SGPR0_64
+,SGPR2_64,SGPR4_64,SGPR6_64,SGPR8_64,SGPR10_64
+,SGPR12_64,SGPR14_64,SGPR16_64,SGPR18_64,SGPR20_64
+,SGPR22_64,SGPR24_64,SGPR26_64,SGPR28_64,SGPR30_64
+,SGPR32_64,SGPR34_64,SGPR36_64,SGPR38_64,SGPR40_64
+,SGPR42_64,SGPR44_64,SGPR46_64,SGPR48_64,SGPR50_64
+,SGPR52_64,SGPR54_64,SGPR56_64,SGPR58_64,SGPR60_64
+,SGPR62_64,SGPR64_64,SGPR66_64,SGPR68_64,SGPR70_64
+,SGPR72_64,SGPR74_64,SGPR76_64,SGPR78_64,SGPR80_64
+,SGPR82_64,SGPR84_64,SGPR86_64,SGPR88_64,SGPR90_64
+,SGPR92_64,SGPR94_64,SGPR96_64,SGPR98_64,SGPR100_64
+,SGPR102_64, VGPR0_64
+,VGPR2_64,VGPR4_64,VGPR6_64,VGPR8_64,VGPR10_64
+,VGPR12_64,VGPR14_64,VGPR16_64,VGPR18_64,VGPR20_64
+,VGPR22_64,VGPR24_64,VGPR26_64,VGPR28_64,VGPR30_64
+,VGPR32_64,VGPR34_64,VGPR36_64,VGPR38_64,VGPR40_64
+,VGPR42_64,VGPR44_64,VGPR46_64,VGPR48_64,VGPR50_64
+,VGPR52_64,VGPR54_64,VGPR56_64,VGPR58_64,VGPR60_64
+,VGPR62_64,VGPR64_64,VGPR66_64,VGPR68_64,VGPR70_64
+,VGPR72_64,VGPR74_64,VGPR76_64,VGPR78_64,VGPR80_64
+,VGPR82_64,VGPR84_64,VGPR86_64,VGPR88_64,VGPR90_64
+,VGPR92_64,VGPR94_64,VGPR96_64,VGPR98_64,VGPR100_64
+,VGPR102_64,VGPR104_64,VGPR106_64,VGPR108_64,VGPR110_64
+,VGPR112_64,VGPR114_64,VGPR116_64,VGPR118_64,VGPR120_64
+,VGPR122_64,VGPR124_64,VGPR126_64,VGPR128_64,VGPR130_64
+,VGPR132_64,VGPR134_64,VGPR136_64,VGPR138_64,VGPR140_64
+,VGPR142_64,VGPR144_64,VGPR146_64,VGPR148_64,VGPR150_64
+,VGPR152_64,VGPR154_64,VGPR156_64,VGPR158_64,VGPR160_64
+,VGPR162_64,VGPR164_64,VGPR166_64,VGPR168_64,VGPR170_64
+,VGPR172_64,VGPR174_64,VGPR176_64,VGPR178_64,VGPR180_64
+,VGPR182_64,VGPR184_64,VGPR186_64,VGPR188_64,VGPR190_64
+,VGPR192_64,VGPR194_64,VGPR196_64,VGPR198_64,VGPR200_64
+,VGPR202_64,VGPR204_64,VGPR206_64,VGPR208_64,VGPR210_64
+,VGPR212_64,VGPR214_64,VGPR216_64,VGPR218_64,VGPR220_64
+,VGPR222_64,VGPR224_64,VGPR226_64,VGPR228_64,VGPR230_64
+,VGPR232_64,VGPR234_64,VGPR236_64,VGPR238_64,VGPR240_64
+,VGPR242_64,VGPR244_64,VGPR246_64,VGPR248_64,VGPR250_64
+,VGPR252_64,VGPR254_64)
+>;
+
diff --git a/lib/Target/AMDGPU/SISchedule.td b/lib/Target/AMDGPU/SISchedule.td
new file mode 100644
index 0000000..28b65b8
--- /dev/null
+++ b/lib/Target/AMDGPU/SISchedule.td
@@ -0,0 +1,15 @@
+//===-- SISchedule.td - SI Scheduling definitions ------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: This is just a place holder for now.
+//
+//===----------------------------------------------------------------------===//
+
+
+def SI_Itin : ProcessorItineraries <[], [], []>;
diff --git a/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
new file mode 100644
index 0000000..380e7de
--- /dev/null
+++ b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
@@ -0,0 +1,26 @@
+//===-- TargetInfo/AMDGPUTargetInfo.cpp - AMDGPU Target Registration --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Registers the AMDGPU (R600) target with LLVM's TargetRegistry.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+/// The target for the AMDGPU backend
+Target llvm::TheAMDGPUTarget;
+
+/// Extern function to initialize the targets for the AMDGPU backend
+extern "C" void LLVMInitializeAMDGPUTargetInfo() {
+  RegisterTarget<Triple::r600, false>
+    R600(TheAMDGPUTarget, "r600", "AMD GPUs HD2XXX-HD6XXX");
+}
diff --git a/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt b/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
new file mode 100644
index 0000000..bf87499
--- /dev/null
+++ b/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMAMDGPUInfo
+  AMDGPUTargetInfo.cpp
+  )
+
+add_dependencies(LLVMAMDGPUInfo AMDGPUCodeGenTable_gen)
diff --git a/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt b/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
new file mode 100644
index 0000000..3dfca10
--- /dev/null
+++ b/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
+;
+;                     The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+;   http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = AMDGPUInfo
+parent = AMDGPU
+required_libraries = MC Support
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/TargetInfo/Makefile b/lib/Target/AMDGPU/TargetInfo/Makefile
new file mode 100644
index 0000000..1b23287
--- /dev/null
+++ b/lib/Target/AMDGPU/TargetInfo/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Target/AMDGPU/TargetInfo/Makefile ----------------*- Makefile -*-===##
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../../..
+LIBRARYNAME = LLVMAMDGPUInfo
+
+# Hack: we need to include 'main' target directory to grab private headers
+CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDIL/CMakeLists.txt b/lib/Target/AMDIL/CMakeLists.txt
new file mode 100644
index 0000000..dac9fe0
--- /dev/null
+++ b/lib/Target/AMDIL/CMakeLists.txt
@@ -0,0 +1,61 @@
+set(LLVM_TARGET_DEFINITIONS AMDIL.td)
+
+tablegen(LLVM AMDILGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM AMDILGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM AMDILGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM AMDILGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM AMDILGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM AMDILGenSubtargetInfo.inc -gen-subtarget)
+tablegen(LLVM AMDILGenEDInfo.inc -gen-enhanced-disassembly-info)
+tablegen(LLVM AMDILGenIntrinsics.inc -gen-tgt-intrinsic)
+tablegen(LLVM AMDILGenCodeEmitter.inc -gen-emitter)
+add_public_tablegen_target(AMDILCommonTableGen)
+
+add_llvm_target(AMDILCodeGen
+  AMDIL7XXDevice.cpp
+  AMDILCFGStructurizer.cpp
+  AMDILDevice.cpp
+  AMDILDeviceInfo.cpp
+  AMDILEvergreenDevice.cpp
+  AMDILFrameLowering.cpp
+  AMDILInstrInfo.cpp
+  AMDILIntrinsicInfo.cpp
+  AMDILISelDAGToDAG.cpp
+  AMDILISelLowering.cpp
+  AMDILMachinePeephole.cpp
+  AMDILMCCodeEmitter.cpp
+  AMDILNIDevice.cpp
+  AMDILPeepholeOptimizer.cpp
+  AMDILRegisterInfo.cpp
+  AMDILSIDevice.cpp
+  AMDILSubtarget.cpp
+  AMDILTargetMachine.cpp
+  AMDILUtilityFunctions.cpp
+  AMDGPUTargetMachine.cpp
+  AMDGPUISelLowering.cpp
+  AMDGPUConvertToISA.cpp
+  AMDGPULowerInstructions.cpp
+  AMDGPULowerShaderInstructions.cpp
+  AMDGPUReorderPreloadInstructions.cpp
+  AMDGPUInstrInfo.cpp
+  AMDGPURegisterInfo.cpp
+  AMDGPUUtil.cpp
+  R600CodeEmitter.cpp
+  R600InstrInfo.cpp
+  R600ISelLowering.cpp
+  R600KernelParameters.cpp
+  R600LowerInstructions.cpp
+  R600LowerShaderInstructions.cpp
+  R600RegisterInfo.cpp
+  SIAssignInterpRegs.cpp
+  SICodeEmitter.cpp
+  SIInstrInfo.cpp
+  SIISelLowering.cpp
+  SILowerShaderInstructions.cpp
+  SIMachineFunctionInfo.cpp
+  SIPropagateImmReads.cpp
+  SIRegisterInfo.cpp
+  )
+
+add_subdirectory(TargetInfo)
+add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/AMDIL/LLVMBuild.txt b/lib/Target/AMDIL/LLVMBuild.txt
new file mode 100644
index 0000000..ef39aa1
--- /dev/null
+++ b/lib/Target/AMDIL/LLVMBuild.txt
@@ -0,0 +1,32 @@
+;===- ./lib/Target/AMDIL/LLVMBuild.txt -------------------------*- Conf -*--===;
+;
+;                     The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+;   http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[common]
+subdirectories = MCTargetDesc TargetInfo
+
+[component_0]
+type = TargetGroup
+name = AMDIL
+parent = Target
+has_asmprinter = 0
+
+[component_1]
+type = Library
+name = AMDILCodeGen
+parent = AMDIL
+required_libraries = AsmPrinter CodeGen Core SelectionDAG Support Target MC AMDILInfo AMDILDesc
+add_to_library_groups = AMDIL
-- 
1.7.7.6




More information about the llvm-commits mailing list